content
stringlengths
0
14.9M
filename
stringlengths
44
136
--- title: "Example of global variable importance" author: "Anna Kozak" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Example of global variable importance} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Example of global variable importance In this vignette, we present a global variable importance measure based on Partial Dependence Profiles (PDP) for the random forest regression model. ```{r, include=FALSE, warning=FALSE, error=FALSE, message=FALSE} library("ggplot2") ``` ### 1 Dataset We work on Apartments dataset from `DALEX` package. ```{r, warning = FALSE, echo = FALSE, message = FALSE, include = TRUE} library("DALEX") data(apartments) head(apartments) ``` ### 2 Random forest regression model Now, we define a random forest regression model and use `explain()` function from `DALEX`. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} library("randomForest") apartments_rf_model <- randomForest(m2.price ~ construction.year + surface + floor + no.rooms, data = apartments) explainer_rf <- explain(apartments_rf_model, data = apartmentsTest[,2:5], y = apartmentsTest$m2.price) ``` ### 3 Calculate Partial Dependence Profiles Let see the Partial Dependence Profiles calculated with `DALEX::model_profile()` function. The PDP also can be calculated with `DALEX::variable_profile()` or `ingredients::partial_dependence()`. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} profiles <- model_profile(explainer_rf) plot(profiles) ``` ### 4 Calculate measure of global variable importance Now, we calculated a measure of global variable importance via oscillation based on PDP. 
```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} library("vivo") measure <- global_variable_importance(profiles) ``` ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} plot(measure) ``` The most important variable is surface, then no.rooms, floor, and construction.year. ### 5 Comparison of the importance of variables for two or more models Let created a linear regression model and `explain` object. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} apartments_lm_model <- lm(m2.price ~ construction.year + surface + floor + no.rooms, data = apartments) explainer_lm <- explain(apartments_lm_model, data = apartmentsTest[,2:5], y = apartmentsTest$m2.price) ``` We calculated Partial Dependence Profiles and measure. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} profiles_lm <- model_profile(explainer_lm) measure_lm <- global_variable_importance(profiles_lm) ``` ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} plot(measure_lm, measure, type = "lines") ``` Now we can see the order of importance of variables by model.
/scratch/gouwar.j/cran-all/cranData/vivo/inst/doc/vignette_apartments_global.Rmd
# Auto-generated by knitr::purl() from vignette_apartments_local.Rmd.
# Do not edit by hand -- edit the vignette source instead.

## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ---- include=FALSE, warning=FALSE, error=FALSE, message=FALSE----------------
library("ggplot2")

## ---- warning = FALSE, echo = FALSE, message = FALSE, include = TRUE----------
library("DALEX")
data(apartments)
head(apartments)

## ---- warning = FALSE, error = FALSE, message = FALSE, include = TRUE---------
library("randomForest")
apartments_rf_model <- randomForest(m2.price ~ construction.year + surface + floor + no.rooms, data = apartments)
explainer_rf <- explain(apartments_rf_model, data = apartmentsTest[,2:5], y = apartmentsTest$m2.price)

## ---- warning = FALSE, error = FALSE, message = FALSE, include = TRUE---------
new_apartment <- data.frame(construction.year = 1998, surface = 88, floor = 2L, no.rooms = 3)
predict(apartments_rf_model, new_apartment)

## ---- warning = FALSE, error = FALSE, message = FALSE, include = TRUE---------
library("ingredients")
profiles <- predict_profile(explainer_rf, new_apartment)
plot(profiles) + show_observations(profiles)

## ---- warning = FALSE, error = FALSE, message = FALSE, include = TRUE---------
library("vivo")
measure <- local_variable_importance(profiles, apartments[,2:5], absolute_deviation = TRUE, point = TRUE, density = TRUE)

## ---- warning = FALSE, error = FALSE, message = FALSE, include = TRUE---------
plot(measure)

## ---- warning = FALSE, error = FALSE, message = FALSE, include = TRUE---------
measure_2 <- local_variable_importance(profiles, apartments[,2:5], absolute_deviation = FALSE, point = TRUE, density = TRUE)
measure_3 <- local_variable_importance(profiles, apartments[,2:5], absolute_deviation = FALSE, point = TRUE, density = FALSE)

## ---- warning = FALSE, error = FALSE, message = FALSE, include = TRUE---------
plot(measure, measure_2, measure_3, color = "_label_method_")

## ---- warning = FALSE, error = FALSE, message = FALSE, include = TRUE---------
plot(measure, measure_2, measure_3, color = "_label_method_", type = "lines")

## ---- warning = FALSE, error = FALSE, message = FALSE, include = TRUE---------
apartments_lm_model <- lm(m2.price ~ construction.year + surface + floor + no.rooms, data = apartments)
explainer_lm <- explain(apartments_lm_model, data = apartmentsTest[,2:5], y = apartmentsTest$m2.price)

## ---- warning = FALSE, error = FALSE, message = FALSE, include = TRUE---------
profiles_lm <- predict_profile(explainer_lm, new_apartment)
measure_lm <- local_variable_importance(profiles_lm, apartments[,2:5], absolute_deviation = TRUE, point = TRUE, density = TRUE)

## ---- warning = FALSE, error = FALSE, message = FALSE, include = TRUE---------
plot(measure, measure_lm, color = "_label_model_", type = "lines")
/scratch/gouwar.j/cran-all/cranData/vivo/inst/doc/vignette_apartments_local.R
--- title: "Example of local variable importance" author: "Anna Kozak" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Example of local variable importance} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Example of local variable importance In this vignette, we present a local variable importance measure based on Ceteris Paribus profiles for random forest regression model. ```{r, include=FALSE, warning=FALSE, error=FALSE, message=FALSE} library("ggplot2") ``` ### 1 Dataset We work on Apartments dataset from `DALEX` package. ```{r, warning = FALSE, echo = FALSE, message = FALSE, include = TRUE} library("DALEX") data(apartments) head(apartments) ``` ### 2 Random forest regression model Now, we define a random forest regression model and use explain from `DALEX`. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} library("randomForest") apartments_rf_model <- randomForest(m2.price ~ construction.year + surface + floor + no.rooms, data = apartments) explainer_rf <- explain(apartments_rf_model, data = apartmentsTest[,2:5], y = apartmentsTest$m2.price) ``` ### 3 New observation We need to specify an observation. Let consider a new apartment with the following attributes. Moreover, we calculate predict value for this new observation. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} new_apartment <- data.frame(construction.year = 1998, surface = 88, floor = 2L, no.rooms = 3) predict(apartments_rf_model, new_apartment) ``` ### 4 Calculate Ceteris Paribus profiles Let see the Ceteris Paribus Plots calculated with `DALEX::predict_profile()` function. The CP also can be calculated with `DALEX::individual_profile()` or `ingredients::ceteris_paribus()`. 
```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} library("ingredients") profiles <- predict_profile(explainer_rf, new_apartment) plot(profiles) + show_observations(profiles) ``` ### 5 Calculate measure of local variable importance Now, we calculated a measure of local variable importance via oscillation based on Ceteris Paribus profiles. We use variant with all parameters equals to TRUE. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} library("vivo") measure <- local_variable_importance(profiles, apartments[,2:5], absolute_deviation = TRUE, point = TRUE, density = TRUE) ``` ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} plot(measure) ``` For the new observation the most important variable is surface, then floor, construction.year and no.rooms. ### 6 Comparison of two or more methods of calculating the importance of variables We calculated local variable importance for different parameters and we can plot together, on bar plot or lines plot. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} measure_2 <- local_variable_importance(profiles, apartments[,2:5], absolute_deviation = FALSE, point = TRUE, density = TRUE) measure_3 <- local_variable_importance(profiles, apartments[,2:5], absolute_deviation = FALSE, point = TRUE, density = FALSE) ``` ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} plot(measure, measure_2, measure_3, color = "_label_method_") ``` ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} plot(measure, measure_2, measure_3, color = "_label_method_", type = "lines") ``` ### 7 Comparison of the importance of variables for two or more models Let created a linear regression model and `explain` object. 
```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} apartments_lm_model <- lm(m2.price ~ construction.year + surface + floor + no.rooms, data = apartments) explainer_lm <- explain(apartments_lm_model, data = apartmentsTest[,2:5], y = apartmentsTest$m2.price) ``` We calculated Ceteris Paribus profiles and measure. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} profiles_lm <- predict_profile(explainer_lm, new_apartment) measure_lm <- local_variable_importance(profiles_lm, apartments[,2:5], absolute_deviation = TRUE, point = TRUE, density = TRUE) ``` ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} plot(measure, measure_lm, color = "_label_model_", type = "lines") ``` Now we can see the order of importance of variables by model for selected observation.
/scratch/gouwar.j/cran-all/cranData/vivo/inst/doc/vignette_apartments_local.Rmd
--- title: "Example of global variable importance" author: "Anna Kozak" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Example of global variable importance} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Example of global variable importance In this vignette, we present a global variable importance measure based on Partial Dependence Profiles (PDP) for the random forest regression model. ```{r, include=FALSE, warning=FALSE, error=FALSE, message=FALSE} library("ggplot2") ``` ### 1 Dataset We work on Apartments dataset from `DALEX` package. ```{r, warning = FALSE, echo = FALSE, message = FALSE, include = TRUE} library("DALEX") data(apartments) head(apartments) ``` ### 2 Random forest regression model Now, we define a random forest regression model and use `explain()` function from `DALEX`. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} library("randomForest") apartments_rf_model <- randomForest(m2.price ~ construction.year + surface + floor + no.rooms, data = apartments) explainer_rf <- explain(apartments_rf_model, data = apartmentsTest[,2:5], y = apartmentsTest$m2.price) ``` ### 3 Calculate Partial Dependence Profiles Let see the Partial Dependence Profiles calculated with `DALEX::model_profile()` function. The PDP also can be calculated with `DALEX::variable_profile()` or `ingredients::partial_dependence()`. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} profiles <- model_profile(explainer_rf) plot(profiles) ``` ### 4 Calculate measure of global variable importance Now, we calculated a measure of global variable importance via oscillation based on PDP. 
```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} library("vivo") measure <- global_variable_importance(profiles) ``` ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} plot(measure) ``` The most important variable is surface, then no.rooms, floor, and construction.year. ### 5 Comparison of the importance of variables for two or more models Let created a linear regression model and `explain` object. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} apartments_lm_model <- lm(m2.price ~ construction.year + surface + floor + no.rooms, data = apartments) explainer_lm <- explain(apartments_lm_model, data = apartmentsTest[,2:5], y = apartmentsTest$m2.price) ``` We calculated Partial Dependence Profiles and measure. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} profiles_lm <- model_profile(explainer_lm) measure_lm <- global_variable_importance(profiles_lm) ``` ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} plot(measure_lm, measure, type = "lines") ``` Now we can see the order of importance of variables by model.
/scratch/gouwar.j/cran-all/cranData/vivo/vignettes/vignette_apartments_global.Rmd
--- title: "Example of local variable importance" author: "Anna Kozak" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Example of local variable importance} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Example of local variable importance In this vignette, we present a local variable importance measure based on Ceteris Paribus profiles for random forest regression model. ```{r, include=FALSE, warning=FALSE, error=FALSE, message=FALSE} library("ggplot2") ``` ### 1 Dataset We work on Apartments dataset from `DALEX` package. ```{r, warning = FALSE, echo = FALSE, message = FALSE, include = TRUE} library("DALEX") data(apartments) head(apartments) ``` ### 2 Random forest regression model Now, we define a random forest regression model and use explain from `DALEX`. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} library("randomForest") apartments_rf_model <- randomForest(m2.price ~ construction.year + surface + floor + no.rooms, data = apartments) explainer_rf <- explain(apartments_rf_model, data = apartmentsTest[,2:5], y = apartmentsTest$m2.price) ``` ### 3 New observation We need to specify an observation. Let consider a new apartment with the following attributes. Moreover, we calculate predict value for this new observation. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} new_apartment <- data.frame(construction.year = 1998, surface = 88, floor = 2L, no.rooms = 3) predict(apartments_rf_model, new_apartment) ``` ### 4 Calculate Ceteris Paribus profiles Let see the Ceteris Paribus Plots calculated with `DALEX::predict_profile()` function. The CP also can be calculated with `DALEX::individual_profile()` or `ingredients::ceteris_paribus()`. 
```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} library("ingredients") profiles <- predict_profile(explainer_rf, new_apartment) plot(profiles) + show_observations(profiles) ``` ### 5 Calculate measure of local variable importance Now, we calculated a measure of local variable importance via oscillation based on Ceteris Paribus profiles. We use variant with all parameters equals to TRUE. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} library("vivo") measure <- local_variable_importance(profiles, apartments[,2:5], absolute_deviation = TRUE, point = TRUE, density = TRUE) ``` ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} plot(measure) ``` For the new observation the most important variable is surface, then floor, construction.year and no.rooms. ### 6 Comparison of two or more methods of calculating the importance of variables We calculated local variable importance for different parameters and we can plot together, on bar plot or lines plot. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} measure_2 <- local_variable_importance(profiles, apartments[,2:5], absolute_deviation = FALSE, point = TRUE, density = TRUE) measure_3 <- local_variable_importance(profiles, apartments[,2:5], absolute_deviation = FALSE, point = TRUE, density = FALSE) ``` ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} plot(measure, measure_2, measure_3, color = "_label_method_") ``` ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} plot(measure, measure_2, measure_3, color = "_label_method_", type = "lines") ``` ### 7 Comparison of the importance of variables for two or more models Let created a linear regression model and `explain` object. 
```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} apartments_lm_model <- lm(m2.price ~ construction.year + surface + floor + no.rooms, data = apartments) explainer_lm <- explain(apartments_lm_model, data = apartmentsTest[,2:5], y = apartmentsTest$m2.price) ``` We calculated Ceteris Paribus profiles and measure. ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} profiles_lm <- predict_profile(explainer_lm, new_apartment) measure_lm <- local_variable_importance(profiles_lm, apartments[,2:5], absolute_deviation = TRUE, point = TRUE, density = TRUE) ``` ```{r, warning = FALSE, error = FALSE, message = FALSE, include = TRUE} plot(measure, measure_lm, color = "_label_model_", type = "lines") ``` Now we can see the order of importance of variables by model for selected observation.
/scratch/gouwar.j/cran-all/cranData/vivo/vignettes/vignette_apartments_local.Rmd
dataframeToD3 <- function(df) { if (missing(df) || is.null(df)) { return(list()) } if (!is.data.frame(df)) { stop("vizdraws: the input must be a dataframe", call. = FALSE) } row.names(df) <- NULL apply(df, 1, function(row) as.list(row[!is.na(row)])) }
/scratch/gouwar.j/cran-all/cranData/vizdraws/R/dataframeToD3.R
utils::globalVariables(c("prob", "section", "y", "qbeta", "qgamma", "qnorm", "qt", "qunif"))
/scratch/gouwar.j/cran-all/cranData/vizdraws/R/globals.R
#' Visualize Probabilities using a Lollipop Chart
#'
#' This function creates a lollipop chart to visualize probabilities.
#'
#' @param data A data frame containing the probabilities to visualize.
#' @param plotBackgroundColor The background color of the plot.
#' @param plotBackgroundOpacity The opacity of the plot background.
#' @param title The title of the plot.
#' @param leftArea The label for the left area of the plot.
#' @param rightArea The label for the right area of the plot.
#' @param mediumText The font size for medium text elements.
#' @param bigText The font size for big text elements.
#' @param width The width of the widget (optional).
#' @param height The height of the widget (optional).
#' @param elementId The element ID of the widget (optional).
#' @param logoPath Logo path. Defaults to \code{NULL}.
#' @param logoSize Logo size. Defaults to \code{100}.
#' @param logoLocation Logo location. \code{c('bottom-left', 'top-left', 'top-right', 'bottom-right')}.
#' @param rightAreaText The tooltip text for the right area of the plot.
#' @param leftAreaText The tooltip text for the left area of the plot.
#'
#' @return A HTML widget object representing the lollipop chart.
#'
#' @details
#' The data frame must have three columns: `Name`, `Prior`, and `Posterior`.
#' The `Name` column gives the label of each outcome, while `Prior` and
#' `Posterior` give the corresponding probabilities (values in [0, 1]).
#'
#' @examples
#' data <- data.frame(
#'   Name = c("Outcome 1", "Outcome 2", "Outcome 3"),
#'   Prior = c(0.5, 0.5, 0.5),
#'   Posterior = c(0.2, 0.6, 0.9)
#' )
#' lollipops(data,
#'   logoPath = 'https://upload.wikimedia.org/wikipedia/commons/b/b8/YouTube_Logo_2017.svg',
#'   logoLocation = 'bottom-left')
#'
#' @export
#'
lollipops <- function(data,
                      plotBackgroundColor = "white",
                      plotBackgroundOpacity = 0.8,
                      title = "Probability of an impact",
                      leftArea = "Negative",
                      rightArea = "Positive",
                      mediumText = 18,
                      bigText = 28,
                      width = NULL,
                      height = NULL,
                      elementId = NULL,
                      logoPath = NULL,
                      logoSize = 100,
                      logoLocation = c('bottom-left', 'top-left', 'top-right', 'bottom-right'),
                      rightAreaText = "A positive impact is not necessarily a large impact.",
                      leftAreaText = "A negative impact is not necessarily a large impact.") {
  # Fail fast on malformed input before building the widget payload.
  verifyDataConditions(data)
  logoLocation <- match.arg(logoLocation)

  # Options forwarded to the JavaScript side of the widget.
  opts <- list(
    data = dataframeToD3(data.frame(data)),
    plotBackgroundColor = plotBackgroundColor,
    plotBackgroundOpacity = plotBackgroundOpacity,
    title = title,
    leftArea = leftArea,
    rightArea = rightArea,
    mediumText = mediumText,
    bigText = bigText,
    logoPath = logoPath,
    logoSize = logoSize,
    logoLocation = logoLocation,
    rightAreaText = rightAreaText,
    leftAreaText = leftAreaText
  )

  # Define sizing policy
  sizingPolicy <- htmlwidgets::sizingPolicy(
    defaultWidth = 400,
    defaultHeight = 400,
    browser.fill = TRUE
  )

  # create widget
  htmlwidgets::createWidget(
    name = 'lollipops',
    opts,
    width = width,
    height = height,
    package = 'vizdraws',
    elementId = elementId,
    sizingPolicy = sizingPolicy
  )
}

# Validate the input data.frame for lollipops(): required columns present,
# probability columns numeric, and all values within [0, 1].
# Returns TRUE invisibly on success, otherwise stops with a message.
verifyDataConditions <- function(data) {
  # Check if 'data' is a data.frame
  if (!is.data.frame(data)) {
    stop("Input is not a data.frame.")
  }
  # Check if 'data' has the required columns
  requiredColumns <- c("Name", "Prior", "Posterior")
  if (!all(requiredColumns %in% colnames(data))) {
    stop("Data.frame is missing required columns: Name, Prior, Posterior.")
  }
  # Check if 'Prior' and 'Posterior' are numeric (vapply pins the return type)
  if (!all(vapply(data[c("Prior", "Posterior")], is.numeric, logical(1)))) {
    stop("Columns 'Prior' and 'Posterior' must be numeric.")
  }
  # Check if values in 'Prior' and 'Posterior' are between 0 and 1.
  # NOTE(review): NA probabilities make this condition NA and the if() errors
  # with an unhelpful message -- consider an explicit anyNA() check.
  if (!all(data$Prior >= 0 & data$Prior <= 1) ||
      !all(data$Posterior >= 0 & data$Posterior <= 1)) {
    stop("Values in 'Prior' and 'Posterior' must be between 0 and 1.")
  }
  # If all conditions are met, return TRUE
  return(TRUE)
}
/scratch/gouwar.j/cran-all/cranData/vizdraws/R/lollipops.R
parse_prior <- function(prior = NULL) { if (is.null(prior) | is.numeric(prior)) { return(prior) } else if (is.character(prior)) { prior <- stringr::str_to_lower(prior) #Check that it matches one of the distribution options valid_distns <- list(n = qnorm, normal = qnorm, unif = qunif, uniform = qunif, beta = qbeta, gamma = qgamma) #These take 2 options if (prior=='std_normal') { return(qnorm(1:1000/1001)) } else if (stringr::str_detect(prior,'^(n|normal|unif|uniform|beta|gamma)\\(([-0-9.]+), ?([-0-9.]+)\\)$')) { matches <- stringr::str_match(prior,'^(n|normal|unif|uniform|beta|gamma)\\(([-0-9.]+), ?([-0-9.]+)\\)$') fxn <- valid_distns[[matches[,2]]] arg1 <- as.numeric(matches[,3]) arg2 <- as.numeric(matches[,4]) return(fxn(1:1000/1001, arg1, arg2)) } else if (stringr::str_detect(prior,'^halfnormal\\(([-0-9.]+), ?([-0-9.]+)\\)$')) { matches <- stringr::str_match(prior,'^halfnormal\\(([-0-9.]+), ?([-0-9.]+)\\)$') arg1 <- as.numeric(matches[,2]) arg2 <- as.numeric(matches[,3]) return(qnorm(0.5 + 1:1000/2001, arg1, arg2)) } else if (stringr::str_detect(prior,'^(t|student_t)\\(([-0-9.]+), ?([-0-9.]+), ?([-0-9.]+)\\)$')) { matches <- stringr::str_match(prior,'^(t|student_t)\\(([-0-9.]+), ?([-0-9.]+), ?([-0-9.]+)\\)$') arg1 <- as.numeric(matches[,3]) arg2 <- as.numeric(matches[,4]) arg3 <- as.numeric(matches[,5]) return(qt(1:1000/1001, arg1, arg2, arg3)) } else { stop('prior incorrectly specified. Valid distributions are std_normal, N, normal, halfnormal, unif, uniform, beta, gamma, t, and student_t.') } #They entered something but it's not NULL, string, or numeric. Error out. } else { stop('prior incorrectly specified.') } }
/scratch/gouwar.j/cran-all/cranData/vizdraws/R/parse_prior.R
#' Pipe operator #' #' See \code{magrittr::\link[magrittr]{\%>\%}} for details. #' #' @name %>% #' @rdname pipe #' @keywords internal #' @export #' @importFrom magrittr %>% #' @usage lhs \%>\% rhs NULL
/scratch/gouwar.j/cran-all/cranData/vizdraws/R/utils-pipe.R
#' Visualize Draws from Prior or Posterior Distributions
#'
#' A function to visualize draws from either the prior or posterior distribution, facilitating interpretation and decision-making.
#'
#' @title vizdraws
#'
#' @param prior (optional) Prior distribution or draws from it. Supported distributions: `Normal`, `uniform`, `beta`, and `gamma`. Provide either this or the posterior.
#' @param posterior (optional) Draws from the posterior distribution. Provide either this or the prior.
#' @param MME Minimum meaningful effect. If not provided, MME is set to zero.
#' @param threshold If the probability is greater than this threshold, a decision is considered comfortable.
#' @param units Optional argument to specify the units of x (e.g., dollars or applications).
#' @param colors Colors for the left, middle, and right areas. Defaults to c("#e41a1c", "#377eb8", "#4daf4a").
#' @param width Width for shiny.
#' @param height Height for shiny.
#' @param quantity Defaults to \code{FALSE}. When set to true, the text will reflect predicting a quantity rather than a treatment effect.
#' @param backgroundColor Defaults to \code{'#FFFFFF'}.
#' @param backgroundOpacity Defaults to \code{0.9}.
#' @param tense Either "future" or "past." This is the tense used in the description if quantity is set to TRUE.
#' @param xlab Defaults to \code{NULL}.
#' @param breaks Defaults to \code{NULL}.
#' @param break_names Defaults to \code{NULL}.
#' @param xlim Defaults to \code{NULL}.
#' @param font_scale Defaults to \code{1}.
#' @param display_mode_name Defaults to \code{FALSE}.
#' @param title Defaults to \code{''}.
#' @param stop_trans Defaults to \code{FALSE}. When set to true, the initial transition stops at posterior density.
#' @param percentage Defaults to \code{FALSE}. When set to true, the x-axis tick format will be set to percentage.
#' @param elementId Use an explicit element ID for the widget (rather than an automatically generated one). elementID for shiny.
#' @param logoPath Logo path. Defaults to \code{NULL}.
#' @param logoSize Logo size. Defaults to \code{100}.
#' @param logoLocation Logo location. \code{c('bottom-right', 'top-left', 'top-right', 'bottom-left')}.
#'
#' @return A HTML widget object.
#' @export
#'
#' @examples
#' if(interactive()){
#'   set.seed(9782)
#'   library(vizdraws)
#'   vizdraws(prior = rnorm(100000))
#' }
vizdraws <- function(prior = NULL, posterior = NULL, MME = 0, threshold = NULL,
                     units = NULL, quantity = FALSE, tense = c("future", "past"),
                     backgroundColor = "#FFFFFF", backgroundOpacity = 0.9,
                     xlab = NULL, breaks = NULL, break_names = NULL,
                     colors = NULL, width = NULL, height = NULL, xlim = NULL,
                     font_scale = 1, display_mode_name = FALSE, title = '',
                     stop_trans = FALSE, percentage = FALSE, elementId = NULL,
                     logoPath = NULL, logoSize = 100,
                     logoLocation = c('bottom-right', 'top-left', 'top-right', 'bottom-right'[0] %||% 'bottom-left')) {
  logoLocation <- match.arg(logoLocation)

  # ---- Argument validation (scalar conditions, so && / ||) -----------------
  if (MME < 0)
    stop("MME should be greater than 0")
  if (!is.null(breaks) && MME != 0)
    stop('MME and breaks cannot both be specified')
  if (length(breaks) > 10)
    stop('Can\'t specify more than 10 breaks')
  if (!is.null(breaks) && is.null(break_names))
    warning('Please supply break_names if specifying option breaks')
  if (!is.null(breaks) && !is.null(break_names) && length(break_names) <= length(breaks))
    stop('Not enough break_names specified')
  if (!is.null(breaks) && !is.null(colors) && length(colors) <= length(breaks))
    stop('Not enough colors specified')

  tense <- match.arg(tense)
  if (tense == "future") {
    tense <- "will be"
  } else {
    tense <- "was"
  }

  prior <- parse_prior(prior)

  if (!is.null(xlim)) {
    xlim <- sort(xlim)
    if (length(xlim) != 2)
      stop('xlim must have exactly 2 elements')
  }

  # Breaks must be ascending; names/colors are assumed to already be ascending.
  if (!is.null(breaks) && sum(breaks != sort(breaks)) > 0) {
    breaks <- sort(breaks)
    if (!is.null(colors) || !is.null(break_names))
      warning('breaks given out of order. Assuming colors/names supplied in ascending order, NOT in same order as breaks')
  }

  if (is.null(threshold)) {
    allow_threshold <- FALSE
  } else if ((threshold <= 0 || threshold >= 1)) {
    stop("threshold should be between 0 and 1")
  } else {
    allow_threshold <- TRUE
  }

  if (is.null(prior) && is.null(posterior))
    stop("must specify at least one of prior or posterior")
  # Mode transitions (prior <-> posterior) only make sense with both provided.
  allow_mode_trans <- !(is.null(prior) || is.null(posterior))

  # Default breaks: +/- MME when specified, otherwise a single break at zero.
  if (is.null(breaks)) {
    breaks <- if (MME != 0) c(-MME, MME) else 0
  }

  unit_text <- if (!is.null(units)) paste0(' ', units) else ''

  # Default names and colors (11 levels; subsets are picked per break count).
  defaults <- list(
    break_names = c(
      'Much much worse', 'Much worse', 'Worse', 'A bit worse',
      'A little bit worse', 'Equivalent', 'A little bit better',
      'A bit better', 'Better', 'Much Better', 'Much much better'
    ),
    colors = c(
      '#a50f15', '#de2d26', '#e41a1c', '#fcae91', '#fee5d9', '#377eb8',
      '#edf8e9', '#bae4b3', '#4daf4a', '#31a354', '#006d2c'
    )
  )
  group_options <- list(break_names = break_names, colors = colors)
  for (x in c('colors', 'break_names')) {
    n_per_side <- ceiling(length(breaks) / 2)
    no_middle <- length(breaks) %% 2
    def <- defaults[[x]]
    if (is.null(group_options[[x]])) {
      if (n_per_side == 1)
        group_options[[x]] <- c(def[3], def[6], def[9])
      else if (n_per_side == 2)
        group_options[[x]] <- c(def[2], def[4], def[6], def[8], def[10])
      else if (n_per_side == 3)
        group_options[[x]] <- c(def[1], def[3], def[5], def[6], def[7], def[9], def[11])
      else if (n_per_side == 4)
        group_options[[x]] <- c(def[1], def[2], def[4], def[5], def[6], def[7], def[8], def[10], def[11])
      else if (n_per_side == 5)
        group_options[[x]] <- def
      # An even number of breaks has no "equivalent" middle region.
      if (no_middle)
        group_options[[x]] <- group_options[[x]][-(n_per_side + 1)]
    }
  }

  # Start the graph showing the prior, unless only a posterior was provided.
  start <- if (is.null(prior)) 'posterior' else 'prior'

  # If only one distribution was provided, duplicate it for the other so both
  # densities share the same range.
  data <- list(prior = prior, posterior = posterior)
  if (is.null(prior)) data$prior <- posterior
  if (is.null(posterior)) data$posterior <- prior

  n_dens <- 2 ^ 11
  # Figure out the common x-range of both densities.
  rng <- lapply(data, function(d) {
    data.frame(stats::density(d, n = n_dens, adjust = 1)[c("x", "y")]) %>%
      dplyr::summarize(xmin = min(x), xmax = max(x))
  })
  xmin <- min(rng$prior, rng$posterior)
  xmax <- max(rng$prior, rng$posterior)

  # Calculate density values for input data on the shared grid.
  dens <- lapply(data, function(d) {
    data.frame(stats::density(
      d,
      n = n_dens,
      adjust = 1,
      from = xmin,
      to = xmax
    )[c("x", "y")])
  })

  # Options forwarded to the JavaScript widget.
  opts <- list(
    dens = dataframeToD3(
      data.frame(
        x = dens$prior$x,
        y_prior = dens$prior$y,
        y_posterior = dens$posterior$y
      )
    ),
    prior = dataframeToD3(data.frame(x = data$prior)),
    posterior = dataframeToD3(data.frame(x = data$posterior)),
    breaks = breaks,
    break_names = group_options$break_names,
    colors = group_options$colors,
    allow_threshold = allow_threshold,
    threshold = threshold,
    unit_text = unit_text,
    is_quantity = quantity,
    xlab = xlab,
    start_mode = start,
    start_status = 'distribution',
    initial_trans = TRUE,
    stop_trans = stop_trans,
    percentage = percentage,
    allow_mode_trans = allow_mode_trans,
    xlim = xlim,
    font_scale = font_scale,
    display_mode_name = display_mode_name,
    title = title,
    tense = tense,
    backgroundColor = backgroundColor,
    backgroundOpacity = backgroundOpacity,
    logoPath = logoPath,
    logoSize = logoSize,
    logoLocation = logoLocation
  )

  # Define sizing policy
  sizingPolicy <- htmlwidgets::sizingPolicy(
    defaultWidth = 400,
    defaultHeight = 400,
    browser.fill = TRUE
  )

  # create widget
  htmlwidgets::createWidget(
    name = 'vizdraws',
    opts,
    width = width,
    height = height,
    package = 'vizdraws',
    elementId = elementId,
    sizingPolicy = sizingPolicy
  )
}

#' Shiny bindings for vizdraws
#'
#' Output and render functions for using vizdraws within Shiny
#' applications and interactive Rmd documents.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#'   \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#'   string and have \code{'px'} appended.
#' @param expr An expression that generates a vizdraws
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#'   is useful if you want to save an expression in a variable.
#'
#' @name vizdraws-shiny
#'
#' @export
vizdrawsOutput <- function(outputId, width = '100%', height = '100%') {
  # Placeholder emitted into the Shiny UI; the widget itself is supplied by
  # the matching rendervizdraws() call on the server side.
  htmlwidgets::shinyWidgetOutput(
    outputId = outputId,
    name = 'vizdraws',
    width = width,
    height = height,
    package = 'vizdraws'
  )
}

#' @rdname vizdraws-shiny
#' @export
rendervizdraws <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture the expression unless the caller already supplied a quoted one,
  # so that shinyRenderWidget always receives a quoted expression.
  if (!quoted) {
    expr <- substitute(expr)
  }
  htmlwidgets::shinyRenderWidget(expr, vizdrawsOutput, env, quoted = TRUE)
}
/scratch/gouwar.j/cran-all/cranData/vizdraws/R/vizdraws.R
## File generated from vignettes/introduction.Rmd by knitr::purl() -- do not edit by hand.

## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ----prior, eval=FALSE--------------------------------------------------------
#  library(vizdraws)
#  vizdraws(prior = 'normal(0.05, 0.2)')
#  

## ----posterior, eval=FALSE----------------------------------------------------
#  vizdraws(prior = 'normal(0.05, 0.2)',
#           posterior = rnorm(n = 10000, mean = 0.3, sd = 0.5))

## ----mme, eval=FALSE----------------------------------------------------------
#  vizdraws(prior = rnorm(n = 10000, mean = 0, sd = 1),
#           posterior = rnorm(n = 10000, mean = 0.3, sd = 0.5),
#           xlim = c(-3,3),
#           MME = 0.1)
#  
/scratch/gouwar.j/cran-all/cranData/vizdraws/inst/doc/introduction.R
--- title: "Introduction" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Introduction} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Draws from the prior distribution ```{r prior, eval=FALSE} library(vizdraws) vizdraws(prior = 'normal(0.05, 0.2)') ``` <div align="center"> <iframe style="min-height:50vh" src="https://ignacio.martinez.fyi/vizdraws/prior.html" frameborder="0" scrolling="no" seamless="seamless" width="100%"></iframe> </div> ## Updating the prior ```{r posterior, eval=FALSE} vizdraws(prior = 'normal(0.05, 0.2)', posterior = rnorm(n = 10000, mean = 0.3, sd = 0.5)) ``` <div align="center"> <iframe style="min-height:50vh" src="https://ignacio.martinez.fyi/vizdraws/prior2posterior.html" frameborder="0" scrolling="no" seamless="seamless" width="100%"></iframe> </div> ## Minimum meaningful effect ```{r mme, eval=FALSE} vizdraws(prior = rnorm(n = 10000, mean = 0, sd = 1), posterior = rnorm(n = 10000, mean = 0.3, sd = 0.5), xlim = c(-3,3), MME = 0.1) ``` <div align="center"> <iframe style="min-height:50vh" src="https://ignacio.martinez.fyi/vizdraws/mme.html" frameborder="0" scrolling="no" seamless="seamless" width="100%"></iframe> </div>
/scratch/gouwar.j/cran-all/cranData/vizdraws/inst/doc/introduction.Rmd
## File generated from vignettes/shiny-example.Rmd by knitr::purl() -- do not edit by hand.

## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ----setup, eval=FALSE--------------------------------------------------------
#  library(shiny)
#  library(shinydashboard)
#  library(glue)
#  
#  ui <- dashboardPage(
#    dashboardHeader(),
#    dashboardSidebar(
#      numericInput(
#        "prior_mean",
#        label = h3("Prior Mean"),
#        value = 0,
#        step = 0.1
#      ),
#      numericInput(
#        "prior_sd",
#        label = h3("Prior SD"),
#        value = 1,
#        min = 0.01,
#        step = 0.1
#      )
#    ),
#    dashboardBody(vizdraws::vizdrawsOutput('chart'))
#  )
#  
#  server <- function(input, output) {
#    output$chart <- vizdraws::rendervizdraws({
#      vizdraws::vizdraws(
#        prior = glue("N({input$prior_mean},{input$prior_sd})"),
#        posterior = rnorm(1000, mean = 0.2, sd = 0.1),
#        MME = 0.1,
#        threshold = 0.7,
#        display_mode_name = TRUE,
#        title = "Shiny Example"
#      )
#    })
#  }
#  
#  shinyApp(ui, server)

## ----iframe, echo=FALSE, out.width="100%"-------------------------------------
knitr::include_url("https://ignacio.shinyapps.io/shiny_vizdraws/", height = "450px")
/scratch/gouwar.j/cran-all/cranData/vizdraws/inst/doc/shiny-example.R
--- title: "Shiny" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Shiny} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## [A very simple shiny app](https://ignacio.shinyapps.io/shiny_vizdraws/) ```{r setup, eval=FALSE} library(shiny) library(shinydashboard) library(glue) ui <- dashboardPage( dashboardHeader(), dashboardSidebar( numericInput( "prior_mean", label = h3("Prior Mean"), value = 0, step = 0.1 ), numericInput( "prior_sd", label = h3("Prior SD"), value = 1, min = 0.01, step = 0.1 ) ), dashboardBody(vizdraws::vizdrawsOutput('chart')) ) server <- function(input, output) { output$chart <- vizdraws::rendervizdraws({ vizdraws::vizdraws( prior = glue("N({input$prior_mean},{input$prior_sd})"), posterior = rnorm(1000, mean = 0.2, sd = 0.1), MME = 0.1, threshold = 0.7, display_mode_name = TRUE, title = "Shiny Example" ) }) } shinyApp(ui, server) ``` ```{r iframe, echo=FALSE, out.width="100%"} knitr::include_url("https://ignacio.shinyapps.io/shiny_vizdraws/", height = "450px") ```
/scratch/gouwar.j/cran-all/cranData/vizdraws/inst/doc/shiny-example.Rmd
--- title: "Introduction" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Introduction} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Draws from the prior distribution ```{r prior, eval=FALSE} library(vizdraws) vizdraws(prior = 'normal(0.05, 0.2)') ``` <div align="center"> <iframe style="min-height:50vh" src="https://ignacio.martinez.fyi/vizdraws/prior.html" frameborder="0" scrolling="no" seamless="seamless" width="100%"></iframe> </div> ## Updating the prior ```{r posterior, eval=FALSE} vizdraws(prior = 'normal(0.05, 0.2)', posterior = rnorm(n = 10000, mean = 0.3, sd = 0.5)) ``` <div align="center"> <iframe style="min-height:50vh" src="https://ignacio.martinez.fyi/vizdraws/prior2posterior.html" frameborder="0" scrolling="no" seamless="seamless" width="100%"></iframe> </div> ## Minimum meaningful effect ```{r mme, eval=FALSE} vizdraws(prior = rnorm(n = 10000, mean = 0, sd = 1), posterior = rnorm(n = 10000, mean = 0.3, sd = 0.5), xlim = c(-3,3), MME = 0.1) ``` <div align="center"> <iframe style="min-height:50vh" src="https://ignacio.martinez.fyi/vizdraws/mme.html" frameborder="0" scrolling="no" seamless="seamless" width="100%"></iframe> </div>
/scratch/gouwar.j/cran-all/cranData/vizdraws/vignettes/introduction.Rmd
--- title: "Shiny" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Shiny} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## [A very simple shiny app](https://ignacio.shinyapps.io/shiny_vizdraws/) ```{r setup, eval=FALSE} library(shiny) library(shinydashboard) library(glue) ui <- dashboardPage( dashboardHeader(), dashboardSidebar( numericInput( "prior_mean", label = h3("Prior Mean"), value = 0, step = 0.1 ), numericInput( "prior_sd", label = h3("Prior SD"), value = 1, min = 0.01, step = 0.1 ) ), dashboardBody(vizdraws::vizdrawsOutput('chart')) ) server <- function(input, output) { output$chart <- vizdraws::rendervizdraws({ vizdraws::vizdraws( prior = glue("N({input$prior_mean},{input$prior_sd})"), posterior = rnorm(1000, mean = 0.2, sd = 0.1), MME = 0.1, threshold = 0.7, display_mode_name = TRUE, title = "Shiny Example" ) }) } shinyApp(ui, server) ``` ```{r iframe, echo=FALSE, out.width="100%"} knitr::include_url("https://ignacio.shinyapps.io/shiny_vizdraws/", height = "450px") ```
/scratch/gouwar.j/cran-all/cranData/vizdraws/vignettes/shiny-example.Rmd
#' Client authorization
#'
#' @param client_id Application ID
#' @param scope Requested application access permissions (see below).
#' @param email Email or phone number
#' @param password Password
#'
#' @details
#' List of Available Settings of \href{https://vk.com/dev/permissions}{Access Permissions}:
#' \itemize{
#' \item \strong{friends} Access to friends.
#' \item \strong{photos} Access to photos.
#' \item \strong{audio} Access to audios.
#' \item \strong{video} Access to videos.
#' \item \strong{docs} Access to documents.
#' \item \strong{notes} Access to user notes.
#' \item \strong{pages} Access to wiki pages.
#' \item \strong{status} Access to user status.
#' \item \strong{wall} Access to standard and advanced methods for the wall.
#' \item \strong{groups} Access to user groups.
#' \item \strong{messages} Access to advanced methods for messaging.
#' \item \strong{notifications} Access to notifications about answers to the user.
#' }
#' @importFrom utils browseURL
#' @examples \dontrun{
#' # an example of an authenticated request
#' vkOAuth(client_id = 123456,
#'         scope = "friends,groups,messages",
#'         email = "[email protected]",
#'         password = "your_secret_password")
#'
#' # save access token for future sessions
#' at <- getAccessToken()
#'
#' # an example of request
#' me()
#'
#' # an example of an authenticated request without specifying email and password
#' vkOAuth(client_id = 123456, scope = "friends,groups,messages")
#'
#' # copy access token from browser address bar
#' setAccessToken("your_secret_access_token")
#' }
#' @export
vkOAuth <- function(client_id, scope = 'friends', email, password) {
  if (missing(client_id))
    stop('argument "client_id" is missing, with no default')
  if (!is.numeric(client_id) || floor(client_id) != client_id)
    stop('argument "client_id" must be an integer value')
  if (!is.character(scope))
    stop('argument "scope" must be a string')

  # FIX: the redirect URI was misspelled 'blank.hmtl'; VK's implicit flow
  # redirects to oauth.vk.com/blank.html with the token in the URL fragment,
  # so the typo broke token retrieval.
  auth_url <- paste0('https://oauth.vk.com/authorize?client_id=', client_id,
                     '&redirect_uri=https://oauth.vk.com/blank.html&scope=', scope,
                     '&response_type=token&display=page')

  if (missing(email) && missing(password) && interactive()) {
    # Interactive fallback: let the user authorize in the browser and copy
    # the token manually (see setAccessToken()).
    browseURL(auth_url)
  } else {
    if (missing(email))
      stop('argument "email" is missing, with no default')
    if (!is.character(email))
      stop('argument "email" must be a string')
    if (missing(password))
      stop('argument "password" is missing, with no default')
    if (!is.character(password))
      stop('argument "password" must be a string')
    if (!requireNamespace('XML', quietly = TRUE))
      stop('The package XML was not installed')

    response <- httr::GET(auth_url)

    # Walk the login form twice (login page, then possible confirmation page),
    # scraping the hidden form fields and re-submitting them with credentials.
    for (i in 1:2) {
      authorize_form <- XML::htmlParse(httr::content(response, "text", encoding = "UTF-8"))
      form_attrs <- XML::xpathSApply(authorize_form, "//form/input", XML::xmlGetAttr, "value")
      response <- httr::POST('https://login.vk.com/?act=login&soft=1&utf8=1',
                             body = list('_origin' = form_attrs[1],
                                         'ip_h' = form_attrs[2],
                                         'lg_h' = form_attrs[3],
                                         'to' = form_attrs[4],
                                         'email' = email,
                                         'pass' = password),
                             encode = 'form',
                             httr::config(followlocation = 0L))
      response <- httr::GET('https://login.vk.com/?act=login&soft=1&utf8=1',
                            query = list('_origin' = form_attrs[1],
                                         'ip_h' = form_attrs[2],
                                         'lg_h' = form_attrs[3],
                                         'to' = form_attrs[4],
                                         'email' = email,
                                         'pass' = password),
                            httr::add_headers('Content-Type' = 'application/x-www-form-urlencoded'))
    }

    # If a grant-access form is still shown, submit it.
    authorize_form <- XML::htmlParse(httr::content(response, "text", encoding = "UTF-8"))
    action <- XML::xpathSApply(authorize_form, "//form", XML::xmlGetAttr, "action")
    if (length(action) > 0)
      response <- httr::GET(action,
                            httr::add_headers('Content-Type' = 'application/x-www-form-urlencoded'))

    # Scan the redirect chain for the fragment that carries the token.
    # FIX: use '&&' so grepl() is skipped when 'location' is NULL; with '&'
    # a missing Location header produced logical(0) and made if() error out.
    # FIX: seq_along() handles an empty header list (1:length gave c(1, 0)).
    access_token <- NULL
    for (i in seq_along(response$all_headers)) {
      location <- response$all_headers[[i]]$headers$location
      if (!is.null(location) && grepl("access_token", location)) {
        access_token <- gsub(".*?access_token=(.*?)&.*", "\\1", location)
        break
      }
    }
    # FIX: previously a failed login crashed with "object 'access_token' not
    # found"; report the failure explicitly instead.
    if (is.null(access_token))
      stop("Authorization failed: no access token found in the server response",
           call. = FALSE)

    setAccessToken(access_token)
  }
}


#' Client authorization (for web application)
#'
#' @param app_name Application name
#' @param client_id Application ID
#' @param client_secret Application secret key
#' @export
vkOAuthWeb <- function(app_name, client_id, client_secret) {
  if (!requireNamespace("httpuv", quietly = TRUE))
    stop("The package httpuv was not installed")
  if (missing(app_name))
    stop('argument "app_name" is missing, with no default')
  if (!is.character(app_name))
    stop('argument "app_name" must be a string')
  if (missing(client_id))
    stop('argument "client_id" is missing, with no default')
  if (!is.numeric(client_id) || floor(client_id) != client_id)
    stop('argument "client_id" must be an integer value')
  if (missing(client_secret))
    stop('argument "client_secret" is missing, with no default')
  if (!is.character(client_secret))
    stop('argument "client_secret" must be a string')

  accessURL <- "https://oauth.vk.com/access_token"
  authURL <- "https://oauth.vk.com/authorize"
  vk <- httr::oauth_endpoint(authorize = authURL, access = accessURL)
  vk_app <- httr::oauth_app(app_name, client_id, client_secret)
  ig_oauth <- httr::oauth2.0_token(vk, vk_app,
                                   type = 'application/x-www-form-urlencoded',
                                   cache = FALSE)

  # Extract the token from the names of the credential list.
  my_session <- strsplit(toString(names(ig_oauth$credentials)), '"')
  access_token <- paste0('access_token=', my_session[[1]][4])
  # NOTE(review): unlike vkOAuth(), this stores the token WITH the
  # 'access_token=' prefix -- confirm downstream request builders expect that.
  setAccessToken(access_token)
}


#' Set access token
#' @param access_token Access token
#' @export
setAccessToken <- function(access_token = '') {
  # Stored in the package-private environment and read by getAccessToken().
  .vkr$access_token <- access_token
}


#' Get access token
#' @export
getAccessToken <- function() {
  if (is.null(.vkr$access_token))
    warning("Could not find access token. For more details see ?vkOAuth or ?setAccessToken",
            call. = FALSE)
  .vkr$access_token
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/auth.R
#' Returns a list of topics on a community's discussion board
#'
#' @param group_id ID of the community that owns the discussion board.
#' @param topics_ids IDs of topics to be returned (100 maximum). By default, all topics are returned. If this parameter is set, the order, offset, and count parameters are ignored.
#' @param order Sort order:
#' \itemize{
#' \item 1 - by date updated in reverse chronological order;
#' \item 2 - by date created in reverse chronological order;
#' \item -1 - by date updated in chronological order;
#' \item -2 - by date created in chronological order.
#' }
#' If no sort order is specified, topics are returned in the order specified by the group administrator. Pinned topics are returned first, regardless of the sorting.
#' @param offset Offset needed to return a specific subset of topics.
#' @param count Number of topics to return (default 40, maximum value 100).
#' @param extended 1 — to return information about users who created topics or who posted there last; 0 — to return no additional fields (default).
#' @param preview 1 — to return the first comment in each topic; 2 — to return the last comment in each topic; 0 — to return no comments.
#' @param preview_length Number of characters after which to truncate the previewed comment. To preview the full comment, specify 0.
#' @param v Version of API
#' @export
getTopics <- function(group_id='', topics_ids='', order='', offset=0, count=40,
                      extended=0, preview=0, preview_length=90, v=getAPIVersion()) {
  # Build the board.getTopics request.
  # NOTE(review): the parameter is forwarded as 'topics_ids'; confirm the VK
  # endpoint accepts this spelling.
  req <- queryBuilder('board.getTopics',
                      group_id = group_id,
                      topics_ids = topics_ids,
                      order = order,
                      offset = offset,
                      count = count,
                      extended = extended,
                      preview = preview,
                      preview_length = preview_length,
                      v = v)
  # Honor the API rate limit before firing the request.
  request_delay()
  parsed <- jsonlite::fromJSON(req)
  if (has_error(parsed)) {
    return(try_handle_error(parsed))
  }
  parsed$response
}

#' Returns a list of topics on a community's discussion board
#'
#' @param group_id ID of the community that owns the discussion board.
#' @param order Sort order:
#' \itemize{
#' \item 1 - by date updated in reverse chronological order;
#' \item 2 - by date created in reverse chronological order;
#' \item -1 - by date updated in chronological order;
#' \item -2 - by date created in chronological order.
#' }
#' If no sort order is specified, topics are returned in the order specified by the group administrator. Pinned topics are returned first, regardless of the sorting.
#' @param offset Offset needed to return a specific subset of topics.
#' @param count Number of topics to return (default 40, 0 - for all topics).
#' @param preview 1 — to return the first comment in each topic; 2 — to return the last comment in each topic; 0 — to return no comments.
#' @param preview_length Number of characters after which to truncate the previewed comment. To preview the full comment, specify 0.
#' @param use_db Use database
#' @param db_params Collection name and suffix
#' @param progress_bar Display progress bar
#' @param v Version of API
#' @export
getTopicsExecute <- function(group_id='', order='', offset=0, count=40, preview=0,
                             preview_length=90, use_db=FALSE, db_params=list(),
                             progress_bar=FALSE, v=getAPIVersion()) {
  # Fetch up to 2500 topics in one execute() round trip by generating a
  # VKScript loop that batches board.getTopics calls (100 topics each).
  get_topics2500 <- function(group_id='', order='', offset=0, max_count=0,
                             preview=0, preview_length=90, v=getAPIVersion()) {
    if (max_count > 2500)
      max_count <- 2500
    if (max_count <= 100) {
      # Small request: a single API call inside execute() is enough.
      execute(paste0('return API.board.getTopics({"group_id":"', group_id,
                     '", "order":"', order,
                     '", "offset":"', offset,
                     '", "count":"', max_count,
                     '", "preview":"', preview,
                     '", "preview_length":"', preview_length,
                     '", "v":"', v, '"}).items;'))
    } else {
      # Build a VKScript snippet: first page explicitly, remaining pages in a
      # server-side while loop (advancing by 100 per iteration).
      code <- 'var topics = [];'
      code <- paste0(code,
                     'topics = topics + API.board.getTopics({"group_id":"', group_id,
                     '", "order":"', order,
                     '", "offset":"', offset,
                     '", "count":"', 100,
                     '", "preview":"', preview,
                     '", "preview_length":"', preview_length,
                     '", "v":"', v, '"}).items;')
      code <- paste0(code,
                     'var offset = 100 + ', offset,
                     '; var count = 100; var max_offset = offset + ', max_count,
                     '; while (offset < max_offset && topics.length <= offset && offset-', offset,
                     '<', max_count,
                     ') { if (', max_count,
                     ' - topics.length < 100) { count = ', max_count,
                     ' - topics.length; }; topics = topics + API.board.getTopics({"group_id":"', group_id,
                     '", "order":"', order,
                     '", "offset": offset, "count": count, "preview":"', preview,
                     '", "preview_length":"', preview_length,
                     '", "v":"', v,
                     '"}).items; offset = offset + 100; }; return topics;')
      execute(code)
    }
  }

  # Probe request (count = 1) to learn the total number of topics and grab
  # the first page of metadata.
  code <- paste0('return API.board.getTopics({"group_id":"', group_id,
                 '", "order":"', order,
                 '", "offset":"', offset,
                 '", "count":"', 1,
                 '", "preview":"', preview,
                 '", "preview_length":"', preview_length,
                 '", "v":"', v, '"});')
  response <- execute(code)
  topics <- response$items

  # Cap the target at the server-reported total; count == 0 means "all".
  max_count <- ifelse((response$count - offset) > count & count != 0,
                      count,
                      response$count - offset)
  if (max_count == 0)
    return(list(group_id = group_id, topics = response$items, count = response$count))

  if (use_db) {
    # Persist incrementally; or() falls back to defaults when db_params
    # does not supply collection/suffix/key.
    collection <- or(db_params[['collection']], group_id)
    suffix <- or(db_params[['suffix']], 'board')
    key <- or(db_params[['key']], 'id')
    if (collection_exists(collection, suffix))
      db_update(object = topics, key = key, collection = collection,
                suffix = suffix, upsert = TRUE)
    else
      db_save(object = topics, collection = collection, suffix = suffix)
  }

  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = max_count, style = 3)
    setTxtProgressBar(pb, nrow(topics))
  }

  # Pull the remaining topics in (up to) 2500-sized chunks until the target
  # is reached.
  num_records <- max_count - nrow(topics)
  while (nrow(topics) < max_count) {
    tryCatch({
      topics2500 <- get_topics2500(group_id = group_id, order = order,
                                   preview = preview,
                                   preview_length = preview_length,
                                   max_count = num_records,
                                   offset = offset + nrow(topics), v = v)
      if (use_db)
        db_update(object = topics2500, key = key, collection = collection,
                  suffix = suffix, upsert = TRUE)
      topics <- jsonlite::rbind_pages(list(topics, topics2500))
      num_records <- ifelse((max_count - nrow(topics)) > num_records,
                            num_records,
                            max_count - nrow(topics))
    },
    # On VK error 13 the batch size is halved and the chunk retried on the
    # next loop iteration ('<<-' writes back into the enclosing frame).
    vk_error13 = function(e) {
      num_records <<- as.integer(num_records / 2)
      warning(simpleWarning(paste0('Parameter "count" was tuned: ',
                                   num_records, ' per request.')))
    })
    if (progress_bar)
      setTxtProgressBar(pb, nrow(topics))
  }
  if (progress_bar)
    close(pb)

  # Tag the result so boardGetCommentsList() can recognize it.
  board <- list(group_id = group_id, topics = topics, count = response$count)
  class(board) <- c(class(board), "topics.list")
  return(board)
}

#' Returns a list of comments on a topic on a community's discussion board
#'
#' @param group_id ID of the community that owns the discussion board.
#' @param topic_id Topic ID.
#' @param need_likes 1 - to return the likes field, 0 - not to return the likes field (default).
#' @param start_comment_id Positive number.
#' @param offset Offset needed to return a specific subset of comments.
#' @param count Number of comments to return (default 20, maximum 100).
#' @param extended 1 — to return information about users who posted comments; 0 — to return no additional fields (default).
#' @param sort Sort order: asc - chronological, desc - reverse chronological.
#' @param v Version of API
#' @export
boardGetComments <- function(group_id='', topic_id='', need_likes=0,
                             start_comment_id='', offset=0, count=20, sort='',
                             extended=0, v=getAPIVersion()) {
  # Single-page wrapper around board.getComments (max 100 per call).
  query <- queryBuilder('board.getComments',
                        group_id = group_id,
                        topic_id = topic_id,
                        need_likes = need_likes,
                        start_comment_id = start_comment_id,
                        offset = offset,
                        count = count,
                        extended = extended,
                        sort = sort,
                        v = v)
  # Honor the API rate limit before sending the request.
  request_delay()
  response <- jsonlite::fromJSON(query)
  if (has_error(response))
    return(try_handle_error(response))
  response$response
}

#' Returns a list of comments on a topic on a community's discussion board
#'
#' @param group_id ID of the community that owns the discussion board.
#' @param topic_id Topic ID.
#' @param need_likes 1 - to return the likes field, 0 - not to return the likes field (default).
#' @param start_comment_id Positive number.
#' @param offset Offset needed to return a specific subset of comments.
#' @param count Number of comments to return (default 20, 0 - for all comments).
#' @param sort Sort order: asc - chronological, desc - reverse chronological.
#' @param progress_bar Display progress bar.
#' @param v Version of API
#' @export
boardGetCommentsExecute <- function(group_id='', topic_id='', need_likes=0,
                                    start_comment_id='', offset=0, count=20,
                                    sort='', progress_bar=FALSE, v=getAPIVersion()) {
  # Fetch up to 2500 comments in one execute() round trip by generating a
  # VKScript loop that batches board.getComments calls (100 comments each).
  get_comments2500 <- function(group_id='', topic_id='', need_likes=0,
                               start_comment_id='', offset=0, max_count='',
                               sort='', v=getAPIVersion()) {
    if (max_count > 2500)
      max_count <- 2500
    if (max_count <= 100) {
      # Small request: a single API call inside execute() is enough.
      execute(paste0('return API.board.getComments({ "group_id":"', group_id,
                     '", "topic_id":"', topic_id,
                     '", "count":"', max_count,
                     '", "offset":"', offset,
                     '", "need_likes":"', need_likes,
                     '", "start_comment_id":"', start_comment_id,
                     '", "sort":"', sort,
                     '", "v":"', v, '"}).items;'))
    } else {
      # Build a VKScript snippet: first page explicitly, remaining pages in a
      # server-side while loop (advancing by 100 per iteration).
      code <- 'var comments = [];'
      code <- paste0(code,
                     'comments = comments + API.board.getComments({ "group_id":"', group_id,
                     '", "topic_id":"', topic_id,
                     '", "count":"', 100,
                     '", "offset":"', offset,
                     '", "need_likes":"', need_likes,
                     '", "start_comment_id":"', start_comment_id,
                     '", "sort":"', sort,
                     '", "v":"', v, '"}).items;')
      code <- paste0(code,
                     'var offset = 100 + ', offset,
                     '; var count = 100; var max_offset = offset + ', max_count,
                     '; while (offset < max_offset && comments.length <= offset && offset-', offset,
                     '<', max_count,
                     ') { if (', max_count,
                     ' - comments.length < 100) { count = ', max_count,
                     ' - comments.length; }; comments = comments + API.board.getComments({ "group_id":"', group_id,
                     '", "topic_id":"', topic_id,
                     '", "offset":offset, "count":count, "need_likes":"', need_likes,
                     '", "start_comment_id":"', start_comment_id,
                     '", "sort":"', sort,
                     '", "v":"', v,
                     '"}).items; offset = offset + 100; }; return comments;')
      execute(code)
    }
  }

  # Probe request (count = 1) to learn the total number of comments.
  code <- paste0('return API.board.getComments({ "group_id":"', group_id,
                 '", "topic_id":"', topic_id,
                 '", "count":"', 1,
                 '", "offset":"', offset,
                 '", "need_likes":"', need_likes,
                 '", "start_comment_id":"', start_comment_id,
                 '", "sort":"', sort,
                 '", "v":"', v, '"});')
  response <- execute(code)
  comments <- response$items

  # Cap the target at the server-reported total; count == 0 means "all".
  max_count <- ifelse((response$count - offset) > count & count != 0,
                      count,
                      response$count - offset)
  if (max_count == 0)
    return(list(comments = response$items, count = response$count))

  offset_counter <- 0
  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = max_count, style = 3)
    setTxtProgressBar(pb, nrow(comments))
  }

  # Pull the remaining comments in 2500-sized chunks; the '1 +' skips the
  # probe record already fetched above.
  while (nrow(comments) < max_count) {
    tryCatch({
      comments2500 <- get_comments2500(group_id = group_id, topic_id = topic_id,
                                       need_likes = need_likes, sort = sort,
                                       start_comment_id = start_comment_id,
                                       max_count = (max_count - nrow(comments)),
                                       offset = (1 + offset + offset_counter * 2500),
                                       v = v)
      comments <- jsonlite::rbind_pages(list(comments, comments2500))
      offset_counter <- offset_counter + 1
    },
    # Best effort: surface the failure as a warning and keep looping.
    # NOTE(review): a persistent error would retry forever here -- confirm.
    error = function(e) {
      warning(e)
    })
    if (progress_bar)
      setTxtProgressBar(pb, nrow(comments))
  }
  if (progress_bar)
    close(pb)
  list(comments = comments, count = response$count)
}

#' Returns a list of comments on a community's discussion board
#'
#' @param topics A list of topics (from getTopicsExecute())
#' @param progress_bar Display progress bar
#' @param v Version of API
#' @export
boardGetCommentsList <- function(topics, progress_bar = FALSE, v = getAPIVersion()) {
  # Fetch the first 100 comments for up to 25 topics per execute() call
  # (25 is the VKScript per-request API-call limit).
  get_comments <- function(group_id, topics, v = getAPIVersion()) {
    num_requests <- ceiling(nrow(topics) / 25)
    from <- 1
    to <- 25
    comments <- list()
    for (i in 1:num_requests) {
      code <- 'var comments_per_topic = {}; var comments;'
      if (to > nrow(topics))
        to <- nrow(topics)
      for (index in from:to) {
        # Accumulate one API call per topic under the key 'topic<id>'.
        code <- paste0(code, 'comments = API.board.getComments({ "group_id":"', group_id,
                       '", "topic_id":"', topics[index, ]$id,
                       '", "need_likes":"', 1,
                       '", "count":"', 100,
                       '", "v":"', v,
                       '"}).items; comments_per_topic.topic', topics[index, ]$id,
                       "=comments;", sep = "")
      }
      code <- paste0(code, 'return comments_per_topic;')
      comments <- append(comments, execute(code))
      from <- from + 25
      to <- to + 25
    }
    names(comments) <- topics$id
    comments
  }

  # Accept either the tagged result of getTopicsExecute() or a raw topics
  # data frame.
  if ("topics.list" %in% class(topics)) {
    group_id <- topics$group_id
    topics <- topics$topics
  }

  # Topics with <= 100 comments are batched; larger ones are paged
  # individually via boardGetCommentsExecute().
  cmt_groups <- split(topics, topics$comments > 100)
  topics_le100 <- cmt_groups[['FALSE']]
  topics_gt100 <- cmt_groups[['TRUE']]

  comments <- list()
  from <- 1
  max_count <- nrow(topics_le100)
  to <- ifelse(max_count >= 75, 75, max_count)

  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = nrow(topics), style = 3)
    setTxtProgressBar(pb, 0)
  }

  # Process the small topics in slices of 75 rows (3 execute() calls each).
  repeat {
    comments75 <- get_comments(group_id, topics_le100[from:to, ], v)
    comments <- append(comments, comments75)
    if (progress_bar)
      setTxtProgressBar(pb, length(comments))
    if (to >= max_count)
      break
    from <- to + 1
    to <- ifelse(to + 75 >= max_count, max_count, to + 75)
  }

  if (!is.null(topics_gt100)) {
    for (i in 1:nrow(topics_gt100)) {
      topic_id <- topics_gt100$id[i]
      comments[[paste0(topic_id)]] <- boardGetCommentsExecute(group_id = group_id,
                                                              topic_id = topic_id,
                                                              count = 0,
                                                              v = v)$comments
      if (progress_bar)
        setTxtProgressBar(pb, length(comments))
    }
  }

  if (progress_bar)
    close(pb)

  # Restore the original topic order (split() interleaved the two groups).
  comments_ordered <- list()
  for (i in 1:nrow(topics)) {
    comments_ordered[[paste0(topics$id[i])]] <- comments[[paste0(topics$id[i])]]
  }
  class(comments_ordered) <- c(class(comments_ordered), "vk.board.comments")
  comments_ordered
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/board.R
# Internal helper shared by every database.* wrapper below: build the request
# with queryBuilder(), wait out the API rate limit, then return the parsed
# 'response' element of the JSON reply. Behavior matches the previous inline
# pattern exactly.
.database_request <- function(method, ...) {
  query <- queryBuilder(method, ...)
  request_delay()
  jsonlite::fromJSON(query)$response
}

#' Returns a list of countries
#'
#' @param need_all 1 - to return a full list of all countries; 0 - to return a list of countries near the current user's country
#' @param code Country codes in ISO 3166-1 alpha-2 standard
#' @param offset Offset needed to return a specific subset of countries
#' @param count Number of countries to return
#' @param v Version of API
#' @examples
#' \dontrun{
#' databaseGetCountries(count=234)
#' }
#' @export
databaseGetCountries <- function(need_all='1', code='', offset='', count='100', v=getAPIVersion()) {
  .database_request('database.getCountries', need_all = need_all, code = code,
                    offset = offset, count = count, v = v)
}

#' Returns a list of regions
#'
#' @param country_id Country ID, received in database.getCountries method
#' @param q Search query
#' @param offset Offset needed to return specific subset of regions
#' @param count Number of regions to return
#' @param v Version of API
#' @examples
#' \dontrun{
#' databaseGetRegions(229)
#' }
#' @export
databaseGetRegions <- function(country_id='', q='', offset='', count='100', v=getAPIVersion()) {
  .database_request('database.getRegions', country_id = country_id, q = q,
                    offset = offset, count = count, v = v)
}

#' Returns information about streets by their IDs
#'
#' @param street_ids Street IDs
#' @param v Version of API
#' @examples
#' \dontrun{
#' databaseGetStreetsById(1)
#' }
#' @export
databaseGetStreetsById <- function(street_ids='', v=getAPIVersion()) {
  .database_request('database.getStreetsById', street_ids = street_ids, v = v)
}

#' Returns information about countries by their IDs
#'
#' @param country_ids Country IDs
#' @param v Version of API
#' @examples
#' \dontrun{
#' databaseGetCountriesById('1,2,3,4')
#' }
#' @export
databaseGetCountriesById <- function(country_ids, v=getAPIVersion()) {
  .database_request('database.getCountriesById', country_ids = country_ids, v = v)
}

#' Returns a list of cities
#'
#' @param country_id Country ID
#' @param region_id Region ID
#' @param q Search query
#' @param need_all 1 - to return all cities in the country; 0 - to return major cities in the country (default)
#' @param offset Offset needed to return a specific subset of cities
#' @param count Number of cities to return
#' @param v Version of API
#' @examples
#' \dontrun{
#' databaseGetCities(country_id=1, need_all=0)
#' }
#' @export
databaseGetCities <- function(country_id='', region_id='', q='', need_all='1',
                              offset='', count='100', v=getAPIVersion()) {
  # NOTE(review): the default here is need_all = '1' although the roxygen text
  # above describes 0 as the default -- confirm which is intended.
  .database_request('database.getCities', country_id = country_id,
                    region_id = region_id, q = q, need_all = need_all,
                    offset = offset, count = count, v = v)
}

#' Returns information about cities by their IDs
#'
#' @param city_ids City IDs
#' @param v Version of API
#' @examples
#' \dontrun{
#' databaseGetCitiesById('1,2')
#' }
#' @export
databaseGetCitiesById <- function(city_ids='', v=getAPIVersion()) {
  .database_request('database.getCitiesById', city_ids = city_ids, v = v)
}

#' Returns a list of higher education institutions
#'
#' @param q Search query
#' @param country_id Country ID
#' @param city_id City ID
#' @param offset Offset needed to return a specific subset of universities
#' @param count Number of universities to return
#' @param v Version of API
#' @examples
#' \dontrun{
#' databaseGetUniversities(city_id = '2')
#' }
#' @export
databaseGetUniversities <- function(q='', country_id='', city_id='', offset='',
                                    count='100', v=getAPIVersion()) {
  .database_request('database.getUniversities', q = q, country_id = country_id,
                    city_id = city_id, offset = offset, count = count, v = v)
}

#' Returns a list of schools
#'
#' @param q Search query
#' @param city_id City ID
#' @param offset Offset needed to return a specific subset of schools
#' @param count Number of schools to return
#' @param v Version of API
#' @examples
#' \dontrun{
#' databaseGetSchools(city_id = 2)
#' }
#' @export
databaseGetSchools <- function(q='', city_id='', offset='', count='100', v=getAPIVersion()) {
  .database_request('database.getSchools', q = q, city_id = city_id,
                    offset = offset, count = count, v = v)
}

#' Returns a list of available classes
#'
#' @param country_id Country ID
#' @param v Version of API
#' @examples
#' \dontrun{
#' databaseGetSchoolClasses(1)
#' }
#' @export
databaseGetSchoolClasses <- function(country_id='', v=getAPIVersion()) {
  .database_request('database.getSchoolClasses', country_id = country_id, v = v)
}

#' Returns a list of faculties (i.e., university departments)
#'
#' @param university_id University ID
#' @param offset Offset needed to return a specific subset of faculties
#' @param count Number of faculties to return
#' @param v Version of API
#' @examples
#' \dontrun{
#' databaseGetFaculties(53)
#' }
#' @export
databaseGetFaculties <- function(university_id='', offset='', count='100', v=getAPIVersion()) {
  .database_request('database.getFaculties', university_id = university_id,
                    offset = offset, count = count, v = v)
}

#' Returns list of chairs on a specified faculty
#'
#' @param faculty_id ID of the faculty to get chairs from
#' @param offset Offset required to get a certain subset of chairs
#' @param count Amount of chairs to get
#' @param v Version of API
#' @examples
#' \dontrun{
#' databaseGetChairs(206)
#' }
#' @export
databaseGetChairs <- function(faculty_id='', offset='', count='100', v=getAPIVersion()) {
  .database_request('database.getChairs', faculty_id = faculty_id,
                    offset = offset, count = count, v = v)
}

#' Get country ID and title by given city ID
#'
#' Looks up one user registered in the given city and reads the country
#' field from that user's profile.
#'
#' @param city_id City ID
#' @export
getCountryByCityId <- function(city_id) {
  found <- usersSearch(q = "", fields = "country", city = city_id, count = 1)
  if (found$count == 0)
    stop("No users from this city")
  users <- found$items
  if (length(users) == 0) {
    # The API sometimes returns an empty page when count = 1; retry with 2.
    users <- usersSearch(q = "", fields = "country", city = city_id, count = 2)$items
  }
  users$country
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/database.R
#' Returns a list of user IDs or detailed information about a user's friends
#'
#' @param user_id User ID. By default, the current user ID
#' @param order Sort order (name - by name, hints - by rating)
#' @param list_id ID of the friend list returned by the friends.getLists method to be used as the source. This parameter is taken into account only when the uid parameter is set to the current user ID
#' @param count Number of friends to return
#' @param offset Offset needed to return a specific subset of friends
#' @param fields Profile fields to return
#' @param name_case Case for declension of user name and surname
#' @param flatten Automatically flatten nested data frames into a single non-nested data frame
#' @param v Version of API
#' @examples
#' \dontrun{
#' friends_list <- getFriends(user_id=1, order='name', fields='bdate')
#' friends <- friends_list$items
#' }
#' @export
getFriends <- function(user_id='', order='', list_id='', count='', offset='', fields='', name_case='', flatten=FALSE, v=getAPIVersion()) {
  query <- queryBuilder('friends.get',
                        user_id = user_id,
                        order = order,
                        list_id = list_id,
                        count = count,
                        offset = offset,
                        fields = fields,
                        name_case = name_case,
                        v = v)
  request_delay()
  response <- jsonlite::fromJSON(query)

  if (has_error(response))
    return(try_handle_error(response))

  response <- response$response
  if (isTRUE(flatten))
    response$items <- jsonlite::flatten(response$items)

  class(response) <- c(class(response), "vk.friends")
  response
}


#' Returns a list of user IDs of the mutual friends of two users
#'
#' @param source_id ID of the user whose friends will be checked against the friends of the user specified in target_uid
#' @param target_uid ID of the user whose friends will be checked against the friends of the user specified in source_uid
#' @param target_uids List of target uids (list of comma-separated positive numbers, the maximum number of elements allowed is 100)
#' @param order Sort order
#' @param count Number of mutual friends to return
#' @param offset Offset needed to return a specific subset of mutual friends
#' @param v Version of API
#' @examples
#' \dontrun{
#' mutual_friends <- getMutual(target_uid=1)
#' }
#' @export
getMutual <- function(source_id='', target_uid='', target_uids='', order='', count='', offset='', v=getAPIVersion()) {
  .Deprecated("getMutualExecute()")

  body <- list(source_id = source_id,
               target_uid = target_uid,
               order = order,
               count = count,
               offset = offset)

  if (length(target_uids) > 1) {
    # Many targets: ship the (potentially long) id list in the POST body
    # instead of the URL to avoid URL length limits.
    target_uids <- paste(target_uids, collapse = ",")
    body <- append(body, list(target_uids = target_uids))
    query <- queryBuilder('friends.getMutual', v = v)
  } else {
    query <- queryBuilder('friends.getMutual', target_uids = target_uids, v = v)
  }

  request_delay()
  response <- jsonlite::fromJSON(httr::content(
    httr::POST(URLencode(query), body = body), "text", encoding = "UTF-8"))

  if (has_error(response))
    return(try_handle_error(response))

  response$response
}


#' Returns a list of user IDs of the mutual friends of two users
#'
#' @param source_id ID of the user whose friends will be checked against the friends of the user specified in target_uid
#' @param target_uid ID of the user whose friends will be checked against the friends of the user specified in source_uid
#' @param target_uids List of target uids
#' @param order Sort order
#' @param count Number of mutual friends to return
#' @param offset Offset needed to return a specific subset of mutual friends
#' @param progress_bar Display progress bar
#' @param v Version of API
#' @examples
#' \dontrun{
#' mutual_friends <- getMutualExecute(target_uid=1)
#' }
#' @export
getMutualExecute <- function(source_id='', target_uid='', target_uids='', order='', count='', offset='', progress_bar=FALSE, v=getAPIVersion()) {
  # Batch up to 25 friends.getMutual calls (100 target ids each) into a single
  # VKScript execute() request.
  get_mutual_friends <- function(source_id='', target_uids='', order='', count='', offset='', v=getAPIVersion()) {
    code <- 'var mutual_friends = [];'
    # 100 target ids per API call, rounded up.
    num_requests <- ceiling(length(target_uids) / 100)
    from <- 1
    to <- ifelse(num_requests >= 2, 100, length(target_uids))
    for (i in seq_len(num_requests)) {
      code <- paste0(code, 'mutual_friends = mutual_friends + API.friends.getMutual({
                     "source_id":"', source_id, '",
                     "target_uids":"', paste0(target_uids[from:to], collapse = ','), '",
                     "order":"', order, '",
                     "count":"', count, '",
                     "offset":"', offset, '",
                     "v":"', v, '"});')
      from <- to + 1
      to <- to + ifelse(length(target_uids) - (to + 100) >= 0, 100, length(target_uids) - to)
    }
    code <- paste0(code, 'return mutual_friends;')
    if (nchar(code) > 65535)
      stop("The POST request is limited by 65535 bytes")
    execute(code)
  }

  if (target_uid != '')
    target_uids <- target_uid

  mutual_friends <- data.frame()
  from <- 1
  to <- 2500  # 25 batched calls x 100 ids per execute() request

  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = length(target_uids), style = 3)
    setTxtProgressBar(pb, 0)
  }

  repeat {
    if (to >= length(target_uids))
      to <- length(target_uids)

    friends <- get_mutual_friends(source_id = source_id,
                                  target_uids = target_uids[from:to],
                                  order = order,
                                  count = count,
                                  offset = offset,
                                  v = v)
    mutual_friends <- jsonlite::rbind_pages(list(mutual_friends, friends))

    if (progress_bar)
      setTxtProgressBar(pb, nrow(mutual_friends))

    if (to >= length(target_uids))
      break

    from <- to + 1
    to <- to + 2500
  }

  if (progress_bar)
    close(pb)

  mutual_friends
}


#' Checks the friendship status between two users
#'
#' @param source_id Source user ID
#' @param target_id Target user ID
#' @examples
#' \dontrun{
#' areFriends(me(), 123456)
#' }
#' @export
areFriends <- function(source_id, target_id) {
  if (!is.numeric(source_id))
    stop('source_id must be positive integer')
  if (!is.numeric(target_id))
    stop('target_id must be positive integer')
  # Scalar condition: use short-circuit || so the second friends request
  # is skipped when the first already proves friendship.
  source_id %in% getFriends(target_id)$items || target_id %in% getFriends(source_id)$items
}


#' Returns a list of friends IDs for the specified users
#'
#' @param user_ids User IDs (maximum 25)
#' @param v Version of API
#' @importFrom stats na.omit
#' @examples \dontrun{
#' my_friends <- getFriends()
#' friends_of_friends <- getFriendsBy25(my_friends$items[1:25])
#' }
#' @export
getFriendsBy25 <- function(user_ids, v=getAPIVersion()) {
  user_ids <- na.omit(user_ids)
  user_ids <- unique(user_ids)
  if (length(user_ids) > 25)
    stop("Number of user IDs must be less or equal to 25", call. = FALSE)

  # One friends.get call per user, all batched into a single execute() request;
  # results are collected into a VKScript object keyed by "user<id>".
  code <- "var all_friends = {}; var request;"
  for (idx in seq_along(user_ids)) {
    code <- paste0(code,
                   "request=API.friends.get({\"user_id\":", user_ids[idx],
                   ", \"v\":", v, "}).items; all_friends.user", user_ids[idx], "=request;")
  }
  code <- paste(code, "return all_friends;")

  response <- execute(code)
  if (!is.null(response))
    names(response) <- user_ids
  class(response) <- c(class(response), "vk.friends.ids")
  response
}


#' Returns a list of friends IDs for the specified users
#'
#' @param users_ids User IDs
#' @param v Version of API
#' @examples
#' \dontrun{
#' friends <- getFriendsFor(sample(x=seq(1:10000000), size=100, replace=FALSE))
#' users <- getUsersExecute(friends, fields = 'sex')
#' }
#' @export
getFriendsFor <- function(users_ids, v=getAPIVersion()) {
  users_friends <- list()
  from <- 1
  to <- 25
  repeat {
    # Out-of-range indices produce NAs here; getFriendsBy25() drops them
    # via na.omit(), so the final partial chunk is handled correctly.
    users_friends_25 <- getFriendsBy25(users_ids[from:to], v)
    users_friends <- append(users_friends, users_friends_25)
    if (to >= length(users_ids))
      break
    from <- to + 1
    to <- to + 25
  }
  class(users_friends) <- c(class(users_friends), "vk.friends.ids")
  users_friends
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/friends.R
#' Returns a list of the communities to which a user belongs
#'
#' @param user_id User ID
#' @param extended 1 - to return complete information about a user's communities; 0 - to return a list of community IDs without any additional fields (default)
#' @param filter Types of communities to return: admin, editor, moder, groups, publics, events
#' @param fields List of additional fields to be returned
#' @param offset Offset needed to return a specific subset of communities
#' @param count Number of communities to return (maximum value 1000)
#' @param v Version of API
#' @examples
#' \dontrun{
#' groups <- getGroups(me(), extended = 1, fields = 'city')
#' }
#' @export
getGroups <- function(user_id='', extended='', filter='', fields='', offset='', count='', v=getAPIVersion()) {
  # Single groups.get request; unwraps $response or delegates API errors.
  query <- queryBuilder('groups.get',
                        user_id = user_id,
                        extended = extended,
                        filter = filter,
                        fields = fields,
                        offset = offset,
                        count = count,
                        v = v)
  request_delay()
  response <- jsonlite::fromJSON(query)

  if (has_error(response))
    return(try_handle_error(response))

  response$response
}


#' Returns a list of community members
#'
#' @param group_id ID or screen name of the community
#' @param sort Sort order
#' @param offset Offset needed to return a specific subset of community members
#' @param count Number of community members to return (maximum value 1000)
#' @param fields List of additional fields to be returned
#' @param filter friends - only friends in this community will be returned; unsure - only those who pressed 'I may attend' will be returned (if it's an event)
#' @param v Version of API
#' @examples
#' \dontrun{
#' members <- getGroupsMembers(1, fields='sex,bdate,city')
#' }
#' @export
getGroupsMembers <- function(group_id='', sort='', offset='', count='', fields='', filter='', v=getAPIVersion()) {
  # Single groups.getMembers request (at most 1000 members per call).
  query <- queryBuilder('groups.getMembers',
                        group_id = group_id,
                        sort = sort,
                        offset = offset,
                        count = count,
                        fields = fields,
                        filter = filter,
                        v = v)
  request_delay()
  response <- jsonlite::fromJSON(query)

  if (has_error(response))
    return(try_handle_error(response))

  response$response
}


#' Returns a list of community members
#'
#' @param group_id ID or screen name of the community
#' @param sort Sort order. Available values: id_asc, id_desc, time_asc, time_desc. time_asc and time_desc are available only if the method is called by the group's moderator
#' @param offset Offset needed to return a specific subset of community members
#' @param count Number of community members to return (0 - get all community members)
#' @param fields List of additional fields to be returned
#' @param filter friends - only friends in this community will be returned; unsure - only those who pressed 'I may attend' will be returned (if it's an event)
#' @param flatten Automatically flatten nested data frames into a single non-nested data frame
#' @param progress_bar Display progress bar
#' @param v Version of API
#' @importFrom utils setTxtProgressBar txtProgressBar
#' @examples \dontrun{
#' members <- getGroupsMembersExecute(1, fields='sex,bdate,city', progress_bar = TRUE)
#' }
#' @export
getGroupsMembersExecute <- function(group_id='', sort='', offset=0, count=0, fields='', filter='', flatten=FALSE, progress_bar=FALSE, v=getAPIVersion()) {
  # Fetches up to 20000 members per execute() request by generating a VKScript
  # loop that issues groups.getMembers in 1000-member pages server-side.
  getGroupsMembers20 <- function(group_id='', sort='', offset=0, count='', fields='', filter='', v=getAPIVersion()) {
    if (count > 20000) count <- 20000
    if (count <= 1000) {
      # Small request: a single server-side call is enough.
      execute(paste0('return API.groups.getMembers({"group_id":"', group_id, '",
                     "sort":"', sort, '",
                     "offset":"', offset, '",
                     "count":"', count, '",
                     "fields":"', fields, '",
                     "filter":"', filter, '",
                     "v":"', v, '"}).items;'))
    } else {
      # First page, then a VKScript while-loop for the remaining pages;
      # the loop also shrinks the final page so exactly `count` items come back.
      code <- 'var groups_members = [];'
      code <- paste0(code, 'groups_members = groups_members + API.groups.getMembers({"group_id":"', group_id, '",
                     "sort":"', sort, '",
                     "offset":"', offset, '",
                     "count":"', 1000, '",
                     "fields":"', fields, '",
                     "filter":"', filter, '",
                     "v":"', v, '"}).items;')
      code <- paste0(code, 'var offset = 1000 + ', offset, ';
                     var count = 1000;
                     var max_offset = offset + ', count, ';
                     while (offset < max_offset && groups_members.length <= offset)
                     {
                        if (', count, ' - groups_members.length < 1000)
                        {
                          count = ', count, ' - groups_members.length;
                        };
                        groups_members = groups_members + API.groups.getMembers({"group_id":"', group_id, '",
                            "sort":"', sort, '",
                            "fields":"', fields, '",
                            "filter":"', filter, '",
                            "v":"', v, '",
                            "count":count,
                            "offset":offset}).items;
                        offset = offset + 1000;
                     };
                     return groups_members;')
      execute(code)
    }
  }

  # Probe request (count=1) to learn the community's total member count.
  code <- paste0('return API.groups.getMembers({"group_id":"', group_id, '",
                 "sort":"', sort, '",
                 "offset":"', offset, '",
                 "count":"', 1, '",
                 "fields":"', fields, '",
                 "filter":"', filter, '",
                 "v":"', v, '"});')
  response <- execute(code)
  members <- response$items

  # `members` is a plain id vector when no extra fields were requested and a
  # data frame otherwise; pick the matching length function. NOTE(review):
  # this relies on ifelse()'s scalar fast path returning a closure unchanged —
  # fragile but works on modern R.
  len <- ifelse(is.vector(members), length, nrow)

  # Number of members still to download (count == 0 means "all").
  max_count <- ifelse((response$count - offset) > count & count != 0, count, response$count - offset)

  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = max_count, style = 3)
    setTxtProgressBar(pb, len(members))
  }

  num_records <- ifelse(max_count - len(members) > 20000, 20000, max_count - len(members))

  while (len(members) < max_count) {
    # On server errors the per-request size is halved via <<- inside the
    # handlers and the iteration is retried with the smaller batch.
    tryCatch({members20 <- getGroupsMembers20(group_id = group_id,
                                              sort = sort,
                                              offset = offset + len(members),
                                              count = num_records,
                                              fields = fields,
                                              filter = filter,
                                              v = v)
              if (is.vector(members))
                members <- append(members, members20)
              else
                members <- jsonlite::rbind_pages(list(members, members20))
              num_records <- ifelse((max_count - len(members)) > num_records, num_records, max_count - len(members))},
             vk_error500 = function(e) {
               num_records <<- as.integer(num_records / 2)
               warning(simpleWarning(paste0('Parameter "count" was tuned: ', num_records, ' per request.')))
             },
             vk_error404 = function(e) {
               num_records <<- as.integer(num_records / 2)
               warning(simpleWarning(paste0('Parameter "count" was tuned: ', num_records, ' per request.')))
             })
    if (progress_bar)
      setTxtProgressBar(pb, len(members))
  }

  if (progress_bar)
    close(pb)

  if (isTRUE(flatten) & !is.vector(members))
    members <- jsonlite::flatten(members)

  members
}


#' Returns a list of the communities for the specified users
#'
#' @param users A list of users
#' @param extended 1 - to return complete information about a user's communities; 0 - to return a list of community IDs without any additional fields (default)
#' @param filter Types of communities to return: admin, editor, moder, groups, publics, events
#' @param fields List of additional fields to be returned
#' @param progress_bar Display progress bar
#' @param v Version of API
#' @importFrom utils setTxtProgressBar txtProgressBar
#' @examples \dontrun{
#' members <- getGroupsForUsers(c(me(), 123456), extended = 1, fields='city', progress_bar = TRUE)
#' }
#' @export
getGroupsForUsers <- function(users, extended='', filter='', fields='', progress_bar = FALSE, v = getAPIVersion()) {
  # Batches 25 groups.get calls per execute() request; results keyed "user<id>".
  get_groups_for_users <- function(users, extended='', filter='', fields='', offset='', v = getAPIVersion()) {
    num_requests <- ceiling(length(users) / 25)
    from <- 1
    to <- 25
    groups <- list()
    for (i in 1:num_requests) {
      code <- 'var groups_per_user = {}; var groups;'
      if (to > length(users)) to <- length(users)
      for (index in from:to) {
        # NOTE(review): `sep = ""` is redundant here — paste0() has no sep
        # argument, so it is silently pasted as an extra empty element.
        code <- paste0(code, 'groups = API.groups.get({
                       "user_id":"', users[index], '",
                       "extended":"', extended, '",
                       "filter":"', filter, '",
                       "fields":"', fields, '",
                       "offset":"', offset, '",
                       "count":"', 1000, '",
                       "v":"', v, '"}).items;
                       groups_per_user.user', users[index], "=groups;", sep = "")
      }
      code <- paste0(code, 'return groups_per_user;')
      groups <- append(groups, execute(code))
      from <- from + 25
      to <- to + 25
    }
    names(groups) <- users
    groups
  }

  if ("vk.users" %in% class(users))
    users <- users$id

  groups <- list()
  from <- 1
  max_count <- length(users)
  # 75 users per outer iteration = 3 execute() requests of 25 users each.
  to <- ifelse(max_count >= 75, 75, max_count)

  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = length(users), style = 3)
    setTxtProgressBar(pb, 0)
  }

  repeat {
    groups75 <- get_groups_for_users(users = users[from:to],
                                     extended = extended,
                                     filter = filter,
                                     fields = fields,
                                     v = v)
    groups <- append(groups, groups75)

    if (progress_bar)
      setTxtProgressBar(pb, length(groups))

    if (to >= max_count)
      break

    from <- to + 1
    to <- ifelse(to + 75 >= max_count, max_count, to + 75)
  }

  if (progress_bar)
    close(pb)

  class(groups) <- c(class(groups), "vk.groups_per_user")
  groups
}


#' Returns information about communities by their IDs
#'
#' @param group_ids IDs or screen names of communities
#' @param group_id ID or screen name of the community
#' @param fields Group fields to return
#' @param v Version of API
#' @export
getGroupsById <- function(group_ids='', group_id='', fields='', v=getAPIVersion()) {
  # POST is used because group_ids may be a long comma-separated list.
  query <- queryBuilder('groups.getById',
                        group_ids = group_ids,
                        group_id = group_id,
                        fields = fields,
                        v = v)
  request_delay()
  response <- jsonlite::fromJSON(httr::content(httr::POST(query), "text", encoding = "UTF-8"))

  if (has_error(response))
    return(try_handle_error(response))

  response$response
}


#' Returns a list of communities matching the search criteria
#'
#' @param q Search query string
#' @param type Community type. Possible values: group, page, event
#' @param country_id Country ID
#' @param city_id City ID. If this parameter is transmitted, country_id is ignored
#' @param future 1 — to return only upcoming events. Works with the type = event only
#' @param market 1 — to return communities with enabled market only
#' @param sort Sort order. Possible values:
#' \itemize{
#'   \item 0 — default sorting (similar the full version of the site);
#'   \item 1 — by growth speed;
#'   \item 2 — by the "day attendance/members number" ratio;
#'   \item 3 — by the "Likes number/members number" ratio;
#'   \item 4 — by the "comments number/members number" ratio;
#'   \item 5 — by the "boards entries number/members number" ratio.
#' }
#' @param offset Offset needed to return a specific subset of results
#' @param count Number of communities to return (default 20, maximum value 1000)
#' @param v Version of API
#' @export
groupsSearch <- function(q='', type='', country_id='', city_id='', future=0, market=0, sort=0, offset=0, count=20, v=getAPIVersion()) {
  query <- queryBuilder('groups.search',
                        q = q,
                        type = type,
                        country_id = country_id,
                        city_id = city_id,
                        future = future,
                        market = market,
                        sort = sort,
                        offset = offset,
                        count = count,
                        v = v)
  request_delay()
  response <- jsonlite::fromJSON(query)

  if (has_error(response))
    return(try_handle_error(response))

  response$response
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/groups.R
#' Converts the given igraph object to GEXF format and saves it at the given filepath location
#'
#' @param g Input igraph object to be converted to gexf format
#' @param filepath File location where the output gexf file should be saved
#' @author Gopalakrishna Palem, \email{Gopalakrishna.Palem@@Yahoo.com}
#' @export
saveAsGEXF <- function(g, filepath="converted_graph.gexf") {
  if (!requireNamespace("igraph", quietly = TRUE))
    stop("The package igraph was not installed")
  if (!requireNamespace("rgexf", quietly = TRUE))
    stop("The package rgexf was not installed")

  # gexf nodes require two column data frame (id, label);
  # if the vertices carry no label, fall back to the vertex ids themselves.
  if (is.null(igraph::V(g)$label))
    igraph::V(g)$label <- as.character(igraph::V(g))

  # Similarly, default every edge weight to 1 when none are set.
  if (is.null(igraph::E(g)$weight))
    igraph::E(g)$weight <- rep.int(1, igraph::ecount(g))

  nodes <- data.frame(cbind(igraph::V(g), igraph::V(g)$label))
  edges <- t(Vectorize(igraph::get.edge, vectorize.args = 'id')(g, seq_len(igraph::ecount(g))))

  # Combine all node attributes into a matrix; escape every "&" for XML
  # (gsub, not sub: a value may contain several ampersands).
  vAttrNames <- setdiff(igraph::list.vertex.attributes(g), "label")
  nodesAtt <- data.frame(sapply(vAttrNames, function(attr) gsub("&", "&#038;", igraph::get.vertex.attribute(g, attr))))

  # Combine all edge attributes into a matrix (same XML escaping).
  eAttrNames <- setdiff(igraph::list.edge.attributes(g), "weight")
  edgesAtt <- data.frame(sapply(eAttrNames, function(attr) gsub("&", "&#038;", igraph::get.edge.attribute(g, attr))))

  # Combine all graph attributes into the meta-data block.
  graphAtt <- sapply(igraph::list.graph.attributes(g), function(attr) gsub("&", "&#038;", igraph::get.graph.attribute(g, attr)))

  # Generate the gexf object and write it to disk.
  output <- rgexf::write.gexf(nodes, edges,
                              edgesWeight = igraph::E(g)$weight,
                              edgesAtt = edgesAtt,
                              nodesAtt = nodesAtt,
                              meta = c(list(creator = "Gopalakrishna Palem",
                                            description = "igraph -> gexf converted file",
                                            keywords = "igraph, gexf, R, rgexf"),
                                       graphAtt))
  print(output, filepath, replace = TRUE)
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/igraph_gefx_exporter.R
#' Returns a list of IDs of users who added the specified object to their Likes list
#'
#' @param type Object type
#' @param owner_id ID of the user, community, or application that owns the object
#' @param item_id Object ID
#' @param page_url URL of the page where the Like widget is installed. Used instead of the item_id parameter
#' @param filter Filters to apply: likes - returns information about all users who liked the object (default); copies - returns information only about users who told their friends about the object
#' @param friends_only Specifies which users are returned: 1 - to return only the current user's friends; 0 - to return all users (default)
#' @param skip_own Flag, either 1 or 0
#' @param extended Specifies whether extended information will be returned. 1 - to return extended information about users and communities from the Likes list; 0 - to return no additional information (default)
#' @param offset Offset needed to select a specific subset of users
#' @param count Number of user IDs to return (maximum 1000)
#' @param v Version of API
#' @export
likesGetList <- function(type='', owner_id='', item_id='', page_url='', filter='', friends_only='0', extended='', offset='', count='100', skip_own=0, v=getAPIVersion()) {
  # Single likes.getList request; unwraps $response or delegates API errors.
  query <- queryBuilder('likes.getList',
                        type = type,
                        owner_id = owner_id,
                        item_id = item_id,
                        page_url = page_url,
                        filter = filter,
                        friends_only = friends_only,
                        extended = extended,
                        offset = offset,
                        count = count,
                        skip_own = skip_own,
                        v = v)
  request_delay()
  response <- jsonlite::fromJSON(query)

  if (has_error(response))
    return(try_handle_error(response))

  response$response
}


#' Returns a list of IDs of users who added the specified objects to their Likes list
#'
#' @param objects List of objects (objects must contain fields owner_id and id)
#' @param type Object type (post or comment)
#' @param filter Filters to apply: likes - returns information about all users who liked the object (default); copies - returns information only about users who told their friends about the object
#' @param friends_only Specifies which users are returned: 1 - to return only the current user's friends; 0 - to return all users (default)
#' @param extended Specifies whether extended information will be returned. 1 - to return extended information about users and communities from the Likes list; 0 - to return no additional information (default)
#' @param skip_own flag, either 1 or 0
#' @param progress_bar Display progress bar
#' @param v Version of API
#' @examples
#' \dontrun{
#' wall <- getWallExecute(domain = 'privivkanet', count = 10, progress_bar = TRUE)
#' post_likers <- likesGetListForObjects(wall, type = 'post', progress_bar = TRUE)
#' post_likers_extended <- likesGetListForObjects(wall, type = 'post',
#'   extended = 1, progress_bar = TRUE)
#' }
#' @importFrom utils setTxtProgressBar txtProgressBar
#' @export
likesGetListForObjects <- function(objects, type = 'post', filter = 'likes', friends_only = 0, extended = 0, skip_own = 0, progress_bar = FALSE, v = getAPIVersion()) {
  # Batches 25 likes.getList calls per execute() request; VKScript results are
  # keyed "obj<id>" and the numeric ids are recovered afterwards.
  get_likes <- function(objects, type = 'post', filter = 'likes', friends_only = 0, extended = 0, skip_own = 0, v = getAPIVersion()) {
    num_requests <- ceiling(nrow(objects) / 25)
    from <- 1
    to <- 25
    likes <- list()
    for (i in 1:num_requests) {
      code <- 'var likes_per_object = {}; var likes;'
      if (to > nrow(objects)) to <- nrow(objects)
      for (index in from:to) {
        owner_id <- objects[index, ]$owner_id
        obj_id <- objects[index, ]$id
        # NOTE(review): `sep = ""` is redundant — paste0() has no sep argument,
        # so it is silently appended as an extra empty element.
        code <- paste0(code, 'likes = API.likes.getList({
                       "type":"', type, '",
                       "owner_id":"', owner_id, '",
                       "item_id":"', obj_id, '",
                       "page_url":"', '', '",
                       "filter":"', filter, '",
                       "friends_only":"', friends_only, '",
                       "extended":"', extended, '",
                       "skip_own":"', skip_own, '",
                       "count":"', 1000, '",
                       "v":"', v, '"}).items;
                       likes_per_object.obj', obj_id, "=likes;", sep = "")
      }
      code <- paste0(code, 'return likes_per_object;')
      likes <- append(likes, execute(code))
      from <- from + 25
      to <- to + 25
    }
    # Strip the "obj" prefix added above to restore the numeric object ids.
    obj_ids <- unlist(strsplit(names(likes), "obj", fixed = T))
    obj_ids <- as.integer(obj_ids[obj_ids != ""])
    names(likes) <- obj_ids #objects$id
    likes
  }

  if ("posts.list" %in% class(objects))
    objects <- objects$posts

  likes <- list()
  from <- 1
  max_count <- nrow(objects)
  # 75 objects per outer iteration = 3 execute() requests of 25 objects each.
  to <- ifelse(max_count >= 75, 75, max_count)

  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = nrow(objects), style = 3)
    setTxtProgressBar(pb, 0)
  }

  repeat {
    likes75 <- get_likes(objects[from:to, ],
                         type = type,
                         filter = filter,
                         friends_only = friends_only,
                         extended = extended,
                         skip_own = skip_own,
                         v = v)
    likes <- append(likes, likes75)

    if (progress_bar)
      setTxtProgressBar(pb, length(likes))

    if (to >= max_count)
      break

    from <- to + 1
    to <- ifelse(to + 75 >= max_count, max_count, to + 75)
  }

  if (progress_bar)
    close(pb)

  class(likes) <- c(class(likes), paste0("vk.likes_per_", type))
  likes
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/likes.R
#' Returns message history for the specified user or group chat
#'
#' @param offset Offset needed to return a specific subset of messages
#' @param count Number of messages to return (maximum value 200)
#' @param user_id ID of the user whose message history you want to return
#' @param peer_id Destination ID (user ID, group ID or chat ID)
#' @param start_message_id Starting message ID from which to return history
#' @param rev Sort order: 1 - return messages in chronological order; 0 - return messages in reverse chronological order
#' @param v Version of API
#' @export
messagesGetHistory <- function(offset='', count='', user_id='', peer_id='', start_message_id='', rev='', v=getAPIVersion()) {
  # Single messages.getHistory request; unwraps $response or delegates errors.
  query <- queryBuilder('messages.getHistory',
                        offset = offset,
                        count = count,
                        user_id = user_id,
                        peer_id = peer_id,
                        start_message_id = start_message_id,
                        rev = rev,
                        v = v)
  request_delay()
  response <- jsonlite::fromJSON(query)

  if (has_error(response))
    return(try_handle_error(response))

  response$response
}


#' Returns message history for the specified user or group chat
#'
#' @param offset Offset needed to return a specific subset of messages
#' @param count Number of messages to return (0 for all history)
#' @param user_id ID of the user whose message history you want to return
#' @param peer_id Destination ID (user ID, group ID or chat ID)
#' @param start_message_id Starting message ID from which to return history
#' @param rev Sort order: 1 - return messages in chronological order; 0 - return messages in reverse chronological order
#' @param progress_bar Display progress bar
#' @param v Version of API
#' @importFrom utils setTxtProgressBar txtProgressBar
#' @export
messagesGetHistoryExecute <- function(offset=0, count=0, user_id='', peer_id='', start_message_id='', rev=0, progress_bar=FALSE, v=getAPIVersion()) {
  # Batches up to 3 messages.getHistory calls (200 messages each, i.e. up to
  # 600 messages) into a single VKScript execute() request.
  get_messages <- function(offset='', count='', user_id='', peer_id='', start_message_id='', rev='', v=getAPIVersion()) {
    code <- 'var messages = [];'
    num_requests <- 0
    while (num_requests != 3 && count != 0) {
      current_count <- ifelse((count - 200) >= 0, 200, count)
      # start_message_id is only injected into the VKScript when non-empty.
      code <- paste0(code, 'messages = messages + API.messages.getHistory({"user_id":"', user_id, '",
                     "offset":"', offset, '",
                     "count":"', current_count, '",
                     "peer_id":"', peer_id,
                     ifelse(start_message_id == '', '', paste0('", "start_message_id":"', start_message_id)), '",
                     "rev":"', rev, '",
                     "v":"', v, '"}).items;')
      offset <- offset + 200
      num_requests <- num_requests + 1
      count <- count - current_count
    }
    code <- paste0(code, 'return messages;')
    execute(code)
  }

  # Probe request (count=1) to learn the total number of messages.
  code <- paste0('return API.messages.getHistory({"user_id":"', user_id, '",
                 "offset":"', offset, '",
                 "rev":"', rev, '",
                 "count":"', 1, '",
                 "peer_id":"', peer_id,
                 ifelse(start_message_id == '', '', paste0('", "start_message_id":"', start_message_id)), '",
                 "v":"', v, '"});')
  response <- execute(code)
  messages <- response$items

  # Number of messages still to download (count == 0 means "all").
  max_count <- ifelse((response$count - offset) > count & count != 0, count, response$count - offset)
  if (max_count == 0)
    return(list(messages = response$items,
                count = response$count,
                in_read = response$in_read,
                out_read = response$out_read,
                unread = NULL))

  offset_counter <- 0

  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = max_count, style = 3)
    setTxtProgressBar(pb, nrow(messages))
  }

  while (nrow(messages) < max_count) {
    # Each iteration fetches up to 600 messages; +1 skips the probe message.
    messages600 <- get_messages(user_id = user_id,
                                peer_id = peer_id,
                                rev = rev,
                                start_message_id = start_message_id,
                                count = (max_count - nrow(messages)),
                                offset = (1 + offset + offset_counter * 600),
                                v = v)
    messages <- jsonlite::rbind_pages(list(messages, messages600))

    if (progress_bar)
      setTxtProgressBar(pb, nrow(messages))

    offset_counter <- offset_counter + 1
  }

  if (progress_bar)
    close(pb)

  list(messages = messages,
       count = response$count,
       in_read = response$in_read,
       out_read = response$out_read,
       unread = response$unread)
}


#' Returns all message history for the specified user or group chat
#'
#' @param user_id ID of the user whose message history you want to return
#' @param peer_id Destination ID (user ID, group ID or chat ID)
#' @param rev Sort order: 1 - return messages in chronological order; 0 - return messages in reverse chronological order
#' @param v Version of API
#' @export
messagesGetHistoryAll <- function(user_id='', peer_id='', rev=0, v=getAPIVersion()) {
  # Thin wrapper: count = 0 means "download the whole history".
  messagesGetHistoryExecute(user_id = user_id, peer_id = peer_id, rev = rev, count = 0, v = v)
}


#' Returns a list of the current user's incoming or outgoing private messages
#'
#' @param out 1 - to return outgoing messages; 0 - to return incoming messages (default)
#' @param offset Offset needed to return a specific subset of messages
#' @param count Number of messages to return
#' @param time_offset Maximum time since a message was sent, in seconds. To return messages without a time limitation, set as 0
#' @param filters Filter to apply: 1 - unread only; 2 - not from the chat; 4 - messages from friends
#' @param preview_length Number of characters after which to truncate a previewed message. To preview the full message, specify 0
#' @param last_message_id ID of the message received before the message that will be returned last
#' @param v Version of API
#' @export
messagesGet <- function(out='', offset='', count='', time_offset='', filters='', preview_length='', last_message_id='', v=getAPIVersion()) {
  query <- queryBuilder('messages.get',
                        out = out,
                        offset = offset,
                        count = count,
                        time_offset = time_offset,
                        filters = filters,
                        preview_length = preview_length,
                        last_message_id = last_message_id,
                        v = v)
  request_delay()
  response <- jsonlite::fromJSON(query)

  if (has_error(response))
    return(try_handle_error(response))

  response$response
}


#' Sends a message.
#' @param user_id User ID (by default — current user).
#' @param random_id Unique identifier to avoid resending the message.
#' @param peer_id Destination ID.
#' @param domain User's short address (for example, illarionov).
#' @param chat_id ID of conversation the message will relate to.
#' @param user_ids IDs of message recipients (if new conversation shall be started).
#' @param message Text of the message (required if attachments is not set).
#' @param lat Geographical latitude of a check-in, in degrees (from -90 to 90).
#' @param long Geographical longitude of a check-in, in degrees (from -180 to 180).
#' @param attachment List of objects attached to the message, separated by commas, in the following format: <type><owner_id>_<media_id>
#'
#' <type> — Type of media attachment:
#' \itemize{
#'   \item photo - photo
#'   \item video - video
#'   \item audio - audio
#'   \item doc - document
#'   \item wall - wall post
#' }
#'
#' <owner_id> — ID of the media attachment owner.
#'
#' <media_id> — media attachment ID.
#' @param forward_messages ID of forwarded messages, separated with a comma. Listed messages of the sender will be shown in the message body at the recipient's.
#' @param sticker_id Sticker id.
#' @param v Version of API.
#' @export
messagesSend <- function(user_id, random_id='', peer_id='', domain='', chat_id='', user_ids='', message='', lat='', long='', attachment='', forward_messages='', sticker_id='', v=getAPIVersion()) {
  query <- queryBuilder('messages.send',
                        user_id = user_id,
                        random_id = random_id,
                        peer_id = peer_id,
                        domain = domain,
                        chat_id = chat_id,
                        user_ids = user_ids,
                        message = message,
                        lat = lat,
                        long = long,
                        attachment = attachment,
                        forward_messages = forward_messages,
                        sticker_id = sticker_id,
                        v = v)
  request_delay()
  # The message text may contain characters that must be percent-encoded.
  response <- jsonlite::fromJSON(URLencode(query))

  if (has_error(response))
    return(try_handle_error(response))

  response$response
}


#' Split messages by days, weeks, months
#'
#' @param messages List of messages from messagesGet()
#' @param format Character string giving a date-time format as used by strptime
#' @export
messagesSplitByDate <- function(messages, format = "%y-%m-%d") {
  # `messages$date` is a Unix timestamp. The `format` parameter shadows
  # base::format only as a value binding — in call position R skips
  # non-function bindings, so base::format is still found here.
  days_list <- format(as.POSIXct(messages$date, origin = "1970-01-01"), format = format)
  messages_by_days <- split(messages, as.factor(days_list))
  messages_by_days
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/messages.R
#' Initialize database
#' @param db_name Database name ('temp' by default)
#' @param verbose Emit some more output
#' @export
db_init <- function(db_name = "temp", verbose = FALSE) {
  if (!requireNamespace("mongolite", quietly = TRUE))
    stop("The package mongolite was not installed")
  # The metadata collection always lives in the package-level database
  # (.vkr$db_name); db_name only selects which logical database is "active".
  conn <- mongolite::mongo(db = .vkr$db_name, collection = .vkr$db_meta_name, verbose = verbose)
  .vkr$db_metadata <- conn
  .vkr$db_active <- db_name
}


#' Get a mongo connection object
#' @param collection_name Collection name
#' @param collection_suffix Collection suffix
#' @param db_name Database name
#' @export
db_get_connection <- function(collection_name, collection_suffix = '', db_name = db_getActive()) {
  if (missing(collection_name))
    stop('argument "collection_name" is missing, with no default')
  meta_conn <- db_metaConnection()
  dbs <- meta_conn$find()
  # Look the collection up in the metadata registry; it must match exactly once.
  s <- dbs$db == db_name & dbs$collection == collection_name & dbs$suffix == collection_suffix
  dbs <- subset(dbs, s)
  if (nrow(dbs) == 0)
    stop("Collection doesn't exist", call. = FALSE)
  if (nrow(dbs) > 1)
    stop("Oops, something has gone wrong. See show_collections()", call. = FALSE)
  # Physical collection name is "<db>.<collection>.<suffix>" inside the package db.
  conn <- mongolite::mongo(db = db_getName(),
                           collection = paste(dbs$db, dbs$collection, dbs$suffix, sep = '.'))
  return(conn)
}


#' Create empty collection
#' @param collection Collection name
#' @param suffix Collection suffix
#' @param db_name Database name
#' @export
create_empty_collection <- function(collection, suffix, db_name = db_getActive()) {
  # Only registers the collection in the metadata; no data is written.
  meta_conn <- db_metaConnection()
  meta_conn$insert(data.frame(db = db_name, collection = collection, suffix = suffix))
}


#' Get collection
#' @param collection_name Collection name
#' @param collection_suffix Collection suffix
#' @param db_name Database name
#' @export
db_get_collection <- function(collection_name, collection_suffix = '', db_name = db_getActive()) {
  # Returns the collection contents, or NULL if the collection is not registered.
  tryCatch({
    conn <- db_get_connection(collection_name, collection_suffix, db_name)
    collection_data <- conn$find()
    rm(conn)
    return(collection_data)
  }, error = function(e) {
    return(NULL)
  })
}


#' Check if collection exists
#' @param collection_name Collection name
#' @param collection_suffix Collection suffix
#' @param db_name Database name
#' @export
collection_exists <- function(collection_name, collection_suffix = '', db_name = db_getActive()) {
  tryCatch({
    db_get_connection(collection_name, collection_suffix, db_name)
    return(TRUE)
  }, error = function(e) {
    return(FALSE)
  })
}


#' Load collection from db
#' @param collection_name Collection name
#' @param collection_suffix Collection suffix
#' @param db_name Database name
#' @export
db_load_collection <- function(collection_name, collection_suffix = '', db_name = db_getActive()) {
  collection <- db_get_collection(collection_name, collection_suffix, db_name)
  if (is.null(collection))
    stop("Collection doesn't exist", call. = FALSE)
  # assign(paste(db_name, collection_name, collection_suffix, sep = '.'),
  #        collection, envir = .GlobalEnv)
  return(collection)
}


#' Save object to db
#' @param object Object to save
#' @param collection Collection name
#' @param suffix Collection suffix
#' @param db_name Database name
#' @export
db_save <- function(object, collection, suffix = '', db_name = db_getActive()) {
  if (missing(object))
    stop('argument "object" is missing, with no default')
  if (missing(collection))
    stop('argument "collection" is missing, with no default')

  # If collection already exists: return the existing connection untouched
  # (the object is NOT inserted in that case).
  conn <- mongolite::mongo(db = db_getName(),
                           collection = paste(db_name, collection, suffix, sep = '.'))
  if (conn$count() > 0) {
    return(conn)
  }
  rm(conn)

  # Add record about new collection to meta
  meta_conn <- db_metaConnection()
  meta_conn$insert(data.frame(db = db_name, collection = collection, suffix = suffix))

  # Save new collection
  conn <- mongolite::mongo(db = db_getName(),
                           collection = paste(db_name, collection, suffix, sep = '.'))
  conn$insert(object)
  return(conn)
}


#' Insert object into existing collection
#' @param object Object to insert
#' @param db_name Database name
#' @param collection Collection name
#' @param suffix Collection suffix
#' @export
db_insert <- function(object, collection, suffix, db_name = db_getActive()) {
  if (missing(object))
    stop('argument "object" is missing, with no default')
  if (missing(collection))
    stop('argument "collection" is missing, with no default')
  conn <- mongolite::mongo(db = db_getName(),
                           collection = paste(db_name, collection, suffix, sep = '.'))
  conn$insert(object)
  rm(conn)
}


#' Update existing records
#' @param object Object to insert
#' @param key Key
#' @param db_name Database name
#' @param collection Collection name
#' @param suffix Collection suffix
#' @param upsert Insert a new document if no matching document exists
#' @export
db_update <- function(object, key, collection, suffix = '', db_name = db_getActive(), upsert = FALSE) {
  if (missing(object))
    stop('argument "object" is missing, with no default')
  if (missing(key))
    stop('argument "key" is missing, with no default')
  if (missing(collection))
    stop('argument "collection" is missing, with no default')

  conn <- mongolite::mongo(db = db_getName(),
                           collection = paste(db_name, collection, suffix, sep = '.'))

  # TODO: Object must be a data frame, key must be an integer value
  # FIX: seq_len() instead of 1:nrow() so a zero-row object is a no-op
  # rather than iterating over c(1, 0).
  for (i in seq_len(nrow(object))) {
    query <- sprintf('{"%s":%s}', key, object[i, key])
    json <- jsonlite::toJSON(object[i,])
    update <- sprintf('{"$set":%s}', substr(json, 2, nchar(json)-1)) # drop brackets
    conn$update(query = query, update = update, upsert = upsert)
  }
  return(conn)
}


#' Get meta connection
db_metaConnection <- function() {
  return(.vkr$db_metadata)
}


#' The current database name
#' @export
db_getActive <- function() {
  return(.vkr$db_active)
}


#' The current database name
#' @export
db_getName <- function() {
  return(.vkr$db_name)
}


#' Switch database
#' @param db_name Database name
#' @export
use_db <- function(db_name) {
  .vkr$db_active <- db_name
}


#' Show databases
#' @export
show_dbs <- function() {
  meta_conn <- db_metaConnection()
  print(unique(meta_conn$find()$db))
}


#' Show collections
#' @export
show_collections <- function() {
  meta_conn <- db_metaConnection()
  dbs_list <- meta_conn$find()
  if (nrow(dbs_list) != 0) {
    active <- dbs_list$db == db_getActive()
    dbs_list <- subset(dbs_list, active)
    dbs_list$count <- rep(0, nrow(dbs_list))
  }
  # FIX: seq_len() instead of 1:nrow() — with an empty metadata registry the
  # old loop iterated over c(1, 0) and failed with a subscript error.
  for (i in seq_len(nrow(dbs_list))) {
    conn <- db_get_connection(collection_name = dbs_list[i,]$collection,
                              collection_suffix = dbs_list[i,]$suffix,
                              db_name = dbs_list[i,]$db)
    dbs_list[i,]$count <- conn$count()
    rm(conn)
  }
  print(dbs_list)
}


#' Load all collections from db for specified data base
#' @param db_name Database name
#' @export
db_load <- function(db_name = db_getActive()) {
  meta_conn <- db_metaConnection()
  dbs <- meta_conn$find()
  s <- dbs$db == db_name
  dbs <- subset(dbs, s)
  collections <- paste(dbs$db, dbs$collection, dbs$suffix, sep = '.')
  collections_list <- list()
  for (collection_name in collections) {
    conn <- mongolite::mongo(db = db_getName(), collection = collection_name)
    # assign(collection_name, conn$find(), envir = .GlobalEnv)
    collections_list[[collection_name]] <- conn$find()
    rm(conn)
  }
  gc(verbose = FALSE)
  return(collections_list)
}


#' Drop collection
#' @param collection_name Collection name
#' @param collection_suffix Collection suffix
#' @param db_name Database name
#' @export
db_drop_collection <- function(collection_name, collection_suffix = '', db_name = db_getActive()) {
  if (missing(collection_name))
    stop('argument "collection_name" is missing, with no default')
  conn <- db_get_connection(collection_name, collection_suffix, db_name)
  conn$drop()
  rm(conn)
  # Also remove the registry record so the collection no longer resolves.
  meta_conn <- db_metaConnection()
  meta_conn$remove(sprintf('{"db": "%s", "collection": "%s", "suffix": "%s"}',
                           db_name, collection_name, collection_suffix))
  gc(verbose = FALSE)
}


#' Drop database
#' @param db_name Database name
#' @export
db_drop <- function(db_name) {
  if (missing(db_name))
    stop('argument "db_name" is missing, with no default')
  meta_conn <- db_metaConnection()
  dbs <- meta_conn$find()
  s <- dbs$db == db_name
  dbs <- subset(dbs, s)
  if (nrow(dbs) == 0)
    stop("Database doesn't exist", call. = FALSE)
  collections <- unique(paste(dbs$db, dbs$collection, dbs$suffix, sep = '.'))
  for (collection_name in collections) {
    conn <- mongolite::mongo(db = db_getName(), collection = collection_name)
    conn$drop()
    rm(conn)
  }
  meta_conn$remove(sprintf('{"db": "%s"}', db_name), multiple = TRUE)
  gc(verbose = FALSE)
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/mongo_connection.R
#' Building a friend graph
#'
#' @param users_ids User IDs
#' @export
getEgoNetwork <- function(users_ids='') {
  n <- length(users_ids)
  # n x n adjacency matrix, rows/columns labelled by the user IDs.
  adjacency_matrix <- data.frame(matrix(data = rep(0, n*n), nrow = n, ncol = n),
                                 row.names = users_ids)
  colnames(adjacency_matrix) <- users_ids

  mutual_friends <- getMutualExecute(target_uids = users_ids)
  # FIX: seq_along() instead of 1:length() so empty input is a clean no-op.
  for (friend_id in seq_along(users_ids)) {
    friends <- mutual_friends$common_friends[[friend_id]]
    if (length(friends) > 0) {
      # Only mark edges towards users that are themselves in the requested set.
      share_friends <- intersect(users_ids, friends)
      if (length(share_friends) > 0) {
        for (shared_user_id in seq_along(share_friends)) {
          adjacency_matrix[as.character(share_friends[shared_user_id]),
                           as.character(users_ids[friend_id])] <- 1
        }
      }
    }
  }
  adjacency_matrix
}


#' Building a friend graph for an arbitrary list of users
#'
#' @param users_ids User IDs
#' @param format Either "edgelist" for a list of edges or "adjmatrix" for an adjacency matrix
#' @export
getArbitraryNetwork <- function(users_ids, format='edgelist') {
  if (!requireNamespace("reshape2", quietly = TRUE))
    stop("The package reshape2 was not installed")
  if (!requireNamespace("dplyr", quietly = TRUE))
    stop("The package dplyr was not installed")

  users_lists <- getFriendsFor(users_ids)
  # Drop users whose friend list is NULL or empty before reshaping.
  users_lists <- users_lists[!sapply(users_lists, is.null)]
  users_lists <- users_lists[!!sapply(users_lists, length)]

  edge_list <- reshape2::melt(users_lists)
  colnames(edge_list) <- c("from", "to")
  # Keep only edges where both endpoints belong to the requested user set.
  edge_list <- dplyr::filter(edge_list,
                             edge_list$from %in% users_ids & edge_list$to %in% users_ids)
  edge_list$from <- as.character(edge_list$from)
  edge_list$to <- as.character(edge_list$to)

  if (format == 'edgelist')
    return(edge_list)

  # Otherwise build an adjacency matrix from the filtered edge list.
  n <- length(users_ids)
  adjacency_matrix <- matrix(0, nrow = n, ncol = n)
  rownames(adjacency_matrix) <- colnames(adjacency_matrix) <- users_ids
  adjacency_matrix[as.matrix(edge_list)[,1:2]] <- 1
  adjacency_matrix
}


#' Returns a list of paths between two users
#'
#' @param source_id Source ID
#' @param target_id Target ID
#' @param are_friends By default is FALSE
#' @param max_depth Maximum depth
#' @importFrom utils head
#' @export
getPaths <- function(source_id, target_id, are_friends = FALSE, max_depth = 5) {
  # List of visited users
  visited <- c()

  # Guess a user's city as the most common city among their friends
  # (used when the user hides their own city).
  predictCity <- function(user_id) {
    names(sort(table(getFriends(user_id, fields = "city")$items$city$id), decreasing = T)[1])
  }

  # Bidirectional BFS step: expands one layer of friends from both the source
  # and the target side, records the layers in friends_lists_sid/tid (via <<-),
  # and recurses until the frontiers intersect or max_depth is exceeded.
  findIntersection <- function(friends_sid, friends_tid, depth) {
    if (nrow(friends_sid) == 0)
      stop(paste0('Number of friends for ', source_id,' is 0'))
    if (nrow(friends_tid) == 0)
      stop(paste0('Number of friends for ', target_id,' is 0'))
    if (depth > max_depth)
      stop("Sorry, the path is too long")
    print(paste0("Current depth is ", depth))

    # for R CMD check to pass
    city.id <- id <- deactivated <- NULL

    friends_sid$lists <- NULL
    friends_tid$lists <- NULL
    friends_sid <- as.data.frame.list(friends_sid)
    friends_tid <- as.data.frame.list(friends_tid)

    # Heuristic pruning: prefer friends who live in the other endpoint's city;
    # fall back to the full frontier if the filter would empty it.
    friends_sid_filtered <- subset(friends_sid, city.id == city_tid)
    friends_tid_filtered <- subset(friends_tid, city.id == city_sid)
    if (nrow(friends_sid_filtered) > 0) friends_sid <- friends_sid_filtered
    if (nrow(friends_tid_filtered) > 0) friends_tid <- friends_tid_filtered

    # Never revisit users already seen on either side.
    friends_sid <- subset(friends_sid, !(id %in% visited))
    friends_tid <- subset(friends_tid, !(id %in% visited))

    friends_sid <- getFriendsFor(unique(friends_sid$id))
    friends_tid <- getFriendsFor(unique(friends_tid$id))

    # Record this layer so extractPaths() can walk the layers back later.
    friends_lists_sid[[depth]] <<- friends_sid
    friends_lists_tid[[depth]] <<- friends_tid

    shared_friends <- unique(intersect(unlist(friends_sid), unlist(friends_tid)))
    if (length(shared_friends) > 0) {
      print(paste0("Number of possible paths is ", length(shared_friends)))
      return(list(sid = friends_lists_sid, tid = friends_lists_tid, shared_friends = shared_friends))
    }

    visited <<- unique(c(visited, unlist(names(friends_sid)), unlist(names(friends_tid))))

    # Re-fetch profiles to obtain city info for the next pruning round,
    # dropping deleted accounts.
    friends_sid <- getUsersExecute(unlist(friends_sid), fields = "city")
    if ("deactivated" %in% names(friends_sid))
      friends_sid <- subset(friends_sid, deactivated != "deleted" | is.na(deactivated))
    friends_tid <- getUsersExecute(unlist(friends_tid), fields = "city")
    if ("deactivated" %in% names(friends_tid))
      friends_tid <- subset(friends_tid, deactivated != "deleted" | is.na(deactivated))

    findIntersection(friends_sid, friends_tid, depth + 1)
  }

  # Reconstructs concrete ID chains from the recorded BFS layers:
  # for each shared friend, walk backwards through the layers collecting
  # every user who links to it.
  extractPaths <- function(users_lists, shared_friends) {
    # Recursive back-walk: who at depth-1 has user_id in their friend list?
    extractPathsFor <- function(user_id, paths, depth) {
      path <- c(user_id)
      if (depth == 1)
        return(path)
      for (name in names(paths[[depth]]))
        if (user_id %in% paths[[depth]][[name]])
          path <- c(path, extractPathsFor(name, paths, depth - 1))
      path
    }
    # A back-walk can branch; split a flat over-long result into one chain
    # per alternative tail, sharing the common prefix.
    splitPaths <- function(ids, depth) {
      corePath <- head(ids, depth - 1)
      splittedPaths <- list()
      for (i in 1:(length(ids) - depth + 1)) {
        splittedPaths[[i]] <- c(corePath, ids[i + length(corePath)])
      }
      splittedPaths
    }
    extracted_paths <- list()
    depth <- length(users_lists)
    for (shared_friend in shared_friends) {
      path <- extractPathsFor(shared_friend, users_lists, depth)
      if (length(path) > depth) {
        path <- splitPaths(path, depth)
        extracted_paths <- append(extracted_paths, path)
      } else {
        extracted_paths <- append(extracted_paths, list(path))
      }
    }
    extracted_paths
  }

  # Trivial case: direct friendship is itself the (only) path.
  if ((are_friends == FALSE) & areFriends(target_id, source_id))
    return(c(source_id, target_id))

  source_id_info <- getUsers(user_ids = c(source_id), fields = 'city')
  target_id_info <- getUsers(user_ids = c(target_id), fields = 'city')
  if ("deactivated" %in% names(source_id_info))
    if (source_id_info$deactivated == "banned" | source_id_info$deactivated == "deleted")
      stop(paste0('User ID ', source_id,' is banned or deleted'))
  if ("deactivated" %in% names(target_id_info))
    if (target_id_info$deactivated == "banned" | target_id_info$deactivated == "deleted")
      stop(paste0('User ID ', target_id,' is banned or deleted'))

  # Cities drive the pruning heuristic in findIntersection(); predict them
  # from friends when hidden.
  city_sid <- source_id_info$city$id
  city_tid <- target_id_info$city$id
  if (is.null(city_sid)) city_sid <- predictCity(source_id)
  if (is.null(city_tid)) city_tid <- predictCity(target_id)

  friends_sid <- getFriends(source_id, fields = "city")$items
  friends_tid <- getFriends(target_id, fields = "city")$items
  if (length(friends_sid) == 0) stop(paste0('Number of friends for ', source_id,' is 0'))
  if (length(friends_tid) == 0) stop(paste0('Number of friends for ', target_id,' is 0'))

  # Layer 1 of each BFS frontier: the direct friend lists.
  friends_lists_sid <- list()
  friends_lists_sid[[1]] <- friends_sid$id
  friends_lists_tid <- list()
  friends_lists_tid[[1]] <- friends_tid$id

  shared_friends <- unique(intersect(friends_sid$id, friends_tid$id))
  if (are_friends == TRUE)
    # Caller asserts direct friendship: treat mutual friends as already
    # visited so longer alternative paths are searched instead.
    visited <- c(shared_friends)
  else if (length(shared_friends) > 0) {
    print(paste0("Number of possible paths is ", length(shared_friends)))
    return(list(sid = friends_lists_sid, tid = friends_lists_tid, shared_friends = shared_friends))
  }

  # for R CMD check to pass
  deactivated <- NULL
  if ("deactivated" %in% names(friends_sid))
    friends_sid <- subset(friends_sid, deactivated != "deleted" | is.na(deactivated))
  if ("deactivated" %in% names(friends_tid))
    friends_tid <- subset(friends_tid, deactivated != "deleted" | is.na(deactivated))

  visited <- c(visited, source_id, target_id)
  paths <- findIntersection(friends_sid, friends_tid, depth = 2)
  shared_friends <- paths$shared_friends[paths$shared_friends != 0]

  source_paths <- extractPaths(paths$sid, shared_friends)
  target_paths <- extractPaths(paths$tid, shared_friends)
  source_paths <- data.frame(do.call(rbind, source_paths))
  target_paths <- data.frame(do.call(rbind, target_paths))
  colnames(source_paths) <- c('mutual_friend', length(paths$sid):2)
  colnames(target_paths) <- c('mutual_friend', length(paths$tid):2)
  source_paths$mutual_friend <- as.character(source_paths$mutual_friend)
  target_paths$mutual_friend <- as.character(target_paths$mutual_friend)

  # Join the two half-paths on their shared middle user, then reorder the
  # columns so each row reads source -> ... -> mutual_friend -> ... -> target.
  all_paths <- merge(source_paths, target_paths, by = 'mutual_friend')
  ncols <- ncol(all_paths)
  ordered_paths <- all_paths[, c(ceiling(ncols/2):2,1,ncols:(ncols/2 + 1))]
  ordered_paths$source <- rep(source_id, nrow(all_paths))
  ordered_paths$target <- rep(target_id, nrow(all_paths))
  ordered_paths <- ordered_paths[, c(ncols + 1, 1:ncols, ncols + 2)]
  ordered_paths
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/network.R
#' Returns search results by statuses
#'
#' @param q Search query string (e.g., New Year).
#' @param extended 1 — to return additional information about the user or community that placed the post
#' @param count Number of posts to return
#' @param latitude Geographical latitude point (in degrees, -90 to 90) within which to search
#' @param longitude Geographical longitude point (in degrees, -180 to 180) within which to search
#' @param start_time Earliest timestamp (in Unix time) of a news item to return. By default, 24 hours ago
#' @param end_time Latest timestamp (in Unix time) of a news item to return. By default, the current time
#' @param start_from String, accessible for versions from 5.13
#' @param fields Additional fields of profiles and communities to return
#' @param v Version of API
#' @export
newsfeedSearch <- function(q='', extended='', count='', latitude='', longitude='',
                           start_time='', end_time='', start_from='', fields='',
                           v = getAPIVersion()) {
  # Assemble the newsfeed.search request URL from the supplied parameters.
  request_url <- queryBuilder('newsfeed.search',
                              q = q,
                              extended = extended,
                              count = count,
                              latitude = latitude,
                              longitude = longitude,
                              start_time = start_time,
                              end_time = end_time,
                              start_from = start_from,
                              fields = fields,
                              v = v)
  # Honour the API rate limit, then issue the percent-encoded request.
  request_delay()
  parsed <- jsonlite::fromJSON(URLencode(request_url))

  if (has_error(parsed))
    return(try_handle_error(parsed))

  parsed$response
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/newsfeed.R
#' Delaying a request if necessary
#'
#' VK can accept maximum 3 requests to API methods per second from a client.
request_delay <- function() {
  start_time <- Sys.time()
  # Time elapsed since the previous request (difftime).
  taken_time <- start_time - .vkr$last_request_time
  # If we already hit the per-second budget within the last second, sleep
  # out the remainder of the second.
  if (taken_time <= 1.0 & .vkr$num_requests >= .vkr$max_requests) {
    Sys.sleep(1.0 - taken_time)
  }
  # Rolling counter 1..3 of requests within the current window.
  .vkr$num_requests <- ifelse(.vkr$num_requests < 3, .vkr$num_requests + 1, 1)
  .vkr$last_request_time <- Sys.time()
}


#' Get error code from response
#' @param response httr response object
has_error <- function(response) {
  # Returns the numeric VK error code, or 0 when the response has no error —
  # callers use it as a truthy flag.
  return(ifelse(!is.null(response$error), response$error$error_code, 0))
}


#' Try to handle network error
#' @param error Error
try_handle_network_error <- function(error) {
  warning(error$message, call. = FALSE, immediate. = TRUE)
  # Retry the failed query up to .vkr$max_repeats times, sleeping
  # .vkr$timeout seconds between attempts; give up with stop() after that.
  if (.vkr$repeats_counter < .vkr$max_repeats) {
    warning('Trying to repeat the last query...', call. = FALSE, immediate. = TRUE)
    .vkr$repeats_counter <- .vkr$repeats_counter + 1
    Sys.sleep(.vkr$timeout)
    # n=2 walks two frames up the call stack to reach the original API call.
    response <- repeat_last_query(n=2)
    .vkr$repeats_counter <- 0
    return(response)
  }
  .vkr$repeats_counter <- 0
  stop(error$message, call. = FALSE)
}


#' Repeat last function call
#' @param params Query params
#' @param n The number of generations to go back
repeat_last_query <- function(params = list(), n = 1) {
  # NOTE(review): this relies on exact call-stack depth — n must match the
  # number of frames between this call and the API wrapper being retried
  # (handlers pass n = 6, network retry passes n = 2). Fragile by design.
  parent_name <- deparse(sys.calls()[[sys.nframe()-n]])
  parent_args <- as.list(sys.frame(-n))
  args <- list()
  # Rebuild the original call's argument list from the parent frame...
  for (arg in names(as.list(match.call(definition = sys.function(-n), call = sys.call(-n)))[-1]))
    args[[arg]] <- parent_args[[arg]]
  # ...then override/extend with any extra params (e.g. captcha fields).
  for (arg in names(params))
    args[[arg]] <- params[[arg]]
  # Strip the "(...)" part of the deparsed call to recover the function name.
  do.call(what = gsub("\\(.*\\)", "", parent_name), args = args)
}


#' Set timeout
#' @param secs Seconds
#' @export
setTimeout <- function(secs) {
  .vkr$timeout <- secs
}


#' Set maximum number of repeats
#' @param n Repeats number
#' @export
setRepeats <- function(n) {
  .vkr$max_repeats <- n
}


#' Custom error
#' @param message Error message
#' @param call Call expression
#' @param error_code Error code
vk_stop <- function(message = "", call = sys.call(), error_code = "") {
  # Signals a classed condition "vk_error<code>" so try_handle_error() can
  # dispatch on the specific VK error code via tryCatch handlers.
  cond <- structure(list(message = message, call = call),
                    class = c(paste0("vk_error", error_code), "error", "condition"))
  stop(cond)
}


#' Captcha error handler
#' @param error Error object
#' @importFrom graphics plot rasterImage
#' @importFrom utils download.file
handle_captcha <- function(error) {
  # Interactive-only flow: shows the captcha image on the plot device and
  # asks the user to type the key.
  if (!interactive())
    stop("Captcha needed.\nFor handle this error you must to interact with your console", call. = FALSE)
  if (!requireNamespace("jpeg", quietly = TRUE))
    stop("The package jpeg was not installed")
  download.file(url = error$captcha_img, destfile = 'captcha.jpg', mode = 'wb')
  captcha_img <- jpeg::readJPEG("captcha.jpg", native = TRUE)
  plot(0:1, 0:1, type = "n", ann = FALSE, axes = FALSE)
  rasterImage(captcha_img, 0, 0, 1, 1)
  captcha_sid <- error$captcha_sid
  captcha_key <- readline("Enter the key from captcha: ")
  # Returned pair is merged into the retried query by repeat_last_query().
  list('captcha_key' = captcha_key, 'captcha_sid' = captcha_sid)
}


#' Validation error handler
#' @param error Error object
handle_validation <- function(error) {
  # Interactive-only flow: walks VK's mobile security-check form, asking the
  # user for the hidden digits of their phone number.
  if (!interactive())
    stop("Required phone number.\nFor handle this error you must to interact with your console", call. = FALSE)
  response <- httr::GET(error$redirect_uri)
  authorize_form <- XML::htmlParse(httr::content(response, "text", encoding = "UTF-8"))
  action <- XML::xpathSApply(authorize_form, "//form", XML::xmlGetAttr, "action")
  if (length(action) != 0 && grepl("security_check", action)) {
    phone <- XML::xpathSApply(authorize_form, "//*/span", XML::xmlValue)
    print(phone)
    missing_numbers <- readline("Enter the missing numbers in the phone number: ")
    response <- httr::GET(paste0("https://m.vk.com",action),
                          query = list('code' = missing_numbers),
                          httr::add_headers('Content-Type' = 'application/x-www-form-urlencoded'))
    # Scan the redirect chain for a Location header carrying the new token.
    for (i in 1:length(response$all_headers)) {
      location <- response$all_headers[[i]]$headers$location
      if (!is.null(location) & grepl("access_token", location)) {
        access_token <- gsub(".*?access_token=(.*?)&.*", "\\1", location)
        setAccessToken(access_token)
        break
      }
    }
  }
}


#' Check response for errors
#' @param response httr response object
try_handle_error <- function(response) {
  # Raise the classed vk_error<code> condition, then recover from the codes
  # we know how to handle:
  #   14 - captcha needed, 17 - validation required, 6 - too many requests.
  # n = 6 matches the frame depth from the handler back to the API wrapper.
  tryCatch(
    vk_stop(message = response$error$error_msg, error_code = response$error$error_code),
    vk_error14 = function(e) {
      params <- handle_captcha(response$error)
      return(repeat_last_query(params = params, n = 6))
    },
    vk_error17 = function(e) {
      handle_validation(response$error)
      return(repeat_last_query(n = 6))
    },
    vk_error6 = function(e) {
      request_delay()
      return(repeat_last_query(n = 6))
    }
  )
}


#' Returns a query string
#'
#' @param method_name Method name
#' @param ... Method arguments
queryBuilder <- function(method_name, ...) {
  query <- paste("https://api.vk.com/method/", method_name, "?", sep = "")
  # Deparse the named arguments; their actual values are taken from list(...).
  arguments <- sapply(substitute(list(...))[-1], deparse)
  arg_names <- names(arguments)
  for (arg_pos in seq(length(arguments))) {
    if (arg_names[arg_pos] != "") {
      if (is.character(arguments[arg_pos])) {
        #arg_value <- gsub("\"", "", arguments[arg_pos])
        arg_value <- list(...)[arg_names[arg_pos]]
      } else {
        # ???
        arg_value <- arguments[arg_pos]
      }
      # Empty-string values are skipped so defaults don't clutter the URL.
      query <- paste(query,
                     ifelse(arg_value != "",
                            paste("&", arg_names[arg_pos], "=", arg_value, sep = ""),
                            ""),
                     sep = "")
    }
  }
  query <- paste(query, '&access_token=', getAccessToken(), sep = "")
  query
}


#' A universal method for calling a sequence of other methods while saving and filtering interim results
#' @param code Algorithm code in VKScript
#' @param params Parameters list
#' @export
execute <- function(code, params = list()) {
  request_delay()
  query <- "https://api.vk.com/method/execute"
  body = list('code' = code, 'access_token' = getAccessToken(), 'v' = getAPIVersion())
  # Wrap POST so network failures come back as a value instead of an error.
  safe_POST <- purrr::safely(httr::POST)
  post_res <- safe_POST(url = query, body = append(body, params))
  if (!is.null(post_res$error))
    return(try_handle_network_error(post_res$error))
  post_res <- post_res$result
  content <- httr::content(post_res, "text", encoding="UTF-8")
  # Server-side failures may arrive as an "ERROR..." body or a 500/404 status.
  if (startsWith(content, "ERROR") | post_res["status_code"] == 500 | post_res["status_code"] == 404)
    vk_stop(message = sprintf("Response error '%s'", content), error_code = post_res$status_code)
  response <- jsonlite::fromJSON(content)
  if (has_error(response)) {
    return(try_handle_error(response))
  }
  response$response
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/queries.R
#' Allows the programmer to do a quick search for any substring
#' @param q Search query string
#' @param limit Maximum number of results to return
#' @param filters List of comma-separated words
#' @param search_global Flag, either 1 or 0, default 1
#' @param v Version of API
#' @importFrom utils URLencode
#' @export
search.getHints <- function(q='', limit='', filters='', search_global='', v=getAPIVersion()) {
  # Build the search.getHints request URL from the supplied parameters.
  request_url <- queryBuilder('search.getHints',
                              q = q,
                              limit = limit,
                              filters = filters,
                              search_global = search_global,
                              v = v)
  # Respect the API rate limit before issuing the percent-encoded request.
  request_delay()
  parsed <- jsonlite::fromJSON(URLencode(request_url))

  if (has_error(parsed))
    return(try_handle_error(parsed))

  parsed$response
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/search.R
#' Returns data required to show the status of a users and/or communities
#'
#' @param users_ids User IDs
#' @param groups_ids Community IDs
#' @param progress_bar Display progress bar
#' @param v Version of API
#' @examples
#' \dontrun{
#' status.me <- getStatus()
#' status.friends <- getStatus(users_ids = getFriends()$items)
#' status.groups <- getStatus(groups_ids = getGroups()$items)
#' status.friends_and_groups <- getStatus(users_ids = getFriends()$items,
#'    groups_ids = getGroups()$items, progress_bar = T)
#' }
#' @importFrom utils setTxtProgressBar txtProgressBar
#' @export
getStatus <- function(users_ids=c(), groups_ids=c(), progress_bar=FALSE, v=getAPIVersion()) {
  # Fetch statuses for one batch of IDs with a single execute() call
  # (VKScript allows up to 25 API calls per execute).
  get_status <- function(users_ids=c(), groups_ids=c(), v=getAPIVersion()) {
    param_name <- 'user_id'
    objects <- users_ids
    if (length(users_ids) <= 0) {
      param_name <- 'group_id'
      objects <- groups_ids
    }
    code <- 'var updates = {}; var query;'
    # FIX: seq_along() instead of 1:length() — safe for empty input.
    for (i in seq_along(objects)) {
      code <- paste0(code,
                     'query = API.status.get({"', param_name, '":"', objects[i],
                     '", "v":"', v, '"}).text; updates.id', objects[i], '=query;')
    }
    code <- paste0(code, 'return updates;')

    response <- execute(code)
    if (!is.null(response))
      names(response) <- objects
    response
  }

  # No IDs given: return the current user's own status.
  # FIX: && instead of & for the scalar condition.
  if (length(users_ids) <= 0 && length(groups_ids) <= 0)
    return(execute(paste0('return API.status.get({"v":"', v, '"}).text;')))

  max_length <- length(users_ids) + length(groups_ids)
  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = max_length, style = 3)
    setTxtProgressBar(pb, 0)
  }

  all_updates <- list()

  # By users (batches of 25, the execute() call limit)
  if (length(users_ids) > 0) {
    from <- 1
    to <- 25
    repeat {
      if (to >= length(users_ids))
        to <- length(users_ids)
      updates <- get_status(users_ids = users_ids[from:to], v = v)
      all_updates <- append(all_updates, updates)
      if (progress_bar)
        setTxtProgressBar(pb, length(all_updates))
      if (to >= length(users_ids))
        break
      from <- to + 1
      to <- to + 25
    }
  }

  # By groups (same batching)
  if (length(groups_ids) > 0) {
    from <- 1
    to <- 25
    repeat {
      if (to >= length(groups_ids))
        to <- length(groups_ids)
      updates <- get_status(groups_ids = groups_ids[from:to], v = v)
      all_updates <- append(all_updates, updates)
      if (progress_bar)
        setTxtProgressBar(pb, length(all_updates))
      if (to >= length(groups_ids))
        break
      from <- to + 1
      to <- to + 25
    }
  }

  if (progress_bar)
    close(pb)

  # Without reshape2, fall back to a base-R data frame of the same shape.
  if (!requireNamespace('reshape2', quietly = TRUE)) {
    all_updates <- do.call(rbind.data.frame, all_updates)
    colnames(all_updates) <- c("status")
    all_updates$id <- rownames(all_updates)
    rownames(all_updates) <- NULL
    return(all_updates)
  }

  all_updates <- reshape2::melt(all_updates)
  colnames(all_updates) <- c("status", "id")
  all_updates
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/status.R
#' Returns detailed information on users #' #' @param user_ids User IDs or screen names (screen_name). By default, current user ID (the maximum number of elements allowed is 1000) #' @param fields Profile fields to return (see fetails for more information about fields) #' @param name_case Case for declension of user name and surname #' @param flatten Automatically flatten nested data frames into a single non-nested data frame #' @param v Version of API #' @details #' \href{https://vk.com/dev/fields}{User object} describes a user profile, contains the following fields: #' \itemize{ #' \item \strong{uid} User ID #' \item \strong{first_name} First name #' \item \strong{last_name} Last name #' \item \strong{deactivated} Returns if a profile is deleted or blocked. Gets the value deleted or banned. Keep in mind that in this case no additional fields are returned #' \item \strong{hidden: 1} Returns while operating without access_token if a user has set the "Who can see my profile on the Internet" -> "Only VK users" privacy setting. Keep in mind that in this case no additional fields are returned #' \item \strong{verified} Returns 1 if the profile is verified, 0 if not #' \item \strong{blacklisted} Returns 1 if a current user is in the requested user's blacklist #' \item \strong{sex} User sex (1 - female; 2 - male; 0 - not specified) #' \item \strong{bdate} User's date of birth. Returned as DD.MM.YYYY or DD.MM (if birth year is hidden). If the whole date is hidden, no field is returned #' \item \strong{city} ID of the city specified on user's page in "Contacts" section. Returns city ID that can be used to get its name using places.getCityById method. If no city is specified or main information on the page is hidden for in privacy settings, then it returns 0 #' \item \strong{country} ID of the country specified on user's page in "Contacts" section. Returns country ID that can be used to get its name using places.getCountryById method. 
If no country is specified or main information on the page is hidden in privacy settings, then it returns 0 #' \item \strong{home_town} User's home town #' \item \strong{photo_50} Returns URL of square photo of the user with 50 pixels in width. In case user does not have a photo, http://vk.com/images/camera_c.gif is returned #' \item \strong{photo_100} Returns URL of square photo of the user with 100 pixels in width. In case user does not have a photo, http://vk.com/images/camera_b.gif is returned #' \item \strong{photo_200_orig} Returns URL of user's photo with 200 pixels in width. In case user does not have a photo, http://vk.com/images/camera_a.gif is returned #' \item \strong{photo_200} Returns URL of square photo of the user with 200 pixels in width. If the photo was uploaded long time ago, there can be no image of such size and in this case the reply will not include this field #' \item \strong{photo_400_orig} Returns URL of user's photo with 400 pixels in width. If user does not have a photo of such size, reply will not include this field #' \item \strong{photo_max} Returns URL of square photo of the user with maximum width. Can be returned a photo both 200 and 100 pixels in width. In case user does not have a photo, http://vk.com/images/camera_b.gif is returned #' \item \strong{photo_max_orig} Returns URL of user's photo of maximum size. Can be returned a photo both 400 and 200 pixels in width. In case user does not have a photo, http://vk.com/images/camera_a.gif is returned #' \item \strong{online} Information whether the user is online. Returned values: 1 - online, 0 - offline. If user utilizes a mobile application or site mobile version, it returns online_mobile additional field that includes 1. With that, in case of application, online_app additional field is returned with application ID. #' \item \strong{lists} Information about friend lists. Returns IDs of friend lists the user is member of, separated with a comma. 
The field is available for friends.get method only. To get information about ID and names of friend lists use friends.getLists method. If user is not a member of any friend list, then when accepting data in XML format the respective <user> node does not contain <lists> tag #' \item \strong{domain} Page screen name. Returns a string with a page screen name (only subdomain is returned, like andrew). If not set, "id'+uid is returned, e.g. id35828305 #' \item \strong{has_mobile} Information whether the user's mobile phone number is available. Returned values: 1 - available, 0 - not available. We recommend you to use it prior to call of secure.sendSMSNotification method #' \item \strong{contacts} Information about user's phone numbers. If data are available and not hidden in privacy settings, the following fields are returned (mobile_phone - user's mobile phone number (only for standalone applications); home_phone - user's additional phone number) #' \item \strong{site} Returns a website address from a user profile #' \item \strong{education} Information about user's higher education institution. The following fields are returned: #' \itemize{ #' \item \strong{university} University ID #' \item \strong{university_name} University name #' \item \strong{faculty} Faculty ID #' \item \strong{faculty_name} Faculty name #' \item \strong{graduation} Graduation year #' } #' \item \strong{universities} List of higher education institutions where user studied. 
Returns universities array with university objects with the following fields: #' \itemize{ #' \item \strong{id} University ID #' \item \strong{country} ID of the country the university is located in #' \item \strong{city} ID of the city the university is located in #' \item \strong{name} University name #' \item \strong{faculty} Faculty ID #' \item \strong{faculty_name} Faculty name #' \item \strong{chair} University chair ID #' \item \strong{chair_name} Chair name #' \item \strong{graduation} Graduation year #' } #' \item \strong{schools} List of schools where user studied in. Returns schools array with school objects with the following fields: #' \itemize{ #' \item \strong{id} School ID #' \item \strong{country} ID of the country the school is located in #' \item \strong{city} ID of the city the school is located in #' \item \strong{name} School name #' \item \strong{year_from} Year the user started to study #' \item \strong{year_to} Year the user finished to study #' \item \strong{year_graduated} Graduation year #' \item \strong{class} School class letter #' \item \strong{speciality} Speciality #' \item \strong{type} Type ID #' \item \strong{type_str} Type name #' } #' \item \strong{status} User status. Returns a string with status text that is in the profile below user's name #' \item \strong{last_seen} Last visit date. Returns last_seen object with the following fields: #' \itemize{ #' \item \strong{time} Last visit date (in Unix time) #' \item \strong{platform} Type of the platform that used for the last authorization. See more at \href{https://vk.com/dev/using_longpoll}{Using LongPoll server} #' } #' \item \strong{followers_count} Number of user's followers #' \item \strong{common_count} Number of common friends with a current user #' \item \strong{counters} Number of various objects the user has. Can be used in users.get method only when requesting information about a user. 
Returns an object with fields: #' \itemize{ #' \item \strong{albums} Number of photo albums #' \item \strong{videos} Number of videos #' \item \strong{audios} Number of audios #' \item \strong{notes} Number of notes #' \item \strong{friends} Number of friends #' \item \strong{groups} Number of communities #' \item \strong{online_friends} Number of online friends #' \item \strong{mutual_friends} Number of mutual friends #' \item \strong{user_videos} Number of videos the user is tagged on #' \item \strong{followers} Number of followers #' \item \strong{user_photos} Number of photos the user is tagged on #' \item \strong{subscriptions} Number of subscriptions #' } #' \item \strong{occupation} Current user's occupation. Returns following fields: #' \itemize{ #' \item \strong{type} Can take the values: work, school, university #' \item \strong{id} ID of school, university, company group (the one a user works in) #' \item \strong{name} Name of school, university or work place #' } #' \item \strong{nickname} User nickname #' \item \strong{relatives} Current user's relatives list. Returns a list of objects with id and type fields (name instead of id if a relative is not a VK user). type - relationship type. Possible values: #' \itemize{ #' \item \emph{sibling} #' \item \emph{parent} #' \item \emph{child} #' \item \emph{grandparent} #' \item \emph{grandchild} #' } #' \item \strong{relation} User relationship status. 
Returned values: #' \itemize{ #' \item \strong{1} - Single #' \item \strong{2} - In a relationship #' \item \strong{3} - Engaged #' \item \strong{4} - Married #' \item \strong{5} - It's complicated #' \item \strong{6} - Actively searching #' \item \strong{7} - In love #' } #' \item \strong{personal} Information from the "Personal views" section #' \itemize{ #' \item \strong{political} Political views: #' \itemize{ #' \item{1} - Communist #' \item{2} - Socialist #' \item{3} - Moderate #' \item{4} - Liberal #' \item{5} - Conservative #' \item{6} - Monarchist #' \item{7} - Ultraconservative #' \item{8} - Apathetic #' \item{9} - Libertarian #' } #' \item \strong{langs} Languages #' \item \strong{religion} World view #' \item \strong{inspired_by} Inspired by #' \item \strong{people_main} Important in others: #' \itemize{ #' \item{1} - Intellect and creativity #' \item{2} - Kindness and honesty #' \item{3} - Health and beauty #' \item{4} - Wealth and power #' \item{5} - Courage and persistence #' \item{6} - Humor and love for life #' } #' \item \strong{life_main} Personal priority: #' \itemize{ #' \item{1} - Family and children #' \item{2} - Career and money #' \item{3} - Entertainment and leisure #' \item{4} - Science and research #' \item{5} - Improving the world #' \item{6} - Personal development #' \item{7} - Beauty and art #' \item{8} - Fame and influence #' } #' \item \strong{smoking} Views on smoking (1 - very negative; 2 - negative; 3 - neutral; 4 - compromisable; 5 - positive) #' \item \strong{alcohol} Views on alcohol (1 - very negative; 2 - negative; 3 - neutral; 4 - compromisable; 5 - positive) #' } #' \item \strong{connections} Returns specified services such as: skype, facebook, twitter, livejournal, instagram #' \item \strong{exports} External services with export configured (twitter, facebook, livejournal, instagram) #' \item \strong{wall_comments} Wall comments allowed (1 - allowed, 0 - not allowed) #' \item \strong{activities} Activities #' \item
\strong{interests} Interests #' \item \strong{music} Favorite music #' \item \strong{movies} Favorite movies #' \item \strong{tv} Favorite TV shows #' \item \strong{books} Favorite books #' \item \strong{games} Favorite games #' \item \strong{about} "About me" #' \item \strong{quotes} Favorite quotes #' \item \strong{can_post} Can post on the wall: 1 - allowed, 0 - not allowed #' \item \strong{can_see_all_posts} Can see other users' posts on the wall: 1 - allowed, 0 - not allowed #' \item \strong{can_see_audio} Can see other users' audio on the wall: 1 - allowed, 0 - not allowed #' \item \strong{can_write_private_message} Can write private messages to a current user: 1 - allowed, 0 - not allowed #' \item \strong{timezone} user time zone. Returns only while requesting current user info #' \item \strong{screen_name} User page's screen name (subdomain) #' } #' @examples #' \dontrun{ #' user <- getUsers('1', fields='sex,bdate,city') #' } #' @importFrom utils URLencode #' @export
getUsers <- function(user_ids='', fields='', name_case='nom', flatten=FALSE, v=getAPIVersion()) {
  # Deprecated single-request variant; getUsersExecute() batches via execute().
  .Deprecated("getUsersExecute()")
  # Parameters sent in the POST body for both request flavours.
  post_params <- list(fields = profile_fields(fields), name_case = name_case)
  if (length(user_ids) > 1) {
    # Several IDs: pass them comma-separated in the POST body (keeps the URL short).
    post_params$user_ids <- paste(user_ids, collapse = ",")
    query <- queryBuilder('users.get', v = v)
  } else {
    # Single ID (or '' => current authorized user): keep it in the query string.
    query <- queryBuilder('users.get', user_ids = user_ids, v = v)
  }
  # Throttle to respect the VK API rate limit before firing the request.
  request_delay()
  raw_json <- httr::content(httr::POST(URLencode(query), body = post_params),
                            "text", encoding = "UTF-8")
  response <- jsonlite::fromJSON(raw_json)
  if (has_error(response))
    return(try_handle_error(response))
  response <- response$response
  if (isTRUE(flatten))
    response <- jsonlite::flatten(response)
  # Tag the result so S3 helpers can recognize user collections.
  class(response) <- c(class(response), "vk.users")
  response
}
#' Returns detailed information on arbitrary number of users #' #' @param users_ids User IDs or screen names (screen_name).
By default, current user ID #' @param fields Profile fields to return #' @param name_case Case for declension of user name and surname #' @param drop Drop deleted or banned users #' @param flatten Automatically flatten nested data frames into a single non-nested data frame #' @param use_db Use database #' @param db_params Collection name and suffix #' @param progress_bar Display progress bar #' @param v Version of API #' @details #' \href{https://vk.com/dev/fields}{User object} describes a user profile, contains the following fields: #' \itemize{ #' \item \strong{uid} User ID #' \item \strong{first_name} First name #' \item \strong{last_name} Last name #' \item \strong{deactivated} Returns if a profile is deleted or blocked. Gets the value deleted or banned. Keep in mind that in this case no additional fields are returned #' \item \strong{hidden: 1} Returns while operating without access_token if a user has set the "Who can see my profile on the Internet" -> "Only VK users" privacy setting. Keep in mind that in this case no additional fields are returned #' \item \strong{verified} Returns 1 if the profile is verified, 0 if not #' \item \strong{blacklisted} Returns 1 if a current user is in the requested user's blacklist #' \item \strong{sex} User sex (1 - female; 2 - male; 0 - not specified) #' \item \strong{bdate} User's date of birth. Returned as DD.MM.YYYY or DD.MM (if birth year is hidden). If the whole date is hidden, no field is returned #' \item \strong{city} ID of the city specified on user's page in "Contacts" section. Returns city ID that can be used to get its name using places.getCityById method. If no city is specified or main information on the page is hidden for in privacy settings, then it returns 0 #' \item \strong{country} ID of the country specified on user's page in "Contacts" section. Returns country ID that can be used to get its name using places.getCountryById method. 
If no country is specified or main information on the page is hidden in privacy settings, then it returns 0 #' \item \strong{home_town} User's home town #' \item \strong{photo_50} Returns URL of square photo of the user with 50 pixels in width. In case user does not have a photo, http://vk.com/images/camera_c.gif is returned #' \item \strong{photo_100} Returns URL of square photo of the user with 100 pixels in width. In case user does not have a photo, http://vk.com/images/camera_b.gif is returned #' \item \strong{photo_200_orig} Returns URL of user's photo with 200 pixels in width. In case user does not have a photo, http://vk.com/images/camera_a.gif is returned #' \item \strong{photo_200} Returns URL of square photo of the user with 200 pixels in width. If the photo was uploaded long time ago, there can be no image of such size and in this case the reply will not include this field #' \item \strong{photo_400_orig} Returns URL of user's photo with 400 pixels in width. If user does not have a photo of such size, reply will not include this field #' \item \strong{photo_max} Returns URL of square photo of the user with maximum width. Can be returned a photo both 200 and 100 pixels in width. In case user does not have a photo, http://vk.com/images/camera_b.gif is returned #' \item \strong{photo_max_orig} Returns URL of user's photo of maximum size. Can be returned a photo both 400 and 200 pixels in width. In case user does not have a photo, http://vk.com/images/camera_a.gif is returned #' \item \strong{online} Information whether the user is online. Returned values: 1 - online, 0 - offline. If user utilizes a mobile application or site mobile version, it returns online_mobile additional field that includes 1. With that, in case of application, online_app additional field is returned with application ID. #' \item \strong{lists} Information about friend lists. Returns IDs of friend lists the user is member of, separated with a comma. 
The field is available for friends.get method only. To get information about ID and names of friend lists use friends.getLists method. If user is not a member of any friend list, then when accepting data in XML format the respective <user> node does not contain <lists> tag #' \item \strong{domain} Page screen name. Returns a string with a page screen name (only subdomain is returned, like andrew). If not set, "id'+uid is returned, e.g. id35828305 #' \item \strong{has_mobile} Information whether the user's mobile phone number is available. Returned values: 1 - available, 0 - not available. We recommend you to use it prior to call of secure.sendSMSNotification method #' \item \strong{contacts} Information about user's phone numbers. If data are available and not hidden in privacy settings, the following fields are returned (mobile_phone - user's mobile phone number (only for standalone applications); home_phone - user's additional phone number) #' \item \strong{site} Returns a website address from a user profile #' \item \strong{education} Information about user's higher education institution. The following fields are returned: #' \itemize{ #' \item \strong{university} University ID #' \item \strong{university_name} University name #' \item \strong{faculty} Faculty ID #' \item \strong{faculty_name} Faculty name #' \item \strong{graduation} Graduation year #' } #' \item \strong{universities} List of higher education institutions where user studied. 
Returns universities array with university objects with the following fields: #' \itemize{ #' \item \strong{id} University ID #' \item \strong{country} ID of the country the university is located in #' \item \strong{city} ID of the city the university is located in #' \item \strong{name} University name #' \item \strong{faculty} Faculty ID #' \item \strong{faculty_name} Faculty name #' \item \strong{chair} University chair ID #' \item \strong{chair_name} Chair name #' \item \strong{graduation} Graduation year #' } #' \item \strong{schools} List of schools where user studied in. Returns schools array with school objects with the following fields: #' \itemize{ #' \item \strong{id} School ID #' \item \strong{country} ID of the country the school is located in #' \item \strong{city} ID of the city the school is located in #' \item \strong{name} School name #' \item \strong{year_from} Year the user started to study #' \item \strong{year_to} Year the user finished to study #' \item \strong{year_graduated} Graduation year #' \item \strong{class} School class letter #' \item \strong{speciality} Speciality #' \item \strong{type} Type ID #' \item \strong{type_str} Type name #' } #' \item \strong{status} User status. Returns a string with status text that is in the profile below user's name #' \item \strong{last_seen} Last visit date. Returns last_seen object with the following fields: #' \itemize{ #' \item \strong{time} Last visit date (in Unix time) #' \item \strong{platform} Type of the platform that used for the last authorization. See more at \href{https://vk.com/dev/using_longpoll}{Using LongPoll server} #' } #' \item \strong{followers_count} Number of user's followers #' \item \strong{common_count} Number of common friends with a current user #' \item \strong{counters} Number of various objects the user has. Can be used in users.get method only when requesting information about a user. 
Returns an object with fields: #' \itemize{ #' \item \strong{albums} Number of photo albums #' \item \strong{videos} Number of videos #' \item \strong{audios} Number of audios #' \item \strong{notes} Number of notes #' \item \strong{friends} Number of friends #' \item \strong{groups} Number of communities #' \item \strong{online_friends} Number of online friends #' \item \strong{mutual_friends} Number of mutual friends #' \item \strong{user_videos} Number of videos the user is tagged on #' \item \strong{followers} Number of followers #' \item \strong{user_photos} Number of photos the user is tagged on #' \item \strong{subscriptions} Number of subscriptions #' } #' \item \strong{occupation} Current user's occupation. Returns following fields: #' \itemize{ #' \item \strong{type} Can take the values: work, school, university #' \item \strong{id} ID of school, university, company group (the one a user works in) #' \item \strong{name} Name of school, university or work place #' } #' \item \strong{nickname} User nickname #' \item \strong{relatives} Current user's relatives list. Returns a list of objects with id and type fields (name instead of id if a relative is not a VK user). type - relationship type. Possible values: #' \itemize{ #' \item \emph{sibling} #' \item \emph{parent} #' \item \emph{child} #' \item \emph{grandparent} #' \item \emph{grandchild} #' } #' \item \strong{relation} User relationship status. 
Returned values: #' \itemize{ #' \item \strong{1} - Single #' \item \strong{2} - In a relationship #' \item \strong{3} - Engaged #' \item \strong{4} - Married #' \item \strong{5} - It's complicated #' \item \strong{6} - Actively searching #' \item \strong{7} - In love #' } #' \item \strong{personal} Information from the "Personal views" section #' \itemize{ #' \item \strong{political} Political views: #' \itemize{ #' \item{1} - Communist #' \item{2} - Socialist #' \item{3} - Moderate #' \item{4} - Liberal #' \item{5} - Conservative #' \item{6} - Monarchist #' \item{7} - Ultraconservative #' \item{8} - Apathetic #' \item{9} - Libertarian #' } #' \item \strong{langs} Languages #' \item \strong{religion} World view #' \item \strong{inspired_by} Inspired by #' \item \strong{people_main} Important in others: #' \itemize{ #' \item{1} - Intellect and creativity #' \item{2} - Kindness and honesty #' \item{3} - Health and beauty #' \item{4} - Wealth and power #' \item{5} - Courage and persistence #' \item{6} - Humor and love for life #' } #' \item \strong{life_main} Personal priority: #' \itemize{ #' \item{1} - Family and children #' \item{2} - Career and money #' \item{3} - Entertainment and leisure #' \item{4} - Science and research #' \item{5} - Improving the world #' \item{6} - Personal development #' \item{7} - Beauty and art #' \item{8} - Fame and influence #' } #' \item \strong{smoking} Views on smoking (1 - very negative; 2 - negative; 3 - neutral; 4 - compromisable; 5 - positive) #' \item \strong{alcohol} Views on alcohol (1 - very negative; 2 - negative; 3 - neutral; 4 - compromisable; 5 - positive) #' } #' \item \strong{connections} Returns specified services such as: skype, facebook, twitter, livejournal, instagram #' \item \strong{exports} External services with export configured (twitter, facebook, livejournal, instagram) #' \item \strong{wall_comments} Wall comments allowed (1 - allowed, 0 - not allowed) #' \item \strong{activities} Activities #' \item
\strong{interests} Interests #' \item \strong{music} Favorite music #' \item \strong{movies} Favorite movies #' \item \strong{tv} Favorite TV shows #' \item \strong{books} Favorite books #' \item \strong{games} Favorite games #' \item \strong{about} "About me" #' \item \strong{quotes} Favorite quotes #' \item \strong{can_post} Can post on the wall: 1 - allowed, 0 - not allowed #' \item \strong{can_see_all_posts} Can see other users' posts on the wall: 1 - allowed, 0 - not allowed #' \item \strong{can_see_audio} Can see other users' audio on the wall: 1 - allowed, 0 - not allowed #' \item \strong{can_write_private_message} Can write private messages to a current user: 1 - allowed, 0 - not allowed #' \item \strong{timezone} user time zone. Returns only while requesting current user info #' \item \strong{screen_name} User page's screen name (subdomain) #' } #' @examples #' \dontrun{ #' random_ids <- sample(x=seq(1:10000000), size=10000, replace=FALSE) #' users <- getUsersExecute(random_ids, fields='sex,bdate,city') #' } #' @importFrom utils setTxtProgressBar txtProgressBar #' @export
getUsersExecute <- function(users_ids, fields='', name_case='nom', drop=FALSE, flatten=FALSE,
                            use_db=FALSE, db_params=list(), progress_bar=FALSE, v=getAPIVersion()) {
  # Fetch one batch of profiles in a single execute() call.
  # VKScript allows at most 25 API calls per execute request and users.get
  # accepts up to 500 IDs per call, so one batch covers up to 25 * 500 IDs.
  get_users <- function(user_ids='', fields='', name_case='nom', v=getAPIVersion()) {
    code <- 'var users = [];'
    # Number of users.get calls for this batch: ceiling(length(user_ids) / 500).
    num_requests <- ifelse(length(user_ids) %% 500 == 0,
                           (length(user_ids) %/% 500),
                           (length(user_ids) %/% 500) + 1)
    from <- 1
    to <- ifelse(num_requests >= 2, 500, length(user_ids))
    # num_requests >= 1 here: callers always pass a non-empty ID chunk.
    for (i in 1:num_requests) {
      code <- paste0(code, 'users = users + API.users.get({ "user_ids":"',
                     paste0(user_ids[from:to], collapse = ','),
                     '", "fields":"', fields,
                     '", "name_case":"', name_case,
                     '", "v":"', v, '"});')
      from <- to + 1
      # Advance the window by a full 500 while enough IDs remain,
      # otherwise stretch "to" so the last call covers the tail exactly.
      to <- to + ifelse(length(user_ids) - (to + 500) >= 0, 500, length(user_ids) - to)
    }
    code <- paste0(code, 'return users;')
    # Hard limit of the VK execute endpoint on the script size.
    if (nchar(code) > 65535) stop("The POST request is limited by 65535 bytes")
    execute(code)
  }
  fields <- profile_fields(fields)
  # No IDs supplied: ask the API about the current (authorized) user.
  if (missing(users_ids)) {
    code <- paste0('return API.users.get({"fields":"', fields,
                   '", "name_case":"', name_case, '", "v":"', v, '"});')
    response <- execute(code)
    if (isTRUE(flatten)) response <- jsonlite::flatten(response)
    return(response)
  }
  # Accept the nested structure produced by the friends functions.
  if ("vk.friends.ids" %in% class(users_ids)) users_ids <- unique(unlist(users_ids))
  # Keep only valid integer IDs; screen names coerce to NA and are dropped here.
  users_ids <- as.integer(users_ids)
  users_ids <- users_ids[!is.na(users_ids)]
  if (length(users_ids) == 0) stop('"users_ids" has no user IDs', call. = FALSE)
  if (use_db) {
    # Optional persistence: fall back to default collection settings via or().
    collection <- or(db_params[['collection']], 'users')
    suffix <- or(db_params[['suffix']], '')
    key <- or(db_params[['key']], 'id')
    if (!collection_exists(collection, suffix)) create_empty_collection(collection, suffix)
  }
  all_users <- data.frame()
  # Outer paging: 5000 IDs per execute() call (well under the 12500 batch cap).
  from <- 1
  to <- 5000
  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = length(users_ids), style = 3)
    setTxtProgressBar(pb, 0)
  }
  repeat {
    if (to >= length(users_ids)) to <- length(users_ids)
    users <- get_users(users_ids[from:to], fields = fields, name_case = name_case, v = v)
    if (use_db) db_update(object = users, key = key, collection = collection,
                          suffix = suffix, upsert = TRUE)
    all_users <- jsonlite::rbind_pages(list(all_users, users))
    if (progress_bar) setTxtProgressBar(pb, nrow(all_users))
    if (to >= length(users_ids)) break
    from <- to + 1
    to <- to + 5000
  }
  if (progress_bar) close(pb)
  # for R CMD check to pass
  deactivated <- NULL
  # Optionally drop deleted/banned profiles (rows where "deactivated" is set).
  if (isTRUE(drop) && "deactivated" %in% colnames(all_users)) {
    all_users <- subset(all_users, is.na(deactivated))
    all_users$deactivated <- NULL
    rownames(all_users) <- NULL
  }
  if (isTRUE(flatten)) all_users <- jsonlite::flatten(all_users)
  class(all_users) <- c(class(all_users), "vk.users")
  all_users
}
#' Returns a list of IDs of followers of the user in question, sorted by date added, most recent first #' #' @param user_id User ID #' @param offset Offset needed to return a specific subset of followers #' @param count Number of followers to return #' @param fields Profile
fields to return #' @param name_case Case for declension of user name and surname #' @param drop Drop deleted or banned followers #' @param flatten Automatically flatten nested data frames into a single non-nested data frame #' @param progress_bar Display progress bar #' @param v Version of API #' @importFrom utils setTxtProgressBar txtProgressBar #' @examples \dontrun{ #' my_followers <- usersGetFollowers(me()) #' } #' @export
usersGetFollowers <- function(user_id='', offset=0, count=0, fields='', name_case='',
                              drop=FALSE, flatten=FALSE, progress_bar=FALSE, v=getAPIVersion()) {
  # Fetch up to 25 * 1000 followers in one execute() call (25 VKScript API
  # calls, users.getFollowers caps count at 1000 per call).
  get_followers <- function(user_id='', offset=0, count=0, fields='', name_case='', v=getAPIVersion()) {
    code <- 'var followers = [];'
    num_requests <- 0
    while (num_requests != 25 && count != 0) {
      current_count <- min(count, 1000)
      code <- paste0(code, 'followers = followers + API.users.getFollowers({"user_id":"', user_id,
                     '", "offset":"', offset, '", "count":"', current_count,
                     '", "fields":"', fields, '", "name_case":"', name_case,
                     '", "v":"', v, '"}).items;')
      offset <- offset + 1000
      num_requests <- num_requests + 1
      count <- count - current_count
    }
    code <- paste0(code, 'return followers;')
    execute(code)
  }
  fields <- profile_fields(fields)
  # If the caller wants deleted/banned followers dropped we must request the
  # "deactivated" marker even when no fields were asked for.
  if (isTRUE(drop) && fields == '') fields <- 'deactivated'
  user_id <- as.integer(user_id)
  # Probe request (count = 1): learns the total follower count and returns the
  # first item; vk_error18 means the profile is deleted or banned.
  code <- paste0('return API.users.getFollowers({"user_id":"', user_id,
                 '", "offset":"', offset, '", "count":"', 1,
                 '", "fields":"', fields, '", "name_case":"', name_case,
                 '", "v":"', v, '"});')
  result <- tryCatch(
    response <- execute(code),
    vk_error18 = function(e) list(followers = list(), count = 0)
  )
  if (result$count == 0) return(result)
  followers <- response$items
  # How many followers we still have to page through.
  max_count <- if ((response$count - offset) > count && count != 0) count
               else response$count - offset
  if (max_count == 0) return(list(followers = response$items, count = response$count))
  # BUG FIX: the original used ifelse(is.vector(followers), length, nrow),
  # which always errors because ifelse() calls rep() on the closures
  # ("attempt to replicate an object of type 'closure'"). A plain scalar
  # if/else selects the size function correctly.
  len <- if (is.vector(followers)) length else nrow
  offset_counter <- 0
  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = max_count, style = 3)
    setTxtProgressBar(pb, len(followers))
  }
  while (len(followers) < max_count) {
    followers25000 <- get_followers(user_id = user_id,
                                    offset = (1 + offset + offset_counter * 25000),
                                    count = (max_count - len(followers)),
                                    fields = fields, name_case = name_case, v = v)
    if (is.vector(followers)) {
      followers <- append(followers, followers25000)
    } else {
      followers <- jsonlite::rbind_pages(list(followers, followers25000))
      followers <- followers[!duplicated(followers$id), ]
    }
    if (progress_bar) setTxtProgressBar(pb, len(followers))
    offset_counter <- offset_counter + 1
  }
  if (progress_bar) close(pb)
  # for R CMD check to pass
  deactivated <- NULL
  if (isTRUE(drop) && "deactivated" %in% colnames(followers)) {
    followers <- subset(followers, is.na(deactivated))
    followers$deactivated <- NULL
    rownames(followers) <- NULL
  }
  if (isTRUE(flatten) && !is.vector(followers)) followers <- jsonlite::flatten(followers)
  list(followers = followers, count = response$count)
}
#' Returns a list of IDs of users and communities followed by the user
#'
#' @param user_id User ID
#' @param offset Offset needed to return a specific subset of subscriptions
#' @param count Number of users and communities to return
#' @param fields Profile fields to return
#' @param extended 1 - to return a combined list of users and communities, 0 - to return separate lists of users and communities
#' @param flatten Automatically flatten nested data frames into a single non-nested data frame
#' @param progress_bar Display progress bar
#' @param v Version of API
#' @importFrom utils setTxtProgressBar txtProgressBar
#' @examples \dontrun{
#' my_subscriptions <- usersGetSubscriptions(me())
#' }
#' @export
usersGetSubscriptions <- function(user_id='', extended='1', offset=0, count=0, fields='',
                                  flatten=FALSE, progress_bar=FALSE, v=getAPIVersion()) {
  # Fetch up to 25 * 200 subscriptions in one execute() call
  # (users.getSubscriptions caps count at 200 per call).
  get_subscriptions <- function(user_id='', extended='', offset='', count='', fields='', v=getAPIVersion()) {
    code <- 'var subscriptions = [];'
    num_requests <- 0
    while (num_requests != 25 && count != 0) {
      current_count <- min(count, 200)
      code <- paste0(code, 'subscriptions = subscriptions + API.users.getSubscriptions({"user_id":"', user_id,
                     '", "offset":"', offset, '", "count":"', current_count,
                     '", "fields":"', fields, '", "extended":"', extended,
                     '", "v":"', v, '"}).items;')
      offset <- offset + 200
      num_requests <- num_requests + 1
      count <- count - current_count
    }
    code <- paste0(code, 'return subscriptions;')
    execute(code)
  }
  user_id <- as.integer(user_id)
  fields <- profile_fields(fields)
  # Probe request (count = 1, always extended) to learn the total count.
  code <- paste0('return API.users.getSubscriptions({"user_id":"', user_id,
                 '", "extended":"', 1, '", "offset":"', offset,
                 '", "count":"', 1, '", "fields":"', fields,
                 '", "v":"', v, '"});')
  response <- execute(code)
  subscriptions <- response$items
  max_count <- if ((response$count - offset) > count && count != 0) count
               else response$count - offset
  if (max_count == 0) return(list(subscriptions = subscriptions, count = response$count))
  offset_counter <- 0
  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = max_count, style = 3)
    setTxtProgressBar(pb, nrow(subscriptions))
  }
  while (nrow(subscriptions) < max_count) {
    # NOTE(review): the outer offset advances by 600 per iteration while one
    # get_subscriptions() call can fetch up to 25 * 200 = 5000 items, and the
    # pages are not de-duplicated (unlike usersGetFollowers). This looks like
    # a paging inconsistency — confirm against the VK API limits before
    # changing the step.
    subscriptions600 <- get_subscriptions(user_id = user_id,
                                          offset = (1 + offset + offset_counter * 600),
                                          count = (max_count - nrow(subscriptions)),
                                          fields = fields, extended = 1, v = v)
    subscriptions <- jsonlite::rbind_pages(list(subscriptions, subscriptions600))
    if (progress_bar) setTxtProgressBar(pb, nrow(subscriptions))
    offset_counter <- offset_counter + 1
  }
  if (progress_bar) close(pb)
  if (isTRUE(flatten)) subscriptions <- jsonlite::flatten(subscriptions)
  # Non-extended mode: split the combined list into communities and users.
  if (as.numeric(extended) == 0) {
    subscriptions_splt <- split(subscriptions, subscriptions$type == 'page')
    groups <- subscriptions_splt$`TRUE`
    users <- subscriptions_splt$`FALSE`
    return(list(groups = groups, users = users))
  }
  list(subscriptions = subscriptions, count = response$count)
}
#' Returns a list of users matching the search
criteria #' #' @param q Search query string (e.g., Vasya Babich) #' @param sort Sort order: 1 - by date registered; 0 - by rating #' @param offset Offset needed to return a specific subset of users #' @param count Number of users to return #' @param fields Profile fields to return #' @param city City ID #' @param country Country ID #' @param hometown City name in a string #' @param university_country ID of the country where the user graduated #' @param university ID of the institution of higher education #' @param university_year Year of graduation from an institution of higher education #' @param university_faculty Faculty ID #' @param university_chair Chair ID #' @param sex 1 - female; 2 - male; 0 - any (default) #' @param status Relationship status: 1 - Not married; 2 - In a relationship; 3 - Engaged; 4 - Married; 5 - It's complicated; 6 - Actively searching; 7 - In love #' @param age_from Minimum age #' @param age_to Maximum age #' @param birth_day Day of birth #' @param birth_month Month of birth #' @param birth_year Year of birth #' @param online 1 - online only; 0 - all users #' @param has_photo 1 - with photo only; 0 - all users #' @param school_country ID of the country where users finished school #' @param school_city ID of the city where users finished school #' @param school_class Positive number #' @param school ID of the school #' @param school_year School graduation year #' @param religion Users' religious affiliation #' @param interests Users' interests #' @param company Name of the company where users work #' @param position Job position #' @param group_id ID of a community to search in communities #' @param from_list List of comma-separated words #' @param flatten Automatically flatten nested data frames into a single non-nested data frame #' @param v Version of API #' @export
usersSearch <- function(q='', sort='', offset='', count='20', fields='', city='', country='',
                        hometown='', university_country='', university='', university_year='',
                        university_faculty='', university_chair='', sex='', status='',
                        age_from='', age_to='', birth_day='', birth_month='', birth_year='',
                        online='', has_photo='', school_country='', school_city='',
                        school_class='', school='', school_year='', religion='', interests='',
                        company='', position='', group_id='', from_list='',
                        flatten=FALSE, v=getAPIVersion()) {
  query <- queryBuilder('users.search',
                        q = q, sort = sort, offset = offset, count = count,
                        fields = profile_fields(fields), city = city, country = country,
                        hometown = hometown, university_country = university_country,
                        university = university, university_year = university_year,
                        university_faculty = university_faculty, university_chair = university_chair,
                        sex = sex, status = status, age_from = age_from, age_to = age_to,
                        birth_day = birth_day, birth_month = birth_month, birth_year = birth_year,
                        online = online, has_photo = has_photo, school_country = school_country,
                        school_city = school_city, school_class = school_class, school = school,
                        school_year = school_year, religion = religion, interests = interests,
                        company = company, position = position, group_id = group_id,
                        from_list = from_list, v = v)
  # Throttle to respect the VK API rate limit.
  request_delay()
  # Encode the URL before fetching, consistently with getUsers(): search
  # parameters such as q or hometown may legitimately contain spaces.
  response <- jsonlite::fromJSON(utils::URLencode(query))
  if (has_error(response)) return(try_handle_error(response))
  response <- response$response
  if (isTRUE(flatten) && response$count > 0)
    response$items <- jsonlite::flatten(response$items)
  response
}

#' Helper function for working with profile fields
#'
#' Normalizes a user-supplied fields specification into the comma-separated
#' string the VK API expects. Unknown field names are dropped with a warning.
#'
#' @param fields Profile fields to return
#' @examples \dontrun{
#' # get list of all fields
#' fields <- profile_fields('all')
#'
#' # get list of all fields except specified
#' fields <- profile_fields('all - photo_50,photo_100,photo_200')
#'
#' # get only specified fields
#' fields <- profile_fields('sex,bdate')
#' }
#' @export
profile_fields <- function(fields = '') {
  # The full set of profile fields users.get understands.
  fields_set <- c('photo_id', 'verified', 'sex', 'bdate', 'city', 'country', 'home_town',
                  'has_photo', 'photo_50', 'photo_100', 'photo_200_orig', 'photo_200',
                  'photo_400_orig', 'photo_max', 'photo_max_orig', 'online', 'lists',
                  'domain', 'has_mobile', 'contacts', 'site', 'education', 'universities',
                  'schools', 'status', 'last_seen', 'followers_count', 'common_count',
                  'occupation', 'nickname', 'relatives', 'relation', 'personal',
                  'connections', 'exports', 'wall_comments', 'activities', 'interests',
                  'music', 'movies', 'tv', 'books', 'games', 'about', 'quotes', 'can_post',
                  'can_see_all_posts', 'can_see_audio', 'can_write_private_message',
                  'can_send_friend_request', 'is_favorite', 'is_hidden_from_feed',
                  'timezone', 'screen_name', 'maiden_name', 'crop_photo', 'is_friend',
                  'friend_status', 'career', 'military', 'blacklisted', 'blacklisted_by_me')
  if (fields == '') return(fields)
  if (fields == 'all') return(paste(fields_set, collapse = ','))
  # "all - f1,f2,..." syntax: everything except the listed fields.
  chunks <- trimws(strsplit(fields, '-')[[1]])
  if (length(chunks) > 1) {
    # Honor every "-" chunk (the original only looked at the first one) and
    # trim each excluded name so "all - a, b" works as expected.
    excluded <- trimws(unlist(strsplit(chunks[-1], ',')))
    incorrect_fields <- setdiff(excluded, fields_set)
    if (length(incorrect_fields) != 0)
      warning("These fields are incorrect: ",
              paste(incorrect_fields, collapse = ','), call. = FALSE)
    return(paste(setdiff(fields_set, excluded), collapse = ','))
  }
  # Plain comma-separated list: keep only known fields, warn about the rest.
  requested <- trimws(strsplit(fields, ',')[[1]])
  incorrect_fields <- requested[!requested %in% fields_set]
  requested <- setdiff(requested, incorrect_fields)
  if (length(incorrect_fields) != 0)
    warning("These fields are incorrect: ",
            paste(incorrect_fields, collapse = ','), call. = FALSE)
  paste(requested, collapse = ',')
}

#' Returns user id by tag
#'
#' @param tag Tag
#' @export
tag2Id <- function(tag) {
  # getUsers() is deprecated; suppress its deprecation warning here.
  suppressWarnings(getUsers(tag)$id)
}

#' Returns current user ID
#'
#' @export
me <- function() {
  # Cache the resolved ID in the package environment (0 means "not resolved").
  if (.vkr$me == 0) .vkr$me <- suppressWarnings(getUsers()$id)
  .vkr$me
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/users.R
#' Predict age for the specified user
#'
#' Estimates a user's year of birth as the median birth year of the user's
#' friends that expose a full birth date.
#'
#' @param user_id User ID
#' @return A one-row data frame with \code{uid}, the estimated
#'   \code{year_of_birth} and \code{nfriends}, the number of friends used.
#' @export
age_predict <- function(user_id='') {
  friends <- getFriends(user_id = user_id, fields = 'bdate')$items
  # VK birth dates come as "D.M.YYYY" (or "D.M" without a year, which parses
  # to NA and is dropped below).
  # FIX: month is %m; the original "%d.%M.%Y" used %M (minutes) and
  # silently mis-parsed every birth date.
  friends$bdate <- as.Date.character(friends$bdate, format = "%d.%m.%Y")
  friends <- friends[!is.na(friends$bdate), ]
  friends$year_of_birth <- as.numeric(format(friends$bdate, "%Y"))
  data.frame(uid = user_id,
             year_of_birth = stats::median(friends$year_of_birth),
             nfriends = length(friends$year_of_birth))
}


#' Extract URLs from messages
#'
#' @param messages Array of messages
#' @param message_body Add message body to URLs
#' @return If \code{message_body} is TRUE, the messages that contain a URL;
#'   otherwise the first URL found in each matching message.
#' @export
getURLs <- function(messages, message_body=FALSE) {
  # http://stackoverflow.com/questions/26496538/extract-urls-with-regex-into-a-new-data-frame-column
  # NOTE: regexpr() finds only the FIRST URL per message by design.
  url_pattern <- "http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
  match <- regexpr(url_pattern, messages)
  if (message_body)
    as.character(messages[match != -1])
  else
    regmatches(messages, match)
}


#' Apply a method over a vector of objects
#'
#' Returns a data frame of the same number of rows as length of `objs`, each element of which is the
#' result of applying `method` to the corresponding element of `objs`
#' @param objs A vector of objects
#' @param method The function to be applied to each element of `objs`
#' @examples
#' \dontrun{
#' users <- vkApply(c("",1234567), function(user) getUsers(user, fields="sex"))
#' countries <- vkApply(c(2,5122182,1906578), getCountryByCityId)
#' }
#' @export
vkApply <- function(objs, method) {
  # FIX: collect all results first and bind once instead of growing a
  # data frame inside the loop (quadratic copying).
  results <- lapply(objs, method)
  if (length(results) == 0)
    return(data.frame())
  jsonlite::rbind_pages(results)
}


#' Create post object
#'
#' Picks the known post attributes out of a single named list and tags the
#' result with class "vkPost". Missing attributes become NULL elements.
#'
#' @param ... List of attributes
#' @export
vkPost <- function(...)
{
  args <- list(...)[[1]]
  post <- list(id = args[["id"]],
               from_id = args[["from_id"]],
               owner_id = args[["owner_id"]],
               date = args[["date"]],
               post_type = args[["post_type"]],
               text = args[["text"]],
               copy_history = args[["copy_history"]],
               post_source = args[["post_source"]],
               comments = args[["comments"]],
               likes = args[["likes"]],
               reposts = args[["reposts"]],
               attachments = args[["attachments"]],
               geo = args[["geo"]])
  class(post) <- "vkPost"
  return(post)
}


# Functions for NLP

#' Get stop words list for russian language
#'
#' Merges user-supplied stop words with the Google stop-word list shipped in
#' the package and, when available, tm's Russian stop-word list.
#'
#' @param stop_words User defined stop words
#' @importFrom utils read.table
#' @export
get_stop_words <- function(stop_words = c()) {
  tm_stop_words <- c()
  # tm is optional; skip silently when not installed.
  if (requireNamespace("tm", quietly = TRUE))
    tm_stop_words <- tm::stopwords('russian')

  google_stop_words <- c()
  filename <- system.file("extdata", "stop_words_russian.txt", package = 'vkR')
  if (file.exists(filename))
    google_stop_words <- as.vector(read.table(filename)$V1)

  stop_words <- unique(c(stop_words, google_stop_words, tm_stop_words))
  stop_words
}


#' Clear text
#'
#' Normalizes Cyrillic text for NLP: maps "ё" to "е", strips punctuation,
#' digits, URLs and Latin letters, applies user patterns, collapses
#' whitespace, lower-cases and trims.
#'
#' @param lines List of lines
#' @param patterns List of user defined patterns; each element is either a
#'   single regex (replaced by a space) or a \code{c(regex, replacement)} pair.
#' @export
clear_text <- function(lines, patterns = list()) {
  if (!requireNamespace("stringr", quietly = TRUE))
    stop("The package stringr was not installed")

  lines <- stringr::str_replace_all(lines, "[\u0451]", "\u0435")
  lines <- stringr::str_replace_all(lines, "[[:punct:]]", " ")
  lines <- stringr::str_replace_all(lines, "[[:digit:]]", " ")
  lines <- stringr::str_replace_all(lines, "http\\S+\\s*", " ")
  lines <- stringr::str_replace_all(lines, "[a-zA-Z]", " ")

  # FIX: scalar condition; use && and an explicit length test instead of
  # the vectorized & on a scalar if().
  if (is.list(patterns) && length(patterns) > 0) {
    for (pattern in patterns) {
      if (length(pattern) > 1)
        lines <- stringr::str_replace_all(lines, pattern[1], pattern[2])
      else
        lines <- stringr::str_replace_all(lines, pattern, " ")
    }
  }

  lines <- stringr::str_replace_all(lines, "\\s+", " ")
  lines <- tolower(lines)
  lines <- stringr::str_trim(lines, side = "both")
  lines
}


#' Logical or operator
#'
#' Returns \code{expr1} when it is "truthy" (coerces to TRUE, or is a
#' non-empty character value), otherwise \code{expr2}.
#'
#' @param expr1 Expression 1
#' @param expr2 Expression 2
#' @export
or <- function(expr1, expr2) {
  r <- any(as.logical(expr1))
  if ((!is.na(r) && r) || (is.character(expr1) && expr1 != ''))
    return(expr1)
  return(expr2)
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/utils.R
#' Access to VK API via R #' #' This package provides a series of functions that allow R users #' to access VK's API (\url{https://vk.com/dev/methods}) to get #' information about users, messages, groups, posts and likes. #' #' VK (\url{https://vk.com/}) is the largest European online social #' networking service, based in Russia. It is available in several #' languages, and is especially popular among Russian-speaking users. #' VK allows users to message each other publicly or privately, to #' create groups, public pages and events, share and tag images, #' audio and video, and to play browser-based games [1]. #' #' @references [1] \url{https://en.wikipedia.org/wiki/VK_(social_networking)} #' #' @seealso \code{\link{vkOAuth}}, #' \code{\link{getUsersExecute}}, \code{\link{getWallExecute}}, #' \code{\link{getFriends}}, \code{\link{getFriendsFor}}, #' \code{\link{getGroupsForUsers}}, \code{\link{getGroupsMembersExecute}}, #' \code{\link{likesGetListForObjects}}, \code{\link{messagesGetHistoryExecute}}, #' \code{\link{getArbitraryNetwork}}, \code{\link{getStatus}} #' #' @name vkR #' @docType package #' @author Dmitriy Sorokin \email{[email protected]} NULL if (!exists(".vkr")) { .vkr <- new.env() .vkr$access_token <- NULL .vkr$api_version <- '5.73' .vkr$me <- 0L .vkr$last_request_time <- 0 .vkr$num_requests <- 0 .vkr$max_requests <- 3 # Database variables .vkr$db_name <- 'vkR_projects' .vkr$db_active <- NULL .vkr$db_meta_name <- 'meta_collection' .vkr$db_metadata <- NULL # Handling connection errors .vkr$timeout <- 3 .vkr$max_repeats <- 3 .vkr$repeats_counter <- 0 } getAPIVersion <- function() { .vkr$api_version } #' Set API version #' #' @param v API version #' @export setAPIVersion <- function(v) { .vkr$api_version <- v }
/scratch/gouwar.j/cran-all/cranData/vkR/R/vkR.R
#' Returns a list of posts on a user wall or community wall
#'
#' @param owner_id ID of the user or community that owns the wall. By default, current user ID. Use a negative value to designate a community ID.
#' @param domain User or community short address.
#' @param offset Offset needed to return a specific subset of posts.
#' @param count Number of posts to return (maximum 100).
#' @param filter Filter to apply:
#' \itemize{
#' \item \strong{owner} - posts by the wall owner;
#' \item \strong{others} - posts by someone else;
#' \item \strong{all} - posts by the wall owner and others (default);
#' \item \strong{postponed} - timed posts (only available for calls with an access_token);
#' \item \strong{suggests} - suggested posts on a community wall.
#' }
#' @param extended 1 - to return wall, profiles, and groups fields, 0 - to return no additional fields (default).
#' @param fields List of comma-separated words
#' @param v Version of API
#' @return Returns a list of post objects.
#' If extended is set to 1, also returns the following:
#' \itemize{
#' \item \strong{wall} - Contains a list of post objects.
#' \item \strong{profiles} - Contains user objects with additional fields photo and online.
#' \item \strong{groups} - Contains community objects.
#' }
#' @examples \dontrun{
#' wall <- getWall(domain='spbrug', count=10, progress_bar=TRUE)
#' }
#' @export
getWall <- function(owner_id='', domain='', offset='', count='', filter='owner', extended='', fields='', v=getAPIVersion()) {
  # Deprecated single-request wrapper; getWallExecute() below batches via
  # VKScript and can fetch an entire wall.
  .Deprecated("getWallExecute()")
  query <- queryBuilder('wall.get',
                        owner_id = owner_id,
                        domain = domain,
                        offset = offset,
                        count = count,
                        filter = filter,
                        extended = extended,
                        fields = fields,
                        v = v)
  request_delay()
  response <- jsonlite::fromJSON(query)
  if (has_error(response)) return(try_handle_error(response))
  response$response
}


#' Returns a list of posts on a user wall or community wall
#'
#' @param owner_id ID of the user or community that owns the wall. By default, current user ID. Use a negative value to designate a community ID.
#' @param domain User or community short address.
#' @param offset Offset needed to return a specific subset of posts.
#' @param count Number of posts to return (0 for all posts).
#' @param filter Filter to apply:
#' \itemize{
#' \item \strong{owner} - posts by the wall owner;
#' \item \strong{others} - posts by someone else;
#' \item \strong{all} - posts by the wall owner and others (default);
#' \item \strong{postponed} - timed posts (only available for calls with an access_token);
#' \item \strong{suggests} - suggested posts on a community wall.
#' }
#' @param extended 1 - to return wall, profiles, and groups fields, 0 - to return no additional fields (default).
#' @param fields List of comma-separated words
#' @param use_db Use database
#' @param db_params Collection name and suffix
#' @param progress_bar Display progress bar
#' @param v Version of API
#' @return Returns a list of post objects.
#' If extended is set to 1, also returns the following:
#' \itemize{
#' \item \strong{wall} - Contains a list of post objects.
#' \item \strong{profiles} - Contains user objects with additional fields photo and online.
#' \item \strong{groups} - Contains community objects.
#' }
#' @importFrom utils setTxtProgressBar txtProgressBar
#' @examples \dontrun{
#' # get all posts from wall
#' wall <- getWallExecute(domain='spbrug', count=0, progress_bar=TRUE)
#' }
#' @export
getWallExecute <- function(owner_id='', domain='', offset=0, count=10, filter='owner', extended='', fields='', use_db=FALSE, db_params=list(), progress_bar=FALSE, v=getAPIVersion()) {
  # Fetches up to 2500 posts in ONE API round-trip by generating VKScript:
  # a single execute() call may itself issue up to 25 API.wall.get calls of
  # 100 posts each. NOTE(review): `fields` is accepted here but never
  # spliced into the VKScript - confirm whether it should be forwarded.
  get_posts2500 <- function(owner_id='', domain='', offset=0, max_count='', filter='owner', extended='', fields='', v=getAPIVersion()) {
    if (max_count > 2500) max_count <- 2500
    if (max_count <= 100) {
      # Small request: a single API.wall.get suffices.
      execute(paste0('return API.wall.get({"owner_id":"', owner_id, '",
                     "domain":"', domain, '",
                     "offset":"', offset, '",
                     "count":"', max_count, '",
                     "filter":"', filter, '",
                     "extended":"', extended, '",
                     "v":"', v, '"}).items;'))
    } else {
      # Build a VKScript loop that keeps appending 100-post pages until
      # max_count posts are collected or the server runs dry.
      code <- 'var wall_records = [];'
      code <- paste0(code, 'wall_records = wall_records + API.wall.get({"owner_id":"', owner_id, '",
                     "domain":"', domain, '",
                     "offset":"', offset, '",
                     "count":"', 100, '",
                     "filter":"', filter, '",
                     "extended":"', extended, '",
                     "v":"', v, '"}).items;')
      code <- paste0(code, 'var offset = 100 + ', offset, ';
                     var count = 100;
                     var max_offset = offset + ', max_count, ';
                     while (offset < max_offset && wall_records.length <= offset && offset-', offset, '<', max_count, ')
                     {
                       if (', max_count, ' - wall_records.length < 100) {
                         count = ', max_count, ' - wall_records.length;
                       };
                       wall_records = wall_records + API.wall.get({"owner_id":"', owner_id, '",
                         "domain":"', domain, '",
                         "offset":offset,
                         "count":count,
                         "filter":"', filter, '",
                         "extended":"', extended, '",
                         "v":"', v, '"}).items;
                       offset = offset + 100;
                     };
                     return wall_records;')
      execute(code)
    }
  }

  # Probe request (count=1) to learn the wall's total post count.
  code <- paste0('return API.wall.get({"owner_id":"', owner_id, '",
                 "domain":"', domain, '",
                 "offset":"', offset, '",
                 "count":"', 1, '",
                 "filter":"', filter, '",
                 "extended":"', extended, '",
                 "v":"', v, '"});')
  response <- execute(code)

  posts <- response$items
  # count == 0 means "fetch everything after offset".
  max_count <- ifelse((response$count - offset) > count & count != 0, count, response$count - offset)

  if (max_count == 0)
    return(list(posts = response$items, count = response$count))

  if (use_db) {
    # Resolve collection/suffix/key from db_params with sensible fallbacks.
    # These locals are reused by db_update() inside the loop below.
    collection <- or(db_params[['collection']], or(domain, owner_id))
    suffix <- or(db_params[['suffix']], 'wall')
    key <- or(db_params[['key']], 'id')
    if (collection_exists(collection, suffix))
      db_update(object = posts, key = key, collection = collection, suffix = suffix, upsert = TRUE)
    else
      db_save(object = posts, collection = collection, suffix = suffix)
  }

  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = max_count, style = 3)
    setTxtProgressBar(pb, nrow(posts))
  }

  # Batch size per execute() call; halved on VK error 13 ("response too big").
  num_records <- max_count - nrow(posts)
  while (nrow(posts) < max_count) {
    tryCatch({
      posts2500 <- get_posts2500(owner_id = owner_id, domain = domain, filter = filter,
                                 extended = extended, fields = fields,
                                 max_count = num_records, offset = offset + nrow(posts), v = v)
      if (use_db)
        db_update(object = posts2500, key = key, collection = collection, suffix = suffix, upsert = TRUE)
      posts <- jsonlite::rbind_pages(list(posts, posts2500))
      num_records <- ifelse((max_count - nrow(posts)) > num_records, num_records, max_count - nrow(posts))
    }, vk_error13 = function(e) {
      # Response too large: shrink the batch and retry on the next iteration.
      # <<- is required to reach num_records in the enclosing frame.
      num_records <<- as.integer(num_records / 2)
      warning(simpleWarning(paste0('Parameter "count" was tuned: ', num_records, ' per request.')))
    })
    if (progress_bar)
      setTxtProgressBar(pb, nrow(posts))
  }

  if (progress_bar)
    close(pb)

  wall <- list(posts = posts, count = response$count)
  class(wall) <- c(class(wall), "posts.list")
  return(wall)
}


#' Allows to search posts on user or community walls
#'
#' @param owner_id User or community id. Remember that for a community owner_id must be negative.
#' @param domain User or community screen name.
#' @param query Search query string.
#' @param owners_only 1 - returns only page owner's posts.
#' @param count Count of posts to return.
#' @param offset Results offset.
#' @param extended Show extended post info.
#' @param fields List of comma-separated words
#' @param v Version of API
#' @return If executed successfully, returns a list of post objects.
#' @export
wallSearch <- function(owner_id='', domain='', query='', owners_only='', count='20', offset='0', extended='', fields='', v=getAPIVersion()) {
  # Build the wall.search request, throttle, then parse the JSON reply.
  request <- queryBuilder('wall.search',
                          owner_id = owner_id,
                          domain = domain,
                          query = query,
                          owners_only = owners_only,
                          count = count,
                          offset = offset,
                          extended = extended,
                          fields = fields,
                          v = v)
  request_delay()
  parsed <- jsonlite::fromJSON(request)
  if (has_error(parsed)) {
    return(try_handle_error(parsed))
  }
  parsed$response
}


#' Returns a list of posts from user or community walls by their IDs
#'
#' @param posts User or community IDs and post IDs, separated by underscores. Use a negative value to designate a community ID.
#' @param extended 1 - to return user and community objects needed to display posts, 0 - no additional fields are returned (default).
#' @param copy_history_depth Sets the number of parent elements to include in the array copy_history that is returned if the post is a repost from another wall.
#' @param fields List of comma-separated words
#' @param v Version of API
#' @return Returns a list of post objects.
#' If extended is set to 1, returns the following:
#' \itemize{
#' \item \strong{wall} - Contains post objects.
#' \item \strong{profiles} - Contains user objects with additional fields sex, photo, photo_medium_rec, and online.
#' \item \strong{groups} - Contains community objects.
#' }
#' If the post is a copy of another post, returns an additional array copy_history with information about original posts.
#' @export
wallGetById <- function(posts='', extended='', copy_history_depth='', fields='', v=getAPIVersion()) {
  # Thin wrapper around wall.getById: build, throttle, parse, unwrap.
  request <- queryBuilder('wall.getById',
                          posts = posts,
                          extended = extended,
                          copy_history_depth = copy_history_depth,
                          fields = fields,
                          v = v)
  request_delay()
  parsed <- jsonlite::fromJSON(request)
  if (has_error(parsed)) {
    return(try_handle_error(parsed))
  }
  parsed$response
}


#' Returns information about reposts of a post on user wall or community wall
#'
#' @param owner_id User ID or community ID. By default, current user ID. Use a negative value to designate a community ID.
#' @param post_id Post ID.
#' @param offset Offset needed to return a specific subset of reposts.
#' @param count Number of reposts to return.
#' @param v Version of API
#' @return
#' Returns an object containing the following fields:
#' \itemize{
#' \item \strong{items} - An array of wall reposts.
#' \item \strong{profiles} - Information about users with additional fields sex, online, photo, photo_medium_rec, and screen_name.
#' \item \strong{groups} - Information about communities.
#' }
#' @export
wallGetReposts <- function(owner_id='', post_id='', offset='', count='20', v=getAPIVersion()) {
  # Thin wrapper around wall.getReposts: build, throttle, parse, unwrap.
  request <- queryBuilder('wall.getReposts',
                          owner_id = owner_id,
                          post_id = post_id,
                          offset = offset,
                          count = count,
                          v = v)
  request_delay()
  parsed <- jsonlite::fromJSON(request)
  if (has_error(parsed)) {
    return(try_handle_error(parsed))
  }
  parsed$response
}


#' Returns a list of comments on a post on a user wall or community wall
#'
#' @param owner_id User ID or community ID. Use a negative value to designate a community ID.
#' @param post_id Post ID.
#' @param need_likes 1 - to return the likes field, 0 - not to return the likes field (default).
#' @param start_comment_id Positive number.
#' @param offset Offset needed to return a specific subset of comments.
#' @param count Number of comments to return (maximum 100).
#' @param sort Sort order: asc - chronological, desc - reverse chronological.
#' @param preview_length Number of characters at which to truncate comments when previewed. By default, 90. Specify 0 if you do not want to truncate comments. #' @param extended Flag, either 1 or 0. #' @param v Version of API #' @export wallGetComments <- function(owner_id='', post_id='', need_likes='', start_comment_id='', offset='', count='10', sort='', preview_length='0', extended='', v=getAPIVersion()) { query <- queryBuilder('wall.getComments', owner_id = owner_id, post_id = post_id, need_likes = need_likes, start_comment_id = start_comment_id, offset = offset, count = count, sort = sort, preview_length = preview_length, extended = extended, v = v) request_delay() response <- jsonlite::fromJSON(query) if (has_error(response)) return(try_handle_error(response)) response$response } #' Returns a list of comments on a post on a user wall or community wall #' #' @param owner_id User ID or community ID. Use a negative value to designate a community ID. #' @param post_id Post ID. #' @param need_likes 1 - to return the likes field (default), 0 - not to return the likes field. #' @param start_comment_id Positive number #' @param offset Offset needed to return a specific subset of comments. #' @param count Number of comments to return. #' @param sort Sort order: asc - chronological, desc - reverse chronological. #' @param preview_length Number of characters at which to truncate comments when previewed. Specify 0 (default) if you do not want to truncate comments. #' @param extended Flag, either 1 or 0. 
#' @param progress_bar Display progress bar
#' @param v Version of API
#' @export
postGetComments <- function(owner_id='', post_id='', need_likes=1, start_comment_id='', offset=0, count=10, sort='', preview_length=0, extended='', progress_bar = FALSE, v=getAPIVersion()) {
  # Fetches up to 2500 comments in one execute() call by generating a
  # VKScript loop of 100-comment pages (VKScript allows up to 25 API calls
  # per execute()).
  get_comments2500 <- function(owner_id='', post_id='', need_likes=1, start_comment_id='', offset=0, max_count='', sort='', preview_length=0, extended='', v=getAPIVersion()) {
    if (max_count > 2500) max_count <- 2500
    if (max_count <= 100) {
      # Small request: a single API.wall.getComments suffices.
      execute(paste0('return API.wall.getComments({ "owner_id":"', owner_id, '",
                     "post_id":"', post_id, '",
                     "count":"', max_count, '",
                     "offset":"', offset, '",
                     "need_likes":"', need_likes, '",
                     "start_comment_id":"', start_comment_id, '",
                     "sort":"', sort, '",
                     "preview_length":"', preview_length, '",
                     "extended":"', extended, '",
                     "v":"', v, '"}).items;'))
    } else {
      code <- 'var comments = [];'
      code <- paste0(code, 'comments = comments + API.wall.getComments({ "owner_id":"', owner_id, '",
                     "post_id":"', post_id, '",
                     "count":"', 100, '",
                     "offset":"', offset, '",
                     "need_likes":"', need_likes, '",
                     "start_comment_id":"', start_comment_id, '",
                     "sort":"', sort, '",
                     "preview_length":"', preview_length, '",
                     "extended":"', extended, '",
                     "v":"', v, '"}).items;')
      code <- paste0(code, 'var offset = 100 + ', offset, ';
                     var count = 100;
                     var max_offset = offset + ', max_count, ';
                     while (offset < max_offset && comments.length <= offset && offset-', offset, '<', max_count, ')
                     {
                       if (', max_count, ' - comments.length < 100) {
                         count = ', max_count, ' - comments.length;
                       };
                       comments = comments + API.wall.getComments({ "owner_id":"', owner_id, '",
                         "post_id":"', post_id, '",
                         "offset":offset,
                         "count":count,
                         "need_likes":"', need_likes, '",
                         "start_comment_id":"', start_comment_id, '",
                         "sort":"', sort, '",
                         "preview_length":"', preview_length, '",
                         "extended":"', extended, '",
                         "v":"', v, '"}).items;
                       offset = offset + 100;
                     };
                     return comments;')
      execute(code)
    }
  }

  # Probe request (count=1) to learn the total comment count.
  code <- paste0('return API.wall.getComments({ "owner_id":"', owner_id, '",
                 "post_id":"', post_id, '",
                 "count":"', 1, '",
                 "offset":"', offset, '",
                 "need_likes":"', need_likes, '",
                 "start_comment_id":"', start_comment_id, '",
                 "sort":"', sort, '",
                 "preview_length":"', preview_length, '",
                 "extended":"', extended, '",
                 "v":"', v, '"});')
  response <- execute(code)
  comments <- response$items

  # count == 0 means "fetch everything after offset".
  max_count <- ifelse((response$count - offset) > count & count != 0, count, response$count - offset)

  if (max_count == 0)
    return(list(comments = response$items, count = response$count))

  offset_counter <- 0
  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = max_count, style = 3)
    setTxtProgressBar(pb, nrow(comments))
  }

  while (nrow(comments) < max_count) {
    tryCatch({
      # NOTE(review): the "+1" in the offset assumes the probe consumed one
      # comment; verify against the VK pagination semantics.
      comments2500 <- get_comments2500(owner_id = owner_id, post_id = post_id,
                                       need_likes = need_likes, extended = extended,
                                       sort = sort, preview_length = preview_length,
                                       start_comment_id = start_comment_id,
                                       max_count = (max_count - nrow(comments)),
                                       offset = (1 + offset + offset_counter * 2500), v = v)
      comments <- jsonlite::rbind_pages(list(comments, comments2500))
      offset_counter <- offset_counter + 1
    }, error = function(e) {
      # Best-effort: errors are downgraded to warnings so a partial fetch
      # is still returned. NOTE(review): a persistent error can loop here.
      warning(e)
    })
    if (progress_bar)
      setTxtProgressBar(pb, nrow(comments))
  }
  if (progress_bar)
    close(pb)

  list(comments = comments, count = response$count)
}


#' Returns a list of comments on a user wall or community wall
#'
#' @param posts A list of posts or wall object (from getWallExecute())
#' @param progress_bar Display progress bar
#' @param v Version of API
#' @return A named list (class "vk.comments") with one element per post id.
#' @export
wallGetCommentsList <- function(posts, progress_bar = FALSE, v = getAPIVersion()) {
  # Batch helper: fetches the first <=100 comments for up to 25 posts per
  # execute() call, keyed by post id.
  get_comments <- function(posts, v = getAPIVersion()) {
    num_requests <- ceiling(nrow(posts) / 25)
    from <- 1
    to <- 25
    comments <- list()
    for (i in seq_len(num_requests)) {
      code <- 'var comments_per_post = {}; var comments;'
      if (to > nrow(posts)) to <- nrow(posts)
      for (index in from:to) {
        # FIX: dropped the bogus `sep = ""` argument; paste0() has no `sep`
        # parameter, so it was silently pasted into the VKScript as "".
        code <- paste0(code, 'comments = API.wall.getComments({ "owner_id":"', posts[index, ]$owner_id, '",
                       "post_id":"', posts[index, ]$id, '",
                       "need_likes":"', 1, '",
                       "count":"', 100, '",
                       "v":"', v, '"}).items; comments_per_post.post', posts[index, ]$id, "=comments;")
      }
      code <- paste0(code, 'return comments_per_post;')
      comments <- append(comments, execute(code))
      from <- from + 25
      to <- to + 25
    }
    # Strip the "post" prefix used to build legal VKScript identifiers.
    obj_ids <- unlist(strsplit(names(comments), "post", fixed = TRUE))
    obj_ids <- as.integer(obj_ids[obj_ids != ""])
    names(comments) <- obj_ids
    comments
  }

  if ("posts.list" %in% class(posts))
    posts <- posts$posts

  # Split posts into those servable in one page (<=100 comments) and those
  # that need the paginated postGetComments() path.
  cmt_groups <- split(posts, posts$comments$count > 100)
  posts_le100 <- cmt_groups[['FALSE']]
  posts_gt100 <- cmt_groups[['TRUE']]

  comments <- list()
  if (progress_bar) {
    pb <- txtProgressBar(min = 0, max = nrow(posts), style = 3)
    setTxtProgressBar(pb, 0)
  }

  # FIX: guard against posts_le100 being NULL (every post has >100
  # comments); the original crashed on nrow(NULL) in that case.
  if (!is.null(posts_le100)) {
    from <- 1
    max_count <- nrow(posts_le100)
    to <- ifelse(max_count >= 75, 75, max_count)
    repeat {
      comments75 <- get_comments(posts_le100[from:to, ], v)
      comments <- append(comments, comments75)
      if (progress_bar)
        setTxtProgressBar(pb, length(comments))
      if (to >= max_count)
        break
      from <- to + 1
      to <- ifelse(to + 75 >= max_count, max_count, to + 75)
    }
  }

  if (!is.null(posts_gt100)) {
    for (i in seq_len(nrow(posts_gt100))) {
      owner_id <- posts_gt100$owner_id[i]
      post_id <- posts_gt100$id[i]
      # count = 0 -> fetch all comments for this post.
      comments[[paste0(post_id)]] <- postGetComments(owner_id = owner_id, post_id = post_id, count = 0, v = v)$comments
      if (progress_bar)
        setTxtProgressBar(pb, length(comments))
    }
  }
  if (progress_bar)
    close(pb)

  # Re-emit the comments in the original post order.
  comments_ordered <- list()
  for (i in seq_len(nrow(posts))) {
    comments_ordered[[paste0(posts$id[i])]] <- comments[[paste0(posts$id[i])]]
  }

  class(comments_ordered) <- c(class(comments_ordered), "vk.comments")
  comments_ordered
}


#' Filtering attachments by type
#'
#' @param attachments List of attachments
#' @param type type field may have the following values:
#' \itemize{
#' \item \strong{photo} - photo from an album;
#' \item \strong{posted_photo} - photo uploaded directly from user's computer;
#' \item \strong{video} - video;
#' \item \strong{audio} - audio;
#' \item \strong{doc} - document;
#' \item \strong{graffiti} - graffiti;
#' \item \strong{url} - web page URL;
#' \item \strong{note} - note;
#' \item \strong{app} - image uploaded with a third party application;
#' \item \strong{poll} - poll;
#' \item \strong{page} - wiki page.
#' }
#' @export
filterAttachments <- function(attachments, type) {
  if (!requireNamespace("plyr", quietly = TRUE)) {
    # FIX: the original passed `.call = FALSE` (typo for `call.`), which
    # stop() pasted into the error message as "FALSE".
    stop("plyr package needed for this function to work. Please install it.", call. = FALSE)
  }
  if (!is.character(type)) stop('type must be a character')

  filtered_attachments <- data.frame()
  # FIX: seq_along()/seq_len() instead of 1:length()/1:nrow(), which
  # iterate (1, 0) on empty input.
  for (i in seq_along(attachments)) {
    if (!is.null(attachments[[i]])) {
      for (j in seq_len(nrow(attachments[[i]]))) {
        if (attachments[[i]][j, ]$type == type) {
          filtered_attachments <- plyr::rbind.fill(filtered_attachments, attachments[[i]][j, ][[type]])
        }
      }
    }
  }
  filtered_attachments
}
/scratch/gouwar.j/cran-all/cranData/vkR/R/wall.R
#' @importFrom VMDecomp vmd #' @importFrom forecast nnetar forecast #' @importFrom utils head tail #' @importFrom graphics plot #' @importFrom stats as.ts ts #' @export #' VMDARIMA <- function(data, stepahead=10, nIMF=4, alpha=2000, tau=0, D=FALSE) { data <- ts(data) data<- as.vector(data) v<- vmd(data , alpha=2000, tau=0, K=nIMF, DC=D, init=1, tol = 1e-6) AllIMF<-v$u data_trn <- ts(head(data, round(length(data) - stepahead))) data_test <- ts(tail(data, stepahead)) IMF_trn <- AllIMF[-c(((length(data) - stepahead) + 1):length(data)), ] Fcast_AllIMF <- NULL for (AllIMF in 1:(ncol(IMF_trn))) { IndIMF <- NULL IndIMF <- IMF_trn[, AllIMF] VMDARIMAFit <- forecast::auto.arima(as.ts(IndIMF)) VMDARIMA_fcast = forecast::forecast(VMDARIMAFit, h = stepahead) VMDARIMA_fcast_Mean = VMDARIMA_fcast$mean Fcast_AllIMF <- cbind(Fcast_AllIMF, as.matrix(VMDARIMA_fcast_Mean)) } FinalVMDARIMA_fcast <- ts(rowSums(Fcast_AllIMF, na.rm = T)) MAE_VMDARIMA = mean(abs(data_test - FinalVMDARIMA_fcast)) MAPE_VMDARIMA = mean(abs(data_test - FinalVMDARIMA_fcast)/data_test) RMSE_VMDARIMA = sqrt(mean((data_test - FinalVMDARIMA_fcast)^2)) return(list(AllIMF = AllIMF, data_test = data_test, AllIMF_forecast = Fcast_AllIMF, FinalVMDARIMA_forecast = FinalVMDARIMA_fcast, MAE_VMDARIMA = MAE_VMDARIMA, MAPE_VMDARIMA = MAPE_VMDARIMA, RMSE_VMDARIMA = RMSE_VMDARIMA )) }
/scratch/gouwar.j/cran-all/cranData/vmdTDNN/R/VMDARIMA.R
#' @importFrom VMDecomp vmd #' @importFrom forecast nnetar forecast #' @importFrom utils head tail #' @importFrom nnfor elm #' @importFrom graphics plot #' @importFrom stats as.ts ts #' @export #' VMDELM <- function(data, stepahead=10, nIMF=4, alpha=2000, tau=0, D=FALSE) { data <- ts(data) data<- as.vector(data) v<- vmd(data , alpha=2000, tau=0, K=nIMF, DC=D, init=1, tol = 1e-6) AllIMF<-v$u data_trn <- ts(head(data, round(length(data) - stepahead))) data_test <- ts(tail(data, stepahead)) IMF_trn <- AllIMF[-c(((length(data) - stepahead) + 1):length(data)), ] Fcast_AllIMF <- NULL for (AllIMF in 1:(ncol(IMF_trn))) { IndIMF <- NULL IndIMF <- IMF_trn[, AllIMF] VMDELMFit <- nnfor::elm(as.ts(IndIMF)) VMDELM_fcast = forecast::forecast(VMDELMFit, h = stepahead) VMDELM_fcast_Mean = VMDELM_fcast$mean Fcast_AllIMF <- cbind(Fcast_AllIMF, as.matrix(VMDELM_fcast_Mean)) } FinalVMDELM_fcast <- ts(rowSums(Fcast_AllIMF, na.rm = T)) MAE_VMDELM = mean(abs(data_test - FinalVMDELM_fcast)) MAPE_VMDELM = mean(abs(data_test - FinalVMDELM_fcast)/data_test) RMSE_VMDELM = sqrt(mean((data_test - FinalVMDELM_fcast)^2)) return(list(AllIMF = AllIMF, data_test = data_test, AllIMF_forecast = Fcast_AllIMF, FinalVMDELM_forecast = FinalVMDELM_fcast, MAE_VMDELM = MAE_VMDELM, MAPE_VMDELM = MAPE_VMDELM, RMSE_VMDELM = RMSE_VMDELM )) }
/scratch/gouwar.j/cran-all/cranData/vmdTDNN/R/VMDELM.R
#' @importFrom VMDecomp vmd #' @importFrom forecast nnetar forecast #' @importFrom utils head tail #' @importFrom graphics plot #' @importFrom stats as.ts ts #' @export #' VMDTDNN <- function(data, stepahead=10, nIMF=4, alpha=2000, tau=0, D=FALSE) { data <- ts(data) data<- as.vector(data) v<- vmd(data , alpha=2000, tau=0, K=nIMF, DC=D, init=1, tol = 1e-6) AllIMF<-v$u data_trn <- ts(head(data, round(length(data) - stepahead))) data_test <- ts(tail(data, stepahead)) IMF_trn <- AllIMF[-c(((length(data) - stepahead) + 1):length(data)), ] Fcast_AllIMF <- NULL for (AllIMF in 1:(ncol(IMF_trn))) { IndIMF <- NULL IndIMF <- IMF_trn[, AllIMF] VMDTDNNFit <- forecast::nnetar(as.ts(IndIMF)) VMDTDNN_fcast = forecast::forecast(VMDTDNNFit, h = stepahead) VMDTDNN_fcast_Mean = VMDTDNN_fcast$mean Fcast_AllIMF <- cbind(Fcast_AllIMF, as.matrix(VMDTDNN_fcast_Mean)) } FinalVMDTDNN_fcast <- ts(rowSums(Fcast_AllIMF, na.rm = T)) MAE_VMDTDNN = mean(abs(data_test - FinalVMDTDNN_fcast)) MAPE_VMDTDNN = mean(abs(data_test - FinalVMDTDNN_fcast)/data_test) RMSE_VMDTDNN = sqrt(mean((data_test - FinalVMDTDNN_fcast)^2)) return(list(AllIMF = AllIMF, data_test = data_test, AllIMF_forecast = Fcast_AllIMF, FinalVMDTDNN_forecast = FinalVMDTDNN_fcast, MAE_VMDTDNN = MAE_VMDTDNN, MAPE_VMDTDNN = MAPE_VMDTDNN, RMSE_VMDTDNN = RMSE_VMDTDNN )) }
/scratch/gouwar.j/cran-all/cranData/vmdTDNN/R/VMDTDNN.R
#' Quantify multiple animals at the same time
#'
#' Runs quantify_folder() in parallel (one worker per sub-folder) over every
#' sub-folder of the chosen working folder, with a progress bar.
#'
#' @param working_folder The folder containing the completed analysis
#'
#' @importFrom future plan multisession
#' @importFrom doFuture registerDoFuture
#' @importFrom progressr handlers handler_progress with_progress progressor
#' @importFrom foreach foreach %dopar%
#' @importFrom progressr progressor handlers
#' @importFrom tcltk tk_choose.dir
#'
#' @return saves a pdf of a series of quantified folders in the root directory
#'
#' @noRd
#'
#' @examples
#' # Used interactivley only
#'
quantify_folders = function(working_folder = tk_choose.dir()) {
  folders_to_process = list.dirs(working_folder, recursive = FALSE)
  # graph_folder(folders_to_process[1])

  # Parallel backend: one R session per worker via future/doFuture.
  registerDoFuture()
  plan(multisession)

  handlers(list(
    handler_progress(
      format = ":spin :current/:total (:message) [:bar] :percent in :elapsed ETA: :eta"
    )))

  with_progress({
    p <- progressor(steps = length(folders_to_process))
    y <- foreach(x = folders_to_process) %dopar% {
      quantify_folder(x)
      p(message = sprintf("Completed %s", x), class = "sticky")
      gc()  # free worker memory between folders
    }
  })
}

#' Quantify all ROI in a single folder
#'
#' Imports the binned traces for one folder, writes an overall plot and CSV,
#' detects contraction events per trace, and combines the per-trace PDFs into
#' a single report next to the input folder. Intermediate files go to a
#' hashed scratch sub-folder that is removed at the end.
#'
#' @param folder The folder to quantify
#' @param kband The k value to use in smoothing the data
#'
#' @return Saves a PDF file of the analyasis generated
#'
#' @noRd
#'
#' @importFrom pdftools pdf_combine
#' @importFrom ggplot2 aes facet_wrap ggsave
#' @importFrom readr write_csv
#' @importFrom dplyr select distinct
#' @importFrom tcltk tk_choose.dir
#'
#'
#' @examples
#' # Select a folder and it will be quantified
#'
#' folder = choose.dir()
#'
quantify_folder = function(folder, kband = 40) {
  var1 = import_folder_bin(folder)

  # NOTE(review): summarise(n = n()) keeps only the grouping columns and n,
  # yet p_mean is used on var1.1/var2 below - confirm whether this should
  # also aggregate p_mean (e.g. p_mean = mean(p_mean)).
  var1.1 = var1 %>%
    group_by(frame_id, video, animal, treatment, roi, ygroup, trace_id) %>%
    summarise(n = n())

  # Flag, per roi, the trace with the largest diameter range (maxcont).
  var2 = var1.1 %>%
    group_by(trace_id) %>%
    mutate(max_range = max(p_mean) - min(p_mean)) %>%
    group_by(roi) %>%
    mutate(maxcont = max_range == max(max_range))

  # Plot out that data
  ggplot(var2) +
    geom_line(aes(x = frame_id, y = p_mean, color = as.factor(ygroup), group = ygroup)) +
    facet_wrap(~ roi)

  # roi1 = var2 %>% filter(roi == "AP19S1.1_1.1") %>% filter(trace_id == 1)
  # roi1 = roi1 %>% group_by(trace_id, frame_id) %>% summarise(p_mean = mean(p_mean))
  # ggplot(roi1) + geom_line(aes(x = frame_id, y = p_mean, color = trace_id))

  var2.1 = var2

  # Title like "Animal_x_Treatment_y"; [[1]] keeps only the first combination.
  animal_trt = paste("Animal", unique(var2.1$animal), "Treatment",
                     paste(unique(var2.1$treatment)), sep = "_")[[1]]

  # Unique scratch folder for intermediate per-trace outputs.
  # NOTE(review): "\\" path separators are Windows-only.
  quant_folder = paste(scratch_dir(), "\\", hash(paste(folder, Sys.time())), sep = "")
  dir.create(quant_folder)

  folder_files = list.files(folder, recursive = TRUE, pattern = "\\_ width.csv$",
                            full.names = TRUE)

  # for(one_region in folder_files)
  # {
  #   heat_plot = plot_heatmap(one_region)
  #
  #   overall_plot_heat_file = paste(quant_folder, "\\",animal_trt, file_path_sans_ext(basename(one_region)), "_heatmap.pdf", sep = "")
  #
  #   ggsave(overall_plot_heat_file, heat_plot, width = 297, height = 210, units = "mm")
  #
  # }

  # Import a whole folder of data, binned into ygroups
  overall_plot = ggplot(var2.1) +
    geom_line(aes(x = frame_id, y = p_mean, color = as.factor(ygroup), group = ygroup)) +
    labs(title = animal_trt) +
    facet_wrap(~ roi)

  overall_plot_file = paste(quant_folder, "\\", animal_trt, "_overall.pdf", sep = "")
  overall_data_file = paste(folder, "\\", animal_trt, "_overall.csv", sep = "")

  # A4 landscape output.
  ggsave(overall_plot_file, overall_plot, width = 297, height = 210, units = "mm")
  write_csv(var2, overall_data_file)

  # Per-trace contraction detection and plotting.
  for (trace in unique(var2.1$trace_id)) {
    local_data = subset(var2.1, var2.1$trace_id == trace)

    title_data = local_data %>% select(animal, treatment, roi, ygroup) %>% distinct()
    title = paste(paste(colnames(title_data), title_data, sep = "_"), collapse = ",")

    peak_data = find_contraction_events(input_vector = local_data$p_mean,
                                        min_dist = 100, kband = kband,
                                        min_change = 0.5, nups = 10)
    write.csv(peak_data[2], paste(quant_folder, "\\", title, "_peaks.csv", sep = ""))

    # Video boundaries, drawn as vertical lines on the trace plot.
    breaks = local_data %>%
      group_by(video) %>%
      summarise(max_frame = max(frame_id), min_frame = min(frame_id))
    breaks$minima = min(local_data$p_mean)

    # peak_data[[1]] is the annotated plot when events were found; fall back
    # to a plain trace plot otherwise.
    if (!is.null(peak_data[[1]])) {
      graph_output = peak_data[[1]] +
        labs(title = title) +
        geom_vline(aes(xintercept = breaks$max_frame))
    } else {
      graph_output = ggplot(local_data) +
        geom_line(aes(x = frame_id, y = p_mean)) +
        labs(title = title) +
        geom_vline(xintercept = breaks$max_frame)
    }

    ggsave(paste(quant_folder, "\\", title, "_peaks.pdf", sep = ""),
           graph_output, width = 297, height = 210, units = "mm")
  }

  # Merge the per-trace PDFs (plus any heatmaps) into one report in `folder`.
  heatmap_list = list.files(quant_folder, pattern = "_heatmap.pdf", full.names = TRUE)
  pdf_list = list.files(quant_folder, pattern = "_peaks.pdf", full.names = TRUE)
  pdf_combine(c(heatmap_list, overall_plot_file, pdf_list),
              paste(folder, "\\", animal_trt, "_combined.pdf", sep = ""))

  folder_files = list.files(quant_folder, recursive = TRUE, pattern = "\\_peaks.csv$",
                            full.names = TRUE)

  # Read one peaks CSV, tagging each row with its source file; returns NULL
  # on failure (try() swallows the error).
  import_csv_with_source = function(csv_file) {
    try({
      dataframe = read.csv(csv_file)
      dataframe$source_file = file_path_sans_ext(basename(csv_file))
      dataframe$X1 = NULL
      return(dataframe)
    })
    return(NULL)
  }

  # Import them all with lapply and combine with dplyr
  applied = lapply(folder_files, import_csv_with_source)
  contraction_data = dplyr::bind_rows(applied, .id = "file_id")

  overall_contraction_file = paste(folder, "\\", animal_trt, "_contractions.csv", sep = "")
  write.csv(contraction_data, overall_contraction_file)

  # Remove the scratch folder and its intermediates.
  unlink(quant_folder)
}

#' Plot a heatmap showing a vessel's contraction over time
#'
#' @param heatmap_file a vmeasur file of vessel diameters over time
#'   (path to a CSV, or an already-imported data frame)
#'
#' @return a ggplot2 heatmap
#'
#' @noRd
#'
#' @importFrom ggplot2 ggplot geom_raster aes scale_y_reverse scale_x_continuous labs scale_fill_gradient
#'
#' @examples
#'
#' #heatmap_file = file.choose()
#' #plot_heatmap(heatmap_file)
plot_heatmap = function(heatmap_file) {
  # Accept either a data frame or a path to a CSV.
  if (is.data.frame(heatmap_file)) {
    heatmap_data = heatmap_file
  } else {
    heatmap_data = read.csv(heatmap_file)
  }

  heatmap_data$frame_id = as.numeric(heatmap_data$filename)

  # Calibration constants: 22.8 presumably frames per second and 73 pixels
  # per mm - TODO confirm against the acquisition settings.
  heatmap = (ggplot(heatmap_data) +
               geom_raster(aes(x = as.numeric(frame_id) / 22.8, y = y / 73, fill = p_width / 73)) +
               scale_y_reverse(expand = c(0, 0)) +
               scale_x_continuous(expand = c(0, 0)) +
               labs(y = "Vessel Position (mm from top of image)",
                    x = "Time (s)",
                    fill = "Vessel \n Diameter (mm)") +
               scale_fill_viridis_c()) +
    theme(legend.title.align = 0.5)

  return(heatmap)
}

#' Plot a line graph of a vessel's change in diameter over time
#'
#' @param widths_file a vmeasur created file showing the widths of the vessel over time
#'
#' @return a ggplot2 of the vessel
#'
#' @importFrom dplyr group_by summarise
#' @importFrom ggplot2 ggplot geom_line aes
#' @importFrom stats sd
#'
#' @noRd
#'
#' @examples
#'
#' #widths_file = file.choose()
#' #plot_heatmap(heatmap_file)
plot_line = function(widths_file) {
  width_data = import_file(widths_file)

  # Mean (and SD) diameter per frame; frame number is stored in `filename`.
  width_data_summary = width_data %>%
    group_by(filename) %>%
    summarise(mean_diameter = mean(p_width, na.rm = TRUE),
              sd_diameter = sd(p_width, na.rm = TRUE))

  # 22.8 presumably frames per second - TODO confirm calibration.
  heatmap = ggplot(width_data_summary) +
    geom_line(aes(x = as.numeric(filename) / 22.8, y = mean_diameter))

  return(heatmap)
}

#' Quantify the width of a vessel continuously along it's length
#'
#' Generate heat maps and line plots showing the changes in vessel diameter along
#' it's length
#'
#' @param widths_file A csv file created by select_roi or threshold_vessel. The
#' user will be prompted to select a file if this is not specified.
#'
#' @importFrom ggplot2 coord_flip scale_x_reverse geom_line aes theme labs scale_x_continuous geom_vline
#' @importFrom scales reverse_trans
#' @importFrom dplyr group_by summarise summarize
#' @importFrom stats sd
#'
#' @return Two plots: A heat map of the vessel diameter at each position over
#' time and a plot showing the maximum change in diameter over time
#'
#' @export
#'
#' @examples
#'
#' quantify_width_position(vmeasur::example_vessel)
#'
quantify_width_position = function(widths_file = tk_file.choose())
{
  # Accept either a path to a width csv or an already-imported data frame
  if(is.data.frame(widths_file))
  {
    width_data = widths_file
  }else{
    width_data = read.csv(widths_file)
  }

  # Fig A: position x time heatmap of the raw widths
  pa = plot_heatmap(width_data)

  # Fig B: per pixel row (y), how much the width varies across the video
  width_summ = width_data %>% group_by(y) %>%
    summarise(range = max(p_width) - min(p_width), sd = sd(p_width))

  # Section markers every 30 px, converted to mm at 73 px/mm -- assumed
  # calibration constants used throughout the package; TODO confirm
  vlines = data.frame(xintercept = c(1:11)*30/73, colour = "blue3")

  # coord_flip so vessel position reads vertically, matching the heatmap
  pb = ggplot(width_summ) +
    geom_line(aes(x = y/73, y = range/73, color = "Vessel width")) +
    geom_vline(aes(xintercept = xintercept, color = "Regions selected"), data= vlines, show.legend = FALSE) +
    coord_flip() +
    labs(y = "Maximum change in diameter (mm)",
         x = "Vessel Position (mm from top of image)", color = "") +
    theme(legend.title.align=0.5) +
    scale_x_continuous(expand = c(0,0), trans = reverse_trans())

  return(list(pa, pb))
}

#' Quantify the contractility of a vessel in sections along it's length
#'
#' Quantify the physiological parameters in each section of the vessel along it's
#' length.
#'
#' @param widths_file A csv file created by select_roi or threshold_vessel. If
#' not specified, the user will be prompted to make a selection.
#'
#' @importFrom foreach `%do%`
#' @importFrom ggplot2 guides guide_colorbar scale_shape_manual geom_errorbar element_rect `.pt`
#' scale_color_continuous scale_fill_viridis_c scale_size_area scale_color_gradient scale_x_continuous scale_y_reverse theme
#' @importFrom dplyr arrange cur_data filter bind_rows group_by mutate
#' @importFrom grid grobTree linesGrob gpar
#' @importFrom rlang `%||%`
#' @importFrom scales alpha
#'
#' @return Graphs showing the contractility over time, contraction position and
#' amplitude detected, length of contraction and a heatmap overlay for verification
#' of the overall data.
#'
#' @export
#'
#' @examples
#'
#' # quantify_mean_width_sections(widths_file = vmeasur::example_vessel)
#'
quantify_mean_width_sections = function(widths_file = tk_file.choose())
{
  # Raw widths with each pixel row assigned to a 30 px ygroup section
  widths_binned = import_file_bin(widths_file, raw = TRUE)

  bins = unique(widths_binned$ygroup)

  # Quantify each section independently; each element is the output list of
  # quantify_mean_width() for one section
  result = foreach(bin = bins) %do% {

    local_widths = widths_binned %>% filter(ygroup == bin)

    mean_width = quantify_mean_width(local_widths)

    return(mean_width)
  }

  # Positional contract with quantify_mean_width(): element 2 = detected
  # events, 3 = physiological summary, 4 = raw trace -- TODO confirm.
  # NOTE(review): raw_data is computed but never used below.
  contraction_detection = sapply(result, "[", 2) %>% bind_rows(.id = "ygroup")
  contraction_phys = sapply(result, "[", 3) %>% bind_rows(.id = "ygroup")
  raw_data = sapply(result, "[", 4) %>% bind_rows(.id = "ygroup")

  widths_binned_summary = summarise_import_file_bin(widths_binned, "TEST")

  # Range of the mean width per trace; flag the section with the largest range
  var2 = widths_binned_summary %>% group_by(trace_id) %>%
    mutate(max_range = max(p_mean)-min(p_mean)) %>%
    group_by(roi) %>%
    mutate(maxcont = max_range == max(max_range))

  # Plot out that data.  22.8 frames/s, 73 px/mm and 30 px sections are the
  # package's assumed capture constants -- TODO confirm
  pc = ggplot(var2) +
    geom_line(aes(x = frame_id/22.8, y = p_mean/73, color = as.factor(paste(round(ygroup*30/73,2)-0.41, "to", round(ygroup*30/73,2), "mm")))) +
    labs(x = "Time (s)", y = "Mean Diameter (mm)", color = "Vessel Section") +
    theme(legend.title.align=0.5)

  # Generate contraction background data
  contraction = contraction_detection
  contraction$source_file = as.numeric(contraction$ygroup)

  # Amplitude vs position, coloured by the time of maximal contraction
  pe = ggplot(contraction) +
    geom_point(aes(baseline_change/73, (source_file*30-15)/73, color = event_maxima/22.5), size = 2, alpha = 0.7) +
    scale_y_reverse() +
    labs(x = "Contraction Amplitude (mm)", y = "Distance from top of image (mm)", color = "Time at \n maximal \n contraction (s)") +
    scale_color_gradient(low = "purple", high = "darkorange") +
    guides(colour = guide_colorbar(reverse=T, min = 0, max = 30)) +
    theme(legend.title.align=0.5)

  # Number contractions within each section by their start order
  cont_lab = contraction %>% group_by(source_file) %>% arrange(event_start) %>%
    mutate(cont_id = match(event_start, cur_data()[["event_start"]]))

  # Amplitude vs duration, one point shape per contraction number
  pf = ggplot(cont_lab) +
    geom_point(aes(baseline_change/73, event_duration/22.8, color = as.factor(paste(round(as.numeric(ygroup)*30/73,2)-0.41, "to", round(as.numeric(ygroup)*30/73,2), "mm")), shape = as.factor(cont_id)), size = 3) +
    labs(shape = "Contraction\n number", x = "Contraction Amplitude (mm)", y = "Contraction Duration (s)", color = "Vessel Section") +
    scale_shape_manual(values = c(1,0,3,10)) +
    theme(legend.title.align=0.5)

  # Custom legend glyph: draws a bracket shape to represent an errorbar range
  draw_key_bracket <- function(data, params, size) {
    grobTree(
      linesGrob(c(0.15, 0.8), c(0.5, 0.5)),
      linesGrob(c(0.15, 0.15), c(0.15, 0.85)),
      linesGrob(c(0.8, 0.8), c(0.85, 0.15)),
      gp = gpar(
        col = data$colour %||% "grey20",
        fill = alpha(data$fill %||% "white", data$alpha),
        lwd = (data$size %||% 1) * .pt,
        lty = data$linetype %||% 1
      )
    )
  }

  # Overlay detected events on the position/time heatmap for verification
  heatmap = quantify_width_position(widths_file)[[1]]

  suppressMessages({
    pd = heatmap +
      geom_point(aes(x = event_maxima/22.8, y = (source_file*30-15)/73, size = baseline_change/73), data = cont_lab, color = "white") +
      geom_errorbar(aes(xmin = event_start/22.8, xmax = event_end/22.8, y = (source_file*30-15)/73, linetype = "Contraction \n Duration"), color = "white", data = contraction, key_glyph = draw_key_bracket) +
      scale_y_reverse(expand=c(0,0)) +
      scale_x_continuous(expand=c(0,0)) +
      labs(size = "Contraction \n amplitude (mm)", linetype = "", shape = "Contracton Number") +
      theme(legend.key = element_rect(fill = "black"))
  })

  pd = pd + scale_size_area(limits = c(0.01, 0.22), breaks = c(0.01, 0.08, 0.15, 0.22)) +
    theme(legend.title.align=0.5)

  return(list(pc, pe, pf, pd, cont_lab, contraction_phys))
}
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/Bulk_Graphing_V2.R
#' Calibrate the pixel size using a test image
#'
#' In order to calculate absolute densities from pixel sizes, the size of the
#' field captured by an operating microscope must be determined. This function
#' allows the user to select an image of a ruler captured under a microscope,
#' before automatically determining the scale.
#'
#' @param file_path The path to the image of a ruler to use for calibration. If
#' left blank, the user will be prompted to select the file.
#'
#' @return A graphical representation of the ruler and calibration process. The
#' number of pixels per mm will also be displayed.
#'
#' @importFrom imager load.image imrotate grabLine grabRect pad
#' @importFrom ggpubr ggarrange
#' @importFrom ggplot2 ggplot geom_line geom_raster geom_vline theme labs aes scale_y_continuous scale_fill_gradient
#' @importFrom dplyr mutate filter summarise lag
#' @importFrom stats acf median
#' @importFrom imager draw_text grayscale
#'
#' @export
#'
#' @examples
#' \dontrun{
#'
#' file = paste(system.file(package = "vmeasur"), "extdata/mm_scale.jpg", sep = "/")
#' calibrate_pixel_size(file)
#'
#' }
#'
calibrate_pixel_size = function(file_path = tk_file.choose())
{
  # An explicit NULL falls back to the native file chooser
  if(is.null(file_path))
  {
    standard = file.choose()
  } else
  {
    standard = file_path
  }

  image = load.image(standard)

  # Pad the top of the image so the instruction banner has somewhere to sit
  image_display = pad(image, 100, "y", pos = -1) %>%
    draw_text(10, 10, "Draw a line along the edge of the calibration scale", "white", opacity = 1, fsize = 50)

  # The user draws a line along the ruler edge; its slope gives the tilt
  result = grabLine(image_display)

  # BUGFIX: the tilt angle is the arctangent of rise/run (atan), not tan of
  # the slope.  Converted from radians to the degrees imrotate() expects.
  rotation = atan(abs(result['y0'] - result['y1'])/ abs(result['x0'] - result['x1'])) /(2*pi)*360

  rotated = imrotate(image, rotation) %>% pad( 100, "y", pos = -1) %>%
    draw_text(10, 10, "Select the section containing a graded scale", "white", opacity = 1, fsize = 50)

  # The user boxes the graduated section of the ruler
  rectangle = grabRect(rotated)

  rotated.df = rotated %>% grayscale %>% as.data.frame()

  # Crop to the selection and re-origin the coordinates at (1, 1)
  rotated_clean = rotated.df %>% filter(x<rectangle["x1"], y<rectangle["y1"]) %>%
    filter(x>rectangle["x0"], y>rectangle["y0"]) %>%
    mutate(x = x-min(x)+1, y = y-min(y)+1)

  # rotated_clean %>% as.cimg() %>% plot()

  # Mean luminance of each pixel column; gradations appear as periodic dips
  grouped = rotated_clean %>% group_by(x) %>% summarise(luminance = mean(value))

  # Autocorrelation of the luminance profile: zero crossings repeat with the
  # period of the scale gradations
  output = as.vector(acf(grouped$luminance, lag.max = length(grouped$luminance))$acf)

  data.df = data.frame(lag = c(1:length(output)), reading = output)

  # Positive-to-negative zero crossings, and the lag gaps between them
  res = data.df %>% ungroup() %>% mutate(`previous` = lag(reading)) %>%
    mutate(switch = (`reading`<0 & `previous`>0)) %>% filter(`switch`) %>%
    mutate(`lagz` = `lag`-min(`lag`)) %>% mutate(`lagd` = `lag` - lag(`lag`)) %>%
    mutate(`residual` = `lagz` %% median(`lagd`, na.rm = TRUE))

  # Median spacing between crossings = pixels per scale gradation
  detected_pixels = median(res$lagd, na.rm = TRUE)

  # Drop the padded banner region before plotting
  rotated_to_plot = filter(rotated.df, y>100)

  p0 = ggplot(rotated_to_plot) + geom_raster(aes(x = `x`, y = `y`, fill = `value`)) +
    scale_y_continuous(trans=scales::reverse_trans()) +
    scale_fill_gradient(low="black",high="white") +
    theme(legend.position = "none") +
    labs(x = "X position (pixels)", y = "Y position (pixels)", title = "Calibration Image")

  p1 = ggplot(rotated_clean) + geom_raster(aes(x = `x`, y = `y`, fill = `value`)) +
    scale_y_continuous(trans=scales::reverse_trans()) +
    scale_fill_gradient(low="black",high="white") +
    geom_vline(xintercept = res$lag, color = "blue") +
    theme(legend.position = "none") +
    labs(x = "X position (pixels)", y = "Y position (pixels)", title = "Calibration area selected")

  p2 = ggplot(grouped) + geom_line(aes(x = `x`, y = `luminance`)) +
    geom_vline(xintercept = res$lag, color = "blue") +
    labs(x = "X position (pixels)", y = "Average Luminance", title = paste("Fitted scale: ", detected_pixels, "px"))

  plotreturn = ggarrange(p0,p1,p2, ncol = 1)

  cat(paste("Pixels between scale gradations:", detected_pixels))
  cat("\n")
  cat(paste("Gradation units/pixel:", 1/detected_pixels))
  cat("\n")

  return(plotreturn)
}

#' Choose a single file with tcltk
#'
#' @param path a path; defaults to an interactive tcltk file chooser
#'
#' @return a single filename (the first selection, if several were made)
#'
#' @importFrom tcltk tk_choose.files
#'
#' @noRd
#'
#' @examples
#' # tk_file.choose()
tk_file.choose = function(path = tk_choose.files())
{
  return(path[[1]])
}
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/Scale_detection.R
# function() # { # # # summary.df %>% select(site, animal, treatment, source_video) %>% # distinct() %>% # group_by(site, animal, treatment) %>% # summarise(n = n()) # # unique(summary.df$site) # # # # Y banded ROI # # roi_list = summary.df %>% select(site, animal, treatment, vessel) %>% distinct() # # # for(j in c(1:nrow(roi_list))) # { # try({ # # roi = roi_list[j,] # print(paste(j, "of",nrow(roi_list), "-",ceiling(j/nrow(roi_list)*100),"%")) # # # Find peaks locally # # fulldata_mean_mini = summary.df %>% filter(site == roi$site, animal == roi$animal, vessel == roi$vessel, treatment == roi$treatment) # # # # video_shift = (fulldata_mean_mini %>% group_by(source_video) %>% summarise(video_shift = max(frame_id)))$video_shift # # # # ggplot(fulldata_mean_mini) + # # geom_line(aes(x = frame_id, y = p_mean, color = paste(vessel, "/", ygroup))) + # # geom_vline(xintercept = video_shift) + # # labs(title = (paste(i, ")" , roi$treatment, roi$animal, " S",roi$site, sep = ""))) # # fulldata_mean_mini = fulldata_mean_mini %>% group_by(vessel, source_video) %>% mutate(p_mean = ksmooth(time(c(1:length(p_mean))),p_mean, "normal", 30)$y) %>% ungroup() # # trace_table = fulldata_mean_mini %>% select(site, animal, vessel, treatment, source_video, ygroup) %>% distinct() # trace_table$id = c(1:nrow(trace_table)) # # res = list() # stat = list() # # for(i in c(1:nrow(trace_table))) # { # #print(paste(j,i)) # filters = trace_table[i,] # local_data = fulldata_mean_mini %>% filter(site == filters$site, animal == filters$animal, vessel == filters$vessel, treatment == filters$treatment, source_video == filters$source_video, ygroup == filters$ygroup) # output = find_contraction_events(input_vector = local_data$p_mean, min_dist = 30, kband = 30, min_change = 1, nups = 10) # if(!is.null(output)) # { # output$id = i # res[[i]] = output # } # # # nullo = function(vector) # { # if(is.null(vector)) # { # return (0) # } # else # { # return(vector) # } # } # # output$EF = ((output$start_value)^2 
- (output$max_value)*2)/((output$start_value)^2) # # dat = list() # # # # dat["max_EF"] = max(nullo(output$baseline_change)) # dat["mean_EF"] = mean(nullo(output$baseline_change)) # dat["median_EF"] = median(nullo(output$baseline_change)) # dat["sum_EF"] = sum(nullo(output$baseline_change)) # dat["sd_EF"] = sd(nullo(output$baseline_change)) # # dat["n_contraction"] = length(nullo(output$event_maxima)) # # dat["FPF"] = dat[["n_contraction"]] * dat[["mean_EF"]] # # dat["source_video"] = unique(local_data$source_video) # dat["site"] = unique(local_data$site) # dat["animal"] = unique(local_data$animal) # dat["treatment"] = unique(local_data$treatment) # dat["vessel"] = unique(local_data$vessel) # dat["ygroup"] = unique(local_data$ygroup) # # # # dat["max_magnitude"] = max(nullo(output$baseline_change)) # dat["mean_magnitude"] = mean(nullo(output$baseline_change)) # dat["median_magnitude"] = median(nullo(output$baseline_change)) # dat["sum_magnitude"] = sum(nullo(output$baseline_change)) # dat["sd_magnitude"] = sd(nullo(output$baseline_change)) # # dat["max_ef"] = sd(nullo(output$baseline_change/output$start_value)) # dat["mean_ef"] = sd(nullo(output$baseline_change/output$start_value)) # dat["median_ef"] = sd(nullo(output$baseline_change/output$start_value)) # # dat["max_duration"] = max(nullo(output$event_duration)) # dat["mean_duration"] = mean(nullo(output$event_duration)) # dat["median_duration"] = median(nullo(output$event_duration)) # dat["sum_duration"] = sum(nullo(output$event_duration)) # dat["sd_duration"] = sd(nullo(output$event_duration)) # # dat["max_cont_duration"] = max(nullo(output$cont_duration)) # dat["mean_cont_duration"] = mean(nullo(output$cont_duration)) # dat["median_cont_duration"] = median(nullo(output$cont_duration)) # dat["sum_cont_duration"] = sum(nullo(output$cont_duration)) # dat["sd_cont_duration"] = sd(nullo(output$cont_duration)) # # dat["max_fill_duration"] = max(nullo(output$fill_duration)) # dat["mean_fill_duration"] = 
mean(nullo(output$fill_duration)) # dat["median_fill_duration"] = median(nullo(output$fill_duration)) # dat["sum_fill_duration"] = sum(nullo(output$fill_duration)) # dat["sd_fill_duration"] = sd(nullo(output$fill_duration)) # # dat["max_gradient"] = max(nullo(output$event_gradient)) # dat["mean_gradient"] = mean(nullo(output$event_gradient)) # dat["median_gradient"] = median(nullo(output$event_gradient)) # dat["sum_gradient"] = sum(nullo(output$event_gradient)) # dat["sd_gradient"] = sd(nullo(output$event_gradient)) # # dat["mean_width"] = max(local_data$p_mean) # dat["median_width"] = median(local_data$p_mean) # dat["max_width"] = max(local_data$p_mean) # # local_stat = as.data.frame.list(dat) # stat[[i]] = local_stat # # } # # res.df = bind_rows(res) # stat.df = bind_rows(stat) # # vessid = (paste(roi$treatment, roi$animal, "_S",roi$site, "_V", roi$vessel, sep = "")) # file_root = paste("//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/quant2/",vessid, sep = "") # # write.csv(stat.df, paste(file_root, "_stat.csv", sep = "")) # # # if(nrow(res.df)==0) # { # stop("Done, no_cont") # } # # stop("Done") # # full_trace_table = res.df %>% left_join(trace_table, by = c("id")) # # full_trace_contractions = full_trace_table %>% group_by(site, animal, vessel, treatment, source_video, ygroup) %>% # mutate(contractions = n()) # # combined_full_table = fulldata_mean_mini %>% left_join(full_trace_contractions) %>% mutate(event_maxima_id = event_maxima - frame + frame_id) # # # file_root = paste("//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/quant2/",vessid, sep = "") # # ggplot(combined_full_table) + # geom_line(aes(x = frame_id, y = p_mean, color = as.character(contractions), group = source_video))+ # geom_vline(xintercept = video_shift) + # geom_point(aes(x = event_maxima_id, y = max_value), color = "blue") + # labs(title = vessid) + # facet_wrap(~paste(vessel, "/", ygroup)) # # ggsave(file=paste(file_root, ".pdf", sep = 
""), width = 297, height = 210, units = "mm") # # # full_table_summary = combined_full_table %>% group_by(source_video, site, animal, treatment, vessel) %>% # summarise(max_cont = max(contractions,0, na.rm = TRUE), # median_cont = median(contractions), # mean_cont = mean(contractions), # sd_cont = sd(contractions), # mean_magnitude = mean(baseline_change), # median_magnitude = median(baseline_change), # sd_magnitude = sd(baseline_change), # max_magnitude = max(baseline_change,0), # sum_magnitude = sum(baseline_change, na.rm = TRUE)) # # write.csv(full_table_summary, paste(file_root, "_summary.csv", sep = "")) # }) # } # # # csv_files = list.files("//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/quant2/", # recursive = TRUE, full.names = TRUE) # # summary_csv = subset(csv_files,(str_count(csv_files, "stat.csv")>0)) # # summary_data = lapply(summary_csv, read.csv, as.is = TRUE) # # summary_cont.df = summary_data[[1]] # # for(i in c(2:length(summary_data))) # { # print(i) # summary_cont.df = rbind(summary_cont.df, summary_data[[i]]) # } # # # # }
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/bulk_import_quantify.R
# contraction_file = file.choose()
# contraction_data = read_csv(contraction_file)
#
# contraction_data = events_returned[[2]]
#
#
# contraction = contraction_data
#
# calculate_physiological(contraction)
#
#
# pixel_mm = 73
# FPS = 22.8

#' Calculate physiological contraction parameters from detected events
#'
#' Converts a table of detected contraction events (pixel/frame units) into
#' physiological measures in mm and seconds: end diastolic/systolic diameters
#' (EDD/EDD2/ESD), durations (ED/CD/FD), contraction amplitude (CA), ejection
#' fraction (EF), fractional pump flow (FPF), contraction/filling speeds
#' (CS/FS) and percentage refill (PRF).
#'
#' @param cont Contraction data
#' @param pixel_mm Size of each mm in pixels
#' @param FPS frames per second of video
#' @param mean should a mean be returned, or the actual results (currently
#'   ignored: the per-event table is always returned; the summarising code
#'   below is disabled)
#'
#' @importFrom dplyr arrange mutate group_by summarize
#' @importFrom tidyr pivot_longer
#'
#' @importFrom magrittr `%>%`
#'
#' @return the event table with one row per contraction and the derived
#'   physiological measures added
#'
#' @noRd
#'
#' @examples
#' # Not exported
#'
calculate_physiological = function(cont, pixel_mm = 73, FPS = 22.8, mean = TRUE)
{
  cont = cont %>% arrange(event_start) %>% mutate(type = NULL)

  # seq_len() handles an empty event table safely (1:nrow would give c(1, 0))
  cont$event_id = seq_len(nrow(cont))

  # Diameters in mm.  BUGFIX: use the pixel_mm argument rather than a
  # hard-coded 73, so non-default calibrations are honoured.
  cont = cont %>% mutate(EDD = start_value/pixel_mm)   # end diastolic diameter (at event start)
  cont = cont %>% mutate(EDD2 = end_value/pixel_mm)    # end diastolic diameter (at event end)
  cont = cont %>% mutate(ESD = max_value/pixel_mm)     # end systolic diameter

  # Overall extremes across all events (scalar, repeated on every row)
  cont = cont %>% mutate(Nadir = min(ESD))
  cont = cont %>% mutate(Peak = max(EDD, EDD2))

  # Durations in seconds; the frame-unit source columns are dropped
  cont = cont %>% mutate(ED = event_duration/FPS, event_duration = NULL)
  cont = cont %>% mutate(CD = (event_maxima-event_start)/FPS, event_start = event_start/FPS, fill_duration = NULL)
  cont = cont %>% mutate(FD = (event_end - event_maxima)/FPS, event_end = event_end/FPS)

  # Contractions per minute, from the mean event duration.  base::mean is
  # still found here: non-function locals (the `mean` argument) are skipped
  # when R resolves a function call.
  mean_event_duration = mean(cont$ED)
  contraction_frequency = 1/(mean_event_duration/60)

  cont = cont %>% mutate(CA = EDD-ESD, baseline_change = NULL)
  cont = cont %>% mutate(EF = ((EDD^2-ESD^2)/EDD^2)*100)
  cont = cont %>% mutate(FPF = (EF*contraction_frequency))
  cont = cont %>% mutate(CS = CA/CD, event_gradient = NULL)
  cont = cont %>% mutate(FS = (EDD2-ESD)/FD)
  cont = cont %>% mutate(PRF = (EDD2-ESD)/CA*100)

  # cont_mean = cont %>% pivot_longer(-event_id, names_to = "variable") %>%
  #   group_by(variable) %>% summarise(mean = signif(mean(value),4), sd = signif(sd(value),4)) %>%
  #   mutate(overall = paste(mean, " (", sd, ")", sep = ""))
  # if(isTRUE(mean))
  # {
  #   return(cont_mean)
  # }
  # else
  # {
  #   return(cont)
  # }

  return(cont)
}
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/calculate_physiological_paramaters.R
# Data documentation

# Suppress R CMD check NOTEs about the non-standard-evaluation column names
# used in dplyr/ggplot2 pipelines throughout the package.
# BUGFIX: removed the typo-merged entry "lagz luminance" -- both "lagz" and
# "luminance" are already declared individually below.
globalVariables(c("end_value", "event_change", "event_duration", "event_end", "event_start",
                  "excluded", "i", "p_width", "start_value", "value", "y", "animal",
                  "baseline_change", "event_maxima", "frame", "frame_id", "lagd",
                  "max_range", "max_value", "maxcont", "npix", "opts", "p_mean",
                  "previous", "reading", "site", "source_video", "trace_id",
                  "treatment", "vessel", "x", "ygroup", "lagz", "luminance",
                  "filename", "mean_diameter", "roi", "video",
                  "CA", "CD", "EDD", "EDD2", "EF", "ESD", "FD", "bin", "cont_id",
                  "event_id", "source_file", "variable", "xintercept",
                  "cont_group_id", "current_csv", "current_tree", "currentfile",
                  "locallist", "mean_width", "nvideo", "remaining", "roi_name",
                  "segment", "tree", "y_group", "y_group_pos", "y_position",
                  "y_position_excluded"))

#' Example lymphatic width dataset
#'
#' A data set containing the widths of a test vessel in each frame of a video. Identical
#' in format to that produced by select_roi and threshold_vessel
#'
#' @format A data frame with 245,230 rows and 5 variables:
#' \describe{
#'   \item{X.1}{identification number of each row}
#'   \item{y}{y position in the image}
#'   \item{p_width}{width of the vessel at that position, in pixels}
#'   \item{excluded}{was that row excluded due to an air bubble}
#'   \item{filename}{which frame was the pixel row acquired from}
#' }
#'
#'
#' @source Collected for this package by Peter Russell (2021)
"example_vessel"
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/data_documentation.r
# blank = function() # { # # libraries = c("imager", "av", "tools", "ggplot2", "dplyr", "rlang", "foreach", "doSNOW", "pbmcapply", "devtools") # new.packages <- libraries[!(libraries %in% installed.packages()[,"Package"])] # if(length(new.packages)) install.packages(new.packages) # lapply(libraries, library, character.only = TRUE) # # # SH29S2 # # # setwd("//files.auckland.ac.nz/research/ressci202000061-PROM-study/vmeasur") # devtools::load_all() # # scratch_dir("Q://") # setwd("//files.auckland.ac.nz/research/ressci202000061-PROM-study") # # select_roi() # # # # # All ROIs listed below # # AP19S1.1_1.1 # threshold_apply( threshold = '0.711484593837535',roi_name = 'AP19S1.1_1.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image112.avi', radians = 0.24756263670402, xlength = 60, ylength = 126, xstart = 288, ystart = 429 ) # # threshold_apply( threshold = '0.711484593837535',roi_name = 'AP19S1.1_1.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image113.avi', radians = 0.24756263670402, xlength = 60, ylength = 126, xstart = 288, ystart = 429 ) # # threshold_apply( threshold = '0.711484593837535',roi_name = 'AP19S1.1_1.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image114.avi', radians = 0.24756263670402, xlength = 60, ylength = 126, xstart = 288, ystart = 429 ) # threshold_apply( threshold = '0.711484593837535',roi_name = 'AP19S1.1_1.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image115.avi', radians = 0.24756263670402, xlength = 60, ylength = 126, xstart = 288, ystart = 429 ) # threshold_apply( threshold = '0.711484593837535',roi_name = 'AP19S1.1_1.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image116.avi', radians = 0.24756263670402, xlength = 60, ylength = 126, xstart = 288, ystart = 429 ) # threshold_apply( 
threshold = '0.711484593837535',roi_name = 'AP19S1.1_1.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image117.avi', radians = 0.24756263670402, xlength = 60, ylength = 126, xstart = 288, ystart = 429 ) # threshold_apply( threshold = '0.711484593837535',roi_name = 'AP19S1.1_1.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image118.avi', radians = 0.24756263670402, xlength = 60, ylength = 126, xstart = 288, ystart = 429 ) # threshold_apply( threshold = '0.711484593837535',roi_name = 'AP19S1.1_1.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image119.avi', radians = 0.24756263670402, xlength = 60, ylength = 126, xstart = 288, ystart = 429 ) # #AP19S1.1_2.1 # threshold_apply( threshold = '0.576470588235294',roi_name = 'AP19S1.1_2.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image112.avi', radians = 0.678463009402465, xlength = 60, ylength = 154, xstart = 423, ystart = 634 ) # threshold_apply( threshold = '0.576470588235294',roi_name = 'AP19S1.1_2.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image113.avi', radians = 0.678463009402465, xlength = 60, ylength = 154, xstart = 423, ystart = 634 ) # threshold_apply( threshold = '0.576470588235294',roi_name = 'AP19S1.1_2.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image114.avi', radians = 0.678463009402465, xlength = 60, ylength = 154, xstart = 423, ystart = 634 ) # threshold_apply( threshold = '0.576470588235294',roi_name = 'AP19S1.1_2.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image115.avi', radians = 0.678463009402465, xlength = 60, ylength = 154, xstart = 423, ystart = 634 ) # threshold_apply( threshold = '0.576470588235294',roi_name = 'AP19S1.1_2.1',video_path 
= '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image116.avi', radians = 0.678463009402465, xlength = 60, ylength = 154, xstart = 423, ystart = 634 ) # threshold_apply( threshold = '0.576470588235294',roi_name = 'AP19S1.1_2.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image117.avi', radians = 0.678463009402465, xlength = 60, ylength = 154, xstart = 423, ystart = 634 ) # threshold_apply( threshold = '0.576470588235294',roi_name = 'AP19S1.1_2.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image118.avi', radians = 0.678463009402465, xlength = 60, ylength = 154, xstart = 423, ystart = 634 ) # threshold_apply( threshold = '0.576470588235294',roi_name = 'AP19S1.1_2.1',video_path = '//files.auckland.ac.nz/research/ressci202000061-PROM-study/Full Dataset 2/AP19S1/image119.avi', radians = 0.678463009402465, xlength = 60, ylength = 154, xstart = 423, ystart = 634 ) # #Ap19S1.1_2.2 # # # # # # # # # } # # # # # # # # # # # # #
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/example_flow.R
#' Import a saved file
#'
#' Reads a width csv written by select_roi/threshold_vessel, masks out pixel
#' rows flagged as excluded, and attaches metadata (video, treatment, animal,
#' field, vessel) parsed from the file name.
#'
#' @param filename the csv file to import from
#'
#' @return a cleaned up data frame
#'
#' @importFrom dplyr bind_rows mutate group_by ungroup
#' @importFrom tidyr extract_numeric
#' @importFrom utils read.csv
#' @importFrom stringr str_split str_remove str_replace_all str_to_upper
#' @importFrom readr parse_number
#' @importFrom tools file_path_sans_ext
#'
#'
#' @noRd
#'
#' @examples
#' i = 1
#'
#'
import_file = function(filename)
{
  # NOTE(review): when filename is a data frame, file_path_sans_ext() below
  # is still called on it and will fail -- confirm this branch is ever used
  if(is.data.frame(filename))
  {
    csvfile = filename
  }else{
    csvfile = read.csv(filename)
  }

  # Drop row-number columns left behind by write.csv
  csvfile$X = NULL
  csvfile$X.1 = NULL

  # File names encode metadata as video_<treatment><animal>S<field>_<vessel>_...
  file_source = basename(file_path_sans_ext(filename)) %>% str_split("_")

  video = file_source[[1]][[1]]
  # Treatment = letters of the second token, animal = its digits
  treatment = (file_source[[1]][[2]] %>% str_split("S"))[[1]][[1]] %>% str_replace_all("[:digit:]","")
  animal = (file_source[[1]][[2]] %>% str_split("S"))[[1]][[1]] %>% str_replace_all("[A-Za-z]","")
  field = (file_source[[1]][[2]] %>% str_to_upper() %>% str_split("S"))[[1]][[2]]
  vessel = file_source[[1]][[3]]

  # csvfile %>% select(y, excluded) %>% distinct()
  # toexclude = csvfile %>% group_by(y) %>% summarise(excluded = sum(excluded==1))

  # Blank the width of any pixel row ever marked excluded (e.g. air bubble),
  # then attach the parsed metadata.  roi_name is assumed to be a column of
  # the csv -- TODO confirm
  csvfile = csvfile %>% group_by(`y_position`) %>%
    mutate(y_position_excluded= sum(y_position_excluded)>1) %>% ungroup() %>%
    mutate(p_width = ifelse(!`y_position_excluded`, `p_width`, NA)) %>%
    mutate(video = video, treatment = treatment, animal = animal, field = field,
           vessel = vessel, roi = roi_name, y = y_position)

  return(csvfile)
}

#' Parallel mass csv import code
#'
#' Imports every width csv under a folder, bins pixel rows into ygroups of
#' y_bin pixels, and returns the mean/median width per frame per bin.
#'
#' @param current_dir list of csv files to import
#' @param y_bin number of pixels to put in each ybin
#'
#'
#' @importFrom dplyr row_number
#'
#' @return A bulk list of imported csv files
#'
#' @noRd
#'
#' @examples
#' # Not applicable to CRAN
#'
import_folder_bin = function(current_dir, y_bin = 30)
{
  # list out the CSV files to import
  folder_files = list.files(current_dir, recursive = TRUE, pattern = "width.csv$", full.names = TRUE)

  if(length(folder_files)==1)
  {
    fulldata = import_file(folder_files)
  } else{
    # Import them all with lapply and combine with dplyr
    applied = foreach(currentfile = folder_files) %do% {
      import_file(currentfile)
    }
    fulldata = dplyr::bind_rows(applied, .id = "file_id")
  }

  # Frame number = row order within each pixel row of each video
  fulldata = fulldata %>% group_by(y, roi, animal, treatment, video) %>% mutate(frame_id = row_number())

  pixel_bin = y_bin

  # Assign each pixel row to a bin; drop partial bins at the vessel edges
  # (npix < pixel_bin) so every bin averages the same number of rows
  fulldata_grouped = fulldata %>% mutate(ygroup = ((y-1) %/% pixel_bin) + 1) %>%
    group_by(treatment, animal, roi, ygroup) %>%
    mutate(max = max(y), min = min(y), npix = max-min +1, trace = cur_group_id()) %>%
    filter(npix == pixel_bin) %>% ungroup()

  # Mean/median width per frame per bin, with a unique trace_id per bin
  fulldata_mean = fulldata_grouped %>% filter(!y_position_excluded) %>% ungroup() %>%
    group_by(`frame_id`, `video`, `animal`, `treatment`, `roi`, `ygroup`) %>%
    summarise(p_mean = mean(p_width, na.rm = TRUE), p_median = median(p_width, na.rm = TRUE)) %>%
    group_by(`animal`, `treatment`, `roi`, `ygroup`) %>%
    mutate(trace_id = cur_group_id())

  return(fulldata_mean)
}

#' Bin and import a csv file
#'
#' @param file_location location of the file (a path, or an already-imported
#'   data frame)
#' @param y_bin bin size in pixel rows
#' @param raw is the data raw, or should averages be calculated for each bin
#'
#' @return the binned data: one row per pixel per frame when raw = TRUE,
#'   otherwise the per-bin summary from summarise_import_file_bin()
#' @noRd
#'
#' @examples
#' # internal use only
import_file_bin = function(file_location, y_bin = 30, raw = FALSE)
{
  if(is.data.frame(file_location))
  {
    fulldata = file_location
  } else
  {
    fulldata = import_file(file_location)
  }

  # Frame number = row order within each pixel row of each video
  fulldata = fulldata %>% group_by(y, roi, animal, treatment, video) %>% mutate(frame_id = row_number())

  pixel_bin = y_bin

  # Assign each pixel row to a bin; drop partial bins at the vessel edges
  fulldata_grouped = fulldata %>% mutate(ygroup = ((y-1) %/% pixel_bin) + 1) %>%
    group_by(treatment, animal, roi, ygroup) %>%
    mutate(max = max(y), min = min(y), npix = max-min +1, trace = cur_group_id()) %>%
    filter(npix == pixel_bin) %>% ungroup()

  if(isTRUE(raw))
  {
    return(fulldata_grouped)
  }

  fulldata_mean = summarise_import_file_bin(fulldata_grouped, file_location)

  return(fulldata_mean)
}

#' Summarise binned width data to one mean/median trace per bin
#'
#' @param fulldata_grouped binned data as returned by import_file_bin(raw = TRUE)
#' @param file_location the source file, recorded in the output's filename column
#'
#' @return one row per frame per ygroup with p_mean/p_median and a trace_id
#'
#' @noRd
#'
#' @examples
#' # internal use only
summarise_import_file_bin = function(fulldata_grouped, file_location)
{
  # Mean/median width per frame per bin, excluding flagged pixel rows
  toreturn = fulldata_grouped %>% filter(!y_position_excluded) %>%
    group_by(`frame_id`, `video`, `animal`, `treatment`, `roi`, `ygroup`) %>%
    summarise(p_mean = mean(p_width, na.rm = TRUE), p_median = median(p_width, na.rm = TRUE)) %>%
    group_by(`animal`, `treatment`, `roi`, `ygroup`) %>%
    mutate(trace_id = cur_group_id())

  toreturn$filename = basename(file_location)

  return(toreturn)
}
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/file_import_bulk.R
#' Make a file name that identifies an animal
#'
#' Builds the canonical "[animal_..][treatment_..][video_..][roi_..]" name.
#'
#' @param animal animal id
#' @param treatment treatment type
#' @param video source video which was measured
#' @param roi region of interest imaged
#' @param list a list containing all the data, for convenience when being called from other functions
#' @param extension the filename extension required
#'
#' @return the name of a file to be used
#'
#' @noRd
#'
#' @examples
#' # filename = make_filename("1", "Test", extension = ".csv")
#'
make_filename = function(animal = "NS", treatment = "NS", video = "NS",
                         roi = "NS", list = NULL, extension = "")
{
  # A supplied list overrides the individual arguments. [[ extracts the
  # value itself rather than the length-one sub-list that [ returns.
  if(!is.null(list))
  {
    animal = list[["animal"]]
    treatment = list[["treatment"]]
    video = list[["video"]]
    roi = list[["roi"]]
  }

  paste0("[animal_", animal, "]",
         "[treatment_", treatment, "]",
         "[video_", video, "]",
         "[roi_", roi, "]",
         extension)
}

#' Divide a filename into multiple variables that have been assigned to the vessel
#'
#' Inverse of make_filename(): splits "[animal_..][treatment_..]..." back into
#' a named list.
#'
#' @param filename the filename to be broken up into sections; a data frame
#'   input yields the all-"NS" placeholder name
#'
#' @return a named list (animal, treatment, video, roi) of the parsed values
#'
#' @importFrom stringr str_replace
#' @importFrom tidyr separate
#'
#' @noRd
#'
break_filename = function(filename)
{
  if(is.data.frame(filename))
  {
    # No real file name available -- fall back to the placeholder.
    working_filename = "[animal_NS][treatment_NS][video_NS][roi_NS]_widths"
  } else{
    working_filename = file_path_sans_ext(basename(filename))
  }

  # Split "][", then strip the remaining outer brackets from each piece.
  breakout = str_split(working_filename, "\\]\\[")[[1]]
  breakout = str_replace(breakout, "\\]", "") %>% str_remove("\\[")
  breakout = data.frame(breakout)

  # Each piece is "variable_value". NOTE(review): extra = "drop" truncates
  # values that themselves contain "_"; confirm values never do.
  breakout = breakout %>% separate(breakout, c("variable", "value"), sep = "_", extra = "drop")

  br_list = as.list(breakout$value)
  names(br_list) = breakout$variable

  return(br_list)
}

#' Collect the variables needed to make a file name from the user using a popup
#'
#' @param animal the animal that the name should be generated for
#' @param treatment type of treatment the animal received
#' @param video id of the video which was captured
#' @param roi the region of interest selected
#'
#' @importFrom utils edit
#'
#' @noRd
#'
#' @return a list containing the critical filename values collected from the user
#'
#' @examples
#' # Run in interactive mode only
#' # make_filename(list = collect_filename())
collect_filename = function(animal = "NS", treatment = "NS", video = "NS", roi = "NS")
{
  blank_data = data.frame(Variable = c("Animal", "Treatment", "Video", "ROI"),
                          Value = c(animal, treatment, video, roi))

  # Interactive spreadsheet-style editor; the edited values are returned.
  blank_data = edit(blank_data)

  br_list = as.list(blank_data$Value)
  names(br_list) = tolower(blank_data$Variable)

  return(br_list)
}
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/filename_manipulation.R
#' Find peaks in a vascular time series
#'
#' Smooths the diameter trace, inverts it, and detects contraction events as
#' peaks of the inverted trace. Returns a plot plus an event table.
#'
#' @param input_vector vector of values to analyze
#' @param kband K smoothing window to apply to the data
#' @param nups number of increases before and after the dataset to threshold on
#' @param min_change minimum size of change to be termed significant
#' @param min_dist Minimum distance between the minima
#' @param plot should a plot be returned along with the detected peaks
#' @param pixel_scale pixels per mm, used to scale the plot y axis
#' @param time_scale frames per second, used to scale the plot x axis
#'
#' @importFrom stats ksmooth time
#' @importFrom pracma findpeaks
#' @importFrom graphics grid points
#' @importFrom magrittr %>%
#' @importFrom dplyr mutate rowwise cur_group_id
#' @importFrom ggplot2 aes geom_point scale_color_manual scale_colour_viridis_c guides guide_legend theme labs
#'
#' @return A list: [[1]] a ggplot of the trace with detected events,
#'   [[2]] a data frame of events, or NA when none were found
#'
#' @noRd
#'
#' @examples
#' i = 1
#' # Test to come
#'
find_contraction_events = function(input_vector, kband = 20, nups = 10,
                                   min_change = 0.25, min_dist = 10,
                                   plot = FALSE, pixel_scale = 73,
                                   time_scale = 22.5)
{
  # Kernel-smooth the trace, then invert so contractions (minima) become peaks.
  smooth_vector = ksmooth(time(1:length(input_vector)), input_vector, 'normal', bandwidth = kband)$y
  inverted_vector = 0-smooth_vector

  # Columns of x: peak value, maxima index, start index, end index.
  x = findpeaks(inverted_vector, nups = nups, ndowns = nups, zero = "+", minpeakdistance = min_dist)

  # Not run:
  # plot(smooth_vector, type="l", col="navy")
  # grid()
  # points(x[, 2], x[, 1], pch=20, col="maroon")
  #
  # return(x)

  colour_fill = c("grey30", "#C77CFF", "#7CAE00", "#00BFC4", "#F8766D")

  # Fallback result used whenever no usable events survive filtering:
  # the raw trace plot and NA in place of the event table.
  blankdata = data.frame(x = c(1:length(input_vector)), y = input_vector)
  blankgraphdata = ggplot() +
    geom_line(data = blankdata, aes(x = x/time_scale, y = y/pixel_scale, color = "No contractions found")) +
    labs(y = "Mean Diameter (mm)", x = "Time (s)", colour = "Tracking property") +
    theme(legend.title.align=0.5)
  blankgraph = list()
  blankgraph[[1]] = blankgraphdata
  blankgraph[2] = NA

  # NOTE(review): this requires at least two detected peaks -- a single-peak
  # trace falls through to the blank result; confirm that is intended.
  if(!isTRUE(nrow(x)>1))
  {
    return(blankgraph)
  }

  if(isTRUE(nrow(x)>=1))
  {
    events = data.frame(event_maxima = x[,2], event_start = x[,3],event_end = x[,4],type = "contract")
  }else
  {
    return(blankgraph)
  }

  # Look up the smoothed trace value at each event landmark.
  events$start_value = smooth_vector[events$event_start]
  events$end_value = smooth_vector[events$event_end]
  events$max_value = smooth_vector[events$event_maxima]

  # Derived per-event metrics (durations in frames, change in pixels).
  events = events %>% rowwise() %>% mutate(
    `baseline_change` = (`start_value`-`max_value`),
    `event_duration` = `event_end` - `event_start`,
    `cont_duration` = `event_maxima` - `event_start`,
    `fill_duration` = `event_end` - `event_maxima`,
    `event_gradient` = `baseline_change`/`event_duration`)

  raw_events = events

  #events = subset(events, !events$event_end == length(input_vector) & !events$event_start == 1)

  # Keep only events that are large enough and long enough to be meaningful.
  events = events %>% filter(abs(`baseline_change`)>min_change) %>% filter(`event_duration`>min_dist)

  if(nrow(events) ==0)
  {
    return(blankgraph)
  }

  # # gg_color_hue <- function(n) {
  # #   hues = seq(15, 375, length = n + 1)
  # #   hcl(h = hues, l = 65, c = 100)[1:n]
  # # }
  # # "#00B0F6",

  # Overlay raw + smoothed traces with the surviving event landmarks,
  # converting frames -> seconds and pixels -> mm for display.
  function_plot = ggplot() +
    geom_line(aes(x = (c(1:length(smooth_vector)))/time_scale, y = input_vector/pixel_scale, color = "Raw data")) +
    geom_line(aes(x = c(1:length(smooth_vector))/time_scale, y = smooth_vector/pixel_scale, color = "Smoothed"), alpha = 0.8) +
    # geom_point(data = raw_events, aes(x = event_start, y = start_value, color = "Event Start")) +
    # geom_point(data = raw_events, aes(x = event_end, y = end_value, color = "Event end")) +
    # geom_point(data = raw_events, aes(x = `event_maxima`/time_scale, y = `max_value`/pixel_scale, color = "Events found and excluded"))+
    geom_point(data = events, aes(x = `event_maxima`/time_scale, y = `max_value`/pixel_scale, color = "Event minima"), size = 2) +
    geom_point(data = events, aes(x = `event_start`/time_scale, y = `start_value`/pixel_scale, color = "Event start"), size = 4) +
    geom_point(data = events, aes(x = `event_end`/time_scale, y = `end_value`/pixel_scale, color = "Event end"), size = 2) +
    scale_color_manual(values = c(colour_fill), breaks = c("Raw data", "Smoothed", "Event minima", "Event start", "Event end"))

  # Legend: lines for the traces, dots for the event markers.
  function_plot = function_plot +
    guides(color = guide_legend(override.aes = list(linetype = c(1, 1, 0, 0, 0),
                                                    shape = c(NA, NA, 19, 19, 19))))

  function_plot = function_plot +
    labs(y = "Mean Diameter (mm)", x = "Time (s)", colour = "Tracking property") +
    theme(legend.title.align=0.5)

  return(list(function_plot, events))
}

#' Quantify the vessel width over an entire ROI
#'
#' This function calculates the overall widths and contraction parameters for the vessel as a whole.
#'
#' @param widths_file A CSV file created by select_roi or threshold_vessel
#' @param pixel_scale The number of pixels per mm, can be calculated with
#' calibrate_pixel_size if unknown
#'
#' @return A list containing:
#' A graph showing the detected contraction events,
#' Details of each contraction event,
#' The mean and standard deviation of the calculated contraction physiological parameters,
#' The raw data used in the quantification process
#'
#'
#' @export
#'
#' @examples
#'
#' quantify_mean_width(vmeasur::example_vessel)
#'
quantify_mean_width = function(widths_file, pixel_scale = 73)
{
  if(is.data.frame(widths_file))
  {
    width_data = widths_file
  } else
  {
    width_data = import_file(widths_file)
  }

  # Collapse each frame to its mean diameter.
  width_data_summary = width_data %>% group_by(filename) %>%
    summarise(mean_diameter = mean(p_width, na.rm = TRUE),
              sd_diameter = sd(p_width, na.rm = TRUE))

  # Frame file names are numeric -- order them numerically, not lexically.
  width_data_summary = width_data_summary %>% arrange(as.numeric(filename))

  # FIX(review): pixel_scale was previously accepted but never forwarded, so
  # caller-supplied calibrations were silently ignored in the output plot.
  events_returned = find_contraction_events(width_data_summary$mean_diameter,
                                            kband = 20,
                                            pixel_scale = pixel_scale)

  events_returned[[3]] = calculate_physiological(events_returned[[2]])
  events_returned[[4]] = width_data

  return(events_returned)
}
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/find_peaks_v3.R
#' Group contraction events that occur at the same time within a tree
#'
#' Events within 20 frames of each other (transitively) are assigned the same
#' contraction group label, per vessel tree.
#'
#' @param contraction_data a data frame of contraction events with at least
#'   `tree` and `event_maxima` columns
#'
#' @return the input data with a `contraction_group` column added
#'
#' @importFrom foreach foreach %do%
#' @importFrom dplyr mutate group_split inner_join left_join n
#' @importFrom stringr str_to_upper
#' @importFrom stats runif
#'
#' @noRd
#'
#' @examples
add_contraction_group = function(contraction_data)
{
  # Work tree by tree: groups never span trees.
  cd2 = contraction_data %>% group_by(tree) %>% group_split()

  processed = foreach(w4 = cd2, .combine = bind_rows) %do%
  {
    w4$plainid = c(1:nrow(w4))

    # For each event, list the ids of all events within 20 frames of it.
    close = foreach(row = c(1:nrow(w4))) %do%
    {
      currentrow = w4[row,]
      distances = abs(w4$event_maxima - currentrow$event_maxima)<20
      closerows = subset(w4, distances)
      in_spec = paste(closerows$plainid, collapse = ",")
      return(in_spec)
    }

    w4$close = unlist(close)

    # Merge overlapping neighbour lists into transitive groups.
    w4$close = overlap_ranges(w4$close)
    w4 = w4 %>% mutate(contraction_group = close, close = NULL)
    return(w4)
  }

  return(processed)
}

#' Merge two comma-separated id lists if they overlap
#'
#' @param v1 a comma-separated string of ids (e.g. "1,2,3")
#' @param v2 a second comma-separated string of ids
#'
#' @return the merged, numerically sorted, de-duplicated list when the two
#'   share at least one id; otherwise `v1` unchanged
#'
#' @noRd
#'
#' @examples
combine_lists = function(v1, v2)
{
  v1v = unlist(strsplit(v1, ","))
  v2v = unlist(strsplit(v2, ","))

  # A shared id contributes to both %in% checks, so any overlap gives a
  # count of at least 2.
  if(sum(v1v %in% v2v, v2v %in% v1v)>1)
  {
    return(paste(sort(as.numeric(unique(c(v1v, v2v)))), collapse = ","))
  }
  else return(v1)
}

#' Merge an id list with every overlapping list in a vector
#'
#' @param v1 a comma-separated string of ids
#' @param bigvector a character vector of comma-separated id lists
#'
#' @return `v1` merged with each element of `bigvector` it overlaps
#'
#' @noRd
#'
#' @examples
combine_all_lists = function(v1, bigvector)
{
  # FIX(review): the original also re-assigned bigvector inside the loop
  # (bigvector = c(v1, bigvector)); a for() sequence is fixed at loop entry,
  # so that statement had no effect on iteration and only grew memory.
  for(i in bigvector)
  {
    v1 = combine_lists(v1,i)
  }
  return(v1)
}

#' Resolve a vector of id lists into transitive overlap groups
#'
#' Each element is merged with every other element it overlaps; two passes
#' are made so merges discovered in the first pass can propagate.
#'
#' @param string_vector a character vector of comma-separated id lists
#'
#' @return a character vector of the same length with merged group labels
#'
#' @noRd
overlap_ranges = function(string_vector)
{
  unique_string_vector = unique(string_vector)

  # First pass: merge each row against every distinct list.
  result = foreach(row = string_vector) %do%
  {
    parse1 = combine_all_lists(row, unique_string_vector)
    return(parse1)
  }

  unique_result = unique(result)

  # Second pass: propagate merges produced by the first pass.
  result = foreach(row = result) %do%
  {
    parse1 = combine_all_lists(row, unique_result)
    return(parse1)
  }

  return(unlist(result))
}
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/group_contractions.R
# Manual test driver (kept for reference):
# library(doFuture)
# registerDoFuture()
#
# plan("multisession")
#
# library(imager)
# library(tidyverse)
# library(tools)
#
# starttime = Sys.time()
# threshold_apply(threshold = '0.579052239452482',roi_name = '[animal_10][treatment_yes][video_image112][roi_30]', video_path = 'P:/Width testing/image112.avi',
#                 radians = 1.37211044508047, xlength = 40, ylength = 154, xstart = 450, ystart = 722, fps = 1)
# Sys.time() - starttime

#' Threshold a video with pre-determined parameters
#'
#' Using pre-determined values this function generates ROI from a video. If parameters are not known, use select_roi()
#' This function is optimized to run in parallel, so should be relatively rapid. If running slowly, check the scratch disk is set correctly.
#'
#' @param threshold The threshold for the red channel. Range 0-1.
#' @param roi_name Name assigned to the region of interest
#' @param video_path Location of the video file to process
#' @param radians Degrees to rotate the image, in radians
#' @param xlength Number of x pixels in the ROI
#' @param ylength Number of y pixels in the ROI
#' @param xstart ROI starting x co-ordinate
#' @param ystart ROI starting y co-ordinate
#' @param image_list If pre-computed, a list of images to use rather than a video
#' @param fps Number of fps to process, this can be set lower for validation
#'
#'
#' @return Saves the quantified CSV and overlaid video in the same directory as the video
#'
#' @importFrom utils setTxtProgressBar write.csv read.csv
#' @importFrom foreach `%dopar%` foreach
#' @importFrom tools file_path_sans_ext
#' @importFrom magrittr `%>%`
#' @importFrom imager as.cimg
#' @importFrom doFuture registerDoFuture
#' @importFrom tcltk tk_choose.dir
#' @importFrom dplyr mutate
#' @importFrom future availableCores
#' @importFrom stringr str_replace
#'
#'
#'
#' @export
#'
threshold_apply = function(threshold = 0.5, roi_name = "test", video_path = 'image826.avi',radians = 0.217604550320612,xlength = 60,ylength = 242,xstart = 696,ystart = 323, image_list = NULL, fps = NULL)
{
  # NOTE(review): image_list is accepted but never used in this version of
  # the function; frames are always decoded from video_path.
  starttime = Sys.time()

  #Make a full processing run in RAM
  output_folder = output_dir(dirname(video_path), use_default = TRUE)

  # Crop video and save output: set up a clean scratch directory.
  scratch_dir(wipe_scratch = TRUE)
  scratch = scratch_dir(file_name = video_path)
  unlink(gsub("/$", "", scratch), recursive = TRUE, force = TRUE)
  dir.create(scratch)

  # ffmpeg filter: rotate around the selected angle (red background fill),
  # then crop to the ROI rectangle.
  filter_string = paste("rotate = '",radians,":out_w=rotw(",radians,"):out_h=roth(",radians,"):c = red',",
                        "crop=",xlength,":",ylength,":",xstart,":",ystart,"",
                        sep = "")

  # Optionally drop the frame rate (useful for quick validation runs).
  if(!is.null(fps))
  {
    filter_string = paste("fps = ",fps,",",filter_string, sep = "")
  }

  # Decode every (cropped, rotated) frame to a numbered PNG.
  av::av_encode_video(video_path, paste(scratch, "/%03d_raw.png", sep = ""), vfilter = filter_string, codec = "png")

  cropped_file_list = list.files(scratch, full.names = TRUE, pattern = "\\_raw.png$")

  # Run analytics on each of the unpacked frames
  starttime = Sys.time()
  options(future.rng.onMisuse = "ignore")

  # Show the first frame so the user can sanity-check the crop.
  load.image(cropped_file_list[[1]]) %>% plot()

  message("Making segmentation")

  # Split the frame list into one bundle per available core.
  bundlesize = length(cropped_file_list)/as.numeric(availableCores())
  split_file_list = split(cropped_file_list, ceiling(seq_along(cropped_file_list)/bundlesize))

  # if(length(cropped_file_list>20))
  # {
  #   plan("multisession", .skip = TRUE)
  # }else
  # {
  #   plan("sequential")
  # }

  # Each worker thresholds its bundle of frames, writing the threshold
  # image, the width CSV and the intensity-profile CSV alongside the frame.
  foreach(locallist = split_file_list) %dopar%
  {
    for(current_frame in locallist)
    {
      current_frame_threshold = threshold_image(current_frame, threshold)

      imager::save.image(current_frame_threshold[[1]], current_frame %>% str_replace("_raw.png", "_threshold.png"))

      current_frame_threshold[[2]]$filename = current_frame_threshold[[2]]$filename %>% str_replace("_raw", "")
      utils::write.csv(current_frame_threshold[[2]], current_frame %>% str_replace("_raw.png", "_width.csv"))

      current_frame_spread = image_intensity_spread(current_frame)
      current_frame_spread$frame = current_frame_spread$frame %>% str_replace("_raw", "")
      utils::write.csv(current_frame_spread, current_frame %>% str_replace("_raw.png", "_profile.csv"))
    }
  }

  message("Copying results")

  output_file_base = paste(output_folder, "/", basename(file_path_sans_ext(video_path)), "_", roi_name, "_", sep = "")

  # Re-encode the raw and thresholded frame sequences as AVI videos.
  file_list = list.files(scratch, full.names = TRUE, pattern = "_raw.png$")
  av::av_encode_video(file_list, output = paste(output_file_base, "raw.avi", sep = ""),codec = "libx264", verbose = 24)

  file_list = list.files(scratch, full.names = TRUE, pattern = "_threshold.png$")
  load.image(file_list[[1]]) %>% plot()
  av::av_encode_video(file_list, output = paste(output_file_base, "threshold.avi", sep = ""),codec = "libx264", verbose = 24)

  # Collate per-frame width CSVs and rename columns to the pipeline names.
  file_list = list.files(scratch, full.names = TRUE, pattern = "_width.csv$")
  all_csv = lapply(file_list, read.csv) %>% bind_rows()
  all_csv = all_csv %>% mutate(X = NULL,
                               y_position = y, y = NULL,
                               y_position_excluded = excluded, excluded = NULL,
                               frame = filename, filename = NULL,
                               roi_name = roi_name)
  # NOTE(review): paste() without sep = "" writes "..._ width.csv" (with a
  # space). quantify_directory() matches the pattern "_ width.csv", so the
  # two are consistent -- do not "fix" one without the other.
  write.csv(all_csv, paste(output_file_base, "width.csv"))

  # Collate per-frame intensity-profile CSVs.
  file_list = list.files(scratch, full.names = TRUE, pattern = "_profile.csv$")
  all_csv = lapply(file_list, read.csv) %>% bind_rows()
  all_csv = all_csv %>% mutate(X = NULL, x_position = x, x = NULL, roi_name = roi_name)
  write.csv(all_csv, paste(output_file_base, "profile.csv", sep = ""))

  # Clean up the scratch directory and report the elapsed time.
  unlink(gsub("/$", "", scratch), recursive = TRUE, force = TRUE)
  print(Sys.time() - starttime)
}
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/multiple_processing_threshold_v3.R
#' Quantify the content of an entire directory of sub-directories at once
#'
#' @param target_folder The folder to quantify the readings in
#'
#' @return A PDF file for each directory quantified, showing the quantification
#'
#' @importFrom stringr str_remove
#' @importFrom grDevices dev.off pdf
#' @importFrom stats runif
#' @importFrom ggplot2 geom_hline
#'
#'
#'
#' @export
quantify_directory = function(target_folder)
{
  # The "_ width.csv" pattern (with a space) matches the file names written
  # by threshold_apply(), which calls paste() without sep = "".
  width_csv = list.files(target_folder, full.names = TRUE, recursive = FALSE, pattern = "_ width.csv")

  # Read every width CSV and tag each with its ROI name.
  width_csv_list = foreach(current_csv = width_csv, .combine = bind_rows) %do%
  {
    import_data = read.csv(current_csv)
    import_data$roi_name = basename(current_csv) %>% str_remove("_ width.csv")
    return(import_data)
  }

  target_folder = output_dir(set = target_folder, use_default = TRUE)
  gc()

  pixel_bin = 100

  # Bin y positions into 100-pixel groups.
  width_csv_list$roi_name = str_to_upper(width_csv_list$roi_name)
  width_csv_list$y_group = (((width_csv_list$y_position-1) %/% pixel_bin) + 1)

  # Keep only the most heavily populated bins for each ROI.
  width_csv_list = width_csv_list %>% group_by(roi_name, y_group) %>% mutate(n = n()) %>% ungroup() %>%
    group_by(roi_name) %>% filter(n == max(n)) %>% mutate(n = NULL) %>% ungroup()

  # Drop rows that were ever flagged as excluded for this ROI.
  width_csv_list = width_csv_list %>% group_by(roi_name, y_position_excluded) %>%
    mutate(y_position_excluded = max(y_position_excluded)==1) %>%
    filter(y_position_excluded == 0) %>% ungroup()

  # Split roi_name into "<video>_<roi>" and number frames per pixel row.
  all_ygroup = width_csv_list %>% select(roi_name) %>% distinct() %>%
    separate(roi_name, c("video", "roi"), sep = "_", extra = "merge", remove = FALSE) %>%
    left_join(width_csv_list) %>% mutate(roi_name = NULL) %>%
    group_by(roi, y_group, y_position) %>% mutate(frame = row_number()) %>% ungroup()

  # Mean width per bin per frame, with frames renumbered across videos.
  summary_ygroup = width_csv_list %>% group_by(y_group, frame, roi_name) %>%
    summarise(mean_width = mean(p_width)) %>%
    separate(roi_name, c("video", "roi"), sep = "_", extra = "merge", remove = FALSE) %>%
    ungroup() %>% group_by(roi, y_group) %>% arrange(video, frame) %>%
    mutate(frame = row_number()) %>% ungroup()

  # Frame index where each source video ends (drawn as breaks on the plots).
  video_breaks = summary_ygroup %>% group_by(video) %>% summarise(maxframe = max(frame))

  locations_plot = summary_ygroup %>% ggplot() +
    geom_line(aes(x = frame, y = mean_width, color = as.factor(y_group), linetype = video)) +
    facet_wrap(~roi) +
    geom_vline(xintercept = video_breaks$maxframe)

  localdata = summary_ygroup %>% group_by(roi, y_group, video) %>% group_split()

  # One detection page per (roi, y_group, video) trace.
  pdf(paste(target_folder, "\\", basename(target_folder), "_contraction_detection.pdf", sep = ""), onefile = TRUE, height = 8.27, width = 11.69)
  all_contractions = foreach(i = c(1:(length(localdata)))) %do%
  {
    print(paste(i, "in", length(localdata)))
    result = find_contraction_events(input_vector = localdata[[i]]$"mean_width", kband = 30)
    print(result[[1]] + labs(title = unique(localdata[[i]]$roi_name)))

    # FIX(review): result[[2]] is either NA (no events) or a data frame.
    # The original tested if(is.na(result[[2]])), which is an error on
    # R >= 4.2 when a data frame is returned (condition of length > 1);
    # testing for the data frame directly is equivalent and version-safe.
    if(!is.data.frame(result[[2]]))
    {return(NULL)}
    else
    {
      table = result[[2]]
    }

    # Shift event indices from trace-local to global frame numbering.
    start_time = min(localdata[[i]]$frame)
    table$event_maxima = table$event_maxima + start_time
    table$event_start = table$event_start + start_time
    table$event_end = table$event_end + start_time
    table$roi = unique(localdata[[i]]$roi)
    table$roi_name = unique(localdata[[i]]$roi_name)
    table$video = unique(localdata[[i]]$video)
    table$y_group = unique(localdata[[i]]$y_group)
    rm(result)
    gc()
    return(table)
  }
  dev.off()

  contractions = bind_rows(all_contractions)
  # rm(all_contractions)
  # gc()

  # Parse treatment / animal / site / tree / segment out of the ROI string.
  # NOTE(review): "[A-z]" spans more than the letters (it includes [, ], ^,
  # _, `); retained as-is since the inputs are upper-cased names.
  # FIX(review): substr(vessel, 3, length(vessel)) used the column length
  # (row count) as the stop position; nchar(vessel) is the string length
  # that was clearly intended, and is identical whenever rows >= name length.
  contractions = contractions %>% mutate(roi = str_to_upper(roi)) %>%
    separate(roi, c("remaining", "vessel"), sep = "_", remove = FALSE) %>%
    mutate(treatment = substr(remaining, 1,3), treatment = str_remove(treatment, "[0-9]")) %>%
    mutate(animal = substr(remaining, 3,5), animal = str_remove(animal, "[A-z]")) %>%
    mutate(site = str_remove(remaining, paste(treatment, animal, "S", sep = ""))) %>%
    mutate(remaining = NULL) %>%
    mutate(tree = substr(vessel,1,1), segment = substr(vessel, 3, nchar(vessel)))

  # Same parse for the per-pixel data (same nchar fix as above).
  ygroup_details = all_ygroup %>% select(video, roi) %>% distinct() %>%
    mutate(roi = str_to_upper(roi)) %>%
    separate(roi, c("remaining", "vessel"), sep = "_", remove = FALSE) %>%
    mutate(treatment = substr(remaining, 1,3), treatment = str_remove(treatment, "[0-9]")) %>%
    mutate(animal = substr(remaining, 3,5), animal = str_remove(animal, "[A-z]")) %>%
    mutate(site = str_remove(remaining, paste(treatment, animal, "S", sep = ""))) %>%
    mutate(remaining = NULL) %>%
    mutate(tree = substr(vessel,1,1), segment = substr(vessel, 3, nchar(vessel)))

  full_details = contractions %>% select(video, vessel, treatment, animal, site, tree, roi, segment) %>% distinct() %>% mutate(roi = str_to_upper(roi))

  all_ygroup = all_ygroup %>% mutate(X = NULL, y_position_excluded = NULL)
  all_ygroup_2 = all_ygroup %>% inner_join(ygroup_details)
  all_ygroup_2 = all_ygroup_2 %>% group_by(animal, treatment, site, tree) %>% mutate(nvideo = length(unique(video)))

  # Group simultaneous events, derive physiology, and attach video counts.
  contractions = add_contraction_group(contractions)
  contractions = calculate_physiological(contractions) # %>% filter(CA>0.015)
  contractions = all_ygroup_2 %>% ungroup() %>% select(tree, nvideo) %>% distinct() %>%
    inner_join(contractions, by = c('tree'))

  # Summary PDF: the overview plot plus one heatmap page per tree.
  pdf(paste(target_folder, "\\", basename(target_folder), "_contraction_summary.pdf", sep = ""), onefile = TRUE, height = 8.27, width = 11.69)
  print(locations_plot)

  foreach(current_tree = unique(all_ygroup_2$tree)) %do%
  {
    print(current_tree)

    # Downsample to every second pixel/frame and stack the segments into a
    # single continuous y axis for the heatmap.
    sub_all = all_ygroup_2 %>% select(p_width, y_position, frame, tree, segment, y_group) %>%
      subset(tree == current_tree) %>%
      mutate(y_position = y_position - (y_position %% 2) , frame = frame - (frame %%2)) %>%
      group_by(y_position, frame, segment, y_group) %>%
      summarise(p_width = mean(p_width, na.rm = TRUE)) %>%
      arrange(segment, y_position) %>%
      group_by(segment, y_position) %>% mutate(y_position = cur_group_id())

    # Where to draw each y bin's events on the stacked axis.
    ygroup_pos = sub_all %>% select(y_group, segment, y_position) %>% distinct() %>%
      group_by(y_group, segment) %>% summarise(y_group_pos = mean(y_position))

    sub_cont = contractions %>% subset(tree == current_tree)
    sub_cont = left_join(sub_cont, ygroup_pos)
    sub_cont$cont_group_id = as.factor(as.numeric(as.factor(sub_cont$contraction_group)))

    graphic = ggplot() +
      geom_raster(aes(y = frame, x = y_position, fill = p_width), data = sub_all)+
      scale_fill_viridis_c(option = "mako") +
      geom_point(aes(y = event_maxima, x = y_group_pos, size = CA, color = cont_group_id), data = sub_cont) +
      geom_errorbar(aes(ymin = event_start*22.8, ymax = event_end*22.8, x = y_group_pos), data = sub_cont, color = "white") +
      geom_hline(yintercept = video_breaks$maxframe, color = "grey") +
      geom_line(aes(x = y_group_pos, y = event_maxima, color = cont_group_id), data = sub_cont) +
      scale_x_reverse() +
      coord_flip()

    print(graphic)
    rm(sub_all)
    rm(ygroup_pos)
    rm(graphic)
    gc()
    return(NULL)
  }
  dev.off()

  write.csv(contractions, paste(target_folder, "\\", basename(target_folder), "_contraction.csv", sep = ""))
  # write.csv(summary_ygroup, paste(target_folder, "\\", basename(target_folder), "_y_group_summary.csv", sep = ""))
  # write.csv(all_ygroup_labeled, paste(target_folder, "\\", basename(target_folder), "_all_ygroup.csv", sep = ""))

  rm(localdata, locations_plot, all_contractions)
  rm(contractions, summary_ygroup, all_ygroup)
  gc()
  return(NULL)
}
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/quantify_directory.R
#' Select a ROI from a video file
#'
#' This function provides a graphical tool to walk the user through selecting a ROI from an AVI video.
#' The user draws a line along the vessel (to set the rotation), draws the ROI
#' rectangle, then iteratively tunes the threshold before the full
#' quantification is run via threshold_apply().
#'
#' @return Saves an annotated AVI and CSV file in the same directory as the video.
#' Will also output and copy the parameters used to create the video.
#'
#' @export
#'
#' @importFrom av av_video_images av_encode_video
#' @importFrom imager load.image grabLine draw_rect grabLine imrotate grabRect as.imlist as.cimg draw_rect
#' @importFrom rlang hash
#' @importFrom utils menu
#' @importFrom tools file_path_sans_ext
#' @importFrom rlang hash
#' @importFrom magrittr %>%
#' @importFrom crayon green
#' @importFrom svDialogs dlg_input
#'
#'
#' @examples
#'
#' \dontrun{
#' select_roi()
#' }
#'
select_roi = function()
{
  # Interactive file pick; frames are unpacked to a hashed scratch folder.
  video_path = file.choose()
  video_folder = dirname(video_path)
  video_name = basename(file_path_sans_ext(video_path))
  workingdir = paste(scratch_dir(),"/",hash(video_name), sep = "")
  av_video_images(video_path, format = "png", destdir = workingdir, fps = 1)
  raw = load.image(paste(workingdir,"/image_000001.png", sep = ""))

  # Two nested accept/retry loops: crop selection and threshold selection.
  latchcrop = TRUE
  latchthresh = TRUE
  while(latchcrop || latchthresh){

    while(latchcrop)
    {
      # User draws a line along the vessel; convert it to a rotation that
      # puts the vessel vertical.
      # NOTE(review): degrees/abs(degrees) is NaN for an exactly horizontal
      # line (degrees == 0) -- confirm how that case should behave.
      angleselect = grabLine(raw, output = "coord")
      degrees = atan((angleselect["y0"]-angleselect["y1"])/(angleselect["x0"]-angleselect["x1"]))/(2*pi)*360
      rotation = (90-abs(degrees))%%90 * (degrees/abs(degrees))
      radians = rotation/360*(2*pi)

      rotated = imrotate(raw, rotation)

      # User drags the ROI rectangle; normalise to start + even lengths
      # (video encoders require even frame dimensions).
      areaselect = grabRect(rotated, output = "coord")
      xstart = min(areaselect["x1"],areaselect["x0"])
      ystart = min(areaselect["y1"],areaselect["y0"])
      ylength = max(areaselect["y1"],areaselect["y0"])-ystart
      xlength = max(areaselect["x1"],areaselect["x0"])-xstart
      ylength = ylength + ylength %% 2
      xlength = xlength + xlength %% 2

      cropped = crop_dims(rotated, xstart, ystart, xlength, ylength)
      plot(cropped)

      status = menu(c("Change", "Accept", "Exit"))
      if(status == 3)
      {
        stop("Function exited")
      } else if(status == 2)
      {
        latchcrop = FALSE
        latchthresh = TRUE
      }
    }

    # Re-extract all frames with the accepted rotation + crop applied.
    filter_string = paste("rotate = '",radians,":out_w=rotw(",radians,"):out_h=roth(",radians,"):c = red',",
                          "crop=",xlength,":",ylength,":",xstart,":",ystart,"",
                          sep = "")
    av_encode_video(video_path, paste(workingdir, "/test1w%03d_cropped.png", sep = ""), vfilter = filter_string, codec = "png")
    file_list = list.files(workingdir, full.names = TRUE, pattern = "\\_cropped.png$")

    # Sample ~6 evenly spaced frames for the threshold preview grid.
    prettylist = pretty(1:length(file_list),n = 6)
    prettylist = subset(prettylist, prettylist<length(file_list))
    file_list_s = file_list[prettylist]

    # Starting threshold = mean auto-threshold over every frame.
    # NOTE(review): this averages over file_list (all frames), not the
    # file_list_s sample -- presumably intentional for stability; confirm.
    overallthreshold = mean(unlist(lapply(file_list, calculate_auto_threshold)))
    autothreshold = overallthreshold

    latchthresh = TRUE
    while(latchthresh){

      # Preview grid of the sampled frames at the current threshold.
      plotmatrix = as.imlist(lapply(lapply(file_list_s, threshold_image, threshold = overallthreshold), function(x) { as.cimg(x[[1]])}))
      make_matrix(plotmatrix, width = 10) %>% plot

      # NOTE(review): "thresold" typo below is in a runtime string; left
      # untouched here as fixing it is a behaviour change.
      print(paste("Default thresold:", autothreshold))
      print(paste("Current threshold is:", overallthreshold))
      print("Input any number less than 1 to change the threshold")

      status = menu(c("Change", "Accept", "Re-crop", "Exit"))
      if(status == 4)
      {
        stop("Function exited")
      } else if(status == 2)
      {
        latchthresh = FALSE
      } else if(status == 3)
      {
        # Back to the crop loop, keeping the session alive.
        latchthresh = FALSE
        latchcrop = TRUE
      } else
      {
        # Read a replacement threshold from the console.
        cat("\n New threshold")
        overallthreshold <- as.numeric(scan(what=character(),nmax=1,quiet=TRUE))
      }
    }
  }

  # Show the final ROI on the rotated frame for confirmation.
  draw_rect(rotated,xstart,ystart,xstart+xlength, ystart+ylength,color="green",opacity=0.3) %>% plot()

  print(paste("File name is:", video_name))

  # Collect metadata from the user and build the canonical ROI name.
  roi_name = make_filename(list = collect_filename(video = video_name))

  video_path = gsub("\\\\", "/", video_path)

  # Echo a ready-to-paste threshold_apply() call so the run can be repeated
  # non-interactively.
  variables = paste("threshold = '",overallthreshold,"',",
                    "roi_name = '",roi_name,"',",
                    "video_path = '", video_path, "',",
                    " radians = ", radians,",",
                    " xlength = ", xlength,",",
                    " ylength = ", ylength,",",
                    " xstart = ", xstart,",",
                    " ystart = ", ystart, sep ="")
  function_string = paste("threshold_apply(",variables,")")
  function_string = paste("\n", function_string, "\n ")
  cat(crayon::green(function_string))

  # Run the full quantification with the chosen parameters.
  threshold_apply(threshold = overallthreshold, roi_name = roi_name, video_path = video_path, radians = radians, xlength = xlength, ylength = ylength, xstart = xstart, ystart = ystart, image_list = file_list)

  unlink(workingdir, recursive = TRUE)
}
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/roi_select.R
#' Threshold an image
#'
#' Segments the vessel in a single frame by thresholding the first (red)
#' channel, and measures the vessel width on every pixel row.
#'
#' @param file_path Path to the image for quantification
#' @param threshold Threshold to set for the inverse channel. Range 0-1.
#' @param min_area The minimum number of pixels in an area to not ignore as noise
#'
#' @return A list containing
#' 1) the processed threshold image
#' 2) calculated widths
#'
#' @importFrom imager load.image imsplit clean parany
#' @importFrom dplyr summarise group_by
#' @importFrom magrittr `%>%`
#' @importFrom purrr discard
#'
#' @noRd
#'
#' @examples
#' # No examples
#'
threshold_image <- function(file_path, threshold, min_area = 100)
{
  # Load the image and split it into its channels (first channel = red).
  im <- imager::load.image(file_path)
  im.c = imager::imsplit(im,"c")[[1]]
  # plot(im.c)
  # Histogram plot if needed for debugging
  #hist(as.data.frame(im.c)$value)

  # Threshold the image: vessel pixels are darker than the threshold.
  # clean() removes speckle noise from the binary mask.
  px = im.c<threshold
  px = imager::clean(px,2)
  # plot(px)

  # If pixels have been located, split them into contiguous areas and keep
  # only those of at least min_area pixels.
  if(sum(px)>0)
  {
    pxconn = imager::split_connected(px) %>% purrr::discard(~ sum(.) < min_area)
  } else
  {
    pxconn = list()
  }

  # If there is more than one area that passes, stick them together into a single mask.
  # NOTE(review): when exactly one area passes, the UNfiltered mask px is
  # used (small noise areas included) -- confirm whether that is intended.
  # NOTE(review): parany %>% plot relies on plot() returning its input;
  # verify against the imager pixset plot method.
  if(length(pxconn)>1)
  {
    region = pxconn %>% parany %>% plot
  } else
  {
    region = px
  }

  # Plot out the greyscale for debugging
  #hist(as.data.frame(grayscale(im))$value)

  # Find saturated (overexposed) pixels and expand them by 10px so nearby
  # rows are also treated as unreliable.
  maximal <- (im.c>0.99)
  maximal = imager::grow(maximal, 10)

  # Superimpose the two searches to find any overlap (value 2 = both).
  overlaid_regions = region+maximal

  # Combine the data into a visualization: vessel boundary + saturation
  # overlaid on the original frame.
  boundary_lines = imager::imappend(imager::imlist(imager::boundary(region)==2,imager::boundary(region), maximal), "c")
  imagelist = imager::imlist(imager::as.cimg(boundary_lines), im)
  overlap_image = imager::parmax(imagelist)
  # plot(overlap_image)

  # Flag each pixel row that touches a saturated region.
  overlapping = as.data.frame(overlaid_regions)
  yoverlap = overlapping %>% dplyr::group_by(`y`) %>% dplyr::summarise(bubble = max(`value`)==2)

  # Vessel width per row = count of mask pixels on that row.
  output = as.data.frame(as.cimg(region))
  widths = output %>% dplyr::group_by(`y`) %>% dplyr::summarise(p_width = sum(`value`))

  # Unify data and add metadata.
  widths$excluded = yoverlap$bubble
  widths$filename = basename(tools::file_path_sans_ext(file_path))

  return(list(overlap_image, widths))
}

#' Apply a threshold to a single frame
#'
#' Thin exported wrapper around threshold_image().
#'
#' @param file_path path to the file to be used. If left blank, the user will be
#' prompted to make a selection
#' @param threshold The threshold to use
#' @param min_area Minimum area to recognize as a vessel. Any smaller items will
#' be ignored
#'
#' @return a data frame containing the widths of the vessel in each row of the image,
#' and if any rows were excluded due to overexposure
#'
#' @export
threshold_vessel <- function(file_path = tk_file.choose(), threshold, min_area = 100)
{
  threshold_image(file_path, threshold, min_area)
}

#' Mean inverted red intensity of each image column
#'
#' @param image_test The image to test
#'
#' @return a data frame with one row per x column: mean inverted intensity,
#'   x shifted so the darkest column sits at 0, and the frame id
#'
#' @importFrom imager R
#' @importFrom tools file_path_sans_ext
#' @importFrom dplyr group_by mutate summarise ungroup
#'
#' @noRd
#'
#' @examples
image_intensity_spread = function(image_test)
{
  frameid = file_path_sans_ext(image_test) %>% basename()

  # Invert the red channel (vessel is dark), average per column, then centre
  # the x axis on the darkest column.
  red_data = load.image(image_test) %>% R() %>% as.data.frame() %>%
    group_by(x) %>% mutate(value = 1-value) %>%
    summarise(mean = mean(value)) %>% ungroup() %>%
    mutate(shiftedx = x - x[which.max(mean)]) %>%
    mutate(frame = frameid)

  return(red_data)
}

# NOTE(review): everything below is an old, fully commented-out copy of
# threshold_apply() (the live version is in
# multiple_processing_threshold_v3.R); retained verbatim.

#' #' Threshold a video with pre-determined parameters
#' #'
#' #' Using pre-determined values this function generates ROI from a video. If parameters are not known, use select_roi()
#' #' This function is optimized to run in parallel, so should be relatively rapid. If running slowly, check the scratch disk is set correctly.
#' #'
#' #' @param threshold The threshold for the red channel. Range 0-1.
#' #' @param roi_name Name assigned to the region of interest #' #' @param video_path Location of the video file to process #' #' @param radians Degrees to rotate the image, in radians #' #' @param xlength Number of x pixels in the ROI #' #' @param ylength Number of y pixels in the ROI #' #' @param xstart ROI starting x co-ordinate #' #' @param ystart ROI starting y co-ordinate #' #' @param image_list If pre-computed, a list of images to use rather than a video #' #' @param output_folder The folder to save the results in, if required #' #' #' #' #' #' @return Saves the quantified CSV and overlaid video in the same directory as the video #' #' #' #' @importFrom utils setTxtProgressBar write.csv read.csv #' #' @importFrom foreach `%dopar%` foreach #' #' @importFrom tools file_path_sans_ext #' #' @importFrom magrittr `%>%` #' #' @importFrom imager as.cimg #' #' @importFrom doFuture registerDoFuture #' #' @importFrom tcltk tk_choose.dir #' #' #' #' #' #' @export #' #' #' #' #' threshold_apply = function(threshold = 0.5, roi_name = "test", video_path = 'image826.avi',radians = 0.217604550320612,xlength = 60,ylength = 242,xstart = 696,ystart = 323, image_list = NULL) #' { #' #' output_folder = dirname(video_path) #' #' #' video_folder = dirname(video_path) #' #' video_name = roi_name #' #' #' #' if(!is.null(image_list)) #' { #' file_list = image_list #' temp_path = dirname(file_list[1]) #' } #' else #' { #' temp_path = paste(scratch_dir(), "/", video_name, sep = "") #' dir.create(temp_path) #' #' filter_string = paste("rotate = '",radians,":out_w=rotw(",radians,"):out_h=roth(",radians,"):c = red',", #' "crop=",xlength,":",ylength,":",xstart,":",ystart,"", #' sep = "") #' #' av::av_encode_video(video_path, paste(temp_path, "/%03d.png", sep = ""), vfilter = filter_string, codec = "png") #' #' file_list = list.files(temp_path, full.names = TRUE, pattern = "\\.png$") #' } #' #' crop_file_list = file_list #' av::av_encode_video(crop_file_list, output = paste(temp_path, 
"/crop.avi", sep = ""),codec = "libx264", verbose = 24) #' file.copy(paste(temp_path, "/crop.avi", sep = ""), paste(output_folder,"/", basename(file_path_sans_ext(video_path)),"_",video_name,"_cropped.avi", sep = "")) #' #' #' registerDoFuture() #' plan(multisession) #' #' options(future.rng.onMisuse = "ignore") #' #' #' result <- foreach(i = file_list, .combine = rbind) %dopar% #' { #' #' processed_image = threshold_image(i, threshold) #' #' file_path = i #' #' save_image_path = paste(tools::file_path_sans_ext(i), "_overlaid.png", sep = "") #' save_csv_path = paste(tools::file_path_sans_ext(i), "_overlaid.csv", sep = "") #' #' imager::save.image(processed_image[[1]], save_image_path) #' utils::write.csv(processed_image[[2]], save_csv_path) #' } #' #' #' #' file_list = list.files(temp_path, full.names = TRUE, pattern = "\\overlaid.png$") #' av::av_encode_video(file_list, output = paste(temp_path, "/overlaid.avi", sep = ""),codec = "libx264", verbose = 24) #' #' #' file_list = list.files(temp_path, full.names = TRUE, pattern = "\\overlaid.csv$") #' ldf <- lapply(file_list , read.csv) #' df.final <- do.call("rbind", ldf) #' #' write.csv(df.final, paste(temp_path, "/widths.csv", sep = "")) #' #' # dir.create(paste(output_folder,"/", basename(file_path_sans_ext(video_path)), "_processing/", sep = "")) #' #' file.copy(paste(temp_path, "/widths.csv", sep = ""), paste(output_folder,"/", basename(file_path_sans_ext(video_path)),"_", video_name,"_widths.csv", sep = "")) #' #' file.copy(paste(temp_path, "/overlaid.avi", sep = ""), paste(output_folder,"/", basename(file_path_sans_ext(video_path)),"_",video_name,"_overlaid.avi", sep = "")) #' #' unlink(temp_path, recursive = TRUE) #' #' }
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/thresholding.R
#' Calculate an automatically generated threshold as a number
#'
#' Runs imager's automatic thresholding on the first channel of the image
#' and returns the smallest pixel value the auto-threshold kept -- i.e. the
#' numeric cutoff it implicitly used.
#'
#' @param file_path path of the file to calculate the threshold from
#'
#' @importFrom magrittr %>%
#' @importFrom imager load.image imsplit
#'
#' @return The numerical threshold automatically calculated by imager
#'
#' @noRd
#'
#' @examples
#' # calculate_auto_threshold(imager::boats)
#'
calculate_auto_threshold <- function(file_path) {
  # Load the image and keep the first channel only
  im <- imager::load.image(file_path)
  im.c <- imager::imsplit(im, "c")[[1]]

  # Binary mask produced by imager's automatic threshold
  threshold_mask <- as.data.frame(imager::as.cimg(imager::threshold(im.c)))

  # Pair every pixel value with its mask flag; the smallest value that
  # survived the threshold is the effective numeric cutoff
  im.c.df <- as.data.frame(im.c)
  im.c.df$masked <- threshold_mask$value
  auto_threshold <- min(subset(im.c.df, im.c.df$masked > 0)$value)

  return(auto_threshold)
}

#' Crop an image, starting at (x,y) co-ordinates
#'
#' Crop an image with the same data used by FFMPEG, allowing for the same
#' co-ordinates to be used with av. Note av is much faster in aggregate so
#' should be used for processing whole videos rather than generating and
#' cropping a series of images.
#'
#' @param img an imager image to crop
#' @param xstart starting x co-ordinate
#' @param ystart starting y co-ordinate
#' @param xlength x axes length
#' @param ylength y axes length
#'
#' @importFrom dplyr filter
#' @importFrom imager autocrop as.cimg
#'
#' @return a cropped image
#'
#' @noRd
#'
#' @examples
#' # TEST HERE
#'
crop_dims <- function(img, xstart, ystart, xlength, ylength) {
  # Work on the pixel data frame so the crop matches FFMPEG's window semantics
  img.df <- as.data.frame(img)

  # Keep pixels strictly inside the requested window
  img.df.c <- img.df %>%
    filter(img.df$x > xstart, img.df$y > ystart,
           img.df$x < xstart + xlength, img.df$y < ystart + ylength)

  # Rebuild an image and trim the empty border left by filtering
  toreturn <- autocrop(as.cimg(img.df.c))
  return(toreturn)
}

#' Tile multiple images into a single image
#'
#' @param output_list list of images to turn into a matrix
#' @param width matrix width in images, default 2
#'
#' @importFrom imager imlist pad ci imappend
#'
#' @return an image, with each input arranged in a matrix
#'
#' @noRd
#'
#' @examples
#' imagelist = imager::imlist(imager::boats, imager::boats, imager::boats)
#' matrix = make_matrix(imagelist)
#' plot(matrix)
#'
make_matrix <- function(output_list, width = 2) {
  total_images <- length(output_list)

  # Accumulators: tiles in the current strip, and the completed strips.
  # Each strip is appended along x, and strips are stacked along y.
  current_col <- imlist()
  overall_matrix <- imlist()
  current_width <- 0

  for (i in seq_len(total_images)) {
    # Pad each tile so neighbouring images do not touch
    currentimg <- output_list[i][[1]]
    currentimg <- pad(currentimg, 5, "xy")
    current_col <- ci(current_col, currentimg)
    current_width <- current_width + 1

    # Strip complete (or input exhausted): flatten it and start a new one
    if (width == current_width || i == total_images) {
      current_col_image <- imappend(current_col, "x")
      overall_matrix <- ci(overall_matrix, current_col_image)
      current_col <- imlist()
      current_width <- 0
    }
  }

  returnimage <- imappend(overall_matrix, "y")
  return(returnimage)
}

#' Set the output directory
#'
#' @param set The directory to set to
#' @param use_default Should the default value be used, or the system value
#' @param set_default Should the system value be updated
#'
#' @return The file path to export to
#'
#' @export
output_dir <- function(set = NULL, use_default = FALSE, set_default = FALSE) {

  # NOTE(review): this body appears adapted from scratch_dir() and mixes the
  # "quantifyvessel-output_dir" and "quantifyvessel-scratch_dir" options; it
  # also stores `use_default` and returns `set` in the branches below.
  # Preserved as-is because callers may rely on the current behaviour --
  # confirm intent before changing.

  if (isTRUE(set_default)) {
    options("quantifyvessel-output_dir" = use_default)
  }

  if (isTRUE(use_default) && isTRUE(unlist(options("quantifyvessel-output_dir")))) {
    return(set)
  }

  if (!is.null(set)) {
    options("quantifyvessel-scratch_dir" = set)
  }

  # Fall back to the R session temporary directory when nothing is configured
  if (is.null(unlist(options("quantifyvessel-scratch_dir")))) {
    scratch <- tempdir()
  } else {
    scratch <- options("quantifyvessel-scratch_dir")[[1]]
  }

  if (scratch == tempdir()) {
    print("Outputting to temporary directory")
  }

  return(scratch)
}

#' Set the scratch directory for vmeasur
#'
#' vmeasur uses av to unpack temporary image files, which are then stored for
#' further usage. This runs better if done to a high speed storage location such
#' as a ram drive. This function sets that directory, and provides other options
#' for specifying the structure of this temporary data.
#'
#' If not specified, the default R tempdir is used
#'
#' @param set new directory to set. If left blank, no directory change will occur
#' @param random_subfolder Should a random sub folder be created
#' @param file_name Specify the name of the directory
#' @param wipe_scratch Should the folder be cleared before use
#'
#' @return the current location of the scratch directory
#'
#' @importFrom stringr str_replace_all
#'
#' @export
#'
#' @examples
#' scratch_dir()
#' scratch_dir("R:")
#'
scratch_dir <- function(set = NULL, random_subfolder = FALSE, file_name = FALSE,
                        wipe_scratch = FALSE) {

  # Persist a new scratch location if one was supplied
  if (!is.null(set)) {
    options("quantifyvessel-scratch_dir" = set)
  }

  # Use the stored location, falling back to the session tempdir
  if (is.null(unlist(options("quantifyvessel-scratch_dir")))) {
    scratch <- tempdir()
  } else {
    scratch <- options("quantifyvessel-scratch_dir")[[1]]
  }

  # Derive a per-file subdirectory named after the file and its modification
  # time (":" removed so the name is valid on Windows), recreated from clean
  if (!isFALSE(file_name)) {
    file_changetime <- file.mtime(file_name) %>% str_replace_all(":", " ")
    file_changetime <- paste(basename(file_path_sans_ext(file_name)), " ",
                             file_changetime, sep = "")
    scratch <- paste(scratch_dir(), "/", file_changetime, "/", sep = "")

    if (file.exists(scratch)) {
      unlink(scratch, recursive = TRUE, force = TRUE)
    }
    dir.create(scratch)
  }

  if (isTRUE(random_subfolder)) {
    # Fix: removed a stray runif() statement whose result was discarded.
    # NOTE(review): hash() is called unqualified and is not imported above --
    # confirm which package provides it.
    randomstring <- hash(runif(1, min = 0, max = 1000))
    scratch <- paste(scratch, randomstring, "/", sep = "")
    dir.create(scratch)
  }

  if (isTRUE(wipe_scratch)) {
    # Remove every directory beneath the configured scratch root
    directory_list <- list.dirs(options("quantifyvessel-scratch_dir")[[1]])
    unlink(directory_list, recursive = TRUE, force = TRUE)
  }

  return(scratch)
}
/scratch/gouwar.j/cran-all/cranData/vmeasur/R/utility_functions.R
# This file is part of vmr.
# Copyright (c) 2021 Jean-François Rey <[email protected]>
#
# vmr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# vmr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vmr. If not, see <https://www.gnu.org/licenses/>.

# Default Vagrant Cloud organization whose boxes are listed and downloaded
.VagrantCloudOrganization <- "VMR"

# @title Print package information
# @name getInfo
# @description Displays some information about the package at startup
# @importFrom utils packageVersion
getInfo <- function() {
  packageStartupMessage("Package: vmr | Virtual Machines for R")
  packageStartupMessage("Author: Jean-Fran\u00E7ois Rey <[email protected]>")
  packageStartupMessage("Version: ", appendLF = FALSE)
  packageStartupMessage(utils::packageVersion("vmr"))
  packageStartupMessage("License: GPLV3")
  packageStartupMessage("'vmr' package is under development")
  packageStartupMessage("Be comprehensible and feedback are welcome.")
  packageStartupMessage("Do not use virtual machine for production!")
  packageStartupMessage("Enjoy!")
}

# @title Check package dependencies
# @name .checkDependencies
# @description Check if tools needed by 'vmr' package is installed.
# Reports the vagrant binary path and version when a valid install is found.
# @return check results
.checkDependencies <- function() {
  res <- vagrantIsInstalled()
  # Only report when a binary was located AND a plausible version was parsed
  if (!is.null(res) && nzchar(res$vagrant_bin) && grepl("[0-9\\.]+", res$version)) {
    packageStartupMessage(paste0("vagrant path: ", res$vagrant_bin))
    packageStartupMessage(paste0("vagrant version: ", res$version))
  }
  ## TODO Virtualbox or providers check
}

# @title Things to do at package attach
# @name .onAttach
# @param libname a character string giving the library directory where
# the package defining the namespace was found.
# @param pkgname a character string giving the name of the package.
# @description Print package information and check dependencies
.onAttach <- function(libname, pkgname) {
  getInfo()
  .checkDependencies()
}

# @title Things to do at package load
# @name .onLoad
# @param libname a character string giving the library directory where
# the package defining the namespace was found.
# @param pkgname a character string giving the name of the package.
# @description Initialise package state: default verbosity level and the
# path to the vagrant binary (falls back to "vagrant" on the PATH).
.onLoad <- function(libname, pkgname) {
  # vmr_env is the package-internal state environment (defined elsewhere)
  vmr_env$verbose_mode <- 1
  vagrant_exec <- Sys.which("vagrant")
  if (nzchar(vagrant_exec)) {
    vmr_env$vagrant_bin <- vagrant_exec
  } else {
    vmr_env$vagrant_bin <- "vagrant"
  }
}

# @title manage verbose message
# @description print message if verbose_mode is >= to verbose_val
# @param verbose_val minimum verbosity level at which the message is printed
# @param ... messages to print
printVerbose <- function(verbose_val = 0, ...) {
  if (vmr_env$verbose_mode >= verbose_val) {
    cat(paste0(" vmr ==> ", paste(..., collapse = " "), "\n"))
  }
  return(invisible(NULL))
}
/scratch/gouwar.j/cran-all/cranData/vmr/R/package.R
# This file is part of vmr.
# Copyright (c) 2021 Jean-François Rey <[email protected]>
#
# vmr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# vmr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vmr. If not, see <https://www.gnu.org/licenses/>.

# @title System call to vagrant (old version)
# @name vagrantExec2
# @description execute vagrant command with parameters
# @param args character vector of arguments to vagrant command
# @param stdout if "" print output, if TRUE capture and return it and FALSE discard ouput
# @param stderr if "" print stderr, if TRUE capture and return it and FALSE discard stderr
# @return execution code or error output
vagrantExec2 <- function(args = character(), stdout = "", stderr = "") {
  out <- -1
  tc <- tryCatch(
    {
      printVerbose(2, "Execute:", "vagrant ", paste(args))
      # Relies on "vagrant" being resolvable on the PATH (unlike vagrantExec
      # below, which uses the binary path cached in vmr_env)
      out <- system2("vagrant", args, stdout = stdout, stderr = stderr)
    },
    error = function(cond) {
      message(cond)
      warning("vagrant command went wrong!\n", immediate. = TRUE)
    },
    warning = function(cond) {
      message(cond)
      warning("vagrant command may went wrong!\n")
    },
    finally = {
    }
  )
  return(invisible(out))
}

# @title System call to vagrant
# @name vagrantExec
# @description execute vagrant command with parameters
# @param args character vector of arguments to vagrant command
# @param stdout if "" print output, if TRUE capture and return it and FALSE discard ouput
# @param stderr if "" print stderr, if TRUE capture and return it and FALSE discard stderr
# @return execution code (0 if OK) otherwise cmd output if stdout and/or stderr is TRUE
vagrantExec <- function(args = character(), stdout = "", stderr = "") {
  out <- -1
  printVerbose(2, "Execute: '", vmr_env$vagrant_bin, " ", paste(args, collapse = " "), "'")
  # Unless the caller explicitly asked to capture output, silence the command
  # when the package verbosity is low.
  # NOTE(review): stdout/stderr may be "" (a string), so `!= TRUE` is a
  # string comparison here -- it works, but confirm before restyling.
  if (vmr_env$verbose_mode == 0 && stdout != TRUE) stdout <- FALSE
  if (stderr != TRUE && (vmr_env$verbose_mode == 0 || vmr_env$verbose_mode == 1)) {
    stderr <- FALSE
  }
  out <- system2(vmr_env$vagrant_bin, args, stdout = stdout, stderr = stderr)
  # When output is captured, system2 attaches a "status" attribute on failure
  if (!is.null(attr(out, "status"))) {
    warning("vmr ==> vagrant command may went wrong!\n vmr ==> output : \n", out)
  }
  return(invisible(out))
}

# @title List vagrant boxes installed
# @name vagrantBoxList
# @description execute vagrant command 'box list'
# @return a data.frame with boxes Name, Poviders and Version
vagrantBoxList <- function() {
  args <- c("box", "list")
  out <- vagrantExec(args, stdout = TRUE)

  df_box <- data.frame(Name = character(0), Provider = character(0), Version = character(0))
  # Each output line looks like: "name (provider, version)"; strip the
  # surrounding punctuation while splitting on whitespace.
  # Rows are accumulated via <<- from inside lapply.
  if( length(grep("There are no installed boxes!",out)) == 0L ) {
    l_box <- lapply(out, FUN = function(l) {
      lp <- strsplit(l, "[ ]+")
      df_box <<- rbind(
        df_box,
        c(
          lp[[1]][1],
          substr(lp[[1]][2], start = 2, stop = nchar(lp[[1]][2]) - 1),
          substr(lp[[1]][3], start = 1, stop = nchar(lp[[1]][3]) - 1)
        )
      )
    })
  }
  colnames(df_box) <- c("Name", "Provider", "Version")
  return(df_box)
}

# @title Update a vagrant box
# @name vagrantBoxUpdate
# @description execute vagrant command 'box update'.
# Will download the latest version of the current directory box
# or a specific box using arguments.
# @param box a box name to update
# @param provider a provider of the box to update
# @return execution code
vagrantBoxUpdate <- function(box = "", provider = "") {
  args <- c("box", "update")
  # Only add the optional flags the caller actually supplied
  if (nchar(box) > 0) args <- c(args, "--box", box)
  if (nchar(provider) > 0) args <- c(args, "--provider", provider)
  vagrantExec(args)
}

# @title Remove a vagrant box
# @name vagrantBoxRemove
# @description Execute vagrant command 'box remove'.
# It remove a specific box identify by it name. It provider and version can be specify.
# @param name the box name to remove
# @param provider the box provider (default: the default one)
# @param version the box version (default: the latest)
# @param force force box remove if TRUE (default: FALSE)
# @return execution code or error output
vagrantBoxRemove <- function(name, provider = "", version = "", force = FALSE) {
  args <- c("box", "remove")
  if (nchar(provider) > 0) args <- c(args, "--provider", provider)
  if (nchar(version) > 0) args <- c(args, "--box-version", version)
  if (force) args <- c(args, "--force")
  args <- c(args, name)
  vagrantExec(args)
}

# @title Download a box from VagrantCloud
# @name vagrantBoxAdd
# @description Download a box from VagrantCloud. Command 'box add'.
# @param name the box name to download
# @param version the box version (default : latest)
# @param provider the box provider (default : virtualbox)
# @param force if TRUE force box update otherwise don't
# @return a data.frame with box information (name, provider and version)
vagrantBoxAdd <- function(name, version = "latest", provider = "virtualbox", force = FALSE) {
  ## TODO check box size and harddrive space
  printVerbose(1, "The download can be long depending on the size of the box and your bandwidth")
  # Arguments are built as space-containing strings here; system2 joins them
  # into the command line, so this is equivalent to separate arguments
  args <- "box add"
  args <- c(args, paste0("--provider ", provider))
  if (version != "latest" && version != "") args <- c(args, paste0(" --box-version ", version))
  if (isTRUE(force)) args <- c(args, " --force ")
  args <- c(args, name)
  vagrantExec(args)
  return(invisible(data.frame(name = name, version = version, provider = provider)))
}

# @title Remove old versions of installed boxes
# @name vagrantBoxPrune
# @description Run vagrant command 'box prune'.
# Removes old versions of installed boxes.
# @return a data.frame with boxes information (name, provider and version)
vagrantBoxPrune <- function() {
  printVerbose(2, "Boxes before pruning\n", vagrantBoxList())
  args <- c("box", "prune", "--force", "--keep-active-boxes")
  vagrantExec(args)
  printVerbose(2, "Boxes keeped after pruning\n")
  boxes <- vagrantBoxList()
  printVerbose(2, boxes)
  return(boxes)
}

# @title Initialize a Vagrantfile
# @name vagrantInit
# @description Run vagrant command 'init'.
# Creates a Vagrantfile for a box, and force version if specified
# in the current directory.
# @param name the box name
# @param version the box version (default : "latest")
# @param force default to FALSE, if TRUE force to rewrite file
# @return Vagrantfile path
vagrantInit <- function(name, version = "latest", force = FALSE) {
  # Refuse to clobber an existing Vagrantfile unless forced
  if (file.exists("Vagrantfile") && isFALSE(force)) {
    stop("Vagrantfile already exists in ", getwd(), "\n use argument force=TRUE to override it.")
  }
  args <- "init "
  if (version != "latest") args <- c(args, paste0(" --box-version ", version))
  args <- c(args, " --minimal ")
  if (isTRUE(force)) args <- c(args, " --force ")
  args <- c(args, name)
  vagrantExec(args)
  return(normalizePath(paste0(getwd(), "/Vagrantfile")))
}

# @title Creates and configure a guest machine
# @name vagrantUp
# @description Run vagrant command 'up --no-provision'.
# Creates and configure a guest machine according to the Vagrantfile
# in current directory.
# @return execution code or error
vagrantUp <- function() {
  if (!file.exists("Vagrantfile")) {
    stop("Vagrantfile does not exists in ", getwd(), "\n use vmrInitEnv()")
  }
  args <- " up "
  args <- c(args, "--no-provision")
  printVerbose(2, "Start guest machine using VagrantFile, please wait...")
  vagrantExec(args)
}

# @title Halt a guest machine
# @name vagrantHalt
# @description Run vagrant command 'halt'.
# Shuts down the running machine in the vagrant current directory.
# @param force if TRUE force poweroff otherwise normal shut down.
# @return execution code or error
vagrantHalt <- function(force = FALSE) {
  if (!file.exists("Vagrantfile")) {
    stop("Vagrantfile does not exists in ", getwd(), "\n use startVMR")
  }
  args <- " halt "
  if (isTRUE(force)) args <- c(args, " --force ")
  printVerbose(2, "Stop box using VagrantFile in ", getwd(), "\n please wait...")
  vagrantExec(args)
}

# @title List machines states
# @name vagrantStatus
# @description Run vagrant command 'status'.
# Tell the state of machines.
# @return a list with name, provider and state, or an empty list
vagrantStatus <- function() {
  args <- "status"
  args <- c(args, "--machine-readable")
  res <- vagrantExec(args, stdout = TRUE)
  if (is.character(res)) {
    # Machine-readable output is comma-separated; fields of interest sit at
    # fixed positions on the first, second and fourth lines
    vagrantName <- strsplit(res[1], split = ",")[[1]][2]
    provider <- strsplit(res[2], split = ",")[[1]][4]
    state <- strsplit(res[4], split = ",")[[1]][4]
    return(list(vagrantName = vagrantName, provider = provider, state = state))
  } else {
    return(list())
  }
}

# @title get global-status of Vagrant environment
# @name vagrantGlobalStatus
# @description Run vagrant command 'global-status --prune'.
# Its get Vagrant global-status.
# @return a data.frame with id, name, provider, path and state or empty data.frame
vagrantGlobalStatus <- function() {
  args <- c("global-status", "--prune") # , "--machine-readable")
  res <- vagrantExec(args, stdout = TRUE)
  if ((is.null(attr(res, "status")) || attr(res, "status") == 0) && is.character(res)) {
    # First line holds the column headers; rows run from line 3 down to the
    # blank separator line. Rows are accumulated via <<- from inside lapply.
    col_name <- strsplit(res[1], split = "[ ]+")[[1]]
    global_status <- data.frame(matrix(ncol = length(col_name), nrow = 0))
    if (!identical(res, integer(0)) && !identical(which(res == " "), integer(0))) {
      trash <- lapply(res[3:(which(res == " ") - 1)], FUN = function(vm) {
        global_status <<- rbind(global_status, strsplit(vm, split = "[ ]+")[[1]])
      })
    }
    colnames(global_status) <- col_name
    return(global_status)
  }
  return(data.frame())
}

# @title Get id of a vagrant environment
# @name vagrantGetID
# @description Get id of a vagrant environment from the global-status.
# The environment is identified by name and/or directory.
# @param name vagrant environment name
# @param path vagrant environment directory
# @return the id or ""
vagrantGetID <- function(name = "", path = "") {
  res <- vagrantGlobalStatus()
  if (length(res) == 0) {
    return("")
  }
  # Match on both fields when both are given, otherwise on whichever one is;
  # an ambiguous (non-unique) match falls through and returns ""
  if (name != "" && path != "") {
    ind <- which(res$name == name & res$directory == path)
    if (length(ind) == 1) {
      return(res[ind, "id"])
    }
  }
  if (name == "" && path != "") {
    ind <- which(res$directory == path)
    if (length(ind) == 1) {
      return(res[ind, "id"])
    }
  }
  if (name != "" && path == "") {
    ind <- which(res$name == name)
    if (length(ind) == 1) {
      return(res[ind, "id"])
    }
  }
  # warning("Can't determine environment ID")
  return("")
}

# @title Run VagrantFile provision
# @name vagrantProvision
# @description This method is no more used.
# Run vagrant command 'provision'.
# Its run vagrantFile provision section.
vagrantProvision <- function() {
  if (!file.exists("Vagrantfile")) {
    stop("Vagrantfile does not exists in ", getwd(), "\n use vmrInitEnv")
  }
  args <- " provision "
  printVerbose(2, "Provision box using VagrantFile in ", getwd(), "\n please wait...")
  vagrantExec(args)
}

# @title Check if vagrant is installed and up to date.
# @name vagrantIsInstalled
# @description try to run vagrant and get version
# @return vagrant binary path and version or empty characters
vagrantIsInstalled <- function() {
  where_is_vagrant <- Sys.which("vagrant")
  version <- ""
  out <- ""
  # if can't find vagrant try 'which' sys call
  if (!nzchar(where_is_vagrant)) {
    out <- tryCatch(
      {
        # if (.Platform$OS.type == "windows") {
        #  system2("where", args = c("vagrant"), stdout = TRUE, stderr = FALSE)
        # }else {}
        system2("which", args = c("vagrant"), stdout = TRUE, stderr = FALSE)
      },
      error = function(cond) {},
      warning = function(cond) {},
      finally = {}
    )
    # 'which' returned a vagrant path and the binary exists?
    if (!is.null(out) && grepl("vagrant", out[1]) && file.exists(out[1])) where_is_vagrant <- out[1]
  }
  if (nzchar(where_is_vagrant)) {
    names(where_is_vagrant) <- NULL
    out <- suppressWarnings(try(system2(where_is_vagrant, args = c("--version"), stdout = TRUE, stderr = TRUE)))
    if (grepl("Vagrant", out[1])) {
      # Extract the dotted version number from "Vagrant x.y.z".
      # Fix: the substr end index previously omitted "- 1" and so included
      # one character past the regex match (masked only when the match ends
      # the string).
      pos_value <- regexpr("[0-9\\.]+", out[1])
      version <- substr(out[1], pos_value, pos_value + attr(pos_value, "match.length") - 1)
    }
  } else {
    packageStartupMessage(
      "Vagrant seems not to be installed.\n",
      "Please visit the page below and download and install vagrant: \n",
      "https://www.vagrantup.com/downloads.html\n"
    )
  }
  # vagrant version
  # if ( !identical(pos <- grep("Installed Version: ", out), integer(0)) ) message("# Vagrant ", out[pos])
  # if ( !identical(pos <- grep("Latest Version: ", out), integer(0)) ) message("# Please upgrade vagrant to ", out[pos])
  return(list("vagrant_bin" = where_is_vagrant, "version" = version))
}

# @title Run a command via ssh on guest machine
# @name vagrantSSHCommand
# @description Run vagrant command 'ssh'.
# Executes a command via ssh on the guest machine.
# @param cmd a command
# @return execution code
vagrantSSHCommand <- function(cmd) {
  # Escape embedded double quotes, then wrap the whole command in quotes so
  # it travels as a single 'vagrant ssh -c' argument
  # cmd_escape <- paste0('\"', gsub('\\\"', '\\\\\"', cmd), ' 1>&2\"')
  cmd_escape <- paste0('\"', gsub('\\\"', '\\\\\"', cmd), '\"')
  args <- c(" ssh ", " -c ")
  args <- c(args, cmd_escape)
  res <- vagrantExec(args)
  return(res)
}

# @title Get vagrant ssh configuration
# @name vagrantSSHConfig
# @return a list with hostname, port and (identity) keyfile
vagrantSSHConfig <- function() {
  out <- vagrantExec(c("--machine-readable", "ssh-config"), stdout = TRUE)
  res <- list()
  # Each field is pulled out of the second output line by regex match; the
  # offsets skip the field label ("HostName ", "Port ", "IdentityFile ") and
  # the end index trims back to the match length
  ind <- regexpr("HostName ([0-9.]+)", out[2], useBytes = FALSE)
  res$hostname <- substr(out[2], ind[1] + 9, ind[1] + 9 + attr(ind, "match.length") - 10)
  ind <- regexpr("Port ([0-9]+)", out[2], useBytes = FALSE)
  res$port <- substr(out[2], ind[1] + 5, ind[1] + 5 + attr(ind, "match.length") - 6)
  ind <- regexpr("IdentityFile [a-zA-Z0-9/:._]+", out[2], useBytes = FALSE)
  res$keyfile <- substr(out[2], ind[1] + 13, ind[1] + 13 + attr(ind, "match.length") - 14)
  return(res)
}

# @title Upload a file or directory from host to guest machine
# @name vagrantUpload
# @description Run vagrant command 'upload'.
# Uploads a file or a directory from the host to the guest machine.
# @param elt a file or directory name
# @param dest destination path (default "": vagrant's own default)
# @return code (0 if OK) or message
vagrantUpload <- function(elt, dest = "") {
  if (!file.exists(elt) && !dir.exists(elt)) {
    stop(paste0(elt, " does not exists. Can't upload it."))
  }
  args <- c("upload", elt, dest)
  res <- vagrantExec(args = args)
  return(res)
}

# @title Manage snapshot virtual machine
# @description Run vagrant command 'snapshot'.
# Its list, create or restore snapshots.
# @param cmd "list", "save" or "restore"
# @param name snapshot name to save or restore
# @return execution code or message
vagrantSnapshot <- function(cmd, name = "") {
  args <- c("snapshot", cmd, name)
  res <- vagrantExec(args)
  return(invisible(res))
}

# @title create a R script for provisioning via vagrantFile
# @name .provisionRscript
# @description create a RScript with cmd to declare it into the VagrantFile.
# This is not the best way to do so (Deprecated for now). Use vmrExec() instead.
# @param cmd list of R command
# @return characters to be cat into a Vagrantfile
.provisionRscript <- function(cmd) {
  # Fix: a newline is required between the last command and the SCRIPT
  # heredoc terminator -- previously they were fused ("...cmdSCRIPT"), so
  # the shell heredoc never terminated.
  paste0(
    "$rscript = <<-'SCRIPT'\n",
    "#!/usr/bin/env Rscript\n",
    "r_lib <- .libPaths()[1]\n",
    paste0(cmd, collapse = "\n"),
    "\nSCRIPT\n"
  )
}
/scratch/gouwar.j/cran-all/cranData/vmr/R/vagrant.R
# This file is part of vmr.
# Copyright (c) 2021 Jean-François Rey <[email protected]>
#
# vmr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# vmr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vmr. If not, see <https://www.gnu.org/licenses/>.

# Base URL of the Vagrant Cloud service
.VagrantCloudURL <- "https://app.vagrantup.com"

# @title List Boxes Available in VagrantCloud
# @name getAvailableBoxes
# @description List boxes and providers available.
# @param org organization name account (default: "VMR")
# @import curl
# @import jsonlite
# @return a data.frame with "Name", "Version", "Description" and "Provider" boxes
getAvailableBoxes <- function(org = .VagrantCloudOrganization) {
  req <- .requestVagrantAPI(api_uri = paste0("/user/", org))
  # json_res <- jsonlite::prettify(rawToChar(req$content))
  # print(json_res)
  printVerbose(2, "parsing response : \n ", paste0(" \n", rawToChar(req$content)))
  json_res <- jsonlite::parse_json(paste0(" \n", rawToChar(req$content)))

  # One row per (box, provider) pair of each box's current version
  list_boxes <- lapply(json_res$boxes, FUN = function(l) {
    list_providers <- lapply(l$current_version$providers, FUN = function(p) {
      return(c(l$name, l$current_version$version, l$short_description, p$name))
    })
    return(list_providers)
  })

  vms <- data.frame(matrix(unlist(list_boxes), ncol = 4, byrow = TRUE))
  colnames(vms) <- c("Name", "Version", "Description", "Provider")
  return(vms)
}

# @title Get a box information from VagrantCloud
# @name getBoxInfo
# @description Get information about a box
# @param name the box name
# @param org organization name account (default: "VMR")
# @import curl
# @import jsonlite
# @return a data.frame with "Name", "Version", "Description", "Provider" and "Date"
getBoxInfo <- function(name, org = .VagrantCloudOrganization) {
  req <- .requestVagrantAPI(api_uri = paste0("/box/", org, "/", name))
  # json_res <- jsonlite::prettify(rawToChar(req$content))
  # print(json_res)
  printVerbose(2, "parsing response : \n ", paste0(" \n", rawToChar(req$content)))
  json_res <- jsonlite::parse_json(paste0(" \n", rawToChar(req$content)))

  # last_box <- lapply(json_res$current_version$providers,
  #                   FUN = function(p) {
  #                     return(c(json_res$name,
  #                              json_res$current_version$version,
  #                              json_res$short_description,
  #                              p$name,
  #                              p$created_at))
  #                   })
  # vm <- data.frame(matrix(unlist(last_box), ncol = 5, byrow = TRUE))

  # One row per (version, provider) pair across all published versions
  list_boxes <- lapply(json_res$versions, FUN = function(l) {
    list_providers <- lapply(l$providers, FUN = function(p) {
      return(c(
        json_res$name,
        l$version,
        l$description_markdown,
        p$name,
        p$created_at
      ))
    })
    return(list_providers)
  })

  vms <- data.frame(matrix(unlist(list_boxes), ncol = 5, byrow = TRUE))
  # boxes <- rbind(vm,vms)
  boxes <- vms
  colnames(boxes) <- c("Name", "Version", "Description", "Provider", "Date")
  return(boxes)
}

# @title Query Vagrant Cloud API
# @name requestVagrantAPI
# @description Query Vagrant Cloud API using api_uri
# @param api_uri API uri to use
# @return API answer
.requestVagrantAPI <- function(api_uri = "/user/VMR") {
  url <- paste0(.VagrantCloudURL, "/api/v1", api_uri)
  printVerbose(2, "Fetching information from ", url)
  res <- curl::curl_fetch_memory(url)
  printVerbose(2, "Response status: ", res$status_code)
  printVerbose(2, "Response type: ", res$type)
  if (res$status_code != 200) {
    # Fix: the previous message was malformed ("query return 404code =>
    # seems somethings got wrong") -- missing spaces and broken grammar
    warning("VagrantCloud API query returned status code ", res$status_code,
            " => something seems to have gone wrong")
  }
  return(res)
}
/scratch/gouwar.j/cran-all/cranData/vmr/R/vagrantcloudAPI.R
# This file is part of vmr.
# Copyright (c) 2021 Jean-François Rey <[email protected]>
#
# vmr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# vmr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vmr. If not, see <https://www.gnu.org/licenses/>.

# @title Create Vagrantfile VirtualBox configuration
# @name virtualboxVagrantFile
# @description Creates a string usable in a Vagrantfile for the virtualbox
#   provider. It uses params to set options. Options that are NULL or empty
#   are simply omitted from the generated block.
# @param params virtualbox options (see virtualboxOptions())
# @return a character (string)
virtualBoxVagrantFile <- function(params) {
  if (is.null(params)) stop("VirtualBox options not set. See virtualboxOptions()")

  paste0(
    '\tconfig.vm.provider "virtualbox" do |vb|\n',
    if (!is.null(params$gui) && isTRUE(params$gui)) {
      paste0("\t\tvb.gui = true\n")
    } else {
      paste0("\t\tvb.gui = false\n")
    },
    if (!is.null(params$name) && params$name != "") paste0('\t\tvb.name = "', params$name, '"\n'),
    if (!is.null(params$nic_type) && params$nic_type != "") paste0('\t\tvb.default_nic_type = "', params$nic_type, '"\n'),
    if (!is.null(params$linked_clone) && params$linked_clone == TRUE) {
      paste0("\t\tvb.linked_clone = true\n")
    } else {
      "\t\tvb.linked_clone = false\n"
    },
    # Fixed: previously read params$check_guest_addition (missing final "s"),
    # which only worked through `$` partial matching.
    if (!is.null(params$check_guest_additions) && params$check_guest_additions == TRUE) {
      paste0("\t\tvb.check_guest_additions = true\n")
    } else {
      "\t\tvb.check_guest_additions = false\n"
    },
    if (!is.null(params$modifyvm) && length(params$modifyvm) > 0) {
      # One "VBoxManage modifyvm" customization line per named option.
      modif <- ""
      for (name in names(params$modifyvm)) {
        modif <- paste0(modif, '\t\tvb.customize ["modifyvm", :id, "--', name, '", "', params$modifyvm[[name]], '"]\n')
      }
      paste0(modif)
    },
    "\tend\n"
  )
}

# @title Print VirtualBox provider options
# @name virtualboxPrintOptions
# @description Print options from arguments (for print.vmr).
# @param opts virtualbox provider options as list
# @return opts (invisibly meaningful: returned unchanged)
virtualboxPrintOptions <- function(opts) {
  if (is.null(opts)) {
    cat("\t# No VirtualBox options\n")
  } else {
    cat("\t# VirtualBox options:\n")
    if (!is.null(opts$name) && !identical(opts$name, character(0))) cat("\t Name: ", opts$name, "\n")
    if (!is.null(opts$gui) && opts$gui == TRUE) {
      cat("\t headless: FALSE\n")
    } else {
      cat("\t headless: TRUE\n")
    }
    if (!is.null(opts$nic_type) && !identical(opts$nic_type, character(0))) cat("\t default NIC type: ", opts$nic_type, "\n")
    if (!is.null(opts$linked_clone) && opts$linked_clone == TRUE) {
      cat("\t linked type: TRUE (clone)\n")
    } else {
      cat("\t linked type: FALSE \n")
    }
    if (!is.null(opts$check_guest_additions) && opts$check_guest_additions == TRUE) {
      cat("\t check guest additions: TRUE\n")
    } else {
      cat("\t check guest additions: ", opts$check_guest_additions, "\n")
    }
    if (!is.null(opts$modifyvm) && length(opts$modifyvm) > 0) {
      cat("\t Modify VM options:\n")
      temp <- lapply(names(opts$modifyvm),
        FUN = function(n) {
          cat("\t\t", n, ": ", opts$modifyvm[[n]], "\n")
        }
      )
    }
  }
  return(opts)
}

#' @title List 'VirtualBox' options available
#' @name virtualboxOptions
#' @description List available options for 'VirtualBox' provider
#' @details Get the 'VirtualBox' default options.
#' It return a list as follow:
#' ```r
#' list(
#'   gui = TRUE,
#'   name = NULL,
#'   nic_type = NULL,
#'   linked_clone = FALSE,
#'   check_guest_additions = FALSE,
#'   modifyvm = list(cpus = "2", memory = "4096")
#' )
#' ```
#'
#' * __gui__: if TRUE show the GUI, otherwise headless mode is actived
#' * __name__: the 'VirtualBox' instance name
#' * __nic_type__: the NIC type for the network interface to use, by default use the default one.
#' see [VirtualBox Networking](https://www.virtualbox.org/manual/ch06.html)
#' * __linked_clone__: if TRUE, linked clones are based on a master VM, which is
#' generated by importing the base box only once the first time it is required.
#' For the linked clones only differencing disk images are created where
#' the parent disk image belongs to the master VM.
#' (Be careful, master VM can't be remove until linked_clone still exists)
#' * __check_guest_additions__: If TRUE check if guest have guest additions installed (default FALSE).
#' * __modifyvm__: list of 'VirtualBox' properties for the guest VM (such as number of cpus, memory size,...).
#' [see 'VirtualBox' modifyvm](https://www.virtualbox.org/manual/ch08.html#vboxmanage-modifyvm)
#' @param details if TRUE print options (default), otherwise only return default options
#' @return A default list of options
#' ```r
#' list(
#'   gui = TRUE,
#'   name = NULL,
#'   nic_type = NULL,
#'   linked_clone = FALSE,
#'   check_guest_additions = FALSE,
#'   modifyvm = list(cpus = "2", memory = "4096")
#' )
#' ```
#' @examples
#' \dontrun{
#' vb.opts <- virtualboxOptions(details = FALSE)
#' vb.opts$modifyvm$cpus <- "4"
#' vb.opts$modifyvm$memory <- "8192"
#' vb.opts
#' }
#' @export
#' @md
virtualboxOptions <- function(details = TRUE) {
  if (details) {
    cat("VirtualBox provider available options list():\n")
    cat("gui : TRUE to display GUI, FALSE for headless mode\n")
    cat("name : the name of the virtualbox\n")
    cat("nic_type: NIC type for network interfaces (default: use default NIC)\n")
    cat(
      "linked_clone: if TRUE use Master virtual machine and clone disk\n",
      "\t be careful, it speed box creation and reduce overhead but clones are connected to master\n"
    )
    cat("check_guest_additions: if TRUE check if Guest Additions is installed (default FALSE)\n")
    cat(
      "modifyvm: list of virtualbox options to set.\n",
      "https://www.virtualbox.org/manual/ch08.html#vboxmanage-modifyvm\n",
      "ex: list(cpus='4', memory='4096')\n"
    )
  }

  return(list(
    gui = TRUE,
    name = "",
    nic_type = "",
    linked_clone = FALSE,
    check_guest_additions = FALSE,
    modifyvm = list(cpus = "2", memory = "4096")
  ))
}

# @title Load virtualbox options from a VagrantFile
# @name virtualboxReadOptions
# @description Read and load virtualbox options from a VagrantFile.
#   Experimental: relies on simple pattern matching against the file text.
# @param text_vector a character vector (VagrantFile lines)
# @return a list 'vmr' compatible
virtualboxReadOptions <- function(text_vector) {
  res <- list()
  printVerbose(2, "Reading virtualbox options")

  # Extract the first double-quoted value on the matching line.
  extract_string <- function(text_vector, pattern) {
    text <- text_vector[grep(pattern, text_vector)]
    pos_value <- regexpr('\"[^\"]+\"', text)
    substr(text, pos_value + 1, pos_value + attr(pos_value, "match.length") - 2)
  }

  # Extract the raw (unquoted) value after "=" on the matching line.
  extract_bool <- function(text_vector, pattern) {
    text <- text_vector[grep(pattern, text_vector)]
    pos_value <- regexpr("=[ ]+.*", text)
    substr(text, pos_value + 2, pos_value + attr(pos_value, "match.length") - 1)
  }

  # Parse 'vb.customize ["modifyvm", :id, "--<name>", "<value>"]' lines into
  # a named list (name -> value).
  extract_modifyvm <- function(text_vector, pattern) {
    name_value <- c()
    text <- strsplit(text_vector[grep(pattern, text_vector)], split = ",")
    tt <- lapply(text, function(l) {
      pos_value <- regexpr("[A-Za-z]+", l[3])
      name <- substr(l[3], pos_value, pos_value + attr(pos_value, "match.length") - 1)
      value <- substr(l[4], 3, 3 + nchar(l[4]) - 5)
      name_value <<- c(name_value, name)
      value
    })
    names(tt) <- name_value
    return(tt)
  }

  res$gui <- as.logical(extract_bool(text_vector, "vb.gui"))
  res$name <- as.character(extract_string(text_vector, "vb.name"))
  if (identical(res$name, character(0))) res$name <- ""
  res$nic_type <- as.character(extract_string(text_vector, "vb.default_nic_type"))
  if (identical(res$nic_type, character(0))) res$nic_type <- ""
  res$linked_clone <- as.logical(extract_bool(text_vector, "vb.linked_clone"))
  res$check_guest_additions <- as.logical(extract_bool(text_vector, "vb.check_guest_additions"))
  res$modifyvm <- extract_modifyvm(text_vector, "modifyvm")

  return(res)
}

#' @title Configure the guest VM to be use as a Gitlab-Runner
#' @name virtualboxGitlabRunner
#' @description Configure the guest VM to be use as a GitLab Runner
#' and return the command to run in shell to register it.
#' @param vmr a __vmr__ object
#' @param gitlab_url a GitLab URL with protocol (http or https)
#' @param gt_token a GitLab registration token
#' @param snapshot_name name of a snapshot to use if any
#' @param vm_name the 'VitualBox' VM name if not specified in 'vmr' object provider_options.
#' @return Character command to run in shell to register it
#' @examples
#' \dontrun{
#' cmd <- virtualboxGitlabRunner(vmr, "https://gitlab.com", "mytoken")
#' system(cmd)
#' }
#' @export
#' @md
virtualboxGitlabRunner <- function(vmr, gitlab_url, gt_token, snapshot_name = "", vm_name = "") {
  # Strip the protocol prefix to get the bare host for ssh-keyscan.
  # Fixed: the previous pattern "?(f|ht)tp(s?)://" began with a dangling '?'
  # quantifier and could not match the protocol as intended.
  gitlab_url_tmp <- gsub("^(f|ht)tps?://", "", gitlab_url)
  gitlab_url_tmp <- gsub("/$", "", gitlab_url_tmp)
  # If nothing was stripped, the URL was given without a protocol.
  if (gitlab_url_tmp == gitlab_url) {
    stop(paste0("GitLab URL seems incorrect ", gitlab_url))
  }

  printVerbose(1, "Configuring guest machine...\n")
  vagrantSSHCommand("mkdir -p ~/.ssh")
  vagrantSSHCommand("touch ~/.ssh/known_hosts")
  vagrantSSHCommand(paste0("ssh-keyscan -t ecdsa -H ", gitlab_url_tmp, " >> ~/.ssh/known_hosts"))

  printVerbose(2, "Run this command in a shell to enable the VM as a GitLab Runner:")
  cmd <- paste0(
    "gitlab-runner register ",
    "--non-interactive ",
    "--name ", paste0(vmr$org, "-", vmr$box), " ",
    "--url ", gitlab_url, " ",
    "--registration-token ", gt_token, " ",
    "--executor 'virtualbox' ",
    "--tag-list vmr,R4 ",
    "--ssh-user ", vmr$ssh_user, " ",
    "--ssh-password ", vmr$ssh_pwd, " ",
    "--ssh-disable-strict-host-key-checking true ",
    if (vm_name != "") {
      paste0("--virtualbox-base-name '", vm_name, "' ")
    } else {
      if (!is.null(vmr$provider_options$name) && vmr$provider_options$name != "") {
        paste0("--virtualbox-base-name '", vmr$provider_options$name, "' ")
      } else {
        paste0("--virtualbox-base-name '<VirtualBox_Name>' ")
      }
    },
    if (snapshot_name != "") {
      paste0("--virtualbox-base-snapshot='", snapshot_name, "' ")
    },
    "--virtualbox-disable-snapshots"
  )
  printVerbose(2, cmd)

  return(cmd)
}
/scratch/gouwar.j/cran-all/cranData/vmr/R/virtualbox.R
# This file is part of vmr.
# Copyright (c) 2021 Jean-François Rey <[email protected]>
#
# vmr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# vmr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vmr. If not, see <https://www.gnu.org/licenses/>.

#' @title Set verbose level
#' @name vmrSetVerbose
#' @description Set verbose level for vmr package functions
#' @details Three verbose modes are available:
#' * "None" : print nothing
#' * "Normal" : print essential
#' * "Full" : print all
#' @param verbose_mode "None", "Normal" or "Full"
#' @return invisible verbose value
#' @export
#' @md
vmrSetVerbose <- function(verbose_mode = "Normal") {
  # vmr_env is the package-internal environment holding the verbose state.
  switch(verbose_mode,
    "None" = vmr_env$verbose_mode <- 0,
    "Normal" = vmr_env$verbose_mode <- 1,
    "Full" = vmr_env$verbose_mode <- 2,
    stop('Unknown verbose mode, should be "None", "Normal" or "Full"')
  )
  return(invisible(vmr_env$verbose_mode))
}

#' @title List available boxes from VagrantCloud
#' @name vmrList
#' @description List of available boxes from a VagrantCloud organization account.
#' @details Default usage lists boxes preconfigured with R
#' from [VMR organization account](https://app.vagrantup.com/VMR).
#' @param org Vagrant Cloud organization name (default : 'VMR')
#' @return a data.frame with Name, Provider, Version and Description of available boxes
#' @export
#' @md
vmrList <- function(org = .VagrantCloudOrganization) {
  getAvailableBoxes(org)
}

#' @title List all available version of a box
#' @name vmrListBox
#' @description List all versions and providers available of a box.
#' @details List information of a box from VagrantCloud.
#' Default usage list information of a box preconfigured with R
#' from [VMR organization account](https://app.vagrantup.com/VMR).
#' @param box_name the box name
#' @param org Vagrant Cloud organization name (default : 'VMR')
#' @examples
#' \dontrun{
#' # List Boxes
#' boxes <- vmrList()
#' # Box information
#' box_info <- vmrListBox(boxes$Name[1])
#' box_info
#' }
#' @return a data.frame with "Name", "Version", "Description", "Provider" and "Date" of the box
#' @export
#' @md
vmrListBox <- function(box_name, org = .VagrantCloudOrganization) {
  getBoxInfo(box_name, org)
}

#' @title Create a __vmr__ environment class
#' @name vmrCreate
#' @description Create a __vmr__ object.
#' @details Create a S3 __vmr__ object (a simple list).
#' The object contains all information needed to configure and manage a
#' __vmr__ environment (a vagrant environment).
#'
#' A __vmr__ environment need mostly a box _name_ and a _provider_.
#' The environment is attached to the current working directory.
#'
#' __vmr__ object main attributs:
#' * __path__: working directory
#' * __org__: Vagrant cloud user/organization name 'VMR'
#' * __box__: the box name
#' * __version__: the box version
#' * __provider__: the provider
#' * __provider_options__: the provider options (see [getProviderOptions()])
#' * __vagrantName__: Vagrant environment name
#' * __ID__: Vagrant environment ID
#' * __synced_folder__: a list with source and destination
#' * __ssh_user__: the ssh user
#' * __ssh_pwd__: the ssh user password
#' * __ssh_port__: the ssh port
#' * __ssh_private_key_path__: the private ssh key path
#' @param name a box name
#' @param provider the box provider (default: "virtualbox")
#' @param version the box version (default : "latest")
#' @param provider.options provider options (call [getProviderOptions()] to get values)
#' @examples
#' \dontrun{
#' # List boxes available
#' boxes <- vmrList()
#' # Create a vmr object
#' vmr <- vmrCreate(boxes$Name[1])
#'
#' # to customize the guest machine for virtualbox
#' virtualboxOpts <- getProviderOptions(provider = "virtualbox")
#' virtualboxOpts$modifyvm <- list(cpus = 4, memory = 4096)
#' virtualboxOpts$name <- "My VM Cool Name"
#' # To specify a provider and version
#' vmr <- vmrCreate(
#'   name = boxes$Name[1],
#'   provider = "virtualbox",
#'   version = boxes$Version[1],
#'   provider.options = virtualboxOpts
#' )
#' }
#' @return a __vmr__ object (see details)
#' @export
#' @md
vmrCreate <- function(name, provider = "virtualbox",
                      version = "latest",
                      provider.options = virtualboxOptions(FALSE)) {
  printVerbose(1, "Creating vmr environment...")
  vmr <- list()
  attr(vmr, "class") <- "vmr"
  vmr$path <- normalizePath(getwd())

  # "org/box" names are split; bare names default to the package organization.
  if (!identical(grep("/", name), integer(0))) {
    pos <- regexpr("/", name)
    vmr$org <- substr(name, 1, pos - 1)
    vmr$box <- substr(name, pos + 1, nchar(name))
  } else {
    vmr$org <- .VagrantCloudOrganization
    vmr$box <- name
  }

  vmr$version <- version
  vmr$provider <- provider
  vmr$provider_options <- provider.options
  vmr$vagrantName <- paste0("vmr-", sub("/", "-", name), "-", provider)
  vmr$ID <- vagrantGetID(vmr$vagrantName, vmr$path)
  vmr$synced_folder <- list()
  vmr$synced_folder$source <- ""
  vmr$synced_folder$destination <- ""
  # Vagrant defaults for box credentials.
  vmr$ssh_user <- "vagrant"
  vmr$ssh_pwd <- "vagrant"
  vmr$ssh_port <- ""
  vmr$ssh_private_key_path <- ""

  return(vmr)
}

#' @title List provider options
#' @name getProviderOptions
#' @description List a provider available options.
#' @details It return a list of options name and value for a specific provider.
#' To get the help page do ```?<provider_name>Options()```, for example [virtualboxOptions()].
#' @param provider a provider name
#' @param details if TRUE print options, otherwise return default options
#' @return a list of options
#' @examples
#' vbOpts <- getProviderOptions(provider = "virtualbox")
#' print(vbOpts)
#' @export
#' @md
getProviderOptions <- function(provider = "virtualbox", details = FALSE) {
  # Dispatch to "<provider>Options", e.g. virtualboxOptions().
  get(paste0(provider, "Options"))(details)
}

#' @title Print __vmr__ object information
#' @name print.vmr
#' @description print information from a __vmr__ object
#' @param x a __vmr__ object
#' @param ... optional print arguments
#' @return the __vmr__ object (via invisible(x))
#' @export
#' @md
print.vmr <- function(x, ...) {
  cat("### vmr environment information ###\n")
  cat("Organization:", x$org, "\n")
  cat("Name:", x$box, "\n")
  cat("Version:", x$version, "\n")
  if (x$ID != "") cat("Machine ID:", x$ID, "\n")
  cat("Machine instance:", x$vagrantName, "\n")
  cat("Machine environment directory:", x$path, "\n")
  if (x$synced_folder$source != "" && x$synced_folder$destination != "") {
    cat(paste0("Synced folder: '", x$synced_folder$source, "' to Guest '", x$synced_folder$destination, "'\n"))
  } else {
    cat("Synced folder: Disabled\n")
  }
  cat("ssh user:", x$ssh_user, "\n")
  cat("ssh password:", x$ssh_pwd, "\n")
  cat("ssh port:", x$ssh_port, "\n")
  if (x$ssh_private_key_path != "") cat("ssh private key path:", x$ssh_private_key_path, "\n")
  cat("Provider:", x$provider, "\n")
  if (x$provider == "virtualbox") {
    virtualboxPrintOptions(x$provider_options)
  } else {
    cat(x$provider, "unknown or not implemented provider\n")
  }
  return(invisible(x))
}

#' @title Summary __vmr__ object information
#' @name summary.vmr
#' @description print information from a __vmr__ object
#' @param object a __vmr__ object
#' @param ... optional print arguments
#' @return the __vmr__ object (via invisible(x))
#' @export
#' @md
summary.vmr <- function(object, ...) {
  print.vmr(object)
}

# @title check a vmr object "class"
# @description check minimal variables needed in a 'vmr' object
# @param vmr a vmr object
# @return TRUE if OK, otherwise stops with an explicit message
.checkMinimalVMR <- function(vmr) {
  if (!inherits(vmr, "vmr")) stop("arguments is not a vmr list object")

  if (is.null(vmr$org) || vmr$org == "") stop("vmr$org undefined")
  if (is.null(vmr$box) || vmr$box == "") stop("vmr$box undefined")
  if (is.null(vmr$provider) || vmr$provider == "") stop("vmr$provider undefined")

  TRUE
}

# .checkvmrEnv <- function(){
#  if (!file.exists("Vagrantfile")) stop("Wrong vmr environment\n",
#                                        "Vagrantfile template can't be find here\n",
#                                        getwd(),"\n")
#  global_status <- vagrantGlobalStatus()
# }

#' @title Load a __vmr__ environment containing a Vagrant file
#' @name vmrLoad
#' @description Load a __vmr__ environment containing a VagrantFile
#' and create a __vmr__ object (see [vmrCreate()] for object details).
#' @details It read a Vagrant file template with __vmr__ compatible parameters.
#' It's an experimental Vagrant file reading, some parameters may not be loaded.
#' @param dir the __vmr__ environment directory (default: "./")
#' @param vagrantfileName a Vagrantfile name (default: "Vagrantfile")
#' @examples
#' \dontrun{
#' # load the Vagrantfile in the current directory
#' vmr <- vmrLoad(getwd())
#' }
#' @return a __vmr__ object
#' @export
#' @md
vmrLoad <- function(dir = "./", vagrantfileName = "Vagrantfile") {
  printVerbose(2, "Experimental Vagrantfile reading... some options may not be loaded")

  path <- normalizePath(dir)
  printVerbose(2, paste0("move to ", path))
  oldpath <- getwd()
  setwd(path)
  on.exit(setwd(oldpath))

  vagrant_file <- normalizePath(file.path(path, vagrantfileName))
  if (!file.exists(vagrant_file)) stop("can't find ", vagrant_file)

  vmr <- list()
  attr(vmr, "class") <- "vmr"
  vmr$path <- path

  printVerbose(1, paste0("Reading ", vagrant_file))
  vagrant_data <- readLines(vagrant_file)

  # Extract the first double-quoted value on the line matching pattern.
  extract <- function(text_vector, pattern) {
    text <- text_vector[grep(pattern, text_vector)]
    pos_value <- regexpr('\"[^\"]+\"', text)
    substr(text, pos_value + 1, pos_value + attr(pos_value, "match.length") - 2)
  }

  # Extract an unquoted integer value after "=" on the matching line.
  extractInt <- function(text_vector, pattern) {
    text <- text_vector[grep(pattern, text_vector)]
    pos_value <- regexpr("= [0-9]+", text)
    as.integer(substr(text, pos_value + 2, pos_value + attr(pos_value, "match.length")))
  }

  # Trailing space in the pattern avoids matching "config.vm.box_version".
  box <- strsplit(extract(vagrant_data, "config.vm.box "), split = "/")[[1]]
  vmr$org <- box[1]
  vmr$box <- box[2]

  vmr$version <- extract(vagrant_data, "config.vm.box_version")
  if (identical(character(0), vmr$version) || nchar(vmr$version) == 0) vmr$version <- "latest"

  vmr$vagrantName <- extract(vagrant_data, "config.vm.define")

  vmr$provider <- extract(vagrant_data, "config.vm.provider")
  if (identical(vmr$provider, character(0))) {
    vmr$provider <- ""
    vmr$provider_options <- NULL
  } else {
    # Dispatch to "<provider>ReadOptions", e.g. virtualboxReadOptions().
    vmr$provider_options <- get(paste0(vmr$provider, "ReadOptions"))(vagrant_data)
  }

  vmr$ID <- vagrantGetID(vmr$vagrantName, vmr$path)

  # The generated Vagrantfile always contains a disabled default synced
  # folder line; a user-defined one makes the match count greater than 1.
  vmr$synced_folder <- list()
  vmr$synced_folder$source <- ""
  vmr$synced_folder$destination <- ""
  sync_f <- vagrant_data[grep("config.vm.synced_folder", vagrant_data)]
  if (length(sync_f) > 1) {
    temp <- strsplit(sync_f[1], split = " +")[[1]]
    vmr$synced_folder$source <- substr(temp[2], 2, nchar(temp[2]) - 2)
    vmr$synced_folder$destination <- substr(temp[3], 2, nchar(temp[3]) - 1)
  }

  vmr$ssh_user <- extract(vagrant_data, "config.ssh.username")
  if (identical(character(0), vmr$ssh_user)) vmr$ssh_user <- "vagrant"
  vmr$ssh_pwd <- extract(vagrant_data, "config.ssh.password")
  if (identical(character(0), vmr$ssh_pwd)) vmr$ssh_pwd <- "vagrant"
  vmr$ssh_port <- extractInt(vagrant_data, "config.ssh.port")
  if (identical(integer(0), vmr$ssh_port)) vmr$ssh_port <- ""
  vmr$ssh_private_key_path <- extract(vagrant_data, "config.ssh.private_key_path")
  if (identical(character(0), vmr$ssh_private_key_path)) vmr$ssh_private_key_path <- ""

  return(vmr)
}

#' @title Initialize the __vmr__ environment
#' @name vmrInitEnv
#' @description Create __vmr__ environment in the current directory.
#' Set configuration into a template file name "Vagrantfile"
#' and download the box if needed.
#' @details The __vmr__ environment consist of a directory (the working directory)
#' and a template file name _Vagrantfile_.
#' If the box is not present in localhost it will be download.
#' @param vmr a __vmr__ object
#' @param force.vagrantfile if TRUE force to overwrite environment configuration (default FALSE)
#' @param force.download if TRUE force to download the box, otherwise do not (default FALSE).
#' @examples
#' \dontrun{
#' boxes <- vmrList()
#' vmr <- vmrCreate(boxes$Name[1])
#' vmr <- vmrInitEnv(vmr)
#' }
#' @return the __vmr__ object
#' @export
vmrInitEnv <- function(vmr, force.vagrantfile = FALSE, force.download = FALSE) {
  printVerbose(1, "Initialize vmr environment")
  .checkMinimalVMR(vmr)

  oldpath <- getwd()
  setwd(vmr$path)
  on.exit(setwd(oldpath))

  if (vmrIsRunning()) stop("The virtual guest is running. Run vmrStop() and recall this function")

  writeVagrantFile(vmr, force.vagrantfile)

  ## TODO may update box vagrantBoxUpdate
  if (identical(grep("/", vmr$box), integer(0))) {
    box <- paste0(vmr$org, "/", vmr$box)
  } else {
    box <- vmr$box
  }
  vagrantBoxAdd(box, vmr$version, vmr$provider, force.download)

  printVerbose(1, "Now run vmr<Function>() into", vmr$path, "directory")

  return(invisible(vmr))
}

# @title Create a Vagrant file from a __vmr__ object
# @description Create a Vagrant file template from a __vmr__ object.
#   The file is written in the current working directory via sink();
#   on error/warning the sink is closed before re-signaling.
# @param vmr a __vmr__ object
# @param force if TRUE force to overwrite the template file
# @return path to the Vagrantfile
# @md
writeVagrantFile <- function(vmr, force = FALSE) {
  if (file.exists("Vagrantfile") && isFALSE(force)) {
    stop("Vagrantfile already exists in ", getwd(),
         "\n use argument force.vagrantfile=TRUE to override it.")
  }

  printVerbose(2, "Creating Vagrantfile template in ", getwd())
  tryCatch(
    {
      sink("Vagrantfile")
      cat(paste0(
        # The $rscript heredoc (provision script) must precede the
        # Vagrant.configure block.
        .provisionRscript(vmr),
        'Vagrant.configure("2") do |config|\n',
        if (!identical(grep("/", vmr$box), integer(0))) {
          paste0('\tconfig.vm.box = "', vmr$box, '"\n')
        } else {
          paste0('\tconfig.vm.box = "', vmr$org, "/", vmr$box, '"\n')
        },
        if (vmr$version != "" && vmr$version != "latest") paste0('\tconfig.vm.box_version = "', vmr$version, '"\n'),
        if (vmr$synced_folder$source != "" && vmr$synced_folder$destination != "") {
          paste0('\tconfig.vm.synced_folder "', vmr$synced_folder$source, '", "', vmr$synced_folder$destination, '"\n')
        },
        '\tconfig.vm.synced_folder ".", "/vagrant", disabled: true\n',
        paste0('\tconfig.vm.define "', vmr$vagrantName, '" do |d|\n'),
        "\tend\n",
        # SSH configuration
        '\tconfig.vm.communicator = "ssh"\n',
        if (vmr$ssh_user != "vagrant" || vmr$ssh_pwd != "vagrant") {
          paste0(
            '\tconfig.ssh.username ="', vmr$ssh_user, '"\n',
            '\tconfig.ssh.password ="', vmr$ssh_pwd, '"\n',
            "\tconfig.ssh.keep_alive = true\n",
            "\tconfig.ssh.insert_key = true\n"
          )
        } else {
          paste0("\tconfig.ssh.insert_key = false\n")
        },
        if (vmr$ssh_port != "") paste0("\tconfig.ssh.port = ", vmr$ssh_port, "\n"),
        if (vmr$ssh_private_key_path != "") paste0('\tconfig.ssh.private_key_path = "', vmr$ssh_private_key_path, '"\n'),
        # Provisioning
        '\tconfig.vm.provision "shell", inline: $rscript\n',
        # Provider configuration (default virtualbox)
        if (!is.null(vmr$provider) && nchar(vmr$provider) > 0) {
          if (vmr$provider == "virtualbox" && !is.null(vmr$provider_options)) virtualBoxVagrantFile(vmr$provider_options)
        },
        "end\n"
      ))
      sink()
    },
    error = function(cond) {
      sink()
      warning("Error writing Vagrantfile template\n", cond, immediate. = TRUE)
    },
    warning = function(cond) {
      sink()
      warning("May have some error writing Vagrantfile template\n", cond, immediate. = TRUE)
    },
    finally = {
    }
  )

  return(normalizePath("Vagrantfile"))
}

#' @title Get guest machine information
#' @name vmrInfo
#' @description Get guest machine information.
#' Print OS, R, R-devel and R packages information.
#' Still in development.
#' @examples
#' \dontrun{
#' boxes <- vmrList()
#' vmr <- vmrCreate(boxes$Name[1])
#' vmr <- vmrInitEnv(vmr)
#' vmrStart()
#' vmrInfo()
#' }
#' @return \code{NULL}
#' @export
#' @md
vmrInfo <- function() {
  if (!vmrIsRunning()) stop("Virtual Machine is not running.\n use vmrStart() or vmrResume()\n")

  # TODO make a clean print
  cat("#### R Version ####\n")
  vagrantSSHCommand("R --version")
  cat("#### Rdevel Version ####\n")
  vagrantSSHCommand("if Rdevel --version &> /dev/null ; then Rdevel --version ; fi")
  cat("#### OS informations ####\n")
  vagrantSSHCommand("Rscript -e \"Sys.info()\"")
  # cat("#### R packages installed ####\n")
  # vagrantSSHCommand("Rscript -e \"installed.packages()\"")

  return(NULL)
}

#' @title Update a __vmr__ environment.
#' @name vmrUpdateEnvVersion
#' @description Force to use the latest box version of the current __vmr__ environment.
#' @details Put __vmr__ object version to latest and update the Vagrant File template.
#' Download the new box version if needed.
#' @param vmr a __vmr__ object
#' @examples
#' \dontrun{
#' boxes <- vmrList()
#' vmr <- vmrCreate(boxes$Name[1], version = "oldone")
#' vmr <- vmrInitEnv(vmr)
#'
#' # update to latest
#' vmr <- vmrUpdateEnvVersion(vmr)
#' vmrStart()
#' }
#' @return a __vmr__ object
#' @export
#' @md
vmrUpdateEnvVersion <- function(vmr) {
  # A running guest cannot be re-templated: stop it first.
  if (vmrIsRunning()) vmrStop()
  printVerbose(1, "Set environment to use the latest box version")
  vmr$version <- "latest"
  vmr <- vmrInitEnv(vmr, force.vagrantfile = TRUE)
  vmrLocalBoxUpdate()
  return(vmr)
}

#' @title Remove all resources created in a __vmr__ environment
#' @name vmrDestroy
#' @description Remove all resources created by [vmrStart()]
#' @details Will by default remove all resources created from the current __vmr__ environment.
#' By specifying the _id_ any environment with this _id_ will be remove.
#' @param id a __vmr__ environment id (default : "" id from the current environment)
#' @param force if TRUE force to remove
#' @return the vagrant environment id
#' @examples
#' \dontrun{
#' vmrStop()
#' vmrDestroy()
#' }
#' @export
#' @md
vmrDestroy <- function(id = "", force = FALSE) {
  printVerbose(1, "Checking configuration...")
  vms <- vagrantGlobalStatus()

  # Resolve the target VM either by explicit id or by current directory.
  if (id != "") {
    vm <- vms[which(vms$id == id), ]
  } else {
    vm <- vms[which(vms$directory == normalizePath("./")), ]
  }

  x <- "y"
  if (!isTRUE(force)) {
    x <- readline(paste0("Are you sure you want to destroy the ", vm$name, " VM? [y/N] "))
  }
  if (x == "y" || x == "Y") {
    printVerbose(1, paste0("The VM ", vm$name, " will be destroyed"))
    args <- c("destroy", "--force", vm$id)
    vagrantExec(args)
  } else {
    printVerbose(1, paste0("The VM ", vm$name, " will not be destroyed"))
  }

  # Only offer to clean the local template when acting on the current
  # environment (no explicit id given).
  if (id == "") {
    x <- "y"
    if (!isTRUE(force)) {
      x <- readline(paste0(
        "Do you want to remove Vagrantfile template ",
        "and .vagrant directory ",
        "(clean vmr environment) ? [y/N] "
      ))
    }
    if (x == "y" || x == "Y") {
      file.remove(file.path(getwd(), "Vagrantfile"))
      unlink(file.path(getwd(), ".vagrant"), recursive = TRUE)
    }
  }

  return(invisible(vm$id))
}

#' @title Mount a host directory to guest
#' @name vmrMountDir
#' @description Mount a host directory to the guest machine.
#' @details If the option of mounting a directory is available
#' in the guest provider, it will mount _src_ to _destination_ directory.
#' Calling with no arguments will disable this option.
#' @param vmr a __vmr__ object
#' @param src a host directory
#' @param dest a destination guest directory
#' @examples
#' \dontrun{
#' boxes <- vmrList()
#' vmr <- vmrCreate(boxes$Name[1])
#' vmr <- vmrMountDir(vmr, src = getwd(), dest = "/vmr")
#' vmr <- vmrInitEnv(vmr)
#' vmrStart()
#' }
#' @return a __vmr__ object
#' @export
#' @md
vmrMountDir <- function(vmr, src = "", dest = "") {
  printVerbose(1, "Mount ", src, " to ", dest)
  printVerbose(1, "This option may not be available to all boxes")
  printVerbose(1, "Call vmrInitEnv() and restart the environment after that to take effect")
  vmr$synced_folder <- list()
  vmr$synced_folder$source <- src
  vmr$synced_folder$destination <- dest
  return(vmr)
}

#' @title List downloaded boxes
#' @name vmrLocalBoxList
#' @description List all boxes downloaded in localhost
#' @examples
#' \dontrun{
#' localBoxes <- vmrLocalBoxList()
#' print(localBoxes)
#' }
#' @return a data.frame with boxes Name, Providers and Version
#' @export
#' @md
vmrLocalBoxList <- function() {
  box_list <- vagrantBoxList()
  printVerbose(2, "These are all boxes available on your system:")
  return(box_list)
}

#' @title Update local box version
#' @name vmrLocalBoxUpdate
#' @description Download the latest version of the box use in the current __vmr__ environment.
#' @return execution code or message
#' @export
#' @md
vmrLocalBoxUpdate <- function() {
  printVerbose(2, "Download latest version of the current environment box")
  invisible(vagrantBoxUpdate())
}

#' @title Remove a box from localhost
#' @name vmrLocalBoxRemove
#' @description Remove a specific box from localhost.
#' @param name the box name
#' @param provider the box provider (default: first provider found)
#' @param version the box version (default: version available)
#' @param force if TRUE force to remove
#' @examples
#' \dontrun{
#' lboxes <- vmrLocalBoxList()
#' vmrLocalBoxRemove(lboxes$Name[[1]])
#' # if multiple providers and versions
#' vmrLocalBoxRemove(lboxes$Name[[1]], lboxes$Provider[[1]], lboxes$Version[[1]])
#' }
#' @return execution code or message
#' @export
#' @md
vmrLocalBoxRemove <- function(name, provider = "", version = "", force = FALSE) {
  printVerbose(2, paste0("Remove the box: ", name))
  if (provider != "") printVerbose(1, "Provider: ", provider)
  if (version != "") printVerbose(1, "Version: ", version)
  invisible(vagrantBoxRemove(name, provider, version, force))
}

#' @title Remove old installed boxes
#' @name vmrLocalBoxPrune
#' @description Removes old versions of installed boxes.
#' @return a data.frame of still installed boxes (Name, Providers and Version)
#' @examples
#' \dontrun{
#' vmrLocalBoxPrune()
#' }
#' @export
#' @md
vmrLocalBoxPrune <- function() {
  printVerbose(2, "Removes old boxes version")
  vagrantBoxPrune()
}

# @export
# vmrPackage <- function() {
#  print("soon")
# }

#' @title Take a snapshot of the guest machine
#' @name vmrTakeSnapshot
#' @description Take a snapshot of the guest machine.
#' @param snap_name the name given to the snapshot
#' @return the snapshot name (invisible)
#' @examples
#' \dontrun{
#' vmrTakeSnapshot("my snapshot")
#' }
#' @export
#' @md
vmrTakeSnapshot <- function(snap_name) {
  printVerbose(1, "Taking a snapshot ", snap_name)
  vagrantSnapshot("save", snap_name)
  return(invisible(snap_name))
}

#' @title Restore a snapshot of the guest machine
#' @name vmrRestoreSnapshot
#' @description Restore a snapshot of the guest machine.
#' @param snap_name the snapshot name
#' @return the snapshot name
#' @examples
#' \dontrun{
#' vmrRestoreSnapshot("my snapshot")
#' }
#' @export
#' @md
vmrRestoreSnapshot <- function(snap_name) {
  printVerbose(1, "Restore snapshot: ", snap_name)
  vagrantSnapshot("restore", snap_name)
  return(invisible(snap_name))
}

#' @title List snapshot of the guest machine
#' @name vmrListSnapshot
#' @description Print all snapshot name of the guest machine
#' @return \code{NULL}
#' @export
#' @md
vmrListSnapshot <- function() {
  printVerbose(1, "Listing snapshot")
  vagrantSnapshot("list")
  ## TODO return the list name
  return(NULL)
}

#' @title Remove a snapshot of the guest machine
#' @name vmrRemoveSnapshot
#' @description Remove a snapshot of the guest machine
#' @param snap_name the snapshot name
#' @return \code{NULL}
#' @export
#' @md
vmrRemoveSnapshot <- function(snap_name) {
  printVerbose(1, "Delete a snapshot: ", snap_name)
  vagrantSnapshot("delete", snap_name)
  ## TODO return the list name
  return(NULL)
}

#' @title Save state and stop guest machine
#' @name vmrSuspend
#' @description Save the guest machine and stop it.
#' @details In the current __vmr__ environment, save the state of the guest machine
#' and stop it.
#' @return \code{NULL}
#' @export
#' @md
vmrSuspend <- function() {
  printVerbose(1, "Suspend environment")
  args <- "suspend"
  invisible(vagrantExec(args))
}

#' @title Resume a stopped guest machine
#' @name vmrResume
#' @description Resume a stopped guest machine.
#' @details In the current __vmr__ environment, start a stopped ([[vmrSuspend()]]) guest machine. #' @return \code{NULL} #' @export #' @md vmrResume <- function() { printVerbose(1, "Resume Environment") args <- "resume" invisible(vagrantExec(args)) return(NULL) } #' @title Get the state of the guest machine #' @name vmrStatus #' @description Print guest machine state in the current __vmr__ environment. #' @return a data.frame with Name, Provider and state #' @export #' @md vmrStatus <- function() { printVerbose(1, "Getting status") st <- vagrantStatus() printVerbose(1, paste0( "The machine ", st$vagrantName, " provided by:\n", st$provider, " is ", st$state )) return(st) } #' @title Download a Box #' @name vmrBoxDownload #' @description Download a box from a __vmr__ object. #' @param vmr a __vmr__ object #' @return a __vmr__ object #' @export #' @md vmrBoxDownload <- function(vmr) { .checkMinimalVMR(vmr) printVerbose( 1, paste0( "Will download the box ", paste0(vmr$org, "/", vmr$box), " ", vmr$version, " for", vmr$provider, "provider." ) ) res <- vagrantBoxAdd(name = paste0(vmr$org, "/", vmr$box), version = vmr$version, provider = vmr$provider) return(invisible(vmr)) } #' @title Start a __vmr__ environment #' @name vmrStart #' @description Start a guest virtual machine using the current __vmr__ environment #' (directory and Vagrantfile template) #' @examples #' \dontrun{ #' lboxes <- vmrList() #' vmr <- vmrCreate(lboxes$Name[1]) #' vmr <- vmrInitEnv(vmr) #' vmrStart() #' vmrStop() #' } #' @return the vmr environment unique id #' @export #' @md vmrStart <- function() { if (vmrIsRunning()) stop("Virtual machine already running") printVerbose(1, "Starting virtual machine") vagrantUp() id <- vagrantGetID(vagrantStatus()$vagrantName, normalizePath(getwd())) return(id) } #' @title Stop a __vmr__ environement #' @name vmrStop #' @description Stop a guest virtual machine in the current __vmr__ environment. 
#' @param force if TRUE force to stop (powerOff), otherwise FALSE clean shutdown #' @return \code{NULL} #' @examples #' \dontrun{ #' lboxes <- vmrList() #' vmr <- vmrCreate(lboxes$Name[1]) #' vmr <- vmrInitEnv(vmr) #' vmrStart() #' vmrStop() #' } #' @export #' @md vmrStop <- function(force = FALSE) { if (vmrIsRunning()) { printVerbose(1, "Stoping virtual machine") vagrantHalt(force) } else { stop("virtual machine is not running") } return(NULL) } #' @title Is __vmr__ environment running #' @name vmrIsRunning #' @description Check if a guest machine in a __vmr__ environment is running #' @examples #' \dontrun{ #' lboxes <- vmrList() #' vmr <- vmrCreate(lboxes$Name[1]) #' vmr <- vmrInitEnv(vmr) #' vmrStart() #' vmrIsRunning() #' vmrStop() #' vmrIsRunning() #' } #' @return TRUE if running, otherwise FALSE #' @export #' @md vmrIsRunning <- function() { if (file.exists("Vagrantfile")) { st <- vagrantStatus() if (length(st) != 0) { return(st$state == "running") } } return(FALSE) } #' @title Update R packages installed #' @name vmrUpdatePackages #' @description Updates R packages installed in the guest machine. #' @details Will perform a [update.packages()] in the guest machine #' of the current __vmr__ environment. #' @return NULL #' @examples #' \dontrun{ #' lboxes <- vmrList() #' vmr <- vmrCreate(lboxes$Name[1]) #' vmr <- vmrInitEnv(vmr) #' vmrStart() #' vmrUpdatePackages() #' } #' @export #' @md vmrUpdatePackages <- function() { if (!vmrIsRunning()) stop("Virtual Machine is not running.\n use vmrStart() or vmrResume()\n") Rcmd <- "Rscript -e \"update.packages(lib.loc=.libPaths()[1], repo='https://cloud.r-project.org', ask=FALSE)\"" vagrantSSHCommand(Rcmd) return(NULL) } #' @title Install R packages into guest machine #' @name vmrInstallPackages #' @description Install a list of R packages into the guest machine #' of the current __vmr__ environment. 
#' @param pkgs list of R packages #' @examples #' \dontrun{ #' vmrInstallPackages(c("vmr")) #' } #' @return installed packages vector #' @export #' @md vmrInstallPackages <- function(pkgs = c()) { if (!vmrIsRunning()) stop("Virtual Machine is not running.\n use vmrStart() or vmrResume()\n") pkgs_escape <- unlist(lapply(pkgs, FUN = function(p) { paste0("'", p, "'") })) Rcmd <- paste0("Rscript -e \"install.packages(c(", paste(pkgs_escape, collapse = ","), "), repo='https://cloud.r-project.org', ask=FALSE)\"") cat("Installing R packages :\n") cat(paste0("c(", paste(pkgs_escape, collapse = ","), ")\n")) vagrantSSHCommand(Rcmd) return(invisible(pkgs)) } #' @title Send files and/or directories to guest machine #' @name vmrSend #' @description Send files and/or directories to the guest machine #' in the current __vmr__ environment. #' They are upload into ~/vmr/ directory. #' @param elt list of files and directories #' @return 0 if OK, message otherwise #' @examples #' \dontrun{ #' vmrSend(c("myfile")) #' } #' @export #' @md vmrSend <- function(elt = c()) { if (!vmrIsRunning()) stop("Virtual Machine is not running.\n use vmrStart() or vmrResume()\n") res <- lapply(elt, FUN = vagrantUpload) lapply(seq_along(res), FUN = function(i) { if (res[[i]] == 0) { message(elt[i], " uploaded") } else { message(res[[i]]) } }) return(invisible(res)) } # TODO implement vmrGet to download form guest files/directories #' @title Execute R methods into guest machine #' @name vmrExec #' @description Run R method into guest machine. #' @details call Rscript -e "cmd" into the guest machine from #' the current __vmr__ environment. #' Command are independents and do not keep memory of past commands. 
#' @param cmd list of R command #' @examples #' \dontrun{ #' cmd <- c("Sys.info()", 'print("Hello World!")') #' vmrExec(cmd) #' } #' @return \code{NULL} #' @export #' @md vmrExec <- function(cmd = c()) { if (!vmrIsRunning()) stop("Virtual Machine is not running.\n use vmrStart() or vmrResume()\n") Rcmds <- lapply(cmd, FUN = function(c) { paste0("Rscript -e \"", c, "\"") }) res <- lapply(Rcmds, vagrantSSHCommand) # return(invisible(res)) return(NULL) } #' @title Configure ssh #' @name vmrConfigSSH #' @description Configure ssh credential. #' @details by default __vmr__ use vagrant as user/password and insecure key #' for ssh connection. This behavior can be change here, by setting an another #' user and/or ssh keys. Calling with no arguments will disable this option. #' Be careful, ssh using only password may result of _vmr_ functions bugs. #' @param vmr a __vmr__ object #' @param ssh_user the ssh user (default 'vagrant') #' @param ssh_pwd the ssh pwd if any (default 'vagrant') #' @param port the ssh port (default empty) #' @param ssh_private_key_path path to the private ssh key to use (default empty, use insecure vagrant key) #' @examples #' \dontrun{ #' vmr <- vmrConfigSSH(ssh_user = "John", ssh_pwd = "d0e", port = "22") #' vmr <- vmrConfigSSH(ssh_user = "John", private_key_path = "/path/to/private/key/") #' } #' @return an updated __vmr__ object #' @export #' @md vmrConfigSSH <- function(vmr, ssh_user = "vagrant", ssh_pwd = "vagrant", port = "", ssh_private_key_path = "") { vmr$ssh_user <- ssh_user vmr$ssh_pwd <- ssh_pwd vmr$ssh_port <- port vmr$ssh_private_key_path <- ssh_private_key_path return(vmr) } #' @title Open a ssh connection to guest machine #' @name vmrConnect #' @description Open a ssh connection to guest machine #' @details To open a ssh connection 'ssh' package have to be installed. 
#' @param vmr a __vmr__ object #' @return a __vmr__ object # @import ssh #' @export #' @md vmrConnect <- function(vmr) { if (!requireNamespace("ssh", quietly = TRUE)) { stop("Package \"ssh\" needed for this function to work. Please install it.", call. = FALSE ) } if (!vmrIsRunning()) stop("Virtual Machine is not running.\n use vmrStart() or vmrResume()\n") ssh_conf <- vagrantSSHConfig() printVerbose( 2, "Try to connect...\n", "User: ", vmr$ssh_user, "\n", "Passwd: ", vmr$ssh_pwd, "\n", "Host: ", ssh_conf$hostname, "\n", "Port: ", ssh_conf$port, "\n", "Keyfile: ", ssh_conf$keyfile, "\n" ) session <- ssh::ssh_connect( host = paste0(vmr$ssh_user, "@", ssh_conf$hostname, ":", ssh_conf$port), keyfile = ssh_conf$keyfile, passwd = vmr$ssh_pwd ) ssh::ssh_exec_wait(session, command = c("whoami", "R --version")) vmr$ssh_session <- session return(vmr) } #' @title Disconnect ssh connection to guest machine #' @name vmrDisconnect #' @description Close a ssh connection to the guest machine #' @details 'ssh' package need to be installed. #' @param vmr a __vmr__ object #' @return a __vmr__ object #' @export #' @md vmrDisconnect <- function(vmr) { if (!requireNamespace("ssh", quietly = TRUE)) { stop("Package \"ssh\" needed for this function to work. Please install it.", call. = FALSE ) } printVerbose(2, "Disconnect ssh...") ssh::ssh_disconnect(vmr$ssh_session) vmr$ssh_session <- NULL return(vmr) } #' @title Provision a __vmr__ environment #' @name vmrProvision #' @description Provision a __vmr__ environment. #' @details Upload 'elts' files and/or directories to the guest machine 'dest' #' from the current __vmr__ environment. #' And finaly run shell commands 'cmd' in the guest machine. 
#' @param cmd list of shell commands
#' @param elts list of files and/or directories
#' @param dest destination of elts (default HOME/vmr)
#' @return \code{NULL}
#' @export
vmrProvision <- function(cmd = c(), elts = c(), dest = "") {
  if (!vmrIsRunning()) stop("Virtual Machine is not running.\n use vmrStart() or vmrResume()\n")

  # Upload each file/directory to 'dest' on the guest.
  lapply(elts, FUN = function(fd) {
    vagrantUpload(fd, dest)
  })

  # Run each shell command on the guest.
  # Bug fix: pass the current element ('command'), not the whole 'cmd'
  # vector — the original passed 'cmd', which re-executed every command
  # once per element. Also avoid naming the parameter 'c' (shadows base::c).
  lapply(cmd, FUN = function(command) {
    vagrantSSHCommand(command)
  })

  return(invisible(""))
}

#' @title Perform a package check on guest
#' @name vmrPackageCheck
#' @description Perform a package check into the guest
#' @details upload the package and run devtools::check()
#' into the guest machine. (check available in $HOME/vmr/package/pkg).
#' Checking a directory with multiple files may slow down the upload; prefer a tar.gz file
#' @param pkg a package directory or a tar.gz file
#' @return \code{NULL}
#' @examples
#' \dontrun{
#' vmrPackageCheck("vmr_package.tar.gz")
#' }
#' @export
#' @md
vmrPackageCheck <- function(pkg = "./") {
  printVerbose(1, "Will perform package check")

  # A directory is checked with devtools::check(); a built tar.gz with
  # devtools::check_built(). 'to_dir' is the upload directory on the guest.
  if (dir.exists(normalizePath(pkg))) {
    to_dir <- basename(normalizePath(pkg))
    to_pkg <- ""
    Rcmds <- paste0(
      "Rscript -e \"library(devtools);check(normalizePath('vmr/package/",
      to_dir, "/", to_pkg,
      "'), check_dir=normalizePath('vmr/package/", to_dir, "'))\""
    )
  } else {
    to_dir <- basename(dirname(normalizePath(pkg)))
    to_pkg <- basename(pkg)
    Rcmds <- paste0(
      "Rscript -e \"library(devtools);check_built(normalizePath('vmr/package/",
      to_dir, "/", to_pkg,
      "'), check_dir=normalizePath('vmr/package/", to_dir, "'))\""
    )
  }

  # Wipe and recreate the target directory on the guest before uploading.
  printVerbose(2, paste0("Cleaning Guest \"vmr/package/", to_dir, "\""))
  vagrantSSHCommand(paste0("rm -rf vmr/package/", to_dir, "/* 2>&1 > /dev/null"))
  vagrantSSHCommand(paste0("mkdir -p vmr/package/", to_dir, " 2>&1 > /dev/null"))

  vmrProvision(cmd = Rcmds, elts = pkg, dest = paste0("vmr/package/", to_dir, "/"))

  return(NULL)
}

#' @title Build a package in the guest machine
#' @name vmrPackageBuild
#' @description Build a
package bundle or binary into the guest machine. #' @details upload the package and run devtools::build() #' (build available in $HOME/vmr/package/pkg) in the current __vmr__ environment. #' @param pkg a package directory or a tar.gz file #' @param binary if TRUE build binary package otherwise FALSE #' @return \code{NULL} #' @export #' @md vmrPackageBuild <- function(pkg = "./", binary = FALSE) { printVerbose(1, "Will perform package build") if (dir.exists(normalizePath(pkg))) { to_dir <- basename(normalizePath(pkg)) to_pkg <- "" } else { to_dir <- basename(dirname(normalizePath(pkg))) to_pkg <- basename(pkg) } printVerbose(2, paste0("Cleaning Guest \"vmr/package/", to_dir, "\"")) vagrantSSHCommand(paste0("rm -rf vmr/package/", to_dir, "/* 2>&1 > /dev/null")) vagrantSSHCommand(paste0("mkdir -p vmr/package/", to_dir, " 2>&1 > /dev/null")) Rcmds <- paste0( "Rscript -e \"library(devtools);build(normalizePath('vmr/package/", to_dir, "/", to_pkg, "'), binary=", binary, ")\"" ) vmrProvision(cmd = Rcmds, elts = pkg, dest = paste0("vmr/package/", to_dir, "/")) return(NULL) # TODO return path to package binary in guest } #' @title Test a package into a guest machine #' @name vmrPackageTest #' @description Test a package into a guest machine #' @details Perform a package check into the guest machine #' of the current __vmr__ environment using devtools::test(). 
#' (tests are available in $HOME/vmr/package/pkg)
#' @param pkg a package directory or tar.gz
#' @return \code{NULL}
#' @export
vmrPackageTest <- function(pkg = "./") {
  printVerbose(1, "Will perform package test")

  # Resolve the guest upload directory and the package file name:
  # a plain directory is uploaded as-is, a tar.gz keeps its file name.
  local_path <- normalizePath(pkg)
  if (dir.exists(local_path)) {
    to_dir <- basename(local_path)
    to_pkg <- ""
  } else {
    to_dir <- basename(dirname(local_path))
    to_pkg <- basename(pkg)
  }

  # Wipe and recreate the target directory on the guest before uploading.
  printVerbose(2, paste0("Cleaning Guest \"vmr/package/", to_dir, "\""))
  vagrantSSHCommand(paste0("rm -rf vmr/package/", to_dir, "/* 2>&1 > /dev/null"))
  vagrantSSHCommand(paste0("mkdir -p vmr/package/", to_dir, " 2>&1 > /dev/null"))

  # Upload the package and run devtools::test() on it inside the guest.
  test_cmd <- paste0(
    "Rscript -e \"library(devtools);test(normalizePath('vmr/package/",
    to_dir, "/", to_pkg,
    "'))\""
  )
  vmrProvision(cmd = test_cmd, elts = pkg, dest = paste0("vmr/package/", to_dir, "/"))

  return(NULL)
}
/scratch/gouwar.j/cran-all/cranData/vmr/R/vmr-methods.R
# This file is part of vmr.
# Copyright (c) 2021 Jean-François Rey <[email protected]>
#
# vmr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# vmr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vmr. If not, see <https://www.gnu.org/licenses/>.

#' @encoding UTF-8
#' @title Virtual Machines for R
#' @description Manage, provision and use Virtual Machines pre-configured for R.
#' Develop, test and build packages in a clean environment.
#' The 'Vagrant' tool and a provider (such as 'Virtualbox') have to be installed.
#' @aliases vmr-package vmr
#'
#' @author Jean-François Rey \email{jf.rey.public@@gmail.com}
#'
#' Maintainer: Jean-François Rey \email{jf.rey.public@@gmail.com}
#' @docType package
#' @name vmr-package
#' @details \tabular{ll}{
#' Package: \tab vmr\cr
#' Type: \tab Package\cr
#' Version: \tab 0.0.6\cr
#' Date: \tab 2023-03-07\cr
#' License: \tab GPL (>=3)\cr
#' }
#' @details This package is a wrapper around the [Vagrant](https://www.vagrantup.com/)
#' tool and more.
#' It allows you to manage, provision and use Virtual Machines pre-configured for R.
#' It currently only uses 'Virtualbox' (>= 6.1.14) as provider.
#' The Vagrant tool has to be installed too.
#' Used VMs come from the [https://app.vagrantup.com/VMR](https://app.vagrantup.com/VMR)
#' repository and the sources used to generate them can be found at
#' [https://gitlab.com/rstuff/vms](https://gitlab.com/rstuff/vms).
#' See the vignettes for the documentation: `browseVignettes("vmr")`.
#'
#' @keywords vagrant virtual machine provision provider virtualbox
#' @examples
#' \dontrun{
#' library("vmr")
#' }
#' @md
#' @importFrom curl curl_fetch_memory
#' @importFrom jsonlite prettify parse_json
"_PACKAGE"

# Package-private mutable state, kept in an environment so it can be
# updated in place by the package's own functions.
vmr_env <- new.env(parent = emptyenv())
# Verbosity level; printVerbose() calls across the package use levels 1 and 2,
# so 1 presumably means "normal" output — TODO confirm exact scale.
vmr_env$verbose_mode <- 1
# Name (or path) of the vagrant executable; presumably resolved against PATH
# by the vagrant* helper functions — verify where it is consumed.
vmr_env$vagrant_bin <- "vagrant"
/scratch/gouwar.j/cran-all/cranData/vmr/R/vmr.R
---
title: "1- Working with vmr package"
output:
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 2
vignette: >
  %\VignetteIndexEntry{1- Working with vmr package}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
  \usepackage[utf8]{inputenc}
---

# The __vmr__ package

## Presentation

The __vmr__ package allows you to manage __virtual environments__ with and for __R__.
You can develop, run tests, and build package binaries easily in a clean environment with R pre-installed and configured.

It uses the [Vagrant](https://www.vagrantup.com/) tool to manage these virtual environments, called _boxes_.
A __box__ is a Vagrant environment (bundle) containing a __virtual environment__ (such as a virtual machine) for a specific __provider__ (such as [VirtualBox](https://www.virtualbox.org/)).

The official __vmr__ box list is available here: [https://app.vagrantup.com/VMR/](https://app.vagrantup.com/VMR/)

> Boxes use the default user and password: __vagrant__. Boxes are available for development and test; do not use them in production.

Take a [quick overview](vmrPoster.pdf) from the useR!2022 poster presentation.

## Dependencies

The __vmr__ package needs two tools to work: __Vagrant__ to manage boxes and a __provider__ to instantiate the virtual environment.

* __Vagrant__ (>= 2.2.0): download and install from [https://www.vagrantup.com/downloads.html](https://www.vagrantup.com/downloads.html).
* A __provider__; currently only [VirtualBox](https://www.virtualbox.org/) (>= 6.1.14) is available in __vmr__ boxes. Install it.

## Install

From CRAN:
```
install.packages(c('vmr'))
```
From the development repository:
```
remotes::install_git('https://gitlab.com/rstuff/vmr.git')
```

# Go further

1. [Working with __vmr__ package](O1-workwithvmr.html)
2. [Start my first environment](O2-vmrFirstStep.html)
3. [Manage __vmr__ environment](O3-vmrManagevmr.html)
4. [Manage boxes](O4-vmrManageBoxes.html)
5. [Manage providers](O5-vmrManageProviders.html)
6. [Development with __vmr__](O6-vmrDev.html)
7.
[CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [2-Start my first environment](O2-vmrFirstStep.html)
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O1-workwithvmr.Rmd
## ----echo=TRUE, eval=FALSE---------------------------------------------------- # #install.packages(c('vmr')) # library(vmr) # vmrSetVerbose("Full") ## ----eval=FALSE--------------------------------------------------------------- # list_boxes <- vmrList() # print(list_boxes) ## ----eval=FALSE--------------------------------------------------------------- # index <- which(list_boxes$Name == "LinuxMint20-R")[1] # vmr_env <- vmrCreate(name = list_boxes$Name[index], provider = list_boxes$Provider[1]) # vmr_env ## ----eval=FALSE--------------------------------------------------------------- # vmr_env <- vmrInitEnv(vmr_env) ## ----eval=FALSE--------------------------------------------------------------- # vmrStart() ## ----eval=FALSE--------------------------------------------------------------- # vmrStatus() ## ----eval=FALSE--------------------------------------------------------------- # vmrSuspend() ## ----eval=FALSE--------------------------------------------------------------- # vmrResume() ## ----eval=FALSE--------------------------------------------------------------- # vmrStop()
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O2-vmrFirstStep.R
---
title: "2- vmr package first step"
output:
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 2
vignette: >
  %\VignetteIndexEntry{2- vmr package first step}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
  \usepackage[utf8]{inputenc}
---

# To Start

```{r echo=TRUE, eval=FALSE}
#install.packages(c('vmr'))
library(vmr)
vmrSetVerbose("Full")
```

## List available environment (boxes)

Boxes are listed from [https://app.vagrantup.com/VMR/](https://app.vagrantup.com/VMR/).
Mainly you've got the OS, the R version installed (followed by a timestamp) and the provider available.

```{r eval=FALSE}
list_boxes <- vmrList()
print(list_boxes)
```

## Create a __vmr__ object

To create a minimal __vmr__ object you need the _name_ and the _provider_ of a box (default _version_ is "_latest_").

```{r eval=FALSE}
index <- which(list_boxes$Name == "LinuxMint20-R")[1]
vmr_env <- vmrCreate(name = list_boxes$Name[index], provider = list_boxes$Provider[1])
vmr_env
```

## Initialize the __vmr__ environment

> Environment configuration depends on a directory (the _working directory_) and a template file called _Vagrantfile_. This function has to be re-called if the __vmr\_env__ object is modified.

When initializing the environment, the box with the same name, version and provider will be downloaded once. It can take a while depending on the box size and network bandwidth. The box is saved in the Vagrant environment ("~/.vagrant.d/").

```{r eval=FALSE}
vmr_env <- vmrInitEnv(vmr_env)
```

> Note: once the __vmr__ environment is initialized, there is no need to recreate _vmr\_env_ for further use (except in specific cases). Use __vmrLoad()__ in that case to recreate the __vmr__ object _vmr\_env_.

## Start __vmr__ environment

To start an environment:

> This starts the virtual environment using the __vmr directory__ and the __Vagrantfile template__. __Be sure to always be in the same working directory.__

```{r eval=FALSE}
vmrStart()
```

Now enjoy using the R console, the virtual environment GUI or both.
To get vmr Status: ```{r eval=FALSE} vmrStatus() ``` ## Stop __vmr__ environment To save the current state and stop the environment: ```{r eval=FALSE} vmrSuspend() ``` To resume an environment previously suspended: ```{r eval=FALSE} vmrResume() ``` To stop the environment (_powerOff_): ```{r eval=FALSE} vmrStop() ``` # Vignettes summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. [Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. [CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [3-Manage __vmr__ environment](O3-vmrManagevmr.html)
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O2-vmrFirstStep.Rmd
## ----eval=FALSE--------------------------------------------------------------- # vmr_env <- vmrCreate(<boxname>) ## ---- eval=FALSE-------------------------------------------------------------- # setwd("path/to/my/vmr/environment/") # vmr_env <- vmrLoad() # vmr_env ## ----eval=FALSE--------------------------------------------------------------- # vmr_env # created or loaded object # # force.vagrantfile will override existing Vagrantfile template # vmr_env <- vmrInitEnv(vmr_env, force.vagrantfile=TRUE) ## ----eval=FALSE--------------------------------------------------------------- # vmr_env <- vmrLoad() # # provider cleaning # vmrDestroy(vmr_env$id) # # box cleaning # vmrLocalBoxRemove(vmr_env$box, provider = vmr_env$provider, version = vmr_env$version) # # remove the working directory ## ----eval=FALSE--------------------------------------------------------------- # vmr_env <- vmrUpdateEnvVersion(vmr_env) ## ----eval=FALSE--------------------------------------------------------------- # vmr_env <- vmrMountDir(vmr_env, src = "/" , dest = "/" ) ## ----eval=FALSE--------------------------------------------------------------- # # Get environment status # vmrStatus() # # Start a provider instance # vmrStart() # # Save state and stop provider instance # vmrSuspend() # # Resume a saved provider instance # vmrResume() # # Stop a provider instance # vmrStop() # # Remove a provider instance # vmrDestroy() ## ----eval=FALSE--------------------------------------------------------------- # # Take a snapshot # vmrTakeSnapshot("my snapshot") # # resume a snapshot # vmrRestoreSnapshot("my snapshot") # # list snapshots # vmrListSnapshot()
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O3-vmrManagevmr.R
---
title: "3- Manage vmr environment"
output:
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 2
vignette: >
  %\VignetteIndexEntry{3- Manage vmr environment}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
  \usepackage[utf8]{inputenc}
---

## Clarification

A __vmr__ environment consists of a __directory__ and a template file called __Vagrantfile__.
A _vmr_ object contains information to create and modify a __vmr__ environment.
Once the environment is created and initialized, the _vmr_ object becomes optional and only the __working directory__ and the __Vagrantfile__ are the managers.

## Create a __vmr__ environment

### Create a _vmr_ object

```{r eval=FALSE}
vmr_env <- vmrCreate(<boxname>)
```

The _vmrCreate()_ function creates a _vmr_ object using several arguments:

* name: the box name (from _vmrList()_)
* provider: the provider name (from _vmrList()_)
* version: (optional; by default the latest version is used)
* provider.options: specific provider options (vignette n°5)

### Load a _vmr_ object

Set the working directory to a __vmr__ environment that was already initialized.

```{r, eval=FALSE}
setwd("path/to/my/vmr/environment/")
vmr_env <- vmrLoad()
vmr_env
```

## Initialize a __vmr__ environment

Initializing a __vmr__ environment will create a _Vagrantfile_ template in the working directory and download the associated box.

> The box download can take a while depending on the box size and network bandwidth. The box is saved in the Vagrant environment ("~/.vagrant.d/").
```{r eval=FALSE} vmr_env # created or loaded object # force.vagrantfile will override existing Vagrantfile template vmr_env <- vmrInitEnv(vmr_env, force.vagrantfile=TRUE) ``` ### Clean a __vmr__ environment To remove any file created, boxes downloaded and provider instance run this commands: ```{r eval=FALSE} vmr_env <- vmrLoad() # provider cleaning vmrDestroy(vmr_env$id) # box cleaning vmrLocalBoxRemove(vmr_env$box, provider = vmr_env$provider, version = vmr_env$version) # remove the working directory ``` ## Add options to vmr environment Several functions need and can modify a _vmr_ object to add options to the environment. _vmrInitEnv()_ have to be recall at _vmr_ object modification. ### Upgrade environment It's possible to upgrade an environment to use the latest box version. ```{r eval=FALSE} vmr_env <- vmrUpdateEnvVersion(vmr_env) ``` ### Shared files To share a host directory to the guest. ```{r eval=FALSE} vmr_env <- vmrMountDir(vmr_env, src = "/" , dest = "/" ) ``` ## Manipulate a __vmr__ environment This functions manage the environment instance. They have to be call in __vmr__ environment (working directory), with no arguments. ```{r eval=FALSE} # Get environment status vmrStatus() # Start a provider instance vmrStart() # Save state and stop provider instance vmrSuspend() # Resume a saved provider instance vmrResume() # Stop a provider instance vmrStop() # Remove a provider instance vmrDestroy() ``` ### Snapshot Manage provider instance with snapshot. ```{r eval=FALSE} # Take a snapshot vmrTakeSnapshot("my snapshot") # resume a snapshot vmrRestoreSnapshot("my snapshot") # list snapshots vmrListSnapshot() ``` ## Vignette summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. [Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. 
[CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [4-Manage Boxes](O4-vmrManageBoxes.html)
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O3-vmrManagevmr.Rmd
## ----eval=FALSE--------------------------------------------------------------- # boxes_list <- vmrList() # boxes_list ## ----eval=FALSE--------------------------------------------------------------- # vmrListBox(boxes_list$Name[1]) ## ----eval=FALSE--------------------------------------------------------------- # vmrBoxDownload(vmr_env) ## ----eval=FALSE--------------------------------------------------------------- # # List downloaded boxes # vmrLocalBoxList() # # Remove old boxes (not up to date) # vmrLocalBoxPrune() # # Remove a specific box # vmrLocalBoxRemove(<box name>) # # Download the last box version (use in a __vmr__ environment) # vmrLocalBoxUpdate()
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O4-vmrManageBoxes.R
---
title: "4- Manage vmr boxes"
output:
  rmarkdown::html_vignette:
    toc: true
vignette: >
  %\VignetteIndexEntry{4- Manage vmr boxes}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
  \usepackage[utf8]{inputenc}
---

# The __vmr__ boxes

## Presentation

A __box__ is a Vagrant environment (bundle) containing a virtual environment (such as a virtual machine) for a specific provider (such as [VirtualBox](https://www.virtualbox.org/)).

> Boxes are available for development and test; do not use them in production.

Once a box is downloaded, it is saved in the _~/.vagrant.d/_ directory.

> Be careful: boxes can be large; be sure to have enough hard drive space.

## List boxes

The official __vmr__ box list is available here: [https://app.vagrantup.com/VMR/](https://app.vagrantup.com/VMR/)

__vmr__ boxes are identified by:

* a name: <the OS name and version>-R
* a version: <R version>.<timestamp>
* a provider: provider name (default virtualbox)
* a description: for information

To get this list in the R console:

```{r eval=FALSE}
boxes_list <- vmrList()
boxes_list
```

To get information about a specific box:

```{r eval=FALSE}
vmrListBox(boxes_list$Name[1])
```

## Download a box

Box download is automatic in a __vmr__ environment, but if you need to download it manually, you can:

```{r eval=FALSE}
vmrBoxDownload(vmr_env)
```

## Manage boxes

You can manage downloaded boxes with these functions:

```{r eval=FALSE}
# List downloaded boxes
vmrLocalBoxList()
# Remove old boxes (not up to date)
vmrLocalBoxPrune()
# Remove a specific box
vmrLocalBoxRemove(<box name>)
# Download the last box version (use in a __vmr__ environment)
vmrLocalBoxUpdate()
```

## Vignette summary

1. [Working with __vmr__ package](O1-workwithvmr.html)
2. [Start my first environment](O2-vmrFirstStep.html)
3. [Manage __vmr__ environment](O3-vmrManagevmr.html)
4. [Manage boxes](O4-vmrManageBoxes.html)
5. [Manage providers](O5-vmrManageProviders.html)
6. [Development with __vmr__](O6-vmrDev.html)
7.
[CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : 5-[Manage providers](O5-vmrManageProviders.html)
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O4-vmrManageBoxes.Rmd
## ----eval=FALSE--------------------------------------------------------------- # vb.opt <- virtualboxOptions(details = FALSE) ## ----eval=FALSE--------------------------------------------------------------- # vb.opt$name <- "My Virtualbox name" ## ----eval=FALSE--------------------------------------------------------------- # vb.opt$gui <- FALSE ## ----eval=FALSE--------------------------------------------------------------- # vb.opt$modifyvm$cpus <- 3 # vb.opt$modifyvm$memory <- 8192
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O5-vmrManageProviders.R
--- title: "5- Manage vmr Providers" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{5- Manage vmr Providers} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} \usepackage[utf8]{inputenc} --- # The __vmr__ Providers ## Presentation __vmr__ providers are "tools" that manage the virtual machines instances such as _VirtualBox_ (default in __vmr__), _docker_, _vmware_, _aws_, _openstack_, ... Actually only _VirtualBox_ is available in __vmr__ package. ## VirtualBox provider __vmr__ can manage some of virtualbox options. Options available can be find here: ```{r eval=FALSE} vb.opt <- virtualboxOptions(details = FALSE) ``` To set a name to the virtualbox machine: ```{r eval=FALSE} vb.opt$name <- "My Virtualbox name" ``` To disable GUI: ```{r eval=FALSE} vb.opt$gui <- FALSE ``` To add more cpus and memory: ```{r eval=FALSE} vb.opt$modifyvm$cpus <- 3 vb.opt$modifyvm$memory <- 8192 ``` More options can be find here [https://www.virtualbox.org/manual/ch08.html#vboxmanage-modifyvm](https://www.virtualbox.org/manual/ch08.html#vboxmanage-modifyvm) ## Vignette summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. [Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. [CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [6-Development](O6-vmrDev.html)
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O5-vmrManageProviders.Rmd
## ----eval=FALSE--------------------------------------------------------------- # vmrInfo() ## ----eval=FALSE--------------------------------------------------------------- # vmrSend(c("myfile1","myfile2")) ## ----eval=FALSE--------------------------------------------------------------- # vmrProvision(cmd = c("./myscript.sh"), elts = c("myscript.sh"), dest = "/home/vmr/") ## ----eval=FALSE--------------------------------------------------------------- # vmrExec(c('print("HelloWorld")')) ## ----eval=FALSE--------------------------------------------------------------- # vmrUpdatePackages() # vmrInstallPackages(pkg = c("vmr")) ## ----eval=FALSE--------------------------------------------------------------- # # check a local package # vmrPackageCheck() # # Test it # vmrPackageTest() # # and create archive and package binary # vmrPackageBuild()
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O6-vmrDev.R
--- title: "6- Use vmr for development" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{6- Use vmr for development} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} \usepackage[utf8]{inputenc} --- # Develop using __vmr__ package __vmr__ environment are clean and setup with R (and some with R-devel), Rtools and some dependencies. You can use the GUI or use __vmr__ functions to execute commands. ## Information To get guest information: ```{r eval=FALSE} vmrInfo() ``` To send files to guest: ```{r eval=FALSE} vmrSend(c("myfile1","myfile2")) ``` To send and/or run bash commands : ```{r eval=FALSE} vmrProvision(cmd = c("./myscript.sh"), elts = c("myscript.sh"), dest = "/home/vmr/") ``` ## Run R commands To execute a R command to guest: ```{r eval=FALSE} vmrExec(c('print("HelloWorld")')) ``` To update and install R packages in guest: ```{r eval=FALSE} vmrUpdatePackages() vmrInstallPackages(pkg = c("vmr")) ``` ## Package development With __vmr__ you can test, and build a package in development using this functions: To build, check and test: ```{r eval=FALSE} # check a local package vmrPackageCheck() # Test it vmrPackageTest() # and create archive and package binary vmrPackageBuild() ``` ## Vignette summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. [Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. [CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [7-CI/CD](O7-vmrcicd.html)
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O6-vmrDev.Rmd
## ----eval=FALSE--------------------------------------------------------------- # vmrStart() # # do what you want # vmrTakeSnapshot("cicdversionR") ## ----eval=FALSE--------------------------------------------------------------- # virtualboxGitlabRunner(vmr_env, # gitlab_url = "gitlab.com", # gt_token = "<mytoken>", # snapshot_name = "cicdversionR", # vm_name = <VirtualBox VM Name>) ## ----eval=FALSE--------------------------------------------------------------- # vmrTakeSnapshot("cicdversionR") # vmrStop()
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O7-vmrcicd.R
--- title: "7- Use vmr for CI/CD" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{7- Use vmr for CI/CD} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} \usepackage[utf8]{inputenc} --- # CI/CD You can set up a __vmr__ environment to use it in CI/CD pipelines. ```{r eval=FALSE} vmrStart() # do what you want vmrTakeSnapshot("cicdversionR") ``` ## GitLab Runner CI/CD > Using gitlab-runner >= 15.0.0 the option ```[runners.ssh.disable_strict_host_key_checking]``` have to be set to ```true```. Then you can get the command to run, to add it as a GitLab Runner (this will also configure the guest VM) : ```{r eval=FALSE} virtualboxGitlabRunner(vmr_env, gitlab_url = "gitlab.com", gt_token = "<mytoken>", snapshot_name = "cicdversionR", vm_name = <VirtualBox VM Name>) ``` Copy and paste the return command in a terminal where GitLab Runner and the __vmr__ environment are installed (be careful with user right on Gitlab Runner and __vmr__ environment). ## Snapshot Once you set up your __vmr__ environment and configure CI/CD take a snapshot of it (snapshot name as to be the same as given into _virtualboxGitlabRunner_). ```{r eval=FALSE} vmrTakeSnapshot("cicdversionR") vmrStop() ``` ## Vignette summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. [Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. [CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [8- Functions resume](O8-vmrResume.html)
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O7-vmrcicd.Rmd
## ----eval=FALSE--------------------------------------------------------------- # list_boxes <- vmrList() # print(list_boxes) ## ----eval=FALSE--------------------------------------------------------------- # vmrListBox("boxname") ## ----eval=FALSE--------------------------------------------------------------- # vmr_env <- vmrCreate(name = "boxname") ## ----eval=FALSE--------------------------------------------------------------- # vb.options <- getProviderOptions(provider="virtualbox") # vb.options ## ----eval=FALSE--------------------------------------------------------------- # vmrInitEnv(vmr_env) ## ----eval=FALSE--------------------------------------------------------------- # setwd("/path/to/vmr/environment/") # vmr_env <- vmrLoad() ## ----eval=FALSE--------------------------------------------------------------- # vmrStart() ## ----eval=FALSE--------------------------------------------------------------- # vmrStatus() ## ----eval=FALSE--------------------------------------------------------------- # vmrInfo() ## ----eval=FALSE--------------------------------------------------------------- # vmrSuspend() ## ----eval=FALSE--------------------------------------------------------------- # vmrResume() ## ----eval=FALSE--------------------------------------------------------------- # vmrStop() ## ----eval=FALSE--------------------------------------------------------------- # vmrDestroy() ## ----eval=FALSE--------------------------------------------------------------- # vmrUpdatePackages() ## ----eval=FALSE--------------------------------------------------------------- # vmrInstallPackages(c("vmr")) ## ----eval=FALSE--------------------------------------------------------------- # vmrExec(cmd=c("print(Hello World !")) ## ----eval=FALSE--------------------------------------------------------------- # vmrPackageTest(pkg = "./") ## ----eval=FALSE--------------------------------------------------------------- # vmrPackageCheck(pkg = "./") ## 
----eval=FALSE--------------------------------------------------------------- # vmrPackageBuild(pkg = "./") ## ----eval=FALSE--------------------------------------------------------------- # vmrProvision(elts = c("myscript.sh"), cmd=c("ls","./myscript.sh"))
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O8-vmrResume.R
--- title: "8- vmr package functions resume" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{8- vmr package functions resume} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} \usepackage[utf8]{inputenc} --- ## List available boxes ```{r eval=FALSE} list_boxes <- vmrList() print(list_boxes) ``` ## Get box information ```{r eval=FALSE} vmrListBox("boxname") ``` ## Create a __vmr__ environment ```{r eval=FALSE} vmr_env <- vmrCreate(name = "boxname") ``` ## Get providers options: ```{r eval=FALSE} vb.options <- getProviderOptions(provider="virtualbox") vb.options ``` ## Initialize __vmr__ environment ```{r eval=FALSE} vmrInitEnv(vmr_env) ``` ## Load an already initialized __vmr__ environment ```{r eval=FALSE} setwd("/path/to/vmr/environment/") vmr_env <- vmrLoad() ``` ## Start __vmr__ environment ```{r eval=FALSE} vmrStart() ``` ## __vmr__ environment Status: ```{r eval=FALSE} vmrStatus() ``` ## Get Guest informations: ```{r eval=FALSE} vmrInfo() ``` ## Save and stop the environment: ```{r eval=FALSE} vmrSuspend() ``` ## Resume an environment previously suspended: ```{r eval=FALSE} vmrResume() ``` ## Stop a __vmr__ environment ```{r eval=FALSE} vmrStop() ``` ## Remove a __vmr__ environment ```{r eval=FALSE} vmrDestroy() ``` ## Update R packages ```{r eval=FALSE} vmrUpdatePackages() ``` ## Install R packages ```{r eval=FALSE} vmrInstallPackages(c("vmr")) ``` ## Run R commands ```{r eval=FALSE} vmrExec(cmd=c("print(Hello World !")) ``` ## Test R package ```{r eval=FALSE} vmrPackageTest(pkg = "./") ``` ## Check R package ```{r eval=FALSE} vmrPackageCheck(pkg = "./") ``` ## Build R package ```{r eval=FALSE} vmrPackageBuild(pkg = "./") ``` ## Send files and run bash commands ```{r eval=FALSE} vmrProvision(elts = c("myscript.sh"), cmd=c("ls","./myscript.sh")) ``` ## Vignette summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. 
[Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. [CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html)
/scratch/gouwar.j/cran-all/cranData/vmr/inst/doc/O8-vmrResume.Rmd
--- title: "1- Working with vmr package" output: rmarkdown::html_vignette: toc: true toc_depth: 2 vignette: > %\VignetteIndexEntry{1- Working with vmr package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} \usepackage[utf8]{inputenc} --- # The __vmr__ package ## Presentation The __vmr__ package allow you to manage __virtual environment__ with and for __R__. You can develop, run tests, build packages binaries easily in a clean environment with R pre-installed and configured. It uses [Vagrant](https://www.vagrantup.com/) tool to manage this virtual environments, call _boxes_. A __box__ is a Vagrant environment (bundle) containing a __virtual environment_ (such as a virtual machine) for a specific __provider__ (such as [VirtualBox](https://www.virtualbox.org/)). Official __vmr__ boxes list is available here : [https://app.vagrantup.com/VMR/](https://app.vagrantup.com/VMR/) > Boxes use default user and password: __vagrant__. Boxes are available for development and test, do not use it for production. Take a [quick overview](vmrPoster.pdf) from userR2022 poster presentation. ## Dependencies The __vmr__ package needs two tools to work __Vagrant__ to manage boxes and a __provider__ to instantiate the virtual environment. * __Vagrant__ (>= 2.2.0) : download and install from [https://www.vagrantup.com/downloads.html](https://www.vagrantup.com/downloads.html). * A __provider__, currently only [VirtualBox](https://www.virtualbox.org/) (>= 6.1.14) is available in __vmr__ boxes. Install it. ## Install From CRAN: ``` install.packages(c('vmr')) ``` From in development repository: ``` remotes::install_git('https://gitlab.com/rstuff/vmr.git') ``` # Go further 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. [Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. 
[CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [2-Start my first environment](O2-vmrFirstStep.html)
/scratch/gouwar.j/cran-all/cranData/vmr/vignettes/O1-workwithvmr.Rmd
--- title: "2- vmr package first step" output: rmarkdown::html_vignette: toc: true toc_depth: 2 vignette: > %\VignetteIndexEntry{2- vmr package first step} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} \usepackage[utf8]{inputenc} --- # To Start ```{r echo=TRUE, eval=FALSE} #install.packages(c('vmr')) library(vmr) vmrSetVerbose("Full") ``` ## List available environment (boxes) Boxes are listed from [https://app.vagrantup.com/VMR/](https://app.vagrantup.com/VMR/). Mainly you've got the OS, the R version installed (follow by a timestamp) and the provider available. ```{r eval=FALSE} list_boxes <- vmrList() print(list_boxes) ``` ## Create a __vmr__ object To create a minimal __vmr__ object you need the _name_ and the _provider_ of a box (default _version_ is "_latest_"). ```{r eval=FALSE} index <- which(list_boxes$Name == "LinuxMint20-R")[1] vmr_env <- vmrCreate(name = list_boxes$Name[index], provider = list_boxes$Provider[1]) vmr_env ``` ## Initialize the __vmr__ environment > Environment configuration depend of a directory (the _working directory_) and a file template call _Vagrantfile_. This function have to be re-call if __vmr\_env__ object is modified. When initializing environment the box with same name, version and provider, will be download once. It can be long depending of the box size and network bandwide. The box is save in vagrant environment ("~/.vagrant.d/"). ```{r eval=FALSE} vmr_env <- vmrInitEnv(vmr_env) ``` > Note : when __vmr__ environment is initialize, no more need to recreate _vmr\_env_ for further use (unless for specific case). Use __vmrLoad()__ in that case to recreate the __vmr__ object _vmr\_env_. ## Start __vmr__ environment To start an environment: > This start the virtual environment using the __vmr directory__ and the __Vagrantfile template__. __Be sure to always be in the same working directory.__ ```{r eval=FALSE} vmrStart() ``` Now enjoy using R console, the virtual environment GUI or both. 
To get vmr Status: ```{r eval=FALSE} vmrStatus() ``` ## Stop __vmr__ environment To save the current state and stop the environment: ```{r eval=FALSE} vmrSuspend() ``` To resume an environment previously suspended: ```{r eval=FALSE} vmrResume() ``` To stop the environment (_powerOff_): ```{r eval=FALSE} vmrStop() ``` # Vignettes summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. [Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. [CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [3-Manage __vmr__ environment](O3-vmrManagevmr.html)
/scratch/gouwar.j/cran-all/cranData/vmr/vignettes/O2-vmrFirstStep.Rmd
--- title: "3- Manage vmr environment" output: rmarkdown::html_vignette: toc: true toc_depth: 2 vignette: > %\VignetteIndexEntry{3- Manage vmr environment} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} \usepackage[utf8]{inputenc} --- ## Clarification A __vmr__ environment consist of a __directory__ and a template file call __Vagrantfile__. A _vmr_ object contains information to create and modify an __vmr__ environment. Once the environment created and initialized the _vmr_ object became optional and only the __working directory__ and the __Vagrantfile__ are the managers. ## Create a __vmr__ environment ### Create a _vmr_ object ```{r eval=FALSE} vmr_env <- vmrCreate(<boxname>) ``` _vmrCreate()_ function create a _vmr_ object using several arguments: * name : the box name (from _vmrList()_) * provider: the provider name (from _vmrList()_) * version: (optional by default latest version is use) * provider.options: specific provider options (vignette n°5) ### Load a _vmr_ object Set the working directory to a __vmr__ environment who was already initialized. ```{r, eval=FALSE} setwd("path/to/my/vmr/environment/") vmr_env <- vmrLoad() vmr_env ``` ## Initialize a __vmr__ environment Initialize a __vmr__ environment will create a _Vagrantfile_ template into the working directory and download the box associated. > The box download can be long depending of the box size and network bandwide. The box is save in vagrant environment ("~/.vagrant.d/"). 
```{r eval=FALSE} vmr_env # created or loaded object # force.vagrantfile will override existing Vagrantfile template vmr_env <- vmrInitEnv(vmr_env, force.vagrantfile=TRUE) ``` ### Clean a __vmr__ environment To remove any file created, boxes downloaded and provider instance run this commands: ```{r eval=FALSE} vmr_env <- vmrLoad() # provider cleaning vmrDestroy(vmr_env$id) # box cleaning vmrLocalBoxRemove(vmr_env$box, provider = vmr_env$provider, version = vmr_env$version) # remove the working directory ``` ## Add options to vmr environment Several functions need and can modify a _vmr_ object to add options to the environment. _vmrInitEnv()_ have to be recall at _vmr_ object modification. ### Upgrade environment It's possible to upgrade an environment to use the latest box version. ```{r eval=FALSE} vmr_env <- vmrUpdateEnvVersion(vmr_env) ``` ### Shared files To share a host directory to the guest. ```{r eval=FALSE} vmr_env <- vmrMountDir(vmr_env, src = "/" , dest = "/" ) ``` ## Manipulate a __vmr__ environment This functions manage the environment instance. They have to be call in __vmr__ environment (working directory), with no arguments. ```{r eval=FALSE} # Get environment status vmrStatus() # Start a provider instance vmrStart() # Save state and stop provider instance vmrSuspend() # Resume a saved provider instance vmrResume() # Stop a provider instance vmrStop() # Remove a provider instance vmrDestroy() ``` ### Snapshot Manage provider instance with snapshot. ```{r eval=FALSE} # Take a snapshot vmrTakeSnapshot("my snapshot") # resume a snapshot vmrRestoreSnapshot("my snapshot") # list snapshots vmrListSnapshot() ``` ## Vignette summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. [Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. 
[CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [4-Manage Boxes](O4-vmrManageBoxes.html)
/scratch/gouwar.j/cran-all/cranData/vmr/vignettes/O3-vmrManagevmr.Rmd
--- title: "4- Manage vmr boxes" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{4- Manage vmr boxes} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} \usepackage[utf8]{inputenc} --- # The __vmr__ boxes ## Presentation A __box__ is a Vagrant environment (bundle) containing a virtual environment (such as a virtual machine) for a specific provider (such as [VirtualBox](https://www.virtualbox.org/)). > Boxes are available for development and test, do not use them for production. Once a box is downloaded, it is saved in the _~/.vagrant.d/_ directory. > Be careful, boxes can be large in space, so be sure to have enough hard drive space. ## List boxes The official __vmr__ boxes list is available here : [https://app.vagrantup.com/VMR/](https://app.vagrantup.com/VMR/) __vmr__ boxes are identified by : * a name: <the OS name and version>-R * a version: <R version>.<timestamp> * a provider: provider name (default virtualbox) * a description: for information To get this list in R console: ```{r eval=FALSE} boxes_list <- vmrList() boxes_list ``` To get information about a specific box: ```{r eval=FALSE} vmrListBox(boxes_list$Name[1]) ``` ## Download a box Box download is automatic in a __vmr__ environment, but if you need to download it manually, you can: ```{r eval=FALSE} vmrBoxDownload(vmr_env) ``` ## Manage boxes You can manage downloaded boxes with these functions: ```{r eval=FALSE} # List downloaded boxes vmrLocalBoxList() # Remove old boxes (not up to date) vmrLocalBoxPrune() # Remove a specific box vmrLocalBoxRemove(<box name>) # Download the last box version (use in a __vmr__ environment) vmrLocalBoxUpdate() ``` ## Vignette summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. [Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. 
[CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [5-Manage providers](O5-vmrManageProviders.html)
/scratch/gouwar.j/cran-all/cranData/vmr/vignettes/O4-vmrManageBoxes.Rmd
--- title: "5- Manage vmr Providers" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{5- Manage vmr Providers} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} \usepackage[utf8]{inputenc} --- # The __vmr__ Providers ## Presentation __vmr__ providers are "tools" that manage the virtual machines instances such as _VirtualBox_ (default in __vmr__), _docker_, _vmware_, _aws_, _openstack_, ... Actually only _VirtualBox_ is available in __vmr__ package. ## VirtualBox provider __vmr__ can manage some of virtualbox options. Options available can be find here: ```{r eval=FALSE} vb.opt <- virtualboxOptions(details = FALSE) ``` To set a name to the virtualbox machine: ```{r eval=FALSE} vb.opt$name <- "My Virtualbox name" ``` To disable GUI: ```{r eval=FALSE} vb.opt$gui <- FALSE ``` To add more cpus and memory: ```{r eval=FALSE} vb.opt$modifyvm$cpus <- 3 vb.opt$modifyvm$memory <- 8192 ``` More options can be find here [https://www.virtualbox.org/manual/ch08.html#vboxmanage-modifyvm](https://www.virtualbox.org/manual/ch08.html#vboxmanage-modifyvm) ## Vignette summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. [Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. [CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [6-Development](O6-vmrDev.html)
/scratch/gouwar.j/cran-all/cranData/vmr/vignettes/O5-vmrManageProviders.Rmd
--- title: "6- Use vmr for development" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{6- Use vmr for development} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} \usepackage[utf8]{inputenc} --- # Develop using __vmr__ package __vmr__ environment are clean and setup with R (and some with R-devel), Rtools and some dependencies. You can use the GUI or use __vmr__ functions to execute commands. ## Information To get guest information: ```{r eval=FALSE} vmrInfo() ``` To send files to guest: ```{r eval=FALSE} vmrSend(c("myfile1","myfile2")) ``` To send and/or run bash commands : ```{r eval=FALSE} vmrProvision(cmd = c("./myscript.sh"), elts = c("myscript.sh"), dest = "/home/vmr/") ``` ## Run R commands To execute a R command to guest: ```{r eval=FALSE} vmrExec(c('print("HelloWorld")')) ``` To update and install R packages in guest: ```{r eval=FALSE} vmrUpdatePackages() vmrInstallPackages(pkg = c("vmr")) ``` ## Package development With __vmr__ you can test, and build a package in development using this functions: To build, check and test: ```{r eval=FALSE} # check a local package vmrPackageCheck() # Test it vmrPackageTest() # and create archive and package binary vmrPackageBuild() ``` ## Vignette summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. [Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. [CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [7-CI/CD](O7-vmrcicd.html)
/scratch/gouwar.j/cran-all/cranData/vmr/vignettes/O6-vmrDev.Rmd
--- title: "7- Use vmr for CI/CD" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{7- Use vmr for CI/CD} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} \usepackage[utf8]{inputenc} --- # CI/CD You can set up a __vmr__ environment to use it in CI/CD pipelines. ```{r eval=FALSE} vmrStart() # do what you want vmrTakeSnapshot("cicdversionR") ``` ## GitLab Runner CI/CD > Using gitlab-runner >= 15.0.0 the option ```[runners.ssh.disable_strict_host_key_checking]``` have to be set to ```true```. Then you can get the command to run, to add it as a GitLab Runner (this will also configure the guest VM) : ```{r eval=FALSE} virtualboxGitlabRunner(vmr_env, gitlab_url = "gitlab.com", gt_token = "<mytoken>", snapshot_name = "cicdversionR", vm_name = <VirtualBox VM Name>) ``` Copy and paste the return command in a terminal where GitLab Runner and the __vmr__ environment are installed (be careful with user right on Gitlab Runner and __vmr__ environment). ## Snapshot Once you set up your __vmr__ environment and configure CI/CD take a snapshot of it (snapshot name as to be the same as given into _virtualboxGitlabRunner_). ```{r eval=FALSE} vmrTakeSnapshot("cicdversionR") vmrStop() ``` ## Vignette summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. [Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. [CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html) ### Next vignette : [8- Functions resume](O8-vmrResume.html)
/scratch/gouwar.j/cran-all/cranData/vmr/vignettes/O7-vmrcicd.Rmd
--- title: "8- vmr package functions resume" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{8- vmr package functions resume} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} \usepackage[utf8]{inputenc} --- ## List available boxes ```{r eval=FALSE} list_boxes <- vmrList() print(list_boxes) ``` ## Get box information ```{r eval=FALSE} vmrListBox("boxname") ``` ## Create a __vmr__ environment ```{r eval=FALSE} vmr_env <- vmrCreate(name = "boxname") ``` ## Get providers options: ```{r eval=FALSE} vb.options <- getProviderOptions(provider="virtualbox") vb.options ``` ## Initialize __vmr__ environment ```{r eval=FALSE} vmrInitEnv(vmr_env) ``` ## Load an already initialized __vmr__ environment ```{r eval=FALSE} setwd("/path/to/vmr/environment/") vmr_env <- vmrLoad() ``` ## Start __vmr__ environment ```{r eval=FALSE} vmrStart() ``` ## __vmr__ environment Status: ```{r eval=FALSE} vmrStatus() ``` ## Get Guest informations: ```{r eval=FALSE} vmrInfo() ``` ## Save and stop the environment: ```{r eval=FALSE} vmrSuspend() ``` ## Resume an environment previously suspended: ```{r eval=FALSE} vmrResume() ``` ## Stop a __vmr__ environment ```{r eval=FALSE} vmrStop() ``` ## Remove a __vmr__ environment ```{r eval=FALSE} vmrDestroy() ``` ## Update R packages ```{r eval=FALSE} vmrUpdatePackages() ``` ## Install R packages ```{r eval=FALSE} vmrInstallPackages(c("vmr")) ``` ## Run R commands ```{r eval=FALSE} vmrExec(cmd=c("print(Hello World !")) ``` ## Test R package ```{r eval=FALSE} vmrPackageTest(pkg = "./") ``` ## Check R package ```{r eval=FALSE} vmrPackageCheck(pkg = "./") ``` ## Build R package ```{r eval=FALSE} vmrPackageBuild(pkg = "./") ``` ## Send files and run bash commands ```{r eval=FALSE} vmrProvision(elts = c("myscript.sh"), cmd=c("ls","./myscript.sh")) ``` ## Vignette summary 1. [Working with __vmr__ package](O1-workwithvmr.html) 2. [Start my first environment](O2-vmrFirstStep.html) 3. 
[Manage __vmr__ environment](O3-vmrManagevmr.html) 4. [Manage boxes](O4-vmrManageBoxes.html) 5. [Manage providers](O5-vmrManageProviders.html) 6. [Development with __vmr__](O6-vmrDev.html) 7. [CI/CD](O7-vmrcicd.html) 8. [Functions resume](O8-vmrResume.html)
/scratch/gouwar.j/cran-all/cranData/vmr/vignettes/O8-vmrResume.Rmd
#' Check controls
#'
#' @description
#' This function checks the input \code{controls}, fills unspecified elements
#' with their default values, and validates all entries.
#'
#' @param controls
#' Either \code{NULL} or a named list with the following elements. Missing
#' elements are set to the default values in parentheses.
#' \itemize{
#'   \item \code{init_runs} (\code{5}):
#'   The number of initial searches.
#'   \item \code{init_min} (\code{-1}):
#'   The minimum argument value for the random initialization.
#'   \item \code{init_max} (\code{1}):
#'   The maximum argument value for the random initialization.
#'   \item \code{init_iterlim} (\code{20}):
#'   The number of iterations for the initial searches.
#'   \item \code{neighborhoods} (\code{5}):
#'   The number of nested neighborhoods.
#'   \item \code{neighbors} (\code{5}):
#'   The number of neighbors in each neighborhood.
#'   \item \code{beta} (\code{0.05}):
#'   A non-negative weight factor to account for the function's curvature in
#'   the selection of the neighbors. If \code{beta = 0}, the curvature is
#'   ignored. The higher the value, the higher the probability of selecting a
#'   neighbor in the direction of the highest function curvature.
#'   \item \code{iterlim} (\code{1000}):
#'   The maximum number of iterations to be performed before the local search
#'   is terminated.
#'   \item \code{tolerance} (\code{1e-6}):
#'   A positive scalar giving the tolerance for comparing different optimal
#'   arguments for equality.
#'   \item \code{time_limit} (\code{NULL}):
#'   The time limit in seconds for the algorithm. \code{NULL} means no limit.
#' }
#'
#' @return
#' The checked and filled list \code{controls}.
#'
#' @export
check_controls <- function(controls) {
  if (is.null(controls)) {
    controls <- list()
  }
  if (!is.list(controls)) {
    stop("'controls' must be a list.", call. = FALSE)
  }

  ### fill unspecified elements with their default values
  ### ('time_limit' has no default and may legitimately stay NULL)
  defaults <- list(
    init_runs = 5, init_min = -1, init_max = 1, init_iterlim = 20,
    neighborhoods = 5, neighbors = 5, beta = 0.05, iterlim = 1000,
    tolerance = 1e-6
  )
  for (element in names(defaults)) {
    if (is.null(controls[[element]])) {
      controls[[element]] <- defaults[[element]]
    }
  }

  ### validator: element must be a single positive integer value
  ### (the length check gives a clean error instead of a condition-length
  ### failure for vector inputs)
  check_count <- function(name) {
    x <- controls[[name]]
    if (!(is.numeric(x) && length(x) == 1 && x > 0 && x %% 1 == 0)) {
      stop(sprintf("'controls$%s' must be a positive number.", name),
        call. = FALSE
      )
    }
  }

  ### validator: element must be a single numeric value
  check_numeric <- function(name) {
    x <- controls[[name]]
    if (!(is.numeric(x) && length(x) == 1)) {
      stop(sprintf("'controls$%s' must be a numeric.", name), call. = FALSE)
    }
  }

  check_count("init_runs")
  check_numeric("init_min")
  check_numeric("init_max")
  if (controls[["init_max"]] < controls[["init_min"]]) {
    stop("'controls$init_max' must not be smaller than controls$init_min.",
      call. = FALSE
    )
  }
  check_numeric("init_iterlim")
  check_count("neighborhoods")
  check_count("neighbors")
  check_numeric("beta")
  if (controls[["beta"]] < 0) {
    ### message fixed: the check allows zero, so "greater zero" was wrong
    stop("'controls$beta' must be non-negative.", call. = FALSE)
  }
  check_numeric("iterlim")
  check_numeric("tolerance")
  if (controls[["tolerance"]] < 0) {
    stop("'controls$tolerance' must be non-negative.", call. = FALSE)
  }
  if (!is.null(controls[["time_limit"]])) {
    check_numeric("time_limit")
  }

  controls
}
/scratch/gouwar.j/cran-all/cranData/vntrs/R/check_controls.R
#' Check function
#'
#' @description
#' This function checks the input \code{f}.
#'
#' @param f
#' A function that computes value, gradient, and Hessian of the function to be
#' optimized and returns them as a named list with elements \code{value},
#' \code{gradient}, and \code{hessian}.
#' @param npar
#' The number of parameters of \code{f}.
#' @inheritParams check_controls
#'
#' @return
#' No return value, called for side-effects.
#'
#' @keywords
#' internal
#'
#' @importFrom stats runif
check_f <- function(f, npar, controls) {
  ### check inputs
  if (!is.function(f)) {
    stop("'f' must be a function.", call. = FALSE)
  }
  if (!(is.numeric(npar) && npar > 0 && npar %% 1 == 0)) {
    stop("'npar' must be a positive number.", call. = FALSE)
  }

  ### draw random test points (rounded so they print compactly in messages)
  test_runs <- 10
  y <- stats::runif(test_runs * npar, controls$init_min, controls$init_max)
  y <- matrix(y, nrow = test_runs, ncol = npar)
  y <- round(y, 1)

  ### evaluate 'f' at each test point and validate the returned list
  for (run in seq_len(test_runs)) {
    ## human-readable representation of the call, used in error messages
    call_label <- paste0("f(", paste(y[run, ], collapse = ","), ")")
    out <- try(f(y[run, ]))
    if (inherits(out, "try-error")) {
      stop("Could not compute ", call_label, ".", call. = FALSE)
    }
    if (!is.list(out)) {
      stop(call_label, " does not return a list.", call. = FALSE)
    }
    if (is.null(out[["value"]])) {
      stop(call_label, " does not return a list with element 'value'.",
        call. = FALSE
      )
    }
    if (!(is.numeric(out[["value"]]) && length(out[["value"]]) == 1)) {
      stop("The element 'value' in the output list of ", call_label,
        " is not a single numeric value.",
        call. = FALSE
      )
    }
    if (is.null(out[["gradient"]])) {
      stop(call_label, " does not return a list with element 'gradient'.",
        call. = FALSE
      )
    }
    if (!(is.numeric(out[["gradient"]]) &&
      length(out[["gradient"]]) == npar)) {
      stop("The element 'gradient' in the output list of ", call_label,
        " is not a numeric vector of length 'npar'.",
        call. = FALSE
      )
    }
    if (is.null(out[["hessian"]])) {
      stop(call_label, " does not return a list with element 'hessian'.",
        call. = FALSE
      )
    }
    ## Bug fix: this previously checked dim(out[["gradient"]]), which is
    ## NULL for a vector, so all(NULL == c(npar, npar)) was TRUE and the
    ## Hessian dimensions were never actually validated.
    if (!(is.numeric(out[["hessian"]]) && is.matrix(out[["hessian"]]) &&
      all(dim(out[["hessian"]]) == c(npar, npar)))) {
      stop("The element 'hessian' in the output list of ", call_label,
        " is not a numeric matrix of dimension 'npar' x 'npar'.",
        call. = FALSE
      )
    }
  }
}
/scratch/gouwar.j/cran-all/cranData/vntrs/R/check_f.R
#' Initialize search
#'
#' @description
#' Function that initializes the variable neighborhood trust region search.
#'
#' @inheritParams vntrs
#'
#' @return
#' A list of
#' \itemize{
#'   \item the list \code{L} of identified optima which contains lists with
#'   \itemize{
#'     \item \code{value} and
#'     \item \code{argument}
#'   }
#'   of each identified optimum.
#'   \item best initial point \code{x_best}.
#' }
#'
#' @keywords
#' internal
#'
#' @importFrom stats runif
initialize <- function(f, npar, minimize, controls) {
  ### initialize list of identified optima
  L <- list()

  ### random starting points (row-wise)
  y <- stats::runif(controls$init_runs * npar, controls$init_min,
                    controls$init_max)
  y <- matrix(y, nrow = controls$init_runs, ncol = npar)

  ### search locally (for a small number of iterations) starting from 'y'
  local_searches <- list()
  cat("* Apply local search at", controls$init_runs,
      "random starting points.\n")
  for (n in seq_len(controls$init_runs)) {
    ### perform local search
    ### (short run: only 'controls$init_iterlim' trust-region iterations)
    cat("** Run", n)
    start <- Sys.time()
    local_search <- trust::trust(
      objfun = f, parinit = y[n, ], rinit = 1, rmax = 10,
      iterlim = controls$init_iterlim, minimize = minimize
    )
    end <- Sys.time()

    ### save local search outcome (converged flag, value, argument)
    local_searches[[n]] <- list(
      "success" = local_search$converged,
      "value" = local_search$value,
      "argument" = local_search$argument
    )

    ### save local optimum (if unique one has been found)
    t <- difftime(end, start, units = "auto")
    cat(paste0(" [", sprintf("%.0f", t), " ", units(t), "]"))
    if (local_searches[[n]]$success) {
      cat(" [found optimum]")
      if (unique_optimum(
        L = L, argument = local_searches[[n]]$argument,
        tolerance = controls$tolerance
      )) {
        cat(" [optimum is unknown]")
        L <- c(L, list(local_searches[[n]]))
      }
    }
    cat("\n")
  }

  ### select best candidate (lowest value when minimizing, highest otherwise)
  candidates <- unlist(lapply(local_searches, function(x) x$value))
  j_hat <- do.call(
    what = ifelse(minimize, which.min, which.max),
    args = list("x" = candidates)
  )

  ### check if best candidate is local optimum
  if (local_searches[[j_hat]]$success) {
    ### save initial point
    x_best <- local_searches[[j_hat]]$argument
  } else {
    ### search locally again longer if no local optimum has been found yet
    ### (full 'controls$iterlim' budget this time)
    cat(paste0("* Continue the best run ", j_hat, "."))
    local_search_long <- trust::trust(
      objfun = f, parinit = local_searches[[j_hat]]$argument, rinit = 1,
      rmax = 10, iterlim = controls$iterlim, minimize = minimize
    )
    if (!local_search_long$converged) {
      stop("Initialization failed. Consider increasing 'controls$init_runs', 'controls$init_iterlim' and 'controls$iterlim'.")
    }
    x_best <- local_search_long$argument
    cat(" [found optimum]\n")
  }

  ### return set of identified local optima and initially best parameter value
  return(list("L" = L, "x_best" = x_best))
}
/scratch/gouwar.j/cran-all/cranData/vntrs/R/initialize.R
#' Interrupt local search
#'
#' @description
#' This function checks if the local search can be interrupted prematurely,
#' either because the current iterate is close to an already identified
#' optimum or because it appears to be converging to a merely local optimum.
#'
#' @inheritParams vntrs
#' @param point
#' The current location of the local search.
#' @inheritParams unique_optimum
#'
#' @return
#' \code{TRUE} for premature interruption, \code{FALSE} if not.
#'
#' @keywords
#' internal
interruption <- function(f, point, L, minimize) {
  ### no interruption if 'L' is empty
  if (length(L) == 0) {
    return(FALSE)
  }

  ### the value at the best iterate in L
  f_best <- do.call(
    what = ifelse(minimize, min, max),
    args = list(unlist(lapply(L, function(x) x$value)))
  )

  for (i in seq_len(length(L))) {
    ### compute distance between 'point' and identified optima
    dist <- norm(matrix(point - L[[i]]$argument), "F")

    ### check if distance is below 1
    if (dist <= 1) {
      cat(" [optimum already visited]")
      return(TRUE)
    }
  }

  ### check for convergence to a local optimum
  ### (evaluate 'f' once instead of twice, as both the gradient and the
  ### value of the same evaluation are needed)
  f_point <- f(point)
  if (1e-3 >= norm(matrix(f_point$gradient), "F") &&
      3 >= f_point$value - f_best) {
    cat(" [optimum seems local]")
    return(TRUE)
  }

  ### otherwise do not interrupt
  return(FALSE)
}
/scratch/gouwar.j/cran-all/cranData/vntrs/R/interruption.R
#' Local trust region search
#'
#' @description
#' Function that links to \code{\link[trust]{trust}}.
#'
#' @inheritParams vntrs
#' @param parinit
#' Passed on to \code{\link[trust]{trust}}.
#' @inheritParams unique_optimum
#'
#' @return
#' A list of
#' \itemize{
#'   \item \code{success}: A boolean, determining whether the local search
#'   successfully converged.
#'   \item \code{value}: The value at the point where the local search
#'   terminated.
#'   \item \code{argument}: The point where the local search terminated.
#' }
#'
#' @keywords
#' internal
local <- function(f, parinit, minimize, controls, L) {
  ### do not check premature interruption if 'L' is empty or
  ### 'controls$only_global' = FALSE
  ### NOTE(review): no 'only_global' element is read anywhere in this file;
  ### the comment above looks outdated -- confirm against package docs.
  if (length(L) == 0) {
    batches <- 1
  } else {
    batches <- controls$iterlim
  }

  ### perform local search
  ### The iteration budget 'controls$iterlim' is split into 'batches'
  ### chunks; between chunks the search may be aborted early via
  ### interruption(). Each chunk resumes from the previous chunk's argument.
  for (b in seq_len(batches)) {
    out <- trust::trust(
      objfun = f, parinit = parinit, rinit = 1, rmax = 10,
      iterlim = controls$iterlim / batches, minimize = minimize,
      blather = FALSE
    )
    if (b < batches) {
      ### check if already converged
      if (out$converged) break

      ### check if local search can be interrupted prematurely
      if (interruption(f = f, point = out$argument, L = L,
                       minimize = minimize)) {
        return(list("success" = FALSE, "value" = NA, "argument" = NA))
      }
    }
    ### restart the next chunk from the current iterate
    parinit <- out$argument
  }
  return(list(
    "success" = out$converged,
    "value" = out$value,
    "argument" = out$argument
  ))
}
/scratch/gouwar.j/cran-all/cranData/vntrs/R/local.R
#' Select neighbors
#'
#' @description
#' Function that selects neighbors around a given point \code{x}.
#'
#' @inheritParams vntrs
#' @param x
#' A point in the domain of \code{f}.
#' @param neighborhood_expansion
#' A scaling factor, specifying the expansion of the neighborhood.
#'
#' @return
#' A list of points in the domain of \code{f} which are neighbors of \code{x}.
#'
#' @keywords
#' internal
#'
#' @importFrom stats runif
select_neighbors <- function(f, x, neighborhood_expansion, controls) {
  ### compute eigenvectors and eigenvalues of the Hessian of 'f' at 'x';
  ### the eigenvectors give the candidate step directions, the eigenvalues
  ### measure the curvature along them
  H_f <- f(x)$hessian
  H_f_eig <- eigen(H_f)
  v <- H_f_eig$vectors
  lambda <- H_f_eig$values

  ### set selection probabilities: higher curvature directions are more
  ### likely, tempered by 'controls$beta' and the neighborhood expansion
  p_v <- exp(controls$beta * lambda / neighborhood_expansion)
  p_v <- p_v / sum(p_v)

  ### select neighbors (list preallocated to 'controls$neighbors' slots)
  z <- vector("list", controls$neighbors)
  for (j in seq_len(controls$neighbors)) {
    ## random step length in [0.75, 1) and random sign
    alpha <- stats::runif(1, 0.75, 1)
    dir <- sample(c(-1, 1), 1)

    ### select eigenvector (curvature-weighted)
    v_i <- v[, sample(seq_along(lambda), size = 1, prob = p_v)]

    ### compute neighbor value
    z[[j]] <- x + alpha * neighborhood_expansion * dir * v_i
  }
  z
}
/scratch/gouwar.j/cran-all/cranData/vntrs/R/select_neighbors.R
#' Check new optimum for uniqueness
#'
#' @description
#' This function checks if a new optimum \code{argument} is not yet contained
#' in \code{L}.
#'
#' @param L
#' A list of identified optima which contains lists with
#' \itemize{
#'   \item \code{value} and
#'   \item \code{argument}
#' }
#' of each identified optimum.
#' @param argument
#' The argument of a candidate optimum.
#' @param tolerance
#' A non-negative numeric value. The candidate is considered equal to an
#' identified optimum if the Euclidean distance between the two argument
#' vectors falls below \code{tolerance}.
#'
#' @return
#' A boolean. If \code{TRUE}, \code{argument} is not contained in \code{L}.
#' If \code{FALSE}, \code{argument} is already contained in \code{L}.
#'
#' @keywords
#' internal
unique_optimum <- function(L, argument, tolerance) {
  ## TRUE iff no known optimum lies within 'tolerance' (Euclidean distance)
  ## of the candidate; an empty 'L' trivially yields TRUE.
  matches_known <- vapply(
    L,
    function(known) sqrt(sum((argument - known$argument)^2)) < tolerance,
    logical(1)
  )
  !any(matches_known)
}
/scratch/gouwar.j/cran-all/cranData/vntrs/R/unique_optimum.R
#' @aliases vntrs-package
#' @keywords internal
"_PACKAGE"

## Package-level documentation stub: roxygen2 generates vntrs-package.Rd
## from the DESCRIPTION file via the "_PACKAGE" sentinel above.
## usethis namespace: start
## usethis namespace: end
NULL
/scratch/gouwar.j/cran-all/cranData/vntrs/R/vntrs-package.R
#' Variable neighborhood trust region search
#'
#' @description
#' This function performs variable neighborhood trust region search.
#'
#' @references
#' Bierlaire et al. (2009) "A Heuristic for Nonlinear Global Optimization"
#' \doi{10.1287/ijoc.1090.0343}.
#'
#' @inheritParams check_f
#' @param minimize
#' If \code{TRUE}, \code{f} gets minimized. If \code{FALSE}, maximized.
#' @inheritParams check_controls
#' @param quiet
#' If \code{TRUE}, progress messages are suppressed.
#' @param seed
#' Set a seed for the sampling of the random starting points.
#' @return
#' A data frame. Each row contains information of an identified optimum. The
#' first \code{npar} columns \code{"p1"},...,\code{"p<npar>"} store the argument
#' values, the next column \code{"value"} has the optimal function values and
#' the last column \code{"global"} contains \code{TRUE} for global optima and
#' \code{FALSE} for local optima.
#'
#' @examples
#' rosenbrock <- function(x) {
#'   stopifnot(is.numeric(x))
#'   stopifnot(length(x) == 2)
#'   f <- expression(100 * (x2 - x1^2)^2 + (1 - x1)^2)
#'   g1 <- D(f, "x1")
#'   g2 <- D(f, "x2")
#'   h11 <- D(g1, "x1")
#'   h12 <- D(g1, "x2")
#'   h22 <- D(g2, "x2")
#'   x1 <- x[1]
#'   x2 <- x[2]
#'   f <- eval(f)
#'   g <- c(eval(g1), eval(g2))
#'   h <- rbind(c(eval(h11), eval(h12)), c(eval(h12), eval(h22)))
#'   list(value = f, gradient = g, hessian = h)
#' }
#' vntrs(f = rosenbrock, npar = 2, seed = 1, controls = list(neighborhoods = 1))
#'
#' @export
vntrs <- function(f, npar, minimize = TRUE, controls = NULL, quiet = TRUE,
                  seed = NULL) {
  ### check inputs
  if (!is.logical(minimize)) {
    stop("'minimize' must be a boolean.",
      call. = FALSE
    )
  }
  if (!is.logical(quiet)) {
    stop("'quiet' must be a boolean.",
      call. = FALSE
    )
  }
  ### when quiet, redirect all progress output to a throw-away file;
  ### on.exit() restores the sink even if an error occurs
  if (quiet) {
    sink(tempfile())
    on.exit(sink())
  }
  if (!is.null(seed)) {
    set.seed(seed)
  }
  cat("Check controls.\n")
  controls <- check_controls(controls = controls)
  ### record the wall-clock start only if a time limit was requested
  if (!is.null(controls$time_limit)) {
    start_time <- Sys.time()
  }
  cat("Check function.\n")
  check_f(f = f, npar = npar, controls = controls)

  ### initialization of variable neighborhood search
  cat("Initialize VNTRS.\n")
  initialization <- initialize(
    f = f, npar = npar, minimize = minimize, controls = controls
  )
  L <- initialization$L
  x_best <- initialization$x_best

  ### iterative variable neighborhood search
  cat("Start VNTRS.\n")
  k <- 1
  stop <- FALSE
  while (k <= controls$neighborhoods) {
    if (stop) break

    ### select neighbors (neighborhood expands geometrically with 'k')
    cat(paste0("* Select neighborhood ", k, ".\n"))
    z <- select_neighbors(
      f = f, x = x_best, neighborhood_expansion = 1.5^(k - 1),
      controls = controls
    )

    ### perform local search around neighbors
    for (j in seq_len(length(z))) {
      ### check total time
      if (!is.null(controls$time_limit)) {
        if (difftime(Sys.time(), start_time, units = "secs") >
          controls$time_limit) {
          stop <- TRUE
          warning("Stopped early because 'controls$time_limit' reached.",
            call. = FALSE, immediate. = TRUE, noBreaks. = TRUE
          )
          break
        }
      }

      ### perform local search
      cat("** Neighbor", j)
      start <- Sys.time()
      local_search <- local(
        f = f, parinit = z[[j]], minimize = minimize, controls = controls,
        L = L
      )
      end <- Sys.time()

      ### save local optimum (if unique one has been found)
      t <- difftime(end, start, units = "auto")
      cat(paste0(" [", sprintf("%.0f", t), " ", units(t), "]"))
      if (local_search$success) {
        cat(" [found optimum]")
        if (unique_optimum(
          L = L, argument = local_search$argument,
          tolerance = controls$tolerance
        )) {
          cat(" [optimum is unknown]")
          L <- c(L, list(local_search))
        }
      }
      cat("\n")
    }

    ### if identified better optimum, reset neighborhoods
    pos_x_best_new <- do.call(
      what = ifelse(minimize, which.min, which.max),
      args = list(unlist(lapply(L, function(x) x$value)))
    )
    x_best_new <- L[[pos_x_best_new]]$argument
    if (!isTRUE(all.equal(
      target = x_best_new, current = x_best, tolerance = controls$tolerance
    ))) {
      cat("* Reset neighborhood, because better optimum was found.\n")
      x_best <- x_best_new
      k <- 1
    } else {
      k <- k + 1
    }
  }

  ### prepare output: one row per identified optimum, flagging the global ones
  if (length(L) == 0) {
    warning("No optima found.", call. = FALSE)
    return(NULL)
  }
  arguments <- sapply(L, function(x) x$argument)
  if (npar > 1) {
    arguments <- t(arguments)
  }
  values <- sapply(L, function(x) x$value)
  global <- values == ifelse(minimize, min, max)(values)
  out <- data.frame(arguments, values, global)
  colnames(out) <- c(paste0("p", 1:npar), "value", "global")

  ### return output
  cat("Done.\n")
  return(out)
}
/scratch/gouwar.j/cran-all/cranData/vntrs/R/vntrs.R
## dbank.R:
## Functions for reading and processing DementiaBank files
##
### This program is free software; you can redistribute it and/or
### modify it under the terms of the GNU General Public License
### as published by the Free Software Foundation; either version 2
### of the License, or (at your option) any later version.
###
### (c) 2019 S. Luz ([email protected])
##
##' Build a data frame with vocalisation statistics
##'
##' @title makeVocalStatsDataset: create a dataset of vocalisation statistics (1 row per patient)
##' @param dir a string or vector containing the location (directory path) of the DementiaBank transcript files (.cha files)
##' @param sildir directory where silence csv files are stored
##' @param silsuffix the suffix of the silence profile files 'c.mp3.csv'. The format of such files should be the format used by Audacity label files, i.e. 'start time, end time, label' (without header), where 'label' should be 'silence'
##' @param srdir directory where speech rate csv (1 value per utterance) files are stored
##' @param srsuffix the suffix of the speech rate files (default: sre)
##' @param sprate compute speech rate? (default: TRUE; this has no effect at the moment)
##' @return a session's vocalisation feature stats
##' @rdname makeVocalStatsDataset
##' @importFrom stats median sd
##' @examples
##' \dontrun{
##' makeVocalStatsDataset(dir=c('ADReSS-IS2020-data/train/transcription/cc/',
##'                             'ADReSS-IS2020-data/train/transcription/cd/'),
##'                       sildir='ADReSS/silence/',
##'                       srdir='ADReSS/speech_rate/',
##'                       silsuffix='.wav-sil.csv')
##' }
##' @export
makeVocalStatsDataset <- function(dir=c("data/Pitt/Dementia/cookie",
                                        "data/Pitt/Control/cookie"),
                                  sildir=NULL, silsuffix='c.mp3.csv',
                                  srdir='data/Pitt/speech_rate/',
                                  srsuffix='sra', sprate=TRUE)
{
  ## collect all .cha transcripts across the given directories
  files <- c()
  for (d in dir) {
    files <- c(files, dir(d, pattern='.+\\.cha$', full.names=TRUE))
  }
  l <- length(files)
  ## preallocate one row per transcript
  d <- data.frame(id=character(l), age=numeric(l),
                  gender=factor(rep('female', l), levels=c('male','female')),
                  mmse=numeric(l),
                  dur.mean=numeric(l), dur.sd=numeric(l),
                  dur.median=numeric(l),
                  srate.mean=numeric(l), srate.max=numeric(l),
                  srate.min=numeric(l), srate.sd=numeric(l),
                  number.utt=numeric(l),
                  sildur.mean=numeric(l), sildur.sd=numeric(l),
                  sildur.median=numeric(l),
                  dur.max=numeric(l), sildur.max=numeric(l),
                  dur.min=numeric(l), sildur.min=numeric(l),
                  stringsAsFactors=FALSE)
  dx <- character(l)
  ## seq_len() (rather than 1:l) avoids iterating when no files were found
  for (i in seq_len(l)){
    f <- files[i]
    t <- makeSessionDataSet(f, sildir=sildir, silsuffix=silsuffix,
                            srdir=srdir, srsuffix=srsuffix, sprate=sprate)
    d$id[i] <- gsub("(.cha|.*/)", "", f)
    d$age[i] <- as.numeric(t$ids$age[t$ids$PAR=='PAR'])
    d$gender[i] <- as.character(t$ids$gender[t$ids$PAR=='PAR'])
    d$mmse[i] <- as.numeric(t$ids$mmse[t$ids$PAR=='PAR'])
    ## get diagnostic (categorisation target)
    dx[i] <- as.character(t$ids$Dx[t$ids$PAR=='PAR'])
    ## limit analysis to patient speech
    t$trans <- t$trans[t$trans$speaker=='PAR',]
    dur <- t$trans$end - t$trans$begin
    d$dur.mean[i] <- mean(dur, na.rm=TRUE)
    d$dur.sd[i] <- sd(dur, na.rm=TRUE)
    d$dur.median[i] <- median(dur, na.rm=TRUE)
    d$dur.max[i] <- max(dur, na.rm=TRUE)
    d$dur.min[i] <- min(dur, na.rm=TRUE)
    d$srate.mean[i] <- 1/mean(1/t$trans$speechrate) ## use harmonic mean for ratios
    d$srate.max[i] <- max(t$trans$speechrate)
    d$srate.min[i] <- min(t$trans$speechrate)
    d$srate.sd[i] <- sd(t$trans$speechrate)
    d$number.utt[i] <- length(t$trans$utterance)
    sildur <- t$sil$end - t$sil$begin
    ## Guard against sessions with no (or zero-length) silences. The old
    ## scalar test 'sildur == 0' inside '||' errors on vectors of length
    ## > 1 in R >= 4.3; 'all()' handles every length safely.
    if (length(sildur) == 0 || all(sildur == 0)) { sildur <- c(0, 0) }
    d$sildur.mean[i] <- mean(sildur, na.rm=TRUE)
    d$sildur.sd[i] <- sd(sildur, na.rm=TRUE)
    if (is.na(d$sildur.sd[i])) { d$sildur.sd[i] <- 0 }
    d$sildur.median[i] <- median(sildur, na.rm=TRUE)
    d$sildur.max[i] <- max(sildur, na.rm=TRUE)
    d$sildur.min[i] <- min(sildur, na.rm=TRUE)
  }
  ## attach the diagnosis column last so it becomes a factor-free label
  cbind(d, dx=dx)
}

##' makeSessionDataSet: create a data frame for a session (e.g. cookie scene description)
##'
##' @title makeSessionDataSet: create a data frame for a session (e.g. cookie scene description) based on .cha transcription files
##' @param f CHA file to read
##' @param sildir directory where silence profiles are stored
##' @param silsuffix suffix for silence files
##' @param srdir directory where speech rate csv (1 value per utterance) files are stored
##' @param srsuffix the suffix of the speech rate files (default: sre)
##' @param sprate estimate speech rate?
##' (default: TRUE)
##' @return a speech session data frame
##' @author luzs
##' @export
makeSessionDataSet <- function(f, sildir=NULL, silsuffix='c.mp3.csv',
                               srdir='../data/ADReSS/speech_rate/',
                               srsuffix='sra', sprate=TRUE)
{
  cat('reading ', f, '\n')
  ## parse transcript, speaker metadata and silence profile
  t <- read.cha(f, sildir=sildir, silsuffix=silsuffix)
  if (sprate){
    ## derive the speech-rate file name from the transcript file name
    srfile <- gsub('cha$', srsuffix, f)
    srfile <- gsub('.+/', srdir, srfile)
    t <- appendSpeechRate(t, file=srfile)
  }
  ## Bug fix: always return the session data. Previously the 'if' block was
  ## the last expression, so the function returned NULL when sprate=FALSE.
  t
}

##' appendSpeechRate: append pre-generated speech rate data (see audioproc.R)
##'
##' @title appendSpeechRate: append pre-generated speech rate data to given dataframe t
##' @param t a table read through read.cha
##' @param file speech rate file
##' @return dataframe t bound to speech rates per utterance
##' @author luzs
##' @importFrom utils read.csv
appendSpeechRate <- function(t, file=NULL) {
  ## one speech-rate value per utterance, aligned row-wise with t$trans
  speechrate <- read.csv(file, col.names='speechrate')
  t$trans <- cbind(t$trans, speechrate)
  t
}

##' getSyllablesAndSilences: process Praat's grid for syllable nuclei, based on De Jong's approach
##'
##' @title getSyllablesAndSilences: process Praat's grid for syllable nuclei
##' @param txtgrid Path to Praat grid file generated by praat-syllable-syllable-nuclei-v2
##' @return list of syllables and silences
##' @author luzs
##' @references
##' De Jong, N. H. and Wempe, T. (2009). Praat script to detect syllable nuclei
##' and measure speech rate automatically. Behavior Research Methods,
##' 41(2):385-390, May.
##' @export
getSyllablesAndSilences <- function(txtgrid){
  grid <- readLines(txtgrid)
  ## which tier ("syllables" or "silences") the current line belongs to
  sttier <- ''
  sylstart <- c()
  silstart <- c()
  silend <- c()
  siltype <- c()
  for (l in grid) {
    ## tier headers switch the parsing mode
    if (length(grep('name = "syllables"', l, value=F))>0) {
      sttier <- 'syllables'
      next
    }
    if (length(grep('name = "silences"', l, value=F))>0){
      sttier <- 'silences'
      next
    }
    if (sttier == 'syllables') {
      ## syllable nuclei are recorded as 'number = <time>' entries
      r <- regexec('.*number *= *([0-9\\.]+)',l)
      if (r[[1]][1] > 0)
        sylstart <- c(sylstart, as.numeric(regmatches(l,r)[[1]][2]))
      next
    }
    if(sttier == 'silences'){
      ## interval boundaries come as 'xmin'/'xmax'; labels as 'text' entries
      r <- regmatches(l,regexec('.*(xmin|xmax) *= *([0-9\\.]+)',l))
      if (is.na(r[[1]][2]) || length(r[[1]][2])==0 ){
        r <- regmatches(l,regexec('.*(text) *= "*(.+)"',l))
        if (!is.na(r[[1]][2]) && length(r[[1]][2])>0 ){
          siltype <- c(siltype, r[[1]][3])
          cat('r-- ',r[[1]][3],'\n')
        }
        next
      }
      cat(l, '--', as.character(r),'\n')
      if (r[[1]][2] == 'xmin')
        silstart <- c(silstart, as.numeric(r[[1]][3]))
      else if (r[[1]][2] == 'xmax'){
        silend <- c(silend, as.numeric(r[[1]][3]))
      }
    }
  }
  ## drop the first interval boundary pair (the tier-wide xmin/xmax header)
  list(sylstart=sylstart, silstart=silstart[-1], silend=silend[-1],
       siltype=siltype)
}

##' read.cha: read CHA transcription file (format used by DementiaBank)
##'
##' @title read.cha read CHA transcription file (format used by DementiaBank)
##' @param file .cha file to read
##' @param sildir silences directory
##' @param silsuffix silence files suffix
##' @return a list containing the PID, a dataframe containing the speaker IDs
##'     and demographics, and a dataframe containing the speaker IDs,
##'     transcribed utterances, start and end times, speech rates etc.
##' @author luzs
##' @export
read.cha <- function(file, sildir=NULL, silsuffix='c.mp3.csv'){
  text <- readLines(file)
  ## get rid of spurious formatting: fold tab-indented continuation lines
  ## back into the preceding line, then drop them
  for(X in rev(grep('^\t.*', text))) {
    text[X-1] <- paste(c(text[X-1], sub('^\t', ' ', text[X])), collapse=' ')
  }
  text <- text[grep('^\t.*', text, invert=T)]
  ## assemble the session: participant ID, speaker table, utterance table,
  ## and (optionally) the silence profile read from a side-car CSV file
  cha <- list(pid=getPID(text),
              ids=getIDs(text),
              trans=getTranscript(text),
              sil=getSilences(file, sildir=sildir, silsuffix=silsuffix)
              )
  cha
}

## Read description file (session metadata CSV shipped with the corpus)
read.meta <- function(file="../data/Pitt/data.csv"){
  md <- read.csv(file, skip=2)
  md
}

##' getPIDs get study-wide unique patient IDs from CHA content
##'
##' @title getIDs get study-wide unique patient IDs from CHA content
##' @param text a string vector containing the lines of a CHA file
##' @return a vector with participants IDs
##' @author luzs
##' @export
getPID <- function(text){
  ## scan until the first '@PID:' header line is found
  for (line in text){
    pid <- regmatches(line,regexec('^@PID:\\W+(.+)\\W*',line))[[1]][2]
    if (!is.na(pid))
      break
  }
  if (is.na(pid))
    warning('No PID found!')
  pid
}

##' getIDs get speaker IDs from CHA content
##'
##' @title getIDs get speaker role IDs (PAR, INV) and info from CHA content
##' @param text a string vector containing the lines of a CHA file
##' @return a vector with participants IDs
##' @author luzs
##' @export
getIDs <- function(text){
  ids <- c() ##data.frame()
  ## each '@ID:' header holds '|'-separated speaker metadata fields
  for (line in text){
    id <- regmatches(line,regexec('^@ID:\\W+(.+)\\W*',line))[[1]][2]
    if (!is.na(id)){
      ids <- rbind(ids, strsplit(id, ';?\\|')[[1]])
    }
  }
  colnames(ids) <- c('language','UPMC','PAR','age','gender','Dx','Id',
                     'participant','mmse','ign')
  data.frame(ids,stringsAsFactors=F)
}

##' getTranscript
##'
##' @title getTranscript: get transcription lines from .cha content
##' @param text a string vector containing the lines of a CHA file
##' @return a list of transcriptions (participant and interviewer utterances)
##' @author luzs
##' @export
getTranscript <- function(text){
  trans <- c()
  buffer <- ''
  ## utterance lines look like '*PAR: some words 123_456' where the trailing
  ## numbers are begin/end timestamps (in milliseconds) joined by '_'
  for (line in text){
    tmp <- regmatches(line,
                      regexec('^\\*(PAR|INV):\\W+(.+)\\W*([0-9]+)_([0-9]+)',
                              line))[[1]]
    if (length(tmp)>0){
      trans <- rbind(trans,
                     data.frame(speaker=tmp[2],
                                utterance=I(tmp[3]),
                                begin=as.numeric(tmp[4]),
                                end=as.numeric(tmp[5])))
    }
  }
  trans
}

##' getSilences read silences file
##'
##' @title getSilences read silences file
##' @param file CSV formatted silences file
##' @param sildir dir where silence files are
##' @param silsuffix ## suffix for silence files
##' @return silences dataframe, or NULL when no 'sildir' is given
##' @author luzs
##' @export
getSilences <- function(file, sildir=NULL, silsuffix='c.mp3.csv' ) {
  ## derive the silence file name from the transcript file name
  sf <- sub('\\.cha', silsuffix,file)
  if (!is.null(sildir)){
    sf <- sub('.*/', sildir, sf)
  }
  else
    return(NULL)
  s <- read.csv(sf,sep='\t',header=F,
                col.names=c('begin','end', 'desc'))
  ## convert seconds (Audacity label format) to milliseconds, matching the
  ## timestamp unit used by getTranscript()
  s$begin <- s$begin * 1000
  s$end <- s$end * 1000
  s
}
/scratch/gouwar.j/cran-all/cranData/vocaldia/R/dbank.R
## MARKOV.R:
## Functions for dealing with transition matrices representing Markov diagrams.
##
## The input format for 'matrix' is tttdlist$ttarray, where: tttdlist
## contains: (1) a TTARRAY: the vocalisation matrix proper, in which
## all rows sum to 1, and (2) TDARRAY: an absolute (i.e. based on
## real-valued time intervals rather than sample counts) turn duration
## vector which corresponds to the static distribution for TTARRAY
## (i.e. TTARRAY^n[], the matrix representation of the
## Chapman-Kolmogorov equation, as n approaches INF) See vocalgraphs.R
## $ttarray format
##
### This program is free software; you can redistribute it and/or
### modify it under the terms of the GNU General Public License
### as published by the Free Software Foundation; either version 2
### of the License, or (at your option) any later version.
###
### (c) 2017 S. Luz ([email protected])

## Vocalisation-event category labels shared by the functions in this file.
pauseTypes <- c('Pause', 'SwitchingPause', 'GrpPause', 'GrpSwitchingPause')
categories <- c('Vocalisation', 'SwitchingVocalisation', 'Pause',
                'SwitchingPause', 'GrpPause', 'GrpSwitchingPause',
                'GrpVocalisation')
## Human-readable description for each category, indexed by category name.
catDescriptions <- array(dim=length(categories),dimnames=list(categories),
                         data=c('Vocalisation', 'Switching Vocalisation',
                                'Pause', 'Switching pause', 'Group pause',
                                'Group switching Pause',
                                'Group vocalisation'))

##' Compute the stationary distribution for a Markov diagram
##'
##' Return static matrix (i.e. the stationary distribution) for the
##' Markov process represented by the given adjacency matrix. In the
##' particular case of vocaldia's, each column should roughly
##' correspond to the amount of time a speaker held the floor for).
##' Of course, not all Markov chains converge, an example being:
##' \preformatted{
##'        1
##'  /----->-------\
##'  A              B
##'  \----<--------/
##'        1
##'
##' which gives
##'
##' .    | 0 1 |          | 0x0+1x1 0x1+1x0 |   | 1 0 |
##' . M = | 1 0 | and M^2 = | 1x0+0x1 1x1+1x0 | = | 0 1 |
##'
##' }
## whose matrix representation is:
## \deqn{M= \left( \begin{array}{cc}
##      0 & 1 \\
##      1 & 0 \end{array} \right)\]
## and
## \[ M^2 = \left( \begin{array}{cc}
##  0\times 0+1 \times 1  0 \times 1+1 \times 0 \\
##  1\times 0+0\times 1  1\times 1+1\times 0 \end{array} \right)
## =
## \left( \begin{array}{cc}
##      1 & 0 \\
##      0 & 1 \end{array} \right)\]
## }
##' @title staticMatrix Iterate until transition probabilities converge (or give up).
##' @param matrix an adjecency matrix of trnasition probabilities
##' @param limit maximum number of iterations until we give up on
##'     convergence
##' @param digits the number of decimal places to compare
##' @param history if TRUE, keep track of all matrix products
##' @return a matrixseries object; that is, a list where each element
##'     is either the initial matrix or the product of the two
##'     preceding matrices
##' @examples
##' data(vocdia)
##' x2 <- staticMatrix(vocmatrix$ttarray, digits=4, history=TRUE)
##' ## original matrix
##' round(x2[[1]],3)
##' ## stationary matrix (M^139)
##' round(x2[[length(x2)]],3)
##' @export
staticMatrix <- function (matrix, limit=1000, digits=4, history=F) {
  exp <- 2
  ma <- matrix
  mb <- ma %*% matrix
  if (history)
    mseries <- list(ma, mb)
  ## keep multiplying until successive powers agree to 'digits' decimal
  ## places (convergence) or the iteration cap 'limit' is hit
  while (exp < limit
         && !all(round(ma,digits=digits) == round(mb, digits=digits))) {
    exp <- exp + 1
    ma <- mb
    mb <- ma %*% matrix
    if (history)
      mseries[[exp]] <- mb
  }
  ## when history isn't specified, we only keep first and last iterations
  if (!history)
    mseries <- list(matrix, mb)
  if (exp == limit)
    print(paste("staticMatrix: exp limit reached before convergence at matrix^",exp, sep=""))
  else
    print(paste("staticMatrix: values converged for matrix^",exp, sep=""))
  class(mseries) <- 'matrixseries'
  return(mseries)
}

##
##' Matrix exponentials
##'
##' A (sort of) exponential function for matrix multiplication (to be
##' used with \code{\link{staticMatrix}}).
##' @title matrixExp: raise matrix to exp.
##' @param matrix a matrix
##' @param exp the power to which matrix will be raised
##' @param mmatrix a placeholder.
##' @return matrix^exp
##' @examples
##' data(vocdia)
##' matrixExp(vocmatrix$ttarray, 3)
##' @export
matrixExp <- function(matrix, exp, mmatrix=matrix) {
  if (exp < 1){
    warning("matrixExp only accepts positive values")
  }
  ## repeated multiplication; for exp == 1 the loop body never runs and
  ## 'mmatrix' (defaulting to 'matrix' itself) is returned unchanged
  i <- 1 #m <- matrix
  while (i < exp){
    i <- i + 1
    mmatrix <- mmatrix %*% matrix
  }
  return(mmatrix)
}

##' Visualise convergence properties of vocalisation graphs
##'
##' A 'toy' for visualisation of convergence properties of
##' vocalisation graphs. Plot the convergence paths of each
##' Vocalisation event (i.e. each row-column transition probability,
##' grouped by colour according to the inciding node)
##' @title plotConvergence: plots Markov diagram convergence.
##' @param x an object of class matrixseries; a list where the
##'     \eqn{i^{th}} element corresponds to \eqn{M^i}.
##' @param par graphic parameters alist
##' @param interact if TRUE, pauses the drawing after each node.
##' @param ... extra graphics parameters for plot.
##' @return the matrixseries
##' @examples
##' data(vocdia)
##' plot(staticMatrix(vocmatrix$ttarray, digits=4, history=TRUE))
##' @export
plot.matrixseries <- function(x, ..., par=list(), interact=F) {
  mseries <- x
  ## save and restore the caller's graphics parameters
  op <- par(no.readonly = TRUE); on.exit(par(op))
  par(par)
  #mseries <- staticMatrix(matrix, limit=limit, digits=digits, history=T)
  matrix <- startmatrix(mseries)
  convpoint <- length(mseries)
  mm <- mseries[[convpoint]]
  ## extend the series by ~25% past the convergence point so the flat
  ## "tail" of each path is visible; note the '<<-' closure side effect
  ## that threads 'mm' through the lapply() call
  taillen <- round(convpoint/4)
  limit <- convpoint + taillen
  mseries <- c(mseries,
               lapply(1:taillen, function(y){mm <<- mm %*% matrix}))
  plot(sapply(1:limit, function(x){mseries[[x]][1,1]}),
       axes=F, type='l', ylim=c(0,1),
       xlab='iteration', ylab='amount of speech (steady-state value)', ...)
  axis(1)
  axis(2)
  ## place one label per column, stepped down from the right-hand edge
  offs = limit/20
  y = limit
  names <- rownames(matrix)
  for (j in 1:ncol(matrix)) {
    y <- y - offs
    print(paste("Plotting convergence for col ",j))
    text(y, mseries[[limit]][1,j]+.02, labels=names[j])
    for (i in 1:nrow(matrix)) {
      ## entry [1,1] was already drawn by the initial plot() call
      if (i == 1 && j == 1) next;
      cseries <- sapply(1:limit, function(x){mseries[[x]][i,j]})
      lines(cseries, col=j)
    }
    if (interact)
      locator(1)
  }
  mseries
}

##' Access initital matrix in a \code{matrixseries}
##'
##' Access initital matrix in a \code{matrixseries}
##' @title startmatrix: return the first matrix of a converging series.
##' @param mseries a matrixseries object
##' @return the initial matrix.
##' @examples
##' \dontrun{
##' data(vocdia)
##' x2 <- staticMatrix(vocmatrix$ttarray, digits=4, history=TRUE)
##' ## original matrix
##' startmatrix(x2)
##' }
##' @rdname startmatrix
##' @export startmatrix
startmatrix <- function(mseries) UseMethod('startmatrix')

##' @rdname startmatrix
##' @method startmatrix default
##' @S3method startmatrix default
##' @export startmatrix
startmatrix.default <- function(mseries){
  warning(paste("startmatrix() does not know how to handle object of class ",
                class(mseries)))
}

##' @rdname startmatrix
##' @method startmatrix matrixseries
##' @S3method startmatrix matrixseries
##' @export startmatrix
startmatrix.matrixseries <- function(mseries){
  mseries[[1]]
}

##' Anonymise a vocalisation diagram
##'
##' "anonymise" a \code{vocaldia} turn taking probability matrix by
##' replacing speaker names by variables \eqn{s_1,...,s_n s.t. s_1} is
##' the speaker who spoke the least and \eqn{s_n} the one who did the most
##' talking.
##' @rdname anonymise
##' @export anonymise
anonymise <- function(vd) UseMethod('anonymise')

##' @rdname anonymise
##' @method anonymise vocaldia
##' @S3method anonymise vocaldia
##' @examples
##' \dontrun{
##' data(vocdia)
##' x2 <- getSampledVocalMatrix(subset(atddia, id=='Abbott_Maddock_01'),
##'                             individual=TRUE, nodecolumn='speaker')
##' anonymise(x2)
##' }
anonymise.vocaldia <- function(vd){
    ## special nodes that must keep their names (category and silence nodes)
    excluded <- c(categories, "Grp", "Floor")
    ## get array of speakers sorted decreasingly by accumulated turn duration
    ## NOTE(review): decreasing=T means the first variable ('sA') is assigned
    ## to the speaker who spoke the MOST, while the roxygen above says s_1 is
    ## the one who spoke the least -- confirm which mapping is intended.
    ordspk <- sort(vd$tdarray[!names(vd$tdarray) %in% excluded], decreasing=T)
    ## get array indices ordered as ordspk
    ## NOTE(review): pmatch does partial matching; match() would be safer if
    ## one speaker name happens to be a prefix of another.
    idx <- pmatch(names(ordspk), names(vd$tdarray))
    ## get a variable per speaker ('sA', 'sB', ...)
    spkvars <- paste(rep("s",length(ordspk)), LETTERS[1:length(ordspk)], sep='')
    ## replace speaker names on accumulated turn duration table
    names(vd$tdarray)[idx] <- spkvars
    ## now do the same for transition turn transition table row names...
    idx <- pmatch(names(ordspk), dimnames(vd$ttarray)[[1]])
    dimnames(vd$ttarray)[[1]][idx] <- spkvars
    ## now do the same to col names...
    idx <- pmatch(names(ordspk), dimnames(vd$ttarray)[[2]])
    dimnames(vd$ttarray)[[2]][idx] <- spkvars
    return(vd)
}

##' @rdname anonymise
##' @method anonymise default
##' @S3method anonymise default
##' @export anonymise
anonymise.default <- function(vd){
    ## fallback for objects that are not vocaldias
    warning(paste("anonymise() does not know how to handle object of class ",
                  class(vd), '. Try passing a vocaldia.'))
}

##' Assign types to the pauses (Floor events) in a sequence
##'
##' Identify the pauses in a vector as one of the pauses in
##' \code{pauseTypes}
##' @title identifyPauses: label pauses according to type.
##' @param vocvector a character vector containing a sequence of
##'     vocalisation events
##' @return A vector with all Floor events replaced by the appropriate
##'     pause type identifier.
##' @examples
##' data(vocdia)
##' identifyPauses(atddia$speaker[1:60])
##' @export
identifyPauses <- function(vocvector){
    vocvector <- as.character(vocvector)
    indices <- which(vocvector=='Floor')
    laindex <- length(vocvector)
    for (i in indices){
        ## a pause at either end of the sequence has no speaker on one
        ## side, so it cannot be classified further
        if (i == 1 || i == laindex){
            vocvector[i] <- 'Pause'
            next
        }
        ## pauses preceded by group vocalisation get the 'Grp' variants:
        ## GrpPause if the group talk resumes, GrpSwitchingPause otherwise
        if (vocvector[i-1] == 'Grp' || vocvector[i-1] == 'GrpVocalisation'){
            if (vocvector[i+1] == 'Grp' || vocvector[i+1] == 'GrpVocalisation')
                vocvector[i] <- 'GrpPause'
            else
                vocvector[i] <- 'GrpSwitchingPause'
            next
        }
        ## same speaker on both sides: plain pause; different speakers:
        ## the pause mediates a speaker switch
        if (vocvector[i-1] == vocvector[i+1])
            vocvector[i] <- 'Pause'
        else
            vocvector[i] <- 'SwitchingPause'
    }
    vocvector
}

##' Identify switching vocalisations
##'
##' SwitchingVocalisation is a vocalisation that signals an immediate
##' speaker transition; that is, a transition from speaker to
##' speaker (as opposed to speaker to Grp or speaker to Pause).
##'
##' E.g (speakers A, B, C):
##' \preformatted{
##' AAAAAAAABBBBBBBCCCCCBBBBBPauseBBBBSwitchingPauseAAAAAGrp
##'         ^      ^    ^         ^                 ^    ^
##'         |      |    |         |                 |    |
##'         |      |    |          ----------- Non-SwitchingVocalisation
##'         |      |    |
##'          ---------------------> SwitchingVocalisation
##' }
##'
##' @title identifyVocalisations: replace appropriate vocalisation
##'     types
##' @param vocvector a character vector containing a sequence of
##'     vocalisation events
##' @param idswitchvoc if TRUE distinguish between
##'     SwitchingVocalisation and Vocalisation.
##' @return A vector with all events replaced by the appropriate type
##'     identifier.
##' @examples
##' data(vocdia)
##' identifyVocalisations(atddia$speaker[1:60])
##' @export
identifyVocalisations <- function(vocvector, idswitchvoc=TRUE){
    vocvector <- as.character(vocvector)
    vocvector <- identifyGrpVocalisations(vocvector)
    ## vi: indices of individual-speaker vocalisations, i.e. everything
    ## that is not a pause type, a category label, silence or group talk
    vi <- which(!(vocvector %in% c(pauseTypes,categories,'Floor','Grp')))
    if (idswitchvoc){
        ## find indices of SwitchingVocalisations: a vocalisation whose
        ## immediate successor (next position in the sequence) is a
        ## vocalisation by a different speaker.
        ## seq_len() guards against the 1:0 pitfall of the former
        ## 1:(length(vi)-1) when vi has fewer than two elements.
        vsi <- integer(0)
        if (length(vi) > 1) {
            is_switch <- vapply(seq_len(length(vi)-1),
                                function(i){
                                    vi[i+1]==vi[i]+1 && vocvector[vi[i]]!=vocvector[vi[i+1]]
                                }, logical(1))
            vsi <- vi[which(is_switch)]
        }
        vocvector[vi] <- 'Vocalisation'
        vocvector[vsi] <- 'SwitchingVocalisation'
    }
    else
        vocvector[vi] <- 'Vocalisation'
    vocvector
}

##' Identify group vocalisations
##'
##' Standardise identifier for group vocalisations
##' @title identifyGrpVocalisations: replace appropriate vocalisation
##'     types
##' @param vocvector a character vector containing a sequence of
##'     vocalisation events
##' @return A vector with all events replaced by the appropriate type
##'     identifier.
##' @examples
##' data(vocdia)
##' identifyGrpVocalisations(atddia$speaker[1:60])
##' @export
identifyGrpVocalisations <- function(vocvector){
    vocvector <- as.character(vocvector)
    ## map the raw 'Grp' marker onto the canonical node name
    grpindices <- which(vocvector=='Grp')
    vocvector[grpindices] <- 'GrpVocalisation'
    vocvector
}

## probs and entropy

##' Conditional (transition) probability
##'
##' Retrieve \eqn{p(a|b)}, probability of a transition from b to a in an
##' adjacency matrix
##' @title getPofAgivenB: transition probability.
##' @param a target node
##' @param b source node
##' @param ttarray adjacency matrix
##' @return a transition probability.
##' @export
getPofAgivenB <- function(a, b, ttarray){
    if (! all(c(a,b) %in% names(ttarray[1,])))
        ## one of the nodes doesn't exist; return 0
        0
    else
        ttarray[b,a]
}

##' Compute the entropy of a distribution.
##'
##' Compute the entropy of a distribution.
##' @title getEntropy: safely return the Shannon entropy of a distribution.
##' @param distribution a probability distribution.
##' @return a numeric value.
##' @export
getEntropy <- function (distribution){
    ## p * log2(1/p) for each outcome; p == 0 yields 0 * Inf == NaN
    PtimesLOG2 <- distribution * log((1 / distribution), 2)
    ## define "0 log 0 = 0" (i.e. get rid of NaN's)
    sum(PtimesLOG2[!is.na(PtimesLOG2)])
}

## external format conversion
##############################

##' Plot a vocalisation diagram
##'
##' Plot a vocalisation diagram
##' @title plot.vocaldia
##' @param x a vocalisation diagram
##' @param ... arguments for the layout algorithm
##' @return \code{NULL}
##' @examples
##' data(vocdia)
##' if (require('igraph'))
##'   plot(getSampledVocalMatrix(subset(atddia, id=='Abbott_Maddock_01'),
##'                              individual=TRUE, nodecolumn='speaker'))
##' @export
plot.vocaldia <- function(x, ...){
    ## NOTE(review): bare 'x' appears to just force evaluation of the
    ## argument; it has no other effect.
    x
    if (requireNamespace('igraph', quietly = TRUE)){
        ## require('igraph')
        g <- igraph.vocaldia(x)
        plot(g, layout=igraph::layout.fruchterman.reingold(g), ...)
        return(g)
    }
    ##if (requireNamespace("Rgraphviz", quietly = TRUE)){
    ##  cat('Rgraphviz support under construction. PLease use igraph instead')
    ## if (package=='Rgraphviz'){
    ##     require('Rgraphviz')
    ##     g <- graphNEL.vocaldia(vd)
    ##     w <- as.character(round(unlist(edgeWeights(g)), digits=3))
    ##     w <- w[setdiff(seq(along=w), removedEdges(g))]
    ##     names(w) <- edgeNames(g)
    ##     ea <- at <- list()
    ##     ea$label <- w
    ##     at$edge$fontzise=30
    ##     plot(g, edgeAttrs=ea, attrs=at, ...)
    ##     return(g)
    ##}
    ## only reached when igraph is unavailable (the branch above returns)
    warning(paste('Package igraph not installed. Try installing igraph or "require(igraph)" if installed.'))
}

##' Create an igraph vocalisation diagram
##'
##' Create a vocalisation diagram
##' @title igraph.vocaldia: Create an igraph vocalisation diagram
##' @param vd a vocalisation diagram
##' @param ... arguments for the layout algorithm
##' @return an igraph
##' @examples
##' data(vocdia)
##' if (require('igraph'))
##'   igraph.vocaldia(getSampledVocalMatrix(subset(atddia,
##'                                                id=='Abbott_Maddock_01'),
##'                                         individual=TRUE, nodecolumn='speaker'))
##' @export
igraph.vocaldia <- function(vd, ...){
    if (requireNamespace('igraph', quietly = TRUE)){
        ## weighted directed graph built from the transition matrix
        g <- igraph::graph.adjacency(vd$ttarray, weighted=T)
        igraph::V(g)$label <- names(vd$ttarray[1,])
        igraph::E(g)$label <- round(igraph::E(g)$weight,digits=3)
        ## node size scaled by (exponentiated) relative speech duration
        igraph::V(g)$size <- 25*exp(vd$tdarray)
        g$layout <- igraph::layout.kamada.kawai(g)
        return(g)
    }
    else
        warning(paste('Package igraph not supported. Try installing igraph or "require(igraph)" if installed.'))
}

## ##' Create a graphNEL vocalisation diagram
## ##'
## ##' Create a vocalisation diagram
## ##' @title graphNEL.vocaldia: Create a graphNEL vocalisation diagram
## ##' @param vd a vocalisation diagram
## ##' @param ... arguments for the layout algorithm
## ##' @return a graphNEL
## ##' @examples
## ##' data(vocdia)
## ##' graphNEL.vocaldia(getSampledVocalMatrix(subset(atddia, id=='Abbott_Maddock_01'),
## ##'                   individual=TRUE, nodecolumn='speaker'))
## ##' @export
## graphNEL.vocaldia <- function(vd, ...){
##     as(unclass(vd$ttarray), 'graphNEL')
## }
##
##
##' Write vocalisation diagram to file in dot (graphviz) notation
##'
##' Write a vocalisation diagram
##' @title write.vocaldia
##' @param vd a vocalisation diagram
##' @param file name of file to which dot diagram will be written.
##'     If "", write to STDOUT.
##' @param ... arguments passed on to toDotNotation.
##' @return \code{NULL}
##' @examples
##' data(vocdia)
##' write.vocaldia(getSampledVocalMatrix(subset(atddia,
##'                                             id=='Abbott_Maddock_01'),
##'                                      individual=TRUE, nodecolumn='speaker'),
##'                file=tempfile(pattern='vocaldia-', fileext='.dot'))
##' @export
write.vocaldia <- function(vd, file="", ...){
    o <- toDotNotation(vd, ...)
    ## record the generating call in a dot comment at the top of the file
    o <- paste('## Generated automatically by: ', format(sys.call()), o)
    if (file!="") cat("Writing ", file, '\n')
    cat(o, file=file)
}

##' Create vocalisation diagram to file in dot (graphviz) notation
##'
##' Create a vocalisation diagram in dot notation
##' @title toDotNotation: convert vocaldia to graphviz dot notation
##' @param vd a vocalisation diagram
##' @param individual if TRUE write individual node names
##' @param varsizenode if true set varsizenode in dot
##' @param shape node shape
##' @param fontsize font size
##' @param rankdir direction of ranking (LR, RF etc)
##' @param nodeattribs attributes for node
##' @param comment comments
##' @return character data containing the diagram in dot format.
##' @examples
##' data(vocdia)
##' toDotNotation(getSampledVocalMatrix(subset(atddia,
##'                                            id=='Abbott_Maddock_01'),
##'                                     individual=TRUE, nodecolumn='speaker'))
##' @seealso graphviz manual
##' @export
toDotNotation <- function(vd, individual=T, varsizenode=T,
                          shape='circle', fontsize=16, rankdir='LR',
                          nodeattribs='fixedsize=true;', comment="")
{
    ## dot file preamble: digraph header plus global node attributes
    head <- paste("## diagram generated by vocalgraphs.r\n## ",
                  comment,
                  "\ndigraph finite_state_machine {\n",
                  'shape=',shape,';',
                  'fontsize=', fontsize,';',
                  'rankdir=', rankdir, ';',
                  nodeattribs)
    links <- ""
    nodes <- dimnames(vd$ttarray)[[1]]
    for (i in nodes){
        ## nodes never visited are omitted from the diagram
        if (vd$tdarray[i] == 0) next
        ## node width grows logarithmically with its share of speech time
        width <- log(1000*vd$tdarray[i],base=5)
        width <- if (width < .4 ) .4 else width ## minimal acceptable width
        if (individual){
            nodelabel <- i
        } else if (width < .6) {
            ## NOTE(review): both branches below are identical; the
            ## 'width < .6' case was presumably meant to use an
            ## abbreviated label -- confirm intended behaviour.
            nodelabel <- catDescriptions[i]
        } else {
            nodelabel <- catDescriptions[i]
        }
        ## emit the node declaration (optionally with variable size and
        ## a label showing its stationary probability)
        head <- paste(head, " ", i,
                      "[",
                      (if (varsizenode) paste("width =", width,', ') else ""),
                      (if (varsizenode)
                           sprintf("label = \"%s \\n%.3f\"", nodelabel, vd$tdarray[i])
                       else
                           paste("label = \"",nodelabel,"\", ")
                      ),
                      if (width < .6) "fontsize=8",
                      "];\n")
        ## emit one edge per non-zero transition out of node i
        for (j in nodes) {
            if (vd$ttarray[i,j] == 0) next
            links <- paste(links, " ", i, "->", j,
                           "[ label =", sprintf("%.3f", vd$ttarray[i,j]), "];\n")
        }
    }
    o <- paste(head, links, "}\n")
    return(o);
}
/scratch/gouwar.j/cran-all/cranData/vocaldia/R/markov.R
## VOCALGRAPHS.R: ## Functions for creating adjacency matrices of vocalisation graphs from dataframes. ## ### This program is free software; you can redistribute it and/or ### modify it under the terms of the GNU General Public License ### as published by the Free Software Foundation; either version 2 ### of the License, or (at your option) any later version. ### ### (c) 2017 S. Luz ([email protected]) ##source('markov.R') ##' @docType package ##' @bibliography /home/luzs/lib/tex/bib/luz.bib ##' Create and manipulate vocalisation matrices and diagrams ##' ##' \code{vocaldia} provides a number of functions for generating ##' vocalisation diagrams (vocaldias) from data frames containing, minimally, a ##' column for start time of a vocalisation event (speech, silence, ##' group-talk etc), a column for end time, and a column for the event ##' identifier. ##' ##' Functions \code{\link{getSampledVocalMatrix}} and ##' \code{\link{getTurnTakingProbMatrix}} generate alternative ##' versions of adjacency matrices for ##' vocaldias. \code{\link{staticMatrix}} generates steady state ##' diagrams from a vocaldia. \code{\link{printARFFfile}} generates a ##' 'flat' representation of vocaldias for classifier training and ##' evaluation. ##' ##' @author Saturnino Luz \email{luzs@@acm.org} ##' @references ##' S. Luz. Automatic identification of experts and performance ##' prediction in the multimodal math data corpus through analysis ##' of speech interaction. In \emph{Proceedings of the 15th ACM on ##' International Conference on Multimodal Interaction, ICMI'13}, ##' pages 575--582, New York, NY, USA, 2013. ACM. ##' ##' S. Luz. The non-verbal structure of patient case discussions in ##' multidisciplinary medical team meetings. \emph{ACM Transactions on ##' Information Systems}, 30(3):17:1--17:24, 2012 ##' ##' Dabbs, J. M. J. and Ruback, B. Dimensions of group process: Amount and ##' structure of vocal interaction. 
\emph{Advances in Experimental Social ##' Psychology} 20, 123-169, 1987. ##' ##' Jaffe , J. and Feldstein, S. Rhythms of ##' dialogue. ser. \emph{Personality and Psychopathology}. Academic ##' Press, New York, 1976. ##' @import graphics "_PACKAGE" ##' A sample vocalisation matrix ##' ##' A \code{vocaldia} object containing a 3-speaker dialogue ##' ##' @format A list containing 2 arrays ##' \describe{ ##' \item{ttarray}{The vocaldia adjacency matrix} ##' \item{tdarray}{The proportional durations (stationary probabilities) of each event (node)} ##' } ##' @source This dataset was generated from the Multomodal Learning ##' Analytics dataset, for the eponymous ICMI'13 Grand ##' Challenge. The use these vocaldias were put to is described in ##' Luz (2013). The full dataset and code is available ##' at https://gitlab.scss.tcd.ie/saturnino.luz/icmi-mla-challenge ##' @references ##' S. Luz. Automatic identification of experts and performance ##' prediction in the multimodal math data corpus through analysis ##' of speech interaction. In \emph{Proceedings of the 15th ACM on ##' International Conference on Multimodal Interaction, ICMI'13}, ##' pages 575--582, New York, NY, USA, 2013. ACM. "vocmatrix" ##' A sample Medical Team Meeting dialogue encoded as a vocaldia ##' ##' A dataset containing 38 dialogues (17 control patients, and 21 AD ##' patients) and 7869 vocalisation events. 
##'
##' @format A data frame with 7869 rows and 7 variables:
##' \describe{
##'   \item{id}{The dialogue identifier}
##'   \item{begin}{The start time of a speech turn or silence interval}
##'   \item{end}{The end time of a speech turn or silence interval}
##'   \item{speaker}{An identifier for the speaker of the turn, or Floor for silence.}
##'   \item{role}{The speaker's role (patient, interviewer, other, or Floor)}
##'   \item{trans}{The transcription of the turn (blanked out for anonymity)}
##'   \item{dx}{The diagnosis (ad or nonad)}
##' }
##' @source This dataset was generated from the Carolina Conversations
##'     Collection, and used in the work described in De La Fuente,
##'     Albert and Luz:
##'     "Detecting cognitive decline through dialogue processing",
##'     2017. For the full data set, please contact the Medical
##'     University of South Carolina (MUSC)
##'     http://carolinaconversations.musc.edu/
"atddia"

##' Generate a probabilistic vocalisation diagram through 'sampling'.
##'
##' A vocalisation diagram (vocaldia) is a representation of a
##' dialogue as a Markov process whose cell <m,n> contains the
##' transition probability from node n to node m).
##' @title getSampledVocalMatrix: generate vocalisation diagrams
##' @param df a data frame consisting, minimally, of a column for
##'     vocalisation/pause start times, a column for end times, and a
##'     column identifying the speaker, speaker role or 'Floor' (for
##'     silences).
##' @param ... general parameter to be passed to
##'     \code{\link{getSampledVocalCountMatrix}}
##' @return a vocaldia object, consisting of a vocalisation matrix
##'     (vocmatrix) where cell <m,n> contains the transition
##'     probability from node n to node m, and a table of prior
##'     probabilities (stationary distribution) per node.
##' @author Saturnino Luz \email{luzs@@acm.org}
##' @references
##' S. Luz. Automatic identification of experts and performance
##' prediction in the multimodal math data corpus through analysis
##' of speech interaction. In \emph{Proceedings of the 15th ACM on
##' International Conference on Multimodal Interaction, ICMI'13},
##' pages 575--582, New York, NY, USA, 2013. ACM.
##' @examples
##' data(vocdia)
##' getSampledVocalMatrix(subset(atddia,
##'      id=='Abbott_Maddock_01'),nodecolumn='speaker', individual=TRUE)
##' @seealso \code{\link{getSampledVocalCountMatrix}}
##' @export
getSampledVocalMatrix <- function (df, ...)
{
    vcm <- getSampledVocalCountMatrix(df, ...)
    tdarray <- vcm$tdarray/sum(vcm$tdarray) ## normalise turn-distribution vector
    rsum <- apply(vcm$ttarray, 1, sum) ## sum rows
    ## divide every cell by its row total (apply over columns preserves
    ## row order), i.e. row-normalise the transition counts into
    ## conditional probabilities
    ttarray <- apply(vcm$ttarray, 2, function(z){z/rsum})
    class(ttarray) <- 'vocalmatrix'
    class(tdarray) <- 'vocduration'
    vd <- list(ttarray=ttarray, tdarray=tdarray)
    class(vd) <- 'vocaldia'
    return(vd)
}

##' Generate a count vocalisation diagram through 'sampling'.
##'
##' A vocalisation diagram (vocaldia) is a representation of a
##' dialogue as a Markov process whose cell <m,n> contains the
##' transition probability from node n to node m). This function for
##' 'cases' (an identifier for a case or a vector of identifiers
##' identifying a set of cases) in data frame 'df', obtained by
##' sampling the timeline every 'rate'-th second (see
##' getSampledVocalCountMatrix).
##' @title getSampledVocalCountMatrix: generate vocalisation diagrams
##' @param cdf a data frame consisting, minimally, of a column for
##'     vocalisation/pause start times, a column for end times, and a
##'     column identifying the speaker, speaker role or 'Floor' (for
##'     silences).
##' @param rate the rate at which to sample the vocalisation events
##'     (in seconds)
##' @param individual whether to include individual speakers or group
##'     them into a single Vocalisation node
##' @param noPauseTypes if TRUE, ignore distinctions between pauses
##'     (SwitchingPause, GrpSwitchingPause, etc)
##' @param begin the name of the column containing the start time of
##'     the vocalisation event in a row.
##' @param end the name of the column containing the end time of the
##'     vocalisation event in the same row.
##' @param nodecolumn the name of the column containing the node
##'     (speaker) name (e.g. 'speaker', 'role').
##' @return a vocaldia object, consisting of a vocalisation matrix
##'     (vocmatrix) where cell <m,n> contains the counts of
##'     transitions from node n to node m, and a table of prior
##'     probabilities (stationary distribution) per node.
##' @seealso (Luz, 2013)
##' @examples
##' data(vocdia)
##' getSampledVocalCountMatrix(subset(atddia,
##'      id=='Abbott_Maddock_01'), nodecolumn='role')
##' @export
getSampledVocalCountMatrix <- function (cdf, rate=1,
                                        individual=FALSE, noPauseTypes=FALSE,
                                        begin='begin', end='end',
                                        nodecolumn='role')
{
    ##cdf <- subset(df, id %in% ids)
    ## collapse individual speakers into generic Vocalisation nodes
    ## and/or label pause subtypes, as requested
    if(!individual)
        cdf[[nodecolumn]] <- as.factor(identifyVocalisations(cdf[[nodecolumn]]))
    if(!noPauseTypes)
        cdf[[nodecolumn]] <- as.factor(identifyPauses(cdf[[nodecolumn]]))
    ## no need for df from this point on; one could split this into 2
    ## separate functions
    ## timeline boundaries: first event's start, last event's end
    btime <- cdf[[begin]][1]
    etime <- cdf[[end]][length(cdf[[end]])]
    nodes <- as.character(unique(cdf[[nodecolumn]]))
    ## absolute duration (static distribution); ttarray^n (i.e. the
    ## matrix representation of the Chapman-Kolmogorov equation) should
    ## converge to tdarray as n approaches infinity
    tdarray <- array(data=0, dim=c(length(nodes)), dimnames=list(nodes))
    tdarray <- sapply(nodes,function(y){sum(cdf[[end]][cdf[[nodecolumn]]==y]
                                            -cdf[[begin]][cdf[[nodecolumn]]==y])})
    ttarray <- array(data=0, dim=c(length(nodes),length(nodes)),
                     dimnames=list(nodes, nodes))
    ## walk the timeline at 'rate'-second steps, counting transitions
    ## between the node active at consecutive samples
    pspk <- character(length=0)
    for (t in seq(btime, etime, by=rate) ) {
        sp <- as.character(cdf[[nodecolumn]][cdf[[begin]] <= t & t < cdf[[end]]])
        if ( length(sp) > 1 ) {
            warning(paste("Invalid overlapping vocalisation at ", t))
            sp <- sp[1]
        }
        ## no active event at this sample, or no previous speaker yet:
        ## just remember the current one and move on
        if ( length(sp) == 0 || length(pspk) == 0) {
            pspk <- sp
            next
        }
        ttarray[pspk, sp] <- ttarray[pspk, sp] + 1
        pspk <- sp
    }
    ## prevent zero-transition rows (and div by zero in getSampledVocalMatrix)
    if (sum(ttarray[pspk,])==0) ttarray[pspk,pspk] <- 1
    return(list(ttarray=ttarray, tdarray=tdarray))
}

##' Identify turn types
##'
##' Return one of {Vocalisation, GrpVocalisation, ...} or identifier.
##' @title getTurnType: return type of turn
##' @param df a data frame consisting, minimally, of a column for
##'     vocalisation/pause start times, a column for end times, and a
##'     column identifying the speaker, speaker role or 'Floor' (for
##'     silences).
##' @param i the identifier (index number) whose type will be returned
##' @param individual if TRUE, return the identifier, a Pause or Grp
##' @param nodecolumn the name of the column containing the node
##'     (speaker) name (e.g. 'speaker', 'role').
##' @param noPauseTypes if TRUE, ignore distinctions between pauses
##'     (SwitchingPause, GrpSwitchingPause, etc)
##' @return a string containing the turn type or identifier.
##' @examples
##' data(vocdia)
##' atddia[1:10,]
##' getTurnType(atddia, 3, nodecolumn='role')  ## a vocalisation
##' getTurnType(atddia, 4, nodecolumn='role') ## a pause
##' @export
getTurnType <- function(df, i, individual=FALSE,
                        nodecolumn='speaker', noPauseTypes=FALSE)
{
    ##paste(i,", ")
    currentspeaker <- as.character(df[[nodecolumn]][i])
    ## neighbouring speakers, but only within the same dialogue (id);
    ## 'none' marks a dialogue boundary
    nextspeaker <- 'none'
    if (length(df$id) > i && df$id[i] == df$id[i+1] ){
        nextspeaker <- as.character(df[[nodecolumn]][i+1])
    }
    prevspeaker <-
        if (i > 1 && df$id[i] == df$id[i-1])
            as.character(df[[nodecolumn]][i-1])
        else
            'none'
    if (currentspeaker == 'Floor') {
        ## silence: either a generic Pause or a typed pause derived
        ## from the surrounding speakers
        if (noPauseTypes)
            return('Pause')
        else
            return(getPauseType(prevspeaker, nextspeaker))
    }
    else if (currentspeaker == 'Grp'){
        if (individual)
            return(currentspeaker)
        else
            return('GrpVocalisation')
    }
    else {
        ## ordinary speaker turn: keep the identifier or generalise
        if (individual)
            return(currentspeaker)
        else
            return('Vocalisation')
    }
}

##' Identify the type of pause between vocalisations.
##'
##' The type of pause a 'Floor' (silence) event represents can be:
##' 'Pause', 'SwitchingPause', 'GrpPause', or 'GrpSwitchingPause'. See
##' (Luz, 2013) for details.
##' @title getPauseType: name pause type between two vocalisation events.
##' @param prevspeaker speaker of the vocalisation immediately before Floor
##' @param nextspeaker speaker of the vocalisation immediately after Floor
##' @return the pause type.
##' @seealso \code{\link{namePauses}}
##' @examples
##' getPauseType('a', 'b')
##' ## [1] "SwitchingPause"
##' getPauseType('a', 'Grp')
##' ## [1] "SwitchingPause"
##' getPauseType('Grp', 'Grp')
##' ## [1] "GrpPause"
##' getPauseType('Grp', 'a')
##' ## [1] "GrpSwitchingPause"
##' getPauseType('a', 'a')
##' ##[1] "Pause"
##' @export
getPauseType <- function(prevspeaker, nextspeaker){
    ## two Floors in a row should not happen in well-formed data
    if (prevspeaker == 'Floor' ){
        warning("OOPS! Two consecutive Floor turns found.
A coding error??")
        return('Pause')
    }
    ## a pause at a dialogue boundary cannot be classified further
    if ( prevspeaker == 'none' || nextspeaker == 'none')
        return('Pause')
    if ( prevspeaker == 'Grp'){
        if (nextspeaker == 'Grp')
            return('GrpPause')
        else
            return('GrpSwitchingPause')
    }
    ## speaker change across the pause makes it a SwitchingPause
    if (prevspeaker != nextspeaker)
        return('SwitchingPause')
    else return ('Pause')
}

##' Replace identified pause types in data frame.
##'
##' replace all 'Floor' speakers in df by 'Pause', 'SwitchingPause'
##' etc, and return a new data frame containing pause types in place of
##' 'Floor' (see markov.R, identifyPauses() for a better
##' implementation)
##' @title namePauses: name pause types.
##' @param df a data frame consisting, minimally, of a column for
##'     vocalisation/pause start times, a column for end times, and a
##'     column identifying the speaker, speaker role or 'Floor' (for
##'     silences).
##' @param nodecolumn the name of the column containing the node
##'     (speaker) name (e.g. 'speaker', 'role').
##' @return a data.frame with pauses in nodecolumn replaced by different pause types.
##' @seealso \code{\link{identifyPauses}} for a better implementation
##' @examples
##' data(vocdia)
##' x <- subset(atddia, id=='Abbott_Maddock_01')
##' x[1:15,1:6]
##' namePauses(x)[1:15,1:6]
##' @export
namePauses <- function(df, nodecolumn='role')
{
    ## NOTE(review): assumes default integer-like row names; a df with
    ## re-subset character row names would break as.integer() here.
    nspeakers <- sapply(as.integer(row.names(df)),
                        function (X) {getTurnType(df, X, individual=TRUE)})
    df[[nodecolumn]] <- nspeakers
    return(df)
}

## non-sampled versions of vocalisation graphs (no-self transitions allowed)
###########################################################################

##' Convert a data frame into a vocalisation diagram using counts rather than sampling.
##'
##' Unlike \code{\link{getSampledVocalMatrix}}, this function is based
##' on transition counts rather than sampled intervals. As a result,
##' where in this version self transitions will always be set to 0
##' (since a vocalisation by a speaker is never followed by another
##' vocalisation by the same speaker) in the sampled version self
##' transitons will usually dominate the distribution, since the
##' speaker who is speaking now is very likely to be the one who were
##' speaking one second ago.
##' @title getTurnTakingProbMatrix: create a vocaldia from a
##'     data.frame.
##' @param df a data frame consisting, minimally, of a column for
##'     vocalisation/pause start times, a column for end times, and a
##'     column identifying the speaker, speaker role or 'Floor' (for
##'     silences).
##' @param individual whether to include individual speakers or group
##'     them into a single Vocalisation node
##' @param ... other parameters to be passed to
##'     \code{\link{getTurnTakingMatrix}}.
##' @return a vocaldia object, consisting of a vocalisation matrix
##'     (vocmatrix) where cell \eqn{(m,n)} contains the probabilities \eqn{P(n|m)}
##'     transitions to node \eqn{n} from node \eqn{m}, and a table of prior
##'     probabilities (stationary distribution) per node.
##' @seealso (Luz, 2013) and \code{\link{getTurnTakingMatrix}}.
##'
##' S. Luz. Automatic identification of experts and performance
##' prediction in the multimodal math data corpus through analysis
##' of speech interaction. In \emph{Proceedings of the 15th ACM on
##' International Conference on Multimodal Interaction, ICMI'13},
##' pages 575--582, New York, NY, USA, 2013. ACM.
##' @examples
##' x <- subset(atddia, id=='Abbott_Maddock_01')
##' getTurnTakingProbMatrix(x)
##' getTurnTakingProbMatrix(x, individual=TRUE)
##' @export
getTurnTakingProbMatrix <- function(df, individual=FALSE, ...) {
    t <- getTurnTakingMatrix(df, individual=individual, ...)
if (individual){ nodes <- dimnames(t$ttarray)[[1]] } else { nodes <- categories } for (i in nodes){ s <- sum(t$ttarray[i,]) t$ttarray[i,] <- if (s > 0) t$ttarray[i,]/sum(t$ttarray[i,]) else 0 } t$tdarray <- t$tdarray/sum(t$tdarray) class(t$ttarray) <- 'vocalmatrix' class(t$tdarray) <- 'vocduration' class(t) <- 'vocaldia' return(t) } ##' Generate a vocalisation diagram with absolute vocalisation durations. ##' ##' A vocalisation diagram (vocaldia) is a representation of a ##' dialogue as a Markov process whose cell <m,n> contains the ##' transition probability from node n to node m). Unlike ##' \code{\link{getSampledVocalCountMatrix}} this function ##' accummulates event durations directly, therefore resulting in no ##' self-transitions (in general). ##' @title getSampledVocalCountMatrix: generate vocalisation diagrams ##' @param df a data frame consisting, minimally, of a column for ##' vocalisation/pause start times, a column for end times, and a ##' column identifying the speaker, speaker role or 'Floor' (for ##' silences). ##' @param begin the name of the column containing the start time of ##' the vocalisation event in a row. ##' @param end the name of the column containing the end time of the ##' vocalisation event in the same row. ##' @param nodecolumn the name of the column containing the node ##' (speaker) name (e.g. 'speaker', 'role'). ##' @param individual whether to include individual speakers or group ##' them into a single Vocalisation node ##' @param noPauseTypes if TRUE, ignore distinctions between pauses ##' (SwitchingPause, GrpSwitchingPause, etc) ##' @return a vocaldia object, consisting of a vocalisation matrix ##' (vocmatrix) where cell <m,n> contains the counts of ##' transitions from node n to node m, and a table of absolute ##' durations of vocalisation events. ##' @seealso (Luz, 2013) and \code{\link{getTurnTakingMatrix}}. ##' @references ##' S. Luz. 
Automatic identification of experts and performance ##' prediction in the multimodal math data corpus through analysis ##' of speech interaction. In \emph{Proceedings of the 15th ACM on ##' International Conference on Multimodal Interaction, ICMI'13}, ##' pages 575--582, New York, NY, USA, 2013. ACM. ##' @examples ##' x <- subset(atddia, id=='Abbott_Maddock_01') ##' getTurnTakingMatrix(x) ##' getTurnTakingMatrix(x, individual=TRUE) ##' @export getTurnTakingMatrix <- function(df, begin='begin', end='end', nodecolumn='role', individual=FALSE, noPauseTypes=FALSE) { ##df <- subset(df, df$id %in% ids) if (individual){ nodes <- as.character(unique(df[[nodecolumn]])) nodes <- nodes[nodes != 'Floor'] if (!noPauseTypes){ nodes <- c(nodes, pauseTypes) } else { nodes <- c(nodes, 'Floor') } } else { nodes <- categories } ## ttarray (turn transition array) is a category x category matrix ## s.t. cell (i,j) stores the number of transitions from turn ## category i to j ttarray <- array(data=0, dim=c(length(nodes),length(nodes)), dimnames=list(nodes, nodes)) ## tdarray (turn duration array) stores the total duration of each turn category tdarray <- array(data=0, dim=c(length(nodes)), dimnames=list(nodes)) prevttype <- 'none' prevcase <- 'none' for (i in 1:length(df[[nodecolumn]])) { ##if ( !df$id[i] %in% ids ) next; ttype <- getTurnType(df, i, individual=individual, nodecolumn=nodecolumn, noPauseTypes=noPauseTypes) tdarray[ttype] <- tdarray[ttype] + (df$end[i]-df$begin[i]); if (prevcase == df$id[i]) { ## no transition is recorded across cases ttarray[prevttype,ttype] <- ttarray[prevttype,ttype] + 1 } else { prevcase <- df$id[i] } prevttype <- ttype } return(list(ttarray=ttarray, tdarray=tdarray)) } ##' Generate ARFF files from vocalisation diagrams ##' ##' Use this function to generate turn-taking diragrams in ARFF format for ## processing with, for instance, the WEKA machine learning toolkit. 
##' @title printARFFfile: Create arff files by creating and flattening vocaldias
##' @param df a data frame consisting, minimally, of a column for
##'     vocalisation/pause start times, a column for end times, and a
##'     column identifying the speaker, speaker role or 'Floor' (for
##'     silences).
##' @param ids Ids of dialogues to generate (as defined in column named idcolumn)
##' @param idcolumn the name of the column containing the dialogue id
##' @param noPauseTypes if TRUE, ignore distinctions between pauses
##'     (SwitchingPause, GrpSwitchingPause, etc)
##' @param sampled if >0 use \code{\link{getSampledVocalMatrix}} with rate=sampled
##' @param individual whether to include individual speakers or group
##'     them into a single Vocalisation node
##' @param nodecolumn the name of the column containing the node
##'     (speaker) name (e.g. 'speaker', 'role').
##' @param classcolumn the name of the column containing the target class (or value).
##' @param file name of ARFF file to be generated, or "" (print to console).
##' @return NULL
##' @seealso
##' \code{\link{getSampledVocalCountMatrix}},
##' \code{\link{getTurnTakingProbMatrix}}.
##' @references
##' S. Luz. Automatic identification of experts and performance
##' prediction in the multimodal math data corpus through analysis
##' of speech interaction. In \emph{Proceedings of the 15th ACM on
##' International Conference on Multimodal Interaction, ICMI'13},
##' pages 575--582, New York, NY, USA, 2013. ACM.
##' @examples
##' data(vocdia)
##' atdarff <- tempfile(pattern='vocaldia-', fileext='arff')
##' printARFFfile(atddia, individual=TRUE, classcolumn='dx',
##'               file=atdarff, noPauseTypes=FALSE)
##' library("foreign")
##' x1 <- read.arff(atdarff)
##' x1[1:3,]
##' ## remove empty columns
##' x1[,c(unlist(apply(x1[1:(ncol(x1)-1)],2,sum)!=0), TRUE)]
##' @export
printARFFfile <- function(df, ids=c(), idcolumn='id', noPauseTypes=F,
                          sampled=0, individual=TRUE, nodecolumn="role",
                          classcolumn='dx', file="")
{
    call <- format(sys.call())
    ## exclude dubious annotation
    #print(formals())
    ## ARFF header: a comment recording the generating call plus relation name
    head <- paste(c("% file automatically generated by vocalgraphs.R\n",
                    "% ", call, "\n\n@RELATION mdtm\n\n"), collapse="")
    ## default: one ARFF instance per dialogue id found in df
    if (length(ids) == 0) {
        ids <- levels(df$id)
    }
    nodes <- c()
    tvector <- c(list())
    vi <- 0
    ## first pass: build one vocaldia per dialogue and collect the
    ## union of node names across all dialogues
    for (i in ids) {
        if (sampled>0)
            t <- getSampledVocalMatrix(df[df[[idcolumn]]==i,],
                                       individual=individual,
                                       noPauseTypes=noPauseTypes,
                                       nodecolumn=nodecolumn, rate=sampled)
        else
            t <- getTurnTakingProbMatrix(df[df[[idcolumn]]==i,],
                                         nodecolumn=nodecolumn,
                                         individual=individual,
                                         noPauseTypes=noPauseTypes)
        nodes <- union(nodes, names(t$tdarray))
        tvector <- c(tvector, list(list(ttpm=t, id=i)))
    }
    ## attributes consists of all node names (the values of which
    ## represent the probability of that node) and all combinations of
    ## nodes (values representing transition probabilities)
    attributes <- c(paste(nodes),
                    sapply(nodes, function(x){paste(nodes,'-',x,sep='')}))
    targetclass <- unique(df[[classcolumn]])
    ## pad attribute names so the @ATTRIBUTE declarations line up
    mla <- max(nchar(attributes))
    body <- c(sapply(attributes,
                     function(x){pad <- mla - nchar(x);
                         paste(c("@ATTRIBUTE ",x,rep(" ",pad)," REAL\n"),collapse="")}),
              paste(c("@ATTRIBUTE ", classcolumn ," {",
                      paste(targetclass,collapse=","),"}\n"),collapse=""),
              "\n\n@DATA\n")
    ##
    ## second pass: flatten each vocaldia into one CSV data line
    for (tv in tvector) {
        t <- tv$ttpm
        i <- tv$id
        ## tda: sparse array containing all spkr turn durations,
        ## whereas t$tdarray contain only those that occur in case i
        tda <- array(data=0, dim=c(length(nodes)), dimnames=list(nodes))
        tda[names(t$tdarray)] <- t$tdarray
        tta <- array(data=0,
                     dim=c(length(nodes),length(nodes)),
                     dimnames=list(nodes,nodes))
        line <- paste(tda, collapse=",")
        n <- dimnames(t$ttarray)[[1]]
        ## copy this dialogue's transition probabilities into the
        ##全局-sized (union-of-nodes) matrix, leaving absent cells at 0
        for (j in n){
            for (k in n){
                if (t$ttarray[j,k] == 0) next
                tta[j,k] <- t$ttarray[j,k]
            }
        }
        line <- paste(c(line,
                        paste(tta, collapse=","),
                        as.character(df[[classcolumn]][df$id==i][1])),
                      collapse=",")
        body <- c(body, line,"\n")
    }
    o <- paste(c(head,body),collapse="")
    cat(o, file=file)
}

## loadDataset <- function(file='~/lib/projects/DTP/HRI-Dementia/data/ccc/dialog_ALL.csv'){
##     d <- read.csv(file, as.is=T)
##     ## fill in missing roles (possibly inconsistently; check)
##     d$role[d$role==''] <- 'Other'
##     d$begin <- as.numeric(d$begin)
##     d$end <- as.numeric(d$end)
##     numsil <- sum(sapply(1:(nrow(d)-1), function(X)d$end[X]<d$begin[X+1]))
##     dout <- matrix(nrow=nrow(d)+numsil-1, ncol=ncol(d))
##     colnames(dout) <- names(d)
##     j <- 0
##     for (i in 1:(nrow(d)-1)){
##         dout[j <- j+1,] <- as.character(d[i,])
##         if (d$end[i] < d$begin[i+1]) {
##             dout[j <- j+1,] <- c(d$id[i], d$end[i], d$begin[i+1],
##                                  'Floor', 'Floor', d$dx[i], '___')
##         }
##     }
##     daux <- data.frame(dout[,1], as.numeric(dout[,2]), as.numeric(dout[,3]),
##                        dout[,4], dout[,5], dout[,6])
##     ##daux <- data.frame(dout[,1:(ncol(d)-1)], stringsAsFactors=F)
##     ## keep transcriptions as strings rather than converting them to factors
##     dout <- cbind(daux,trans=dout[,ncol(d)], stringsAsFactors=F)
##     names(dout) <- colnames(d)
##     ## annotated silences (inconsistent; check manually)
##     sil <- c(grep('^[^a-zA-Z]+$', dout$trans), which(dout$speaker==''))
##     dout$role[sil] <- dout$speaker[sil] <- 'Floor'
##     dout
## }
/scratch/gouwar.j/cran-all/cranData/vocaldia/R/vocalgraphs.R
## From the ldr package, version 1.3.3
## By Kofi Placid Adragni and Andrew M. Raim
## https://www.jstatsoft.org/article/view/v061i03
## No modifications (comments added for this package only)
##
## bf() builds a matrix of basis-function values of the outcome y, used
## for principal fitted components regression.  Rows are observations
## and columns are basis terms; the result is centred (and optionally
## scaled) via scale().  The "pdisc"/"pcont" cases rely on ldr.slices()
## (defined elsewhere in this package) to bin y into slices.
bf <- function(y, case=c("poly", "categ", "fourier", "pcont", "pdisc"),
               degree=1, nslices=1, scale=FALSE)
{
  ## Indicator(x, H): 1 where x falls in the set/bin H, else 0 (vectorised)
  Indicator<-function(x, H) return(ifelse((x %in% H), 1, 0))
  case <- match.arg(case); nobs=length(y);
  # nocov start
  if (case=="categ")
  {
    ## One indicator row per category level, dropping the last level
    bins.y<-unique(sort(y));
    r<- length(unique(sort(y)))-1;
    fy<-array(rep(0), c(r, nobs));
    for (i in 1:r){ fy[i,]<-sapply(y, function(x) (x==bins.y[i]))}
  }
  else if (case=="fourier")
  {
    ## Pairs of cos/sin terms at increasing frequencies, 2*degree rows
    fy<-array(rep(0), c(2*degree, nobs));
    for(i in 1:degree)
    {
      fy[2*i-1, 1:nobs]<- cos(2*pi*y*i);
      fy[2*i, 1:nobs]<- sin(2*pi*y*i);
    }
  } # nocov end
  else if (case=="poly")
  {
    ## Raw polynomial basis y, y^2, ..., y^degree
    if (degree==0) stop("This case is not defined");
    fy <- array(rep(0), c(degree, nobs));
    for (k in 1:degree) fy[k, ] <- y^k;
  } # nocov start
  else if (case=="pdisc")
  {
    if ((nslices==0) | (nslices==1)){message("The minimum number of slices is 2"); nslices=2;}
    r <- (degree + 1) * nslices - 1;
    fy <- array(rep(0), c(r, nobs));
    slicing <- ldr.slices(y,nslices);
    bins.y <- slicing$bins;
    if (degree==0) # Piecewise constant discontinuous
    {
      for(i in 1:r) fy[i,] <- Indicator(y, bins.y[[i]]);
    }
    else if (degree==1) # Piecewise linear discontinuous
    {
      for(i in 1:(nslices-1))
      {
        fy[2*i-1, ] <- Indicator(y, bins.y[[i]]);
        fy[2*i, ] <- Indicator(y, bins.y[[i]])*(y-bins.y[[i]][1]);
      }
      ## The final slice contributes only its slope term
      fy[2*nslices-1, ] <- Indicator(y, bins.y[[nslices]])*(y-bins.y[[nslices]][1]);
    }
    else if (degree==2) # Piecewise quadratic discontinuous
    {
      for(i in 1:(nslices-1))
      {
        fy[3*(i-1)+1, ] <- Indicator(y, bins.y[[i]]);
        fy[3*(i-1)+2, ] <- Indicator(y, bins.y[[i]])*(y-bins.y[[i]][1]);
        fy[3*(i-1)+3, ] <- Indicator(y, bins.y[[i]])*(y-bins.y[[i]][1])**2;
      }
      fy[3*nslices-2, ] <- Indicator(y, bins.y[[nslices]])*(y-bins.y[[nslices]][1]);
      fy[3*nslices-1, ] <- Indicator(y, bins.y[[nslices]])*(y-bins.y[[nslices]][1])**2;
    }
    else if (degree==3)# Piecewise cubic discontinuous
    {
      for(i in 1:(nslices-1))
      {
        fy[4*(i-1)+1, ] <- Indicator(y, bins.y[[i]]);
        fy[4*(i-1)+2, ] <- Indicator(y, bins.y[[i]])*(y-bins.y[[i]][1]);
        fy[4*(i-1)+3, ] <- Indicator(y, bins.y[[i]])*(y-bins.y[[i]][1])**2;
        fy[4*(i-1)+4, ] <- Indicator(y, bins.y[[i]])*(y-bins.y[[i]][1])**3;
      }
      fy[4*nslices-3, ] <- Indicator(y, bins.y[[nslices]])*(y-bins.y[[nslices]][1]);
      fy[4*nslices-2, ] <- Indicator(y, bins.y[[nslices]])*(y-bins.y[[nslices]][1])**2;
      fy[4*nslices-1, ] <- Indicator(y, bins.y[[nslices]])*(y-bins.y[[nslices]][1])**3;
    }
  }
  else if (case=="pcont")
  {
    if ((nslices==0) | (nslices==1)){message("The minimum number of slices is 2"); nslices=2;}
    if (degree==0) stop("Piecewise Constant Continuous is not defined.");
    r <- nslices*degree+1;
    fy <- array(rep(0), c(r, nobs));
    slicing <- ldr.slices(y, nslices);
    bins.y <- slicing$bins;
    if (degree==1)# Piecewise linear continuous
    {
      fy[1,] <- Indicator(y, bins.y[[1]]);
      if (r>1) for(i in 1:nslices) fy[i+1,] <- Indicator(y, bins.y[[i]])*(y - bins.y[[i]][1]);
    }
    else if (degree==2)# Piecewise quadratic continuous
    {
      fy[1,] <- Indicator(y, bins.y[[1]]);
      for(i in 1:nslices)
      {
        fy[2*i,] <- Indicator(y, bins.y[[i]])*(y - bins.y[[i]][1]);
        fy[2*i+1,] <- Indicator(y, bins.y[[i]])*(y - bins.y[[i]][1])**2;
      }
    }
    else if (degree==3)# Piecewise cubic continuous
    {
      fy[1,] <- Indicator(y, bins.y[[1]]);
      for(i in 1:nslices)
      {
        fy[3*i-1,] <- Indicator(y, bins.y[[i]])*(y - bins.y[[i]][1]);
        fy[3*i,] <- Indicator(y, bins.y[[i]])*(y - bins.y[[i]][1])**2;
        fy[3*i+1,] <- Indicator(y, bins.y[[i]])*(y - bins.y[[i]][1])**3;
      }
    }
  } # nocov end
  ## Transpose so observations are rows; Re() guards against complex
  ## values; scale() centres columns, and standardises them if scale=TRUE
  return( scale(t(Re(fy)), center=TRUE, scale=scale))
}
/scratch/gouwar.j/cran-all/cranData/voi/R/bf.R
##' Check the fit of a regression model used to estimate EVPPI or EVSI
##'
##' Produces diagnostic plots and summaries of regression models used to estimate EVPPI or EVSI,
##' mainly in order to check that the residuals have mean zero.
##'
##' @details For VoI estimation, the key thing we are looking for is that the residuals
##' have mean zero, hence that the mean of the model output is represented well by the
##' regression function of the model input parameters.  It should not matter if the
##' variance of the residuals is non-constant, or non-normally distributed.
##'
##' Models produced with `method="gam"` are summarised using \code{\link{gam.check}}.
##'
##' Models produced `method="earth"` are summarised using \code{\link{plot.earth}}.
##'
##' For any regression model, if `fitted()` and `residuals()` methods are defined for those models,
##' then a histogram of the residuals and a scatterplot of residuals against fitted values is produced.
##'
##' @param x Output from \code{\link{evppi}} or \code{\link{evsi}}.  The argument \code{check=TRUE}
##' must have been used when calling \code{evppi} or \code{evsi}, to allow the regression model
##' objects from \code{gam} or \code{earth} to be preserved.  (This is not done by
##' default, since these objects can be large.)  \code{attr(x, "models")} contains these objects.
##'
##' @param pars Parameter (or parameter group) whose EVPPI calculation is to be checked.
##' This should be in the \code{pars} component of the object returned by \code{\link{evppi}}.
##' Only relevant if \code{x} is the result of an \code{\link{evppi}} calculation.  By default,
##' the first calculation shown in \code{x} is checked.
##'
##' @param n Sample size whose EVSI calculation is to be checked.
##' This should be in the \code{n} component of the object returned by \code{\link{evsi}}.
##' Only relevant if \code{x} is the result of an \code{\link{evsi}} calculation.
##'
##' @param comparison Only relevant if there are more than two treatments in the decision model.
##' Different regression models are then used for the comparisons of different treatments
##' with the baseline treatment.
##' \code{comparison} is an integer identifying which of these models is checked.
##'
##' @param outcome \code{"costs"} or \code{"effects"}.  Only relevant if `outputs` was
##' in cost-effectiveness format when
##' calling \code{evppi} or \code{evsi}, hence different regressions are used for costs and
##' effects.  By default, \code{outcome="costs"} is used, so that the regression
##' for costs is checked.
##'
##' @param plot If \code{FALSE}, only numerical statistics are returned, and a plot is not made.
##'
##' @return Where possible, an appropriate statistic is returned that allows the regression
##' model to be compared with other regression models implemented using the same \code{method}
##' but with different assumptions.  For \code{method="gam"},
##' this is Akaike's information criterion (AIC).
##' For \code{method="earth"}, this is the generalised cross-validation statistic
##' \code{gcv}.  Currently not implemented for other methods.
##'
##' @examples
##' pars <- c("p_side_effects_t1", "p_side_effects_t2")
##' evtest <- evppi(chemo_nb, chemo_pars, pars=pars, check=TRUE)
##' evtest
##' check_regression(evtest)
##'
##' ## with no interaction term
##' evtest2 <- evppi(chemo_nb, chemo_pars, pars=pars,
##'                  gam_formula="s(p_side_effects_t1)+s(p_side_effects_t2)",
##'                  check=TRUE)
##' evtest2
##' check_regression(evtest2)
##'
##' ## doesn't make much difference to the estimate
##' ## fit is OK in either case
##'
##' @export
check_regression <- function(x, pars=NULL, n=NULL, comparison=1,
                             outcome="costs", plot=TRUE){
  if (inherits(x, "evppi")) {
    if (is.null(pars)) pars <- x$pars[1]
    if (!(pars %in% x$pars)) stop(sprintf("parameter `%s` not found", pars))
    ## One regression method per parameter group; pick the one for `pars`
    method <- attr(x, "methods")[match(pars, unique(x$pars))]
  } else if (inherits(x, "evsi")){
    ## FIX: the original assigned the default to `pars` instead of `n`,
    ## so the validity check below failed whenever `n` was unset, and
    ## `pars` was left NULL whenever `n` was supplied.
    if (is.null(n)) n <- x$n[1]
    if (!(n %in% x$n)) stop(sprintf("sample size `%s` not found", n))
    ## For EVSI objects the saved models are indexed by sample size
    pars <- as.character(n)
    method <- attr(x,"method")
  }
  else stop("`x` should be an object returned by evppi() or evsi()")

  if (!(method %in% npreg_methods)){
    ## FIX: previously this message was followed by a call to
    ## check_stats_fn(mod) with `mod` undefined, causing an error.
    message("`check_regression` is only applicable when method=\"gam\", \"earth\", \"gp\" or \"inla\"")
    return(invisible(NULL))
  }

  cea <- (attr(x, "outputs") == "cea")
  mods <- attr(x, "models")
  if (is.null(mods))
    stop("evppi() or evsi() should be run with `check=TRUE` to enable regression checks")
  ## In CEA format there is one model per (parameter, outcome, comparison);
  ## in net-benefit format one per (parameter, comparison)
  ncomp <- if (cea) length(mods[[1]][[1]]) else length(mods[[1]])
  if (!(comparison %in% 1:ncomp))
    stop(sprintf("`comparison` should be a positive integer <= %s", ncomp))
  if (cea){
    if (!(outcome %in% c("costs","effects")))
      stop("`outcome` should be \"costs\" or \"effects\"")
    outcome <- if (outcome=="costs") "c" else "e"
    mod <- mods[[pars]][[outcome]][[comparison]]
  } else {
    mod <- mods[[pars]][[comparison]]
  }
  if (plot) {
    check_plot_default(mod)
  }
  ## Dispatch to the method-specific numerical summary, e.g.
  ## check_stats_gam(), check_stats_earth()
  check_stats_fn <- get(sprintf("check_stats_%s", method))
  check_stats_fn(mod)
}

## Should work for any model for which fitted() and residuals() works.
## Default diagnostic plot for a fitted regression model: a histogram of
## the residuals alongside a residuals-vs-fitted scatterplot.  Works for
## any model class with fitted() and residuals() methods.
check_plot_default <- function(mod){
  ## FIX: check the raw extractor results before coercion.  The original
  ## tested is.numeric() *after* as.numeric(), which always succeeds, so
  ## the warnings were unreachable.
  fit <- fitted(mod)
  res <- residuals(mod)
  if (is.null(fit) || length(fit) == 0) {
    warning("fitted() does not work on regression model object, so can't produce diagnostic plots")
    return(invisible(NULL))
  }
  if (is.null(res) || length(res) == 0) {
    warning("residuals() does not work on regression model object, so can't produce diagnostic plots")
    return(invisible(NULL))
  }
  dat <- data.frame(fit = as.numeric(fit), res = as.numeric(res))
  ## Freedman-Diaconis binwidth.  FIX: fall back to ggplot's default
  ## binning when the IQR is zero, instead of passing binwidth=0 (error).
  bw <- 2 * stats::IQR(dat$res) / length(dat$res)^(1/3)
  if (!is.finite(bw) || bw <= 0) bw <- NULL
  p1 <- ggplot2::ggplot(dat, ggplot2::aes(x=res)) +
    ggplot2::geom_histogram(binwidth=bw) +
    ggplot2::xlab("Residuals") + ggplot2::ylab("Frequency")
  p2 <- ggplot2::ggplot(dat, ggplot2::aes(x=fit, y=res)) +
    ggplot2::geom_point() +
    ggplot2::xlab("Fitted values") + ggplot2::ylab("Residuals")
  gridExtra::grid.arrange(p1, p2, nrow=1)
}
/scratch/gouwar.j/cran-all/cranData/voi/R/check_regression.R
## Roxygen stubs documenting the example datasets bundled with the
## package.  The shared help text for all of these objects is defined
## under the `chemo_model` topic (via @rdname); `@format NULL`
## suppresses the auto-generated "Format" section for those entries.

##' @rdname chemo_model
##' @format NULL
"chemo_cea"

##' @rdname chemo_model
##' @format NULL
"chemo_nb"

##' @rdname chemo_model
##' @format NULL
"chemo_pars"

##' @rdname chemo_model
##' @format NULL
"chemo_cea_501"

##' @rdname chemo_model
"chemo_constants"

##' @rdname chemo_model
"chemo_evsi_or"
/scratch/gouwar.j/cran-all/cranData/voi/R/data.R
##' Expected net benefit of sampling
##'
##' Calculates the expected net benefit of sampling for a typical study to inform
##' a health economic evaluation, given estimates of the per-person expected
##' value of sample information, decision population size and study setup and
##' per-participant costs.  The optimal sample size for each willingness-to-pay,
##' population size and time horizon is also determined.
##'
##' \code{pop}, \code{time} and \code{dis} may be supplied as vectors
##' of different lengths.  In that case, the ENBS is calculated for all
##' possible combinations of the values in these vectors.
##'
##' @param evsi Data frame giving estimates of the expected value of sample
##'   information, as returned by \code{\link{evsi}}.  This may contain
##'   multiple estimates, one for each sample size and willingness to pay.
##'
##' @param costs_setup Setup costs of the study.  This can either be a constant,
##'   or a vector of two elements giving a 95% credible interval (with mean
##'   defined by the midpoint), or a vector of three elements assumed to define
##'   the mean and 95% credible interval.
##'
##' @param costs_pp Per-participant costs of the study, supplied in the same
##'   format as \code{costs_setup}.
##'
##' @param pcut Cut-off probability which defines a "near-optimal" sample size.
##'   The minimum and maximum sample size for which the ENBS is within
##'   \code{pcut} (by default 5%) of its maximum value will be determined.
##'
##' @param smooth If \code{TRUE}, then the maximum ENBS is determined after
##'   fitting a nonparametric regression to the data frame \code{x}, which
##'   estimates and smooths the ENBS for every integer sample size in the range
##'   of \code{x$n}.  The regression is done using the default settings of
##'   \code{\link[mgcv]{gam}} from the \pkg{mgcv} package.
##'
##'   If this is \code{FALSE}, then no smoothing or interpolation is done, and
##'   the maximum is determined by searching over the values supplied in
##'   \code{x}.
##'
##' @param smooth_df Basis dimension for the smooth regression.  Passed as the
##'   `k` argument to the `s()` term in \code{\link[mgcv]{gam}}.  Defaults to
##'   6, or the number of unique sample sizes minus 1 if this is lower.  Set
##'   to a higher number if you think the smoother does not capture the
##'   relation of ENBS to sample size accurately enough.
##'
##' @inheritParams pop_voi
##'
##' @return Data frame with components \code{enbs} giving the ENBS, and
##'   \code{sd} giving the corresponding standard deviation.  The rows of the
##'   data frame correspond to the rows of \code{evsi}, and any \code{n} and
##'   \code{k} are inherited from \code{evsi}.  Additional columns include:
##'
##'   \code{pce}: the probability that the study is cost-effective, i.e. that
##'   the ENBS is positive, obtained from a normal distribution defined by the
##'   estimate and standard deviation.
##'
##'   \code{enbsmax}: The maximum ENBS for each willingness-to-pay \code{k}.
##'
##'   \code{nmax}: The sample size \code{n} at which this maximum is achieved.
##'
##'   A second data frame is returned as the \code{"enbsmax"} attribute.
##'   This has one row per willingness-to-pay (`k`), giving the optimal
##'   ENBS (`enbsmax`) the optimal sample size (`nmax`) and an interval
##'   estimate for the optimal sample size (`nlower` to `nupper`).
##'
##'   If \code{pop}, \code{time} or \code{dis} were supplied as vectors
##'   of more than one element, then additional columns will be returned
##'   in these data frames to identify the population, time or discount
##'   rate for each ENBS calculation.  An index \code{ind} is also returned
##'   to identify the unique combination that each row refers to.
##'
##' @references Value of Information for Healthcare Decision Making
##'   (CRC Press, eds. Heath, Kunst and Jackson: forthcoming)
##'
##' @export
enbs <- function(evsi, costs_setup, costs_pp, pop, time, dis=0.035,
                 smooth=FALSE, smooth_df=NULL, pcut=0.05){
  costs_setup <- costs_elic(costs_setup)
  costs_pp <- costs_elic(costs_pp)

  ## Handle vectorised population size, time horizon and discount:
  ## replicate the EVSI rows over every (pop, time, dis) combination
  pdt <- expand.grid(pop=pop, time=time, dis=dis)
  npdt <- nrow(pdt)
  pdt$ind <- 1:nrow(pdt)
  pdt <- pdt[rep(pdt$ind, each=nrow(evsi)),]
  evsi <- evsi[rep(1:nrow(evsi), npdt),]
  for (i in c("pop","time","dis")) evsi[[i]] <- pdt[[i]]

  ## Calculate population EVSI, costs, ENBS, and their SEs.
  ## FIX: use the per-row expanded discount rates evsi$dis; the original
  ## passed the raw `dis` argument, which was recycled incorrectly when
  ## dis was a vector of more than one element.
  pop_evsi <- pop_voi(evsi$evsi, evsi$pop, evsi$time, evsi$dis)
  costs <- costs_setup["mean"] + evsi$n * costs_pp["mean"]
  enbs <- pop_evsi - costs
  evsi_sd <- if (is.null(evsi$sd)) 0 else evsi$sd
  costs_sd <- sqrt(costs_setup["sd"]^2 + evsi$n^2 * costs_pp["sd"]^2)
  pop_mult <- (pop_evsi / evsi$evsi)^2
  ## NOTE(review): pop_mult is already a squared ratio, so
  ## (pop_mult*evsi_sd)^2 scales the EVSI variance by the 4th power of
  ## the population multiplier.  Confirm against the intended variance
  ## formula Var(c*X) = c^2 Var(X) before relying on these SDs.
  enbs_sd <- sqrt((pop_mult*evsi_sd)^2 + costs_sd^2)
  enbs <- data.frame(n=evsi$n, k=evsi$k, enbs=enbs, sd=enbs_sd,
                     pce = pnorm(0, enbs, enbs_sd, lower.tail=FALSE))

  ## Unique combinations of WTP, population, time and discount to optimise for
  evsi$ind <- interaction(evsi$k, evsi$pop, evsi$time, evsi$dis)
  evsi$ind <- enbs$ind <- match(evsi$ind, unique(evsi$ind))
  ind_lookup <- evsi[,c("ind","k","pop","time","dis")]
  ind_lookup <- ind_lookup[!duplicated(ind_lookup$ind),]

  ## Determine the optimal sample size for each of these
  maxlist <- lapply(split(enbs, enbs$ind), enbs_opt, pcut=pcut,
                    smooth=smooth, smooth_df=smooth_df)
  enbsmax <- do.call(rbind, maxlist)
  rownames(enbsmax) <- NULL
  ## Only label combinations by quantities that actually vary
  has_combs <- 0
  for (i in c("k", "pop", "time", "dis")){
    if (length(unique(ind_lookup[[i]])) > 1){
      enbsmax[[i]] <- ind_lookup[[i]][match(enbsmax$ind, ind_lookup$ind)]
      enbs[[i]] <- ind_lookup[[i]][match(enbs$ind, ind_lookup$ind)]
      has_combs <- has_combs + 1
    }
  }
  for (i in c("enbsmax","nmax","nlower","nupper"))
    enbs[[i]] <- enbsmax[[i]][match(enbs$ind, enbsmax$ind)]
  if (has_combs > 1) enbsmax$ind <- enbs$ind <- NULL
  attr(enbs,"enbsmax") <- enbsmax
  enbs
}

##' Determine the optimum sample size in an analysis of the expected net benefit
##' of sampling
##'
##' The optimum sample size for a given willingness to pay is determined either
##' by a simple search over the supplied ENBS estimates for different sample
##' sizes, or by a regression and interpolation method.
##'
##' @param x Data frame containing a set of ENBS estimates for
##'   different sample sizes, which will be optimised over.  Usually
##'   this is for a common willingness-to-pay.  The required components
##'   are \code{enbs} and \code{n}.
##'
##' @param keep_preds If \code{TRUE} and \code{smooth=TRUE} then the data frame of
##'   predictions from the smooth regression model is stored in the \code{"preds"}
##'   attribute of the result.
##'
##' @inheritParams enbs
##'
##' @return A data frame with one row, and the following columns:
##'
##' \code{ind}: An integer index identifying, e.g. the willingness to pay and other common characteristics of the ENBS estimates (e.g. incident population size, decision time horizon). This is copied from \code{x$ind}.
##' \code{enbsmax}: the maximum ENBS
##'
##' \code{nmax}: the sample size at which this maximum is achieved
##'
##' \code{nlower}: the lowest sample size for which the ENBS is within
##' \code{pcut} (default 5%) of its maximum value
##'
##' \code{nupper}: the corresponding highest sample size
##'
##' @export
enbs_opt <- function(x, pcut=0.05, smooth=FALSE, smooth_df=NULL, keep_preds=FALSE){
  if (smooth) {
    ## Interpolate the ENBS curve over every integer sample size in the
    ## observed range, via a GAM regression of ENBS on n
    nrange <- seq(min(x$n), max(x$n), by=1)
    ## Basis dimension: default 6, capped below the number of distinct n
    if (is.null(smooth_df)) smooth_df <- min(6, length(unique(x$n)) - 1)
    mod <- mgcv::gam(enbs~s(n, k=smooth_df), data=x)
    enbs_smooth <- predict(mod, newdata=list(n=nrange))
    ## Replace the raw estimates with the smoothed curve, carrying over
    ## the (constant within-group) index if one was supplied
    x <- data.frame(n=nrange, enbs=enbs_smooth,
                    ind = if (is.null(x$ind)) 1 else x$ind[1] )
  }
  maxind <- which.max(x$enbs)
  x$enbsmax <- x$enbs[maxind]
  x$nmax <- x$n[maxind]
  ## Sample sizes whose ENBS is within pcut of the maximum ("near-optimal");
  ## abs() keeps the tolerance band on the correct side when enbsmax < 0
  near_max <- x$n[x$enbs > x$enbsmax - abs(pcut*x$enbsmax)]
  x$nlower <- min(near_max)
  x$nupper <- max(near_max)
  ## Return the single row at the maximum, restricted to the summary columns
  res <- x[maxind,,drop=FALSE]
  res <- res[,c("ind","enbsmax","nmax","nlower","nupper"),drop=FALSE]
  if (keep_preds) attr(res, "preds") <- x
  res
}

##' Population expected value of information
##'
##' Convert per-person expected value of information to the population
##' expected value of information, given a discount rate over some
##' time horizon.
##'
##' Calculated as \code{voi*pop/dis*(1 - exp(-dis*time))}, or \code{voi*pop}
##' if the discount rate is zero.  This is a continuous-time variant
##' of the typical discrete-time discounting formula.
##'
##' Any arguments may be supplied as vectors, in which case, all
##' arguments are replicated to the length of the longest argument.
##'
##' @param voi Vector of estimates of any per-person value of information
##'   measure, e.g. the \code{evsi} column of the data frame returned by
##'   \code{\link{evsi}} or the correspondingly-named columns of the
##'   data frames returned by \code{\link{evppi}} or \code{\link{evpi}}.
##'
##' @param pop Size of the population who would be affected by the decision.
##' @param time Time horizon over which discounting will be applied.
##'
##' @param dis Discount rate used when converting per-person to population EVSI.
##'
##' @return A vector of population VoI estimates.
##'
##' @export
pop_voi <- function(voi, pop, time, dis=0.035){
  ## Recycle every argument to the length of the longest one, so that
  ## any mixture of scalar and vector inputs is accepted
  len <- max(length(voi), length(pop), length(time), length(dis))
  args <- lapply(list(voi=voi, pop=pop, time=time, dis=dis),
                 rep, length.out=len)
  undiscounted <- args$voi * args$pop
  ## Continuous-time discounting; elementwise fallback to the
  ## undiscounted value wherever the discount rate is zero
  ifelse(args$dis == 0,
         undiscounted,
         undiscounted / args$dis * (1 - exp(-args$dis * args$time)))
}

## Turn an elicited study cost (a constant, a 95% credible interval, or
## mean plus 95% credible interval) into mean/SD summaries on the
## natural and log scales.  The SD is derived from the interval width
## assuming normality (interval spans ~4 SDs).
costs_elic <- function(costs){
  if (!is.numeric(costs)) stop("costs should be numeric")
  nc <- length(costs)
  if (!nc %in% c(1,2,3)) stop("length of costs argument should be 1, 2 or 3")
  costs <- sort(costs)
  if (nc == 1) {
    ## A single value: treated as known exactly
    cmean <- costs
    csd <- clsd <- 0
  } else {
    lo <- costs[1]
    hi <- costs[nc]
    ## Two values: interval with mean at the midpoint.
    ## Three values: middle value is the mean.
    cmean <- if (nc == 2) mean(costs) else costs[2]
    csd <- (hi - lo) / 4
    clsd <- (log(hi) - log(lo)) / 4
  }
  ## log versions currently unused.
  c(mean=cmean, sd=csd, lmean=log(cmean), lsd=clsd)
}
/scratch/gouwar.j/cran-all/cranData/voi/R/enbs.R
##' Calculate the expected value of perfect information from a decision model
##'
##' Calculate the expected value of perfect information from a decision model
##' using standard Monte Carlo simulation.
##'
##' @inheritParams evppi
##'
##' @return The expected value of perfect information, either as a single
##'   value, or a data frame indicating the value for each willingness-to-pay.
##'
##' @export
evpi <- function(outputs, nsim=NULL) {
  outputs <- check_outputs(outputs)
  ## Default to using every available simulation
  if (is.null(nsim))
    nsim <- if (inherits(outputs, "nb")) nrow(outputs) else nrow(outputs$e)
  outputs <- subset_outputs(outputs, nsim)
  if (inherits(outputs, "nb")){
    ## EVPI = E[max over options] - max over options of E[.]
    res <- mean(apply(outputs, 1, max)) - max(colMeans(outputs))
  } else if (inherits(outputs, "cea")){
    ## One EVPI per willingness-to-pay value, with net benefit k*e - c
    nwtp <- length(outputs$k)
    ## FIX: allocate one slot per willingness-to-pay; the original
    ## numeric(length(nwtp)) allocated length 1 and grew silently.
    res <- numeric(nwtp)
    for (i in seq_len(nwtp)){
      nb <- outputs$e * outputs$k[i] - outputs$c
      res[i] <- mean(apply(nb, 1, max)) - max(colMeans(nb))
    }
    res <- data.frame(k = outputs$k, evpi = res)
  }
  res
}
/scratch/gouwar.j/cran-all/cranData/voi/R/evpi.R
##' Calculate the expected value of partial perfect information from a decision-analytic model ##' ##' Calculate the expected value of partial perfect information from a decision-analytic model ##' ##' @param outputs This could take one of two forms ##' ##' "net benefit" form: a matrix or data frame of samples from the uncertainty ##' distribution of the expected net benefit. The number of rows should equal ##' the number of samples, and the number of columns should equal the number ##' of decision options. ##' ##' "cost-effectiveness analysis" form: a list with the following named ##' components: ##' ##' \code{"c"}: a matrix or data frame of samples from the distribution of ##' costs. There should be one column for each decision option. ##' ##' \code{"e"}: a matrix or data frame of samples from the distribution of ##' effects, likewise. ##' ##' \code{"k"}: a vector of willingness-to-pay values. ##' ##' Objects of class \code{"bcea"}, as created by the \pkg{BCEA} package, are in ##' this "cost-effectiveness analysis" format, therefore they may be supplied as ##' the \code{outputs} argument. ##' ##' Users of \pkg{heemod} can create an object of this form, given an object ##' produced by \code{run_psa} (\code{obj}, say), with \code{\link{import_heemod_outputs}}. ##' ##' If \code{outputs} is a matrix or data frame, it is assumed to be of "net ##' benefit" form. Otherwise if it is a list, it is assumed to be of "cost ##' effectiveness analysis" form. ##' ##' @param inputs Matrix or data frame of samples from the uncertainty ##' distribution of the input parameters of the decision model. The number ##' of columns should equal the number of parameters, and the columns should ##' be named. This should have the same number of rows as there are samples ##' in \code{outputs}, and each row of the samples in \code{outputs} should ##' give the model output evaluated at the corresponding parameters. 
##' ##' Users of \pkg{heemod} can create an object of this form, given an object ##' produced by \code{run_psa} (\code{obj}, say), with \code{\link{import_heemod_inputs}}. ##' ##' @param pars Either a character vector, or a list of character vectors. ##' ##' If a character vector is supplied, then a single, joint EVPPI calculation is done with ##' for the parameters named in this vector. ##' ##' If a list of character vectors is supplied, then multiple EVPPI calculations are ##' performed, one for each list component defined in the above ##' vector form. ##' ##' \code{pars} must be specified if \code{inputs} is a matrix or data frame. ##' This should then correspond to particular columns of \code{inputs}. If ##' \code{inputs} is a vector, this is assumed to define the single parameter ##' of interest, and then \code{pars} is not required. ##' ##' @param method Character string indicating the calculation method. If one ##' string is supplied, this is used for all calculations. A vector of different strings ##' can be supplied if a different method is desired for different list components ##' of \code{pars}. ##' ##' The default methods are based on nonparametric regression: ##' ##' \code{"gam"} for a generalized additive model implemented in the \code{\link{gam}} ##' function from the \pkg{mgcv} package. This is the default method for ##' calculating the EVPPI of 4 or fewer parameters. ##' ##' \code{"gp"} for a Gaussian process regression, as described by Strong et al. ##' (2014) and implemented in the \pkg{SAVI} package ##' (\url{https://github.com/Sheffield-Accelerated-VoI/SAVI}). This is the default method for calculating the EVPPI ##' of more than 4 parameters. ##' ##' \code{"inla"} for an INLA/SPDE Gaussian process regression method, from ##' Heath et al. (2016). ##' ##' \code{"bart"} for Bayesian additive regression trees, using the \pkg{dbarts} package. ##' Particularly suited for joint EVPPI of many parameters. 
##' ##' \code{"earth"} for a multivariate adaptive regression spline with the ##' \pkg{earth} package (Milborrow, 2019). ##' ##' \code{"so"} for the method of Strong and Oakley (2013). Only supported ##' for single parameter EVPPI. ##' ##' \code{"sal"} for the method of Sadatsafavi et al. (2013). Only supported ##' for single parameter EVPPI. ##' ##' @param se If this is \code{TRUE}, calculate a standard error for the EVPPI ##' if possible. Currently only supported for methods \code{"gam"}, \code{"earth"} and ##' \code{method="bart"}. (In the latter method it is more correctly called ##' a posterior standard deviation). These represent uncertainty about the ##' parameters of the fitted regression model, and will naturally be lower when ##' more simulations from the decision model are used to fit it. They do not ##' represent uncertainty about the structure of the regression model, ##' ##' @param B Number of parameter replicates for calculating the standard error. ##' Only applicable to \code{method="gam"}. For \code{method="bart"} the ##' analogous quantity is the number of MCMC samples, which is controlled by ##' the \code{ndpost} argument to \code{\link[dbarts]{bart}}, which can be ##' passed as an argument to \code{\link{evppi}}. ##' ##' @param nsim Number of simulations from the decision model to use ##' for calculating EVPPI. The first \code{nsim} rows of the ##' objects in \code{inputs} and \code{outputs} are used. ##' ##' @param verbose If \code{TRUE}, then messages are printed ##' describing each step of the calculation, if the method supplies ##' these. Can be useful to see the progress of slow calculations. ##' ##' @param check If \code{TRUE}, then extra information about the estimation ##' is saved inside the object that this function returns. This currently ##' only applies to the regression-based methods \code{"gam"} and \code{"earth"} ##' where the fitted regression model objects are saved. 
This allows use ##' of the \code{\link{check_regression}} function, which produces some ##' diagnostic checks of the regression models. ##' ##' @param ... Other arguments to control specific methods. ##' ##' For \code{method="gam"}, the following arguments can be supplied: ##' ##' * \code{gam_formula}: a character string giving the right hand side of the ##' formula supplied to the \code{gam()} function. By default, this is a tensor ##' product of all the parameters of interest, e.g. if \code{pars = ##' c("pi","rho")}, then \code{gam_formula} defaults to \code{t(pi, rho, ##' bs="cr")}. The option \code{bs="cr"} indicates a cubic spline regression ##' basis, which is more computationally efficient than the default "thin plate" ##' basis. If there are four or more parameters of interest, then the ##' additional argument \code{k=4} is supplied to \code{te()}, specifying a ##' four-dimensional basis, which is currently the default in the SAVI package. ##' ##' If there are spaces in the variable names in \code{inputs}, then these should ##' be converted to underscores before forming an explicit \code{gam_formula}. ##' ##' ##' For \code{method="gp"}, the following arguments can be supplied: ##' ##' * \code{gp_hyper_n}: number of samples to use to estimate the hyperparameters ##' in the Gaussian process regression method. By default, this is the minimum ##' of the following three quantities: 30 times the number of parameters of ##' interest, 250, and the number of simulations being used for calculating ##' EVPPI. ##' ##' * \code{maxSample}: Maximum sample size to employ for \code{method="gp"}. Only ##' increase this from the default 5000 if your computer has sufficent memory to ##' invert square matrices with this dimension. ##' ##' For \code{method="inla"}, the following arguments can be supplied, as described in detail in Baio, Berardi and Heath (2017): ##' ##' * \code{int.ord} (integer) maximum order of interaction terms to include in ##' the regression predictor, e.g. 
if \code{int.ord=k} then all k-way ##' interactions are used. Currently this applies to both effects and costs. ##' #' * \code{cutoff} (default 0.3) controls the #' density of the points inside the mesh in the spatial part of the mode. #' Acceptable values are typically in #' the interval (0.1,0.5), with lower values implying more points (and thus #' better approximation and greatercomputational time). #' #' * \code{convex.inner} (default = -0.4) and \code{convex.outer} (default = -0.7) #' control the boundaries for the mesh. These should be negative values and can #' be decreased (say to -0.7 and -1, respectively) to increase the distance #' between the points and the outer boundary, which also increases precision and #' computational time. #' #' * \code{robust}. if \code{TRUE} then INLA will use a t prior distribution for #' the coefficients of the linear predictor, rather than the default normal distribution. #' #' * \code{h.value} (default=0.00005) controls the accuracy of the INLA #' grid-search for the estimation of the hyperparameters. Lower values imply a #' more refined search (and hence better accuracy), at the expense of #' computational speed. #' #' * \code{plot_inla_mesh} (default \code{FALSE}) Produce a plot of the mesh. #' #' * \code{max.edge} Largest allowed triangle edge length when constructing the #' mesh, passed to \code{\link[INLA]{inla.mesh.2d}}. #' #' * \code{pfc_struc} Variance structure to pass to \code{pfc} in the \pkg{ldr} #' package for principal fitted components. The default \code{"AIC"} selects the #' one that fits best given two basis terms. Change this to, e.g. \code{"iso"}, #' \code{"aniso"} or \code{"unstr"} if an "Error in eigen..." is obtained. #' #' For any of the nonparametric regression methods: #' #' * \code{ref} The reference decision option used to define the #' incremental net benefit, cost or effects before performing #' nonparametric regression. 
#' Either an integer column number, or the
#' name of the column from \code{outputs}.
#'
##' For \code{method="so"}:
##'
##' * \code{n.blocks} Number of blocks to split the sample into. Required.
##'
##' For \code{method="sal"}:
##'
##' * \code{n.seps} Number of separators (default 1).
#'
#' @return A data frame with a column \code{pars}, indicating the parameter(s),
#' and a column \code{evppi}, giving the corresponding EVPPI.
#'
#' If \code{outputs} is of "cost-effectiveness analysis" form, so that there is
#' one EVPPI per willingness-to-pay value, then a column \code{k} identifies the
#' willingness-to-pay.
#'
#' If standard errors are requested, then the standard errors are returned in
#' the column \code{se}.
#'
##' @references
##'
##' Strong, M., Oakley, J. E., & Brennan, A. (2014). Estimating multiparameter
##' partial expected value of perfect information from a probabilistic
##' sensitivity analysis sample: a nonparametric regression approach. Medical
##' Decision Making, 34(3), 311-326.
##'
##' Heath, A., Manolopoulou, I., & Baio, G. (2016). Estimating the expected
##' value of partial perfect information in health economic evaluations using
##' integrated nested Laplace approximation. Statistics in Medicine, 35(23),
##' 4264-4280.
##'
##' Baio, G., Berardi, A., & Heath, A. (2017). Bayesian cost-effectiveness
##' analysis with the R package BCEA. New York: Springer.
##'
##' Milborrow, S. (2019) earth: Multivariate Adaptive Regression Splines. R
##' package version 5.1.2. Derived from mda:mars by Trevor Hastie and Rob
##' Tibshirani. Uses Alan Miller's Fortran utilities with Thomas Lumley's leaps
##' wrapper. https://CRAN.R-project.org/package=earth.
##'
##' Strong, M., & Oakley, J. E. (2013). An efficient method for computing
##' single-parameter partial expected value of perfect information. Medical
##' Decision Making, 33(6), 755-766.
##'
##' Sadatsafavi, M., Bansback, N., Zafari, Z., Najafzadeh, M., & Marra, C.
##' (2013).
##' Need for speed: an efficient algorithm for calculation of
##' single-parameter expected value of partial perfect information. Value in
##' Health, 16(2), 438-448.
##'
##'
##' @export
evppi <- function(outputs,
                  inputs,
                  pars=NULL,
                  method=NULL,
                  se=FALSE,
                  B=1000,
                  nsim=NULL,
                  verbose=FALSE,
                  check=FALSE,
                  ...)
{
  ## Standardise `inputs` to a data frame, then validate `outputs` against it
  inputs <- check_inputs(inputs, iname=deparse(substitute(inputs)))
  outputs <- check_outputs(outputs, inputs)

  ## `pars` may be a character vector (a single EVPPI calculation) or a list
  ## of vectors (one calculation per element).  Unnamed elements are named
  ## after the parameters they contain, comma-separated.
  if (!is.list(pars)) pars <- list(pars)
  for (i in seq_along(pars)){
    pars[[i]] <- check_pars(pars[[i]], inputs)
    if (is.null(names(pars)) || identical(names(pars)[i], "") || is.na(names(pars)[i]))
      names(pars)[i] <- paste(pars[[i]], collapse=",")
  }
  npars <- length(pars)

  ## Optionally restrict the calculation to the first `nsim` simulations
  if (is.null(nsim)) nsim <- nrow(inputs)
  outputs <- subset_outputs(outputs, nsim)
  inputs <- inputs[1:nsim,,drop=FALSE]

  ## One method per element of `pars`: a user-supplied `method` is recycled
  ## to length `npars`; otherwise a default is chosen per parameter set
  if (is.null(method)) methods <- sapply(pars, default_evppi_method)
  if (length(method) > 0) methods <- rep(method, length.out=npars)

  eres <- vector(npars, mode="list")
  for (i in seq_len(npars)){
    ## Dispatch to the calculation function for the requested method
    if (methods[i] %in% npreg_methods) {
      evppi_fn <- evppi_npreg
    } else if (methods[i]=="so") {
      evppi_fn <- evppi_so
    } else if (methods[i]=="sal") {
      evppi_fn <- evppi_sal
    } else stop("Other methods not implemented yet")
    ## Drop constant or linearly-dependent input columns first.  If nothing
    ## is left, `ip$res` holds the trivial result (EVPPI of zero) to use
    ## directly instead of running the calculation.
    ip <- remove_constant_linear_cols(inputs[,pars[[i]],drop=FALSE], pars[[i]])
    if (!is.null(ip$res))
      eres[[i]] <- ip$res
    else
      eres[[i]] <- evppi_fn(outputs=outputs, inputs=ip$inputs, pars=ip$pars,
                            method=methods[i], se=se, B=B, verbose=verbose, ...)
  }

  ## Combine the per-parameter results, labelling rows by parameter set name
  ## (repeated over willingness-to-pay values for "cea" outputs)
  res <- do.call("rbind", eres)
  nwtp <- if (inherits(outputs, "nb")) 1 else length(outputs$k)
  res <- cbind(pars=rep(names(pars), each = nwtp), res)
  if (check){
    ## Keep the fitted regression models so check_regression() can be used
    attr(res, "models") <- lapply(eres, function(x)attr(x, "models"))
    names(attr(res, "models")) <- names(pars)
  }
  attr(res, "methods") <- methods
  attr(res, "outputs") <- class(outputs)[1]
  class(res) <- c("evppi", attr(res,"class"))
  res
}

## could do fancier S3 stuff with implementing subset operator, but too much
## faff

## Restrict `outputs` to its first `nsim` simulations, preserving its class
subset_outputs <- function(outputs, ...){
  UseMethod("subset_outputs", outputs)
}

subset_outputs.nb <- function(outputs, nsim, ...){
  outputs <- outputs[1:nsim,,drop=FALSE]
  class(outputs) <- c("nb", attr(outputs, "class"))
  outputs
}

subset_outputs.cea <- function(outputs, nsim, ...){
  outputs$c <- outputs$c[1:nsim,,drop=FALSE]
  outputs$e <- outputs$e[1:nsim,,drop=FALSE]
  class(outputs) <- c("cea", attr(outputs, "class"))
  outputs
}

## Default EVPPI calculation method: nonparametric regression
default_evppi_method <- function(pars){
  default_npreg_method(pars)
}

## GAM regression for up to four parameters of interest, Gaussian process above
default_npreg_method <- function(pars){
  if (length(pars) <= 4) "gam" else "gp"
}

## Coerce `inputs` to a data frame.  A bare numeric vector is treated as a
## single input, named after the expression the caller supplied (`iname`,
## with any spaces removed).
check_inputs <- function(inputs, iname=NULL){
  if (is.vector(inputs) && is.numeric(inputs)) {
    inputs <- data.frame(input = inputs)
    names(inputs) <- gsub(" ", "", iname)
  }
  if (!is.matrix(inputs) && !is.data.frame(inputs)){
    stop("`inputs` should be a numeric vector, matrix or data frame")
  }
  as.data.frame(inputs)
}

## Validate one matrix of outputs (net benefits, costs or effects): at least
## two decision options, and one row per simulation in `inputs`
check_outputs_matrix <- function(outputs, inputs, name){
  if (ncol(outputs) < 2)
    stop(sprintf("`%s` should have two or more columns", name)) # or else voi always zero
  if (nrow(outputs) != nrow(inputs))
    stop(sprintf("Number of rows of `%s` (%s) should equal the number of rows of `inputs` (%s)",
                 name, nrow(outputs), nrow(inputs)))
}

## Validate `outputs` and tag it with class "nb" (a matrix/data frame of net
## benefits) or "cea" (a list with components "c", "e" and "k").  Checks
## against `inputs` are skipped when `inputs` is NULL.
check_outputs <- function(outputs, inputs=NULL){
  if (is.matrix(outputs) || is.data.frame(outputs)){
    class(outputs) <- c("nb", attr(outputs, "class"))
    if (!is.null(inputs)) # check not required for EVPI
      check_outputs_matrix(outputs, inputs, "outputs")
  }
  else if (is.list(outputs)){
    class(outputs) <- c("cea", attr(outputs, "class"))
    required_names <- c("c","e","k")
    for (i in required_names){
      if (!(i %in% names(outputs)))
        stop(sprintf("component named `(%s)` not found in `outputs` list", i))
    }
    if (!is.null(inputs)){
      check_outputs_matrix(outputs$c, inputs, "outputs$c")
      check_outputs_matrix(outputs$e, inputs, "outputs$e")
    }
    check_wtp(outputs$k, "outputs$k")
  }
  else stop("`outputs` should be a matrix, data frame or list, see help(evppi)")
  outputs
}

## Willingness-to-pay values must be numeric
check_wtp <- function(k, name){
  if (!is.numeric(k)) stop(sprintf("%s should be numeric", name))
}

## Validate one set of parameters of interest: a character vector naming
## columns of `inputs`.  NULL is allowed only when `inputs` has exactly one
## column, which is then taken as the parameter of interest.
check_pars <- function(pars, inputs, evppi=TRUE){
  if (is.null(pars) && evppi){
    if (ncol(inputs)==1)
      pars <- colnames(inputs)
    else stop("`pars` should be specified if there are two or more parameters in `inputs`")
  }
  validate_char(pars, "pars")
  badpars <- pars[!(pars %in% colnames(inputs))]
  if (length(badpars)>0){
    stop(sprintf("parameters of interest `%s` not found in columns of `inputs`",
                 paste(badpars,collapse=",")))
  }
  pars
}

## Error unless `x` is NULL or a character vector
validate_char <- function(x,name=NULL){
  if (is.null(name)) name <- deparse(substitute(x))
  if (!is.null(x) && !is.character(x))
    stop(sprintf("`%s` should be a character vector",name))
}

## Make parameter names safe to use in a `gam` formula: replace spaces with
## underscores, and refuse names that clash with built-in R constants
clean_pars <- function(pars) {
  parsc <- gsub(" ", "_", pars)
  r_specials <- c("letters","month.abb","month.name","pi")
  for (i in seq_along(r_specials)){
    inds <- parsc == r_specials[i]
    if (any(inds)){
      for (j in which(inds)){
        stop(sprintf("Parameter name `%s` is also the name of a R internal constant. 
This should be changed to another name to allow the `gam` method for VoI calculation to be used", parsc[j]))
      }
    }
  }
  parsc
}

## Drop input columns that are constant, or linearly dependent on other
## columns, before nonparametric regression.  Returns a list with the reduced
## `inputs` (as a data frame), the corresponding subset of `pars`, and `res`,
## which is non-NULL (a trivial data frame with evppi = 0) only when every
## column was constant, so no regression is needed.
remove_constant_linear_cols <- function(inputs, pars){
  inputs <- as.matrix(inputs)
  p <- ncol(inputs)
  inds <- seq_len(p)
  ## Columns with zero sample variance are constant
  inds_const <- (apply(inputs, 2, var) == 0)
  pars_const <- pars[which(inds_const)]
  if (sum(inds_const) == p){
    res <- data.frame(evppi = 0)
  } else res <- NULL
  if (sum(inds_const) > 0){
    inds_drop <- inds[which(inds_const)]
    inputs <- inputs[, -inds_drop, drop=FALSE] # now with constants removed
    message(sprintf("Input parameters %s are constant",
                    paste(sprintf("\"%s\"", pars_const), collapse=",")))
  }
  ## Linear dependence check via QR rank: if removing a column leaves the
  ## rank unchanged relative to removing others, that column is a linear
  ## combination of the rest.  Drop such columns until ranks agree.
  rankifremoved <- sapply(1:NCOL(inputs), function (x) qr(inputs[, -x])$rank)
  pars_lin <- NULL
  while(length(unique(rankifremoved)) > 1) {
    linearCombs <- which(rankifremoved == max(rankifremoved))
    inds_drop <- max(linearCombs)
    pars_lin <- c(pars_lin, colnames(inputs)[inds_drop])
    inputs <- inputs[, -inds_drop, drop=FALSE]
    rankifremoved <- sapply(1:NCOL(inputs), function(x) qr(inputs[, -x])$rank)
  }
  if(qr(inputs)$rank == rankifremoved[1]) {
    inds_drop <- 1
    pars_lin <- c(pars_lin, colnames(inputs)[inds_drop])
    inputs <- inputs[, -inds_drop, drop=FALSE] # special case only lincomb left
  }
  if (length(pars_lin) > 0)
    message(sprintf("Ignoring input parameters %s that are linearly dependent on others",
                    paste(sprintf("\"%s\"", pars_lin), collapse=",")))
  pars_keep <- setdiff(pars, c(pars_const, pars_lin))
  list(inputs = as.data.frame(inputs), pars = pars_keep, res=res)
}
/scratch/gouwar.j/cran-all/cranData/voi/R/evppi.R