#' Create a wordsearch puzzle
#' @param words a vector of hidden words (character/vector)
#' @param clues a vector of word clues (optional; character/vector)
#' @param r number of rows
#' @param c number of columns
#' @param image path to an image that the resulting grid should look like; NULL for no shape
#'
#' @examples
#' # Example 1 ----
#' words <- c("dog", "cat", "horse", "frog", "cow", "fox")
#' ex1 <- wordsearch(words, r = 10, c = 10)
#' plot(ex1, solution = TRUE)
#'
#' # Example 2 ----
#' clues <- c("Bark", "Meow", "Neigh", "Ribbit", "Moo", "Nee Nee Nee")
#' ex2 <- wordsearch(words = words, clues = clues)
#' plot(ex2, solution = TRUE, title = "Animal Sounds", legend_size = 4)
#'
#' # Example 3 ----
#' math <- dplyr::tribble(
#' ~problem, ~solution,
#' "2 + 2", "four",
#' "5 + 3", "eight",
#' "9 - 4", "five",
#' "1 + 0", "one",
#' "2 + 1", "three",
#' "5 + 5", "ten",
#' "6 - 6", "zero"
#' )
#' ex3 <- wordsearch(words = math$solution, clues = math$problem)
#' plot(ex3, solution = TRUE, title = "Math is Fun")
#'
#' @return wordsearch object
#' @export
wordsearch <- function(words = c("finding", "needles", "inside", "haystacks"),
clues = words,
r = 10,
c = 10,
image = NULL) {
# create empty matrix
x <- matrix(NA, nrow = r, ncol = c)
# prepare the word list
words <- prepare_words(words)
# check conditions
# -- do not allow duplicates in word list
if (any(duplicated(words)))
stop("Must provide a set of words without duplicates.")
# -- remove words that won't fit
id <- nchar(words) <= max(c(r, c))
words <- words[id]
clues <- clues[id]
if (length(words) == 0) {
message("No words can be placed. Try a larger grid size or shorter words.")
return(NULL)
}
# generate shape file (if provided)
shape_matrix <- NULL
if (!is.null(image))
shape_matrix <- image_matrix(image, r, c)
# add words to the board (one-at-a-time)
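# add_word() returns the board unchanged when it can't place a word, so stop
# placing as soon as that happens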
for (i in seq_along(words)) {
x_new <- add_word(x, words[i], shape_matrix = shape_matrix)
if (identical(x, x_new))
break
x <- x_new
}
# update word list (based on what was placed)
# TODO -- Fix issue w/ duplicate words --------------------------------------
new_words <- unique(attr(x, "positions")$word)
message(paste0("Found positions for ", length(new_words), "/", length(words), " words."))
words <- new_words
# save solution
solution <- x
# fill remaining matrix with random letters
ids <- is.na(x)
if (!is.null(image))
ids <- ids & shape_matrix
x[ids] <- sample(LETTERS, sum(ids), replace = TRUE)
# convert to a 'wordsearch' class object
out <-
list(
search = x,
words = words,
clues = clues,
solution = solution,
image = image,
shape_matrix = shape_matrix
)
as_wordsearch(out)
}
# Constructors =============================================================
#' Assign an object to the `wordsearch` class
#' @param x an object containing wordsearch data
#' @return wordsearch object: a list with the following elements:
#' \describe{
#'   \item{search}{a matrix representation of the wordsearch, whose
#'     'positions' attribute holds a tibble representation of the solution}
#'   \item{words}{the hidden words (character/vector)}
#'   \item{clues}{the word clues (character/vector)}
#'   \item{solution}{a matrix representation of the wordsearch solution, whose
#'     'positions' attribute holds a tibble representation of the solution}
#'   \item{image}{the image used to shape the wordsearch (NULL if not provided)}
#'   \item{shape_matrix}{binary matrix representation of the shape (NULL if no image)}
#' }
#'
#' @export
as_wordsearch <- function(x) {
if (!is_wordsearch(x))
class(x) <- append("wordsearch", class(x))
x
}
#' Check if an object is of the `wordsearch` class
#' @param x an R object to check
#' @return logical/scalar
#' @export
is_wordsearch <- function(x) {
inherits(x, "wordsearch")
}
# Methods ===================================================================
#' Print details for a wordsearch puzzle
#' @param x wordsearch object (class: wordsearch)
#' @param ... additional printing args
#' @return wordsearch object
#' @export
print.wordsearch <- function(x, ...) {
cat(paste("Wordsearch\n"))
cat(paste("Rows:", nrow(x$search), "\n"))
cat(paste("Columns:", ncol(x$search), "\n"))
cat(paste("Hidden Words:", length(x$words), "\n"))
cat(paste("Clues:", ifelse(identical(prepare_words(x$words), prepare_words(x$clues)), "No", "Yes"), "\n"))
cat(paste("Custom Shape:", ifelse(is.null(x$shape_matrix), "No", "Yes"), "\n"))
invisible(x)
}
#' Draw a wordsearch puzzle
#' @param x wordsearch object (class: wordsearch)
#' @param solution show solution? (logical/scalar)
#' @param clues show clues? (logical/scalar)
#' @param title puzzle title (character/scalar)
#' @param puzzle_size letter size of puzzle; leave NULL to auto-size (numeric/scalar)
#' @param legend_size letter size of word list; leave NULL to auto-size (numeric/scalar)
#' @param ... additional plotting args
#' @import ggplot2
#' @return ggplot object
#' @export
plot.wordsearch <- function(x,
solution = FALSE,
clues = TRUE,
title = "",
puzzle_size = NULL,
legend_size = NULL,
...) {
ids <- expand.grid(i = 1:nrow(x$search), j = 1:ncol(x$search))
xt <-
purrr::map2_df(
ids$i,
ids$j,
~tibble::tibble(
i = .x,
j = .y,
value = x$search[.x, .y],
word = !is.na(x$solution[.x, .y]),
outline = ifelse(is.null(x$shape_matrix), TRUE, x$shape_matrix[.x, .y])
)
)
# draw wordsearch
g1 <- xt %>%
dplyr::filter(!is.na(.data$value)) %>%
ggplot2::ggplot()
if (is.null(puzzle_size)) {
g1 <- g1 +
ggfittext::geom_fit_text(
aes(x = .data$i, y = .data$j, label = .data$value),
grow = TRUE,
min.size = 0
)
} else {
g1 <- g1 +
ggplot2::geom_text(
aes(x = .data$i, y = .data$j, label = .data$value),
size = puzzle_size
)
}
g1 <- g1 +
ggplot2::scale_y_reverse() +
ggplot2::theme_void() +
ggplot2::theme(
aspect.ratio = ncol(x$search) / nrow(x$search)
)
# add solution (upon request)
if (solution) {
g1 <- g1 +
ggplot2::geom_line(
aes(x = .data$i, y = .data$j, group = .data$word),
color = "red",
data = attr(x$search, "positions")
)
}
# use custom shape
# -- if using a 'shape_matrix', add the outline
# -- otherwise, add a border
if (is.null(x$shape_matrix)) {
g1 <- g1 +
ggplot2::annotate("rect",
xmin = 0.5, ymin = 0.5,
xmax = max(xt$i) + 0.5, ymax = max(xt$j) + 0.5,
alpha = 0, color = "black"
)
} else {
g1 <- g1 +
ggplot2::geom_tile(
aes(x = .data$i, y = .data$j),
alpha = .1, fill = "gray", color = "gray",
data = dplyr::filter(xt, .data$outline)
)
}
# add title (upon request)
if (title != "") {
g1 <- g1 +
ggplot2::ggtitle(title) +
ggplot2::theme(
plot.title = element_text(hjust = 0.5, size = 24, face = "bold")
)
}
# TODO: add a background image
# draw word list; merge with puzzle
if (clues) {
tmp <- dplyr::tibble(
i = 1,
j = 1:length(x$clues),
word = x$clues
)
g2 <- ggplot2::ggplot(tmp)
if (is.null(legend_size)) {
g2 <- g2 +
ggfittext::geom_fit_text(
aes(x = .data$i, y = .data$j, label = .data$word),
reflow = TRUE,
grow = FALSE, # NOTE: grow=TRUE slows this process...
min.size = 0
)
} else {
g2 <- g2 +
# geom_text(
# aes(x = i, y = j, label = word),
# size = legend_size,
# hjust = 0.5
# )
ggtext::geom_richtext(
aes(x = .data$i, y = .data$j, label = .data$word),
fill = NA,
size = legend_size,
label.color = NA, # remove background and outline
label.padding = grid::unit(rep(0, 4), "pt") # remove padding
)
}
g2 <- g2 +
ggplot2::ggtitle(expression(underline("Word List"))) +
ggplot2::theme_void() +
ggplot2::scale_y_reverse() +
ggplot2::theme(
plot.title = element_text(hjust = 0.5, size = 16, face = "bold")
)
g1 <- cowplot::plot_grid(g1, g2, nrow = 1, rel_widths = c(3/4, 1/4))
}
g1
}
/scratch/gouwar.j/cran-all/cranData/worrrd/R/wordsearch.R
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 7,
out.width = '100%'
)
## ----setup, message=FALSE, warning=FALSE--------------------------------------
library(worrrd)
## -----------------------------------------------------------------------------
words <- c("dog", "cat", "horse", "frog", "cow", "fox")
ex1 <- wordsearch(words, r = 10, c = 10)
plot(ex1, solution = TRUE)
## -----------------------------------------------------------------------------
dat <-
dplyr::tribble(
~word, ~clue,
"apple", "keeps the doctor away",
"banana", "yellow; great dipped in chocolate",
"orange", "citrus; soccer halftime snack",
"lime", "citrus; green",
"pear", "Bosc; Bartlett; D'Anjou",
"strawberry", "red sweet berry full of seeds"
)
ex1 <- crossword(words = dat$word, clues = dat$clue, r = 40, c = 40)
plot(ex1, solution = TRUE, clues = TRUE, legend_size = 2)
/scratch/gouwar.j/cran-all/cranData/worrrd/inst/doc/getting-started.R
---
title: "Getting Started"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Getting Started}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 7,
out.width = '100%'
)
```
```{r setup, message=FALSE, warning=FALSE}
library(worrrd)
```
## Wordsearch
Let's start off by creating a basic wordsearch.
```{r}
words <- c("dog", "cat", "horse", "frog", "cow", "fox")
ex1 <- wordsearch(words, r = 10, c = 10)
plot(ex1, solution = TRUE)
```
## Crossword
Crosswords require `clues` in addition to `words`.
```{r}
dat <-
dplyr::tribble(
~word, ~clue,
"apple", "keeps the doctor away",
"banana", "yellow; great dipped in chocolate",
"orange", "citrus; soccer halftime snack",
"lime", "citrus; green",
"pear", "Bosc; Bartlett; D'Anjou",
"strawberry", "red sweet berry full of seeds"
)
ex1 <- crossword(words = dat$word, clues = dat$clue, r = 40, c = 40)
plot(ex1, solution = TRUE, clues = TRUE, legend_size = 2)
```
/scratch/gouwar.j/cran-all/cranData/worrrd/vignettes/getting-started.Rmd
#' Run \code{pull_wos} across multiple queries
#'
#' @inheritParams query_wos
#' @param queries Vector of queries to issue to the WoS API and pull data for.
#'
#' @return The same set of data frames that \code{\link{pull_wos}} returns, with
#' the addition of a data frame named \code{query}. This data frame tells
#' you which publications were returned by a given query.
#'
#' @examples
#' \dontrun{
#'
#' queries <- c('TS = "dog welfare"', 'TS = "cat welfare"')
#' # we can name the queries so that these names appear in the queries data
#' # frame returned by pull_wos_apply():
#' names(queries) <- c("dog welfare", "cat welfare")
#' pull_wos_apply(queries)
#'}
#' @export
pull_wos_apply <- function(queries,
editions = c("SCI", "SSCI", "AHCI", "ISTP", "ISSHP",
"BSCI", "BHCI", "IC", "CCR", "ESCI"),
sid = auth(Sys.getenv("WOS_USERNAME"),
Sys.getenv("WOS_PASSWORD")),
...) {
if (is.null(names(queries))) {
names(queries) <- queries
}
query_names <- names(queries)
if (length(query_names) != length(unique(query_names))) {
stop("The names of your queries must be unique", call. = FALSE)
}
res_list <- pbapply::pblapply(
query_names, one_pull_wos_apply,
queries = queries,
editions = editions,
sid = sid,
... = ...
)
df_names <- c(unique(schema$df), "query")
out <- lapply2(
df_names,
function(x) unique(do.call(rbind, lapply(res_list, function(y) y[[x]])))
)
append_class(out, "wos_data")
}
one_pull_wos_apply <- function(query_name, queries, editions, sid, ...) {
query <- queries[[query_name]]
message("\n\nPulling WoS data for the following query: ", query_name, "\n\n")
wos_out <- pull_wos(query = query, editions = editions, sid = sid, ...)
uts <- wos_out[["publication"]][["ut"]]
num_pubs <- length(uts)
if (num_pubs == 0)
query_df <- data.frame(
ut = character(),
query = character(),
stringsAsFactors = FALSE
)
else
query_df <- data.frame(
ut = uts,
query = rep(query_name, num_pubs),
stringsAsFactors = FALSE
)
wos_out$query <- query_df
wos_out
}
#' Run \code{query_wos} across multiple queries
#'
#' @inheritParams query_wos
#' @param queries Vector of queries to run.
#'
#' @return A data frame which lists the number of records returned by each of
#' your queries.
#'
#' @examples
#' \dontrun{
#'
#' queries <- c('TS = "dog welfare"', 'TS = "cat welfare"')
#' query_wos_apply(queries)
#'}
#' @export
query_wos_apply <- function(queries,
editions = c("SCI", "SSCI", "AHCI", "ISTP", "ISSHP",
"BSCI", "BHCI", "IC", "CCR", "ESCI"),
sid = auth(Sys.getenv("WOS_USERNAME"),
Sys.getenv("WOS_PASSWORD")),
...) {
if (is.null(names(queries))) {
names(queries) <- queries
}
query_names <- names(queries)
if (length(query_names) != length(unique(query_names))) {
stop("The names of your queries must be unique", call. = FALSE)
}
rec_cnt <- vapply(
queries, one_query_wos_apply,
editions = editions,
sid = sid,
... = ...,
FUN.VALUE = numeric(1)
)
data.frame(
query = query_names,
rec_cnt = unname(rec_cnt),
stringsAsFactors = FALSE
)
}
one_query_wos_apply <- function(query, editions, sid, ...) {
q_out <- query_wos(query, editions, sid, ...)
q_out$rec_cnt
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/apply.R
#' Authenticate user credentials
#'
#' \code{auth} asks the API's server for a session ID (SID), which you can then
#' pass along to either \code{\link{query_wos}} or \code{\link{pull_wos}}. Note,
#' there are limits on how many session IDs you can get in a given period of time
#' (roughly 5 SIDs in a 5 minute period).
#'
#' @param username Your username. Specify \code{username = NULL} if you want to
#' use IP-based authentication.
#' @param password Your password. Specify \code{password = NULL} if you want to
#' use IP-based authentication.
#'
#' @return A session ID
#'
#' @examples
#' \dontrun{
#'
#' # Pass user credentials in manually:
#' auth("some_username", password = "some_password")
#'
#' # Use the default of looking for username and password in envvars, so you
#' # don't have to keep specifying them in your code:
#' Sys.setenv(WOS_USERNAME = "some_username", WOS_PASSWORD = "some_password")
#' auth()
#'}
#' @export
auth <- function(username = Sys.getenv("WOS_USERNAME"),
password = Sys.getenv("WOS_PASSWORD")) {
ip_based <- is.null(username) && is.null(password)
if (!ip_based) {
if (username == "" || password == "") {
stop(
"You need to provide a username and password to use the API",
call. = FALSE
)
}
}
body <-
'<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:auth="http://auth.cxf.wokmws.thomsonreuters.com">
<soapenv:Header/>
<soapenv:Body>
<auth:authenticate/>
</soapenv:Body>
</soapenv:Envelope>'
url <- "http://search.webofknowledge.com/esti/wokmws/ws/WOKMWSAuthenticate"
# Send HTTP POST request
if (is.null(username) && is.null(password)) {
response <- httr::POST(
url,
body = body,
httr::timeout(30),
ua()
)
} else {
response <- httr::POST(
url,
body = body,
httr::authenticate(username, password = password),
httr::timeout(30),
ua()
)
}
# Confirm server didn't throw an error
check_resp(response)
# Pull out SID from XML
doc <- get_xml(response)
parse_el_txt(doc, xpath = "//return")
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/auth.R
# Wrapper around creating data frames from a list of lists of parsed fields
data_frame_wos <- function(parse_list) {
# Create list of data frame sets, one set of dfs for each round of downloading
df_list <- lapply(parse_list, get_dfs)
# bind data frames together
suppressWarnings(bind_dfs(df_list))
}
# Create various data frames
get_dfs <- function(one_set) {
publication <- get_pub_df(one_set$pub_parselist)
ut_value_dfs <- get_ut_value_dfs(one_set$pub_parselist)
ut_vec <- publication$ut
author <- nested_list_to_df(one_set$author_parselist, ut_vec = ut_vec)
address <- nested_list_to_df(one_set$address_parselist, ut_vec = ut_vec)
grant <- nested_list_to_df(one_set$grant_parselist, ut_vec = ut_vec)
list(
publication = publication,
author = author,
address = address,
jsc = ut_value_dfs$jsc,
keyword = ut_value_dfs$keyword,
keywords_plus = ut_value_dfs$keywords_plus,
grant = grant,
doc_type = ut_value_dfs$doc_type
)
}
# Get publication-level data frame from parsed field list
get_pub_df <- function(pub_list) {
pub_level <- c(
"ut", "title", "journal", "sortdate", "value", "local_count"
)
cols <- lapply(pub_list, function(x) {
vec <- unlist(x[pub_level])
if (length(vec) != length(pub_level)) return(NA)
abstract <- x[["abstract"]]
abs2 <- if (is.na(abstract[1])) NA else paste0(abstract, collapse = " ")
names(abs2) <- "abstract"
c(vec, abs2)
})
as.data.frame(do.call(rbind, cols), stringsAsFactors = FALSE)
}
# Get "UT-value" data frames (e.g., data frames with key value pairs, with the
# key being UT and value being some field)
get_ut_value_dfs <- function(pub_parselist) {
n_df <- lapply(pub_parselist, one_ut_value_df)
lapply2(ut_val_flds, function(f)
do.call(rbind, lapply(n_df, function(x) x[[f]]))
)
}
one_ut_value_df <- function(one_list) {
lapply2(ut_val_flds, function(f) {
vec <- one_list[[f]]
# have to separate these two if statements so NULLs aren't checked in the
# second statement (which would throw a warning)
if (is.null(vec)) return(NULL)
if (is.na(vec[1])) return(NULL)
len <- length(vec)
ut <- rep(one_list$ut, len)
df <- data.frame(
ut = ut,
f = vec,
stringsAsFactors = FALSE
)
colnames(df)[2] <- f
df
}
)
}
ut_val_flds <- c(
"jsc", "keyword", "keywords_plus", "grant_number", "grant_agency", "doc_type"
)
nested_list_to_df <- function(list, ut_vec) {
times <- vapply(list, function(x) if (is.matrix(x)) nrow(x) else 0, numeric(1))
ut <- rep(ut_vec, times)
binded <- do.call(rbind, list)
df <- as.data.frame(binded, stringsAsFactors = FALSE)
cbind.data.frame(ut, df, stringsAsFactors = FALSE)
}
bind_dfs <- function(df_batchs) {
lapply2(names(df_batchs[[1]]), function(x) {
df <- do.call(rbind, lapply(df_batchs, function(y) y[[x]]))
append_class(df, class = paste0(x, "_df")) # add classes to dfs for oop in process-wos
})
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/data-frame-wos.R
download_wos <- function(query_result, ...) {
# Make sure query didn't return more than 100,000 results. The API doesn't
# allow you to download a data set that is more than 100,000 records in size
rec_cnt <- query_result$rec_cnt
if (rec_cnt >= 100000) {
stop(
"Can't download result sets that have 100,000 or more records.
Try breaking your query into pieces using the PY tag
(see FAQs at https://vt-arc.github.io/wosr/articles/faqs.html#how-do-i-download-data-for-a-query-that-returns-more-than-100000-records for details)"
)
}
# Return NA if no pubs matched the query
if (rec_cnt == 0) return(NA)
from <- seq(1, to = rec_cnt, by = 100)
count <- rep(100, times = length(from))
count[length(count)] <- rec_cnt - from[length(count)] + 1
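# e.g., rec_cnt = 250 gives from = c(1, 101, 201) and
# count = c(100, 100, 50), i.e., three paginated requests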
pbapply::pblapply(seq_len(length(from)), function(x, ...) {
response <- one_pull(
query_result$query_id,
first_record = from[x],
count = count[x],
sid = query_result$sid,
...
)
check_resp(response)
response
})
}
one_pull <- function(query_id, first_record, count, sid, ...) {
# Create body of HTTP request, which asks for data for a given number of records
# (count), starting at record number first_record. This allows paginated
# download of results. Also note that you are passing along the ID for a
# particular query, so that the server knows which result set to look in.
body <- paste0(
'<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<ns2:retrieve xmlns:ns2="http://woksearch.v3.wokmws.thomsonreuters.com">
<queryId>', query_id, '</queryId>
<retrieveParameters>
<firstRecord>', first_record, '</firstRecord>
<count>', count, '</count></retrieveParameters>
</ns2:retrieve>
</soap:Body>
</soap:Envelope>'
)
# If you run into throttling error (2 calls per second per SID), just sleep
# for a second and try again (up to three tries)
for (i in 1:3) {
response <- wok_search(body, sid, ...)
if (httr::http_error(response)) {
er <- parse_er(response)
if (grepl("throttle", er, ignore.case = TRUE)) {
Sys.sleep(1)
}
} else {
return(response)
}
}
response
}
wok_search <- function(body, sid, ...) {
httr::POST(
"http://search.webofknowledge.com/esti/wokmws/ws/WokSearch",
body = body,
httr::add_headers("cookie" = paste0("SID=", sid)),
ua(),
...
)
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/download-wos.R
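# `schema` is an internal data frame (with columns df, field, and dtype, as
# used below) that maps each output data frame to its expected columns and
# column types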
enforce_schema <- function(wos_unenforced) {
lapply2(names(wos_unenforced), function(name) {
maybe_df <- wos_unenforced[[name]]
is_df <- is.data.frame(maybe_df)
is_null <- is.null(maybe_df)
# have to run these logical tests in a roundabout way because we don't want
# warnings from things like is.na(NULL)
df_has_no_rows <- if (is_df) nrow(maybe_df) == 0 else FALSE
na_or_empty <- if (!is_null) df_has_no_rows || is.na(maybe_df) else FALSE
df <- if (na_or_empty || is_null) make_df(name) else maybe_df
cast_fields(df, name)
})
}
make_df <- function(df_name) {
fields <- schema[schema$df == df_name, "field"]
df <- data.frame(matrix(ncol = length(fields), nrow = 0))
colnames(df) <- fields
df
}
cast_fields <- function(df, df_name) {
df_schema <- schema[schema$df == df_name, ]
col_list <- lapply(df_schema$field, function(x) {
dtype <- df_schema[df_schema$field == x, "dtype"]
cast_fun <- switch(
dtype,
"character" = as.character,
"date" = as.Date,
"integer" = as.integer
)
cast_fun(df[[x]])
})
as.data.frame(col_list, stringsAsFactors = FALSE, col.names = df_schema$field)
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/enfore-schema.R
#' Write WoS data
#'
#' Writes each of the data frames in an object of class \code{wos_data} to its
#' own csv file.
#'
#' @param wos_data An object of class \code{wos_data}, created by calling
#' \code{\link{pull_wos}}.
#' @param dir Path to the directory where you want to write the files. If the
#' directory doesn't yet exist, \code{write_wos_data} will create it for you.
#' Note, this directory cannot already have WoS data files in it.
#'
#' @return Nothing. Files are written to disk.
#'
#' @examples
#' \dontrun{
#'
#' sid <- auth("your_username", password = "your_password")
#' wos_data <- pull_wos("TS = (dog welfare) AND PY = 2010", sid = sid)
#'
#' # Write files to working directory
#' write_wos_data(wos_data, ".")
#'
#' # Write files to "wos-data" dir
#' write_wos_data(wos_data, "wos-data")
#'}
#' @export
write_wos_data <- function(wos_data, dir) {
if (!dir.exists(dir)) dir.create(dir, recursive = TRUE)
if (length(list.files(dir, "publication\\.csv", all.files = TRUE)))
stop(
"It looks like there are already WoS files in ", normalizePath(dir),
". Remove these files and try again."
)
if (!("wos_data" %in% class(wos_data)))
stop("You must pass an object of class `wos_data` into write_wos_data")
lapply(
names(wos_data),
function(x) utils::write.csv(
wos_data[[x]], full_path(dir, x), row.names = FALSE
)
)
invisible()
}
full_path <- function(dir, x) file.path(dir, paste0(x, ".csv"))
#' Read WoS data
#'
#' Reads in a series of CSV files (which were written via
#' \code{\link{write_wos_data}}) and places the data in an object of class
#' \code{wos_data}.
#'
#' @param dir Path to the directory where you wrote the CSV files.
#'
#' @return An object of class \code{wos_data}.
#'
#' @examples
#' \dontrun{
#'
#' sid <- auth("your_username", password = "your_password")
#' wos_data <- pull_wos("TS = (dog welfare) AND PY = 2010", sid = sid)
#'
#' # Write files to working directory
#' write_wos_data(wos_data, ".")
#' # Read data back into R
#' wos_data <- read_wos_data(".")
#' }
#' @export
read_wos_data <- function(dir) {
files <- list.files(dir)
dfs <- unique(schema$df)
have_all_files <- all(dfs %in% gsub("\\.csv$", "", files))
if (!have_all_files)
stop(
"Directory ", normalizePath(dir),
" doesn't have all of the following files: ",
paste0(paste0(dfs, ".csv"), collapse = ", ")
)
wos_data <- lapply2(
dfs, function(x)
utils::read.csv(full_path(dir, x), stringsAsFactors = FALSE)
)
wos_data <- enforce_schema(wos_data)
append_class(wos_data, "wos_data")
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/io.R
#' wosr
#'
#' @docType package
#' @import xml2
#' @name wosr
NULL
/scratch/gouwar.j/cran-all/cranData/wosr/R/package.R
parse_wos <- function(all_resps) {
pbapply::pblapply(all_resps, one_parse)
}
# Parse one response and place results into a list
one_parse <- function(response) {
# Create html parse tree
doc <- get_xml(response)
# Get nodes corresponding to each publication
doc_list <- xml_find_all(doc, xpath = "//rec")
# Parse data
list(
pub_parselist = parse_gen_pub_data(doc_list),
author_parselist = parse_author_node_data(doc_list),
address_parselist = parse_address_node_data(doc_list),
grant_parselist = parse_grant_data(doc_list)
)
}
# Function to pull out data from elements (and their attributes) that don't
# require us to care about their ancestor nodes (beyond the fact that they
# exist in a given rec node)
parse_gen_pub_data <- function(doc_list) {
pub_els_xpath <- c(
ut = ".//uid[1]", # document id
title = ".//summary//title[@type='item'][1]", # title
journal = ".//summary//title[@type='source'][1]", # journal
doc_type = ".//summary//doctype", # doc type
abstract = ".//fullrecord_metadata//p[ancestor::abstract_text]", # abstract
jsc = ".//fullrecord_metadata//subject[@ascatype='traditional']", # JSCs
keyword = ".//fullrecord_metadata//keyword", # keywords
keywords_plus = ".//static_data//keywords_plus/keyword" # keywords plus
)
pub_els_out <- parse_els_apply(doc_list, xpath = pub_els_xpath)
pub_atrs_xpath <- c(
sortdate = ".//summary//pub_info[1]", # publication's pub date
value = ".//dynamic_data//identifier[@type='doi'][1]", # publication's DOI
local_count = ".//citation_related//silo_tc[1]" # times cited
)
atr_list <- parse_atrs_apply(doc_list, xpath = pub_atrs_xpath)
bind_el_atr(pub_els_out, atr_list = atr_list)
}
# For each pub, find the nodes containing author data and extract the relevant
# child node values and attributes from those nodes
parse_author_node_data <- function(doc_list) {
author_list <- split_nodes(
doc_list,
xpath = ".//summary//names//name[@role='author' and string-length(@seq_no)>0]"
)
message_long_parse(author_list, "authors")
el_xpath <- c(
display_name = "display_name[1]", # display name (e.g., baker, chris)
first_name = "first_name[1]",
last_name = "last_name[1]",
email = "email_addr[1]" # author's email
)
atr_xpath <- c(
seq_no = ".", # author's listing sequence
daisng_id = ".", # author's DaisNG ID
addr_no = "." # Authors address number, for linking to address data
)
parse_deep(author_list, el_xpath = el_xpath, atr_xpath = atr_xpath)
}
# For each pub, find the nodes containing address data and extract the relevant
# child node values and attributes from those nodes
parse_address_node_data <- function(doc_list) {
address_list <- split_nodes(
doc_list,
xpath = ".//fullrecord_metadata//addresses/address_name/address_spec"
)
message_long_parse(address_list, "addresses")
el_xpath <- c(
org_pref = "organizations/organization[@pref='Y'][1]", # preferred name of org
org = "organizations/organization[not(@pref='Y')][1]", # regular name of org
city = "city[1]", # org city
state = "state[1]", # org state
country = "country[1]" # org country
)
atr_xpath <- c(addr_no = ".")
parse_deep(address_list, el_xpath = el_xpath, atr_xpath = atr_xpath)
}
parse_grant_data <- function(doc_list) {
grant_list <- split_nodes(doc_list, ".//fund_ack/grants/grant")
el_xpath <- c(grant_agency = "grant_agency", grant_id = "grant_ids/grant_id")
parse_deep_grants(grant_list, el_xpath = el_xpath)
}
## utility parsing functions
get_xml <- function(response) {
raw_xml <- httr::content(response, as = "text")
unescaped_xml <- unescape_xml(raw_xml)
unescaped_xml <- paste0("<x>", unescaped_xml, "</x>")
read_html(unescaped_xml)
}
unescape_xml <- function(x) {
# records come back XML-escaped inside the SOAP envelope, so convert the
# entities back to literal characters before parsing
x <- gsub("&lt;", "<", x)
x <- gsub("&gt;", ">", x)
gsub("&amp;", "&", x)
}
split_nodes <- function(doc_list, xpath)
lapply(doc_list, xml_find_all, xpath = xpath)
parse_deep <- function(entity_list, el_xpath, atr_xpath) {
lapply(entity_list, function(x) {
one_ent_data <- lapply(x, function(q) {
els <- parse_els(q, xpath = el_xpath)
atrs <- parse_atrs(q, xpath = atr_xpath)
unlist(c(els, atrs))
})
do.call(rbind, one_ent_data)
})
}
parse_deep_grants <- function(entity_list, el_xpath) {
lapply(entity_list, function(x) {
one_ent_data <- lapply(x, function(q) {
temp <- parse_els(q, xpath = el_xpath)
num_ids <- length(temp$grant_id)
if (num_ids >= 2) temp$grant_agency <- rep(temp$grant_agency, num_ids)
do.call(cbind, temp)
})
do.call(rbind, one_ent_data)
})
}
parse_els_apply <- function(doc_list, xpath)
lapply(doc_list, parse_els, xpath = xpath)
parse_els <- function(doc, xpath)
lapply(xpath, function(x) parse_el_txt(doc, xpath = x))
parse_el_txt <- function(doc, xpath) {
txt <- xml_text(xml_find_all(doc, xpath = xpath))
na_if_missing(txt)
}
parse_atrs_apply <- function(doc_list, xpath)
lapply(doc_list, parse_atrs, xpath = xpath)
parse_atrs <- function(doc, xpath) {
lapply2(names(xpath), function(x) {
el <- xml_find_all(doc, xpath = xpath[[x]])
atr_out <- xml_attr(el, attr = x)
na_if_missing(atr_out)
})
}
na_if_missing <- function(x) if (is.null(x) || length(x) == 0) NA else x
bind_el_atr <- function(el_list, atr_list)
lapply(seq_along(el_list), function(x) c(el_list[[x]], atr_list[[x]]))
message_long_parse <- function(list, entity) {
num_ents <- vapply(list, length, numeric(1))
if (any(num_ents >= 100)) {
message(
"At least one of your publications has 100 or more ", entity,
" listed on it. Parsing the data from these publications will take",
" some time."
)
}
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/parse-wos.R
#' @export
print.wos_data <- function(x, ...) {
utils::str(
x, vec.len = 1, max.level = 2, give.attr = FALSE, strict.width = "cut"
)
}
#' @export
print.query_result <- function(x, ...) {
cat("Matching records:", format_num(x$rec_cnt))
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/print.R
process_incites <- function(incites_df, as_raw = FALSE) {
if (!is.data.frame(incites_df)) return(NULL)
if (as_raw) return(incites_df)
colnames(incites_df) <- tolower(colnames(incites_df))
cols <- c(
"isi_loc", "article_type", "tot_cites", "journal_expected_citations",
"journal_act_exp_citations", "impact_factor", "avg_expected_rate",
"percentile", "nci", "esi_most_cited_article",
"hot_paper", "is_international_collab", "is_institution_collab",
"is_industry_collab", "oa_flag"
)
bad_cols <- cols[!cols %in% colnames(incites_df)]
if (length(bad_cols) != 0) {
stop(
"API isn't serving these columns anymore: ",
paste0(bad_cols, collapse = ", ")
)
}
incites_df <- incites_df[, cols]
colnames(incites_df)[1] <- "ut"
incites_df$ut <- paste0("WOS:", incites_df$ut)
incites_df[, 3:15] <- apply(incites_df[, 3:15], MARGIN = 2, FUN = as.numeric)
incites_df[, 10:15] <- apply(
incites_df[, 10:15], MARGIN = 2, FUN = function(x) x == 1
)
incites_df
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/process-incites.R
process_wos_apply <- function(df_list) {
proc_out <- lapply(
df_list, function(x) {
if (is.data.frame(x))
if (nrow(x) != 0) process_wos(x) else NULL
else
NULL
}
)
# Pull out data frames in proc_out$author and reorder dfs
wos_data <- c(
proc_out[1], # publication
proc_out$author[1], # author
proc_out[3], # address
proc_out$author[2], # author_address
proc_out[4:length(proc_out)] # rest of the data frames
)
append_class(wos_data, "wos_data")
}
process_wos <- function(x) UseMethod("process_wos")
process_wos.default <- function(x) x
process_wos.publication_df <- function(x) {
colnames(x)[colnames(x) == "local_count"] <- "tot_cites"
colnames(x)[colnames(x) == "value"] <- "doi"
colnames(x)[colnames(x) == "sortdate"] <- "date"
x$journal <- to_title_case(x$journal)
x
}
# Have to convert to lower first if you want to use toTitleCase for strings that
# are in all caps
to_title_case <- function(x) {
ifelse(is.na(x), NA, tools::toTitleCase(tolower(x)))
}
# split author data frame into two data frames to make data relational
process_wos.author_df <- function(x) {
colnames(x)[colnames(x) == "seq_no"] <- "author_no"
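# addr_no can hold several space-separated address numbers (e.g., "1 2" links
# an author to addresses 1 and 2), so expand to one row per author/address pair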
splt <- strsplit(x$addr_no, " ")
times <- vapply(splt, function(x) if (is.na(x[1])) 0 else length(x), numeric(1))
ut <- rep(x$ut, times)
author_no <- rep(x$author_no, times)
addr_no <- unlist(splt[vapply(splt, function(x) !is.na(x[1]), logical(1))])
if (sum(times) != 0)
author_address <- data.frame(
ut = ut,
author_no = author_no,
addr_no = addr_no,
stringsAsFactors = FALSE
)
else
author_address <- NULL
author_cols <- c(
"ut", "author_no", "display_name", "first_name", "last_name",
"email", "daisng_id"
)
list(
author = x[, author_cols],
author_address = author_address
)
}
process_wos.address_df <- function(x) {
x[, c("ut", "addr_no", "org_pref", "org", "city", "state", "country")]
}
process_wos.jsc_df <- function(x) {
# There are duplicate JSC values (differing only in capitalization). Remove these.
x$jsc <- to_title_case(x$jsc)
unique(x)
}
process_wos.keywords_plus_df <- function(x) {
# "keywords plus" keywords are in upper-case, but regular keywords are in
# lower case. standardize to one case (lower).
x$keywords_plus <- tolower(x$keywords_plus)
x
}
process_wos.keyword_df <- function(x) {
x$keyword <- tolower(x$keyword)
x
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/process-wos.R
#' Pull cited references
#'
#' @inheritParams query_wos
#' @param uts Vector of UTs (i.e., publications) whose cited references you want.
#'
#' @return A data frame with the following columns:
#' \describe{
#' \item{ut}{The publication that is doing the citing. These are the UTs that
#' you submitted to \code{pull_cited_refs}. If one of your publications
#' doesn't have any cited refs, it will not appear in this column.}
#'
#' \item{doc_id}{The cited ref's document identifier (similar to a UT).}
#'
#' \item{title}{Roughly equivalent to the cited ref's title.}
#'
#' \item{journal}{Roughly equivalent to the cited ref's journal.}
#'
#' \item{author}{The cited ref's first author.}
#'
#' \item{tot_cites}{The total number of citations the cited ref has received.}
#'
#' \item{year}{The cited ref's publication year.}
#'
#' \item{page}{The cited ref's page number.}
#'
#' \item{volume}{The cited ref's journal volume.}
#' }
#'
#' @examples
#' \dontrun{
#'
#' sid <- auth("your_username", password = "your_password")
#' uts <- c("WOS:000362312600021", "WOS:000439855300030", "WOS:000294946900020")
#' pull_cited_refs(uts, sid)
#'}
#' @export
pull_cited_refs <- function(uts,
sid = auth(Sys.getenv("WOS_USERNAME"),
Sys.getenv("WOS_PASSWORD")),
...) {
uts <- trim_uts(uts)
out <- pbapply::pblapply(uts, pull_one_ut_of_cited_refs, sid = sid, ... = ...)
full_df <- do.call(rbind, out)
cast_cited_ref_df(full_df)
}
pull_one_ut_of_cited_refs <- function(ut, sid, ...) {
qry_res <- try(retry_throttle(query_cited_refs(ut, sid, ...)), silent = TRUE)
if ("try-error" %in% class(qry_res)) {
msg <- attributes(qry_res)$condition$message
if (grepl("No document found for requested UID", msg, ignore.case = TRUE)) {
Sys.sleep(1)
return(NULL)
} else {
stop(msg)
}
}
if (qry_res$rec_cnt == 0) {
Sys.sleep(1)
return(NULL)
}
# page through the result set 100 records at a time
first_records <- seq(1, to = qry_res$rec_cnt, by = 100)
list_of_lists <- lapply(first_records, function(x, ...) {
retry_throttle(pull_one_set_of_cited_refs(qry_res$query_id, x, sid, ...))
})
res_list <- do.call(c, list_of_lists)
res_df <- do.call(rbind, lapply(res_list, unlist))
res_df <- cbind(rep(paste0("WOS:", ut), nrow(res_df)), res_df)
colnames(res_df)[1] <- "ut"
# have to sleep here to avoid throttling error
Sys.sleep(1)
res_df
}
query_cited_refs <- function(ut, sid, ...) {
body <- paste0(
'<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:woksearch="http://woksearch.v3.wokmws.thomsonreuters.com">
<soapenv:Header/>
<soapenv:Body>
<woksearch:citedReferences>
<databaseId>WOS</databaseId>
<uid>', ut, '</uid>
<queryLanguage>en</queryLanguage>
<retrieveParameters>
<firstRecord>1</firstRecord>
<count>1</count>
</retrieveParameters>
</woksearch:citedReferences>
</soapenv:Body>
</soapenv:Envelope>'
)
response <- wok_search(body, sid, ...)
check_resp(response)
doc <- get_xml(response)
query_id <- parse_el_txt(doc, xpath = "//queryid")
rec_cnt <- parse_el_txt(doc, xpath = "//recordsfound")
list(
query_id = as.numeric(query_id),
rec_cnt = as.numeric(rec_cnt)
)
}
pull_one_set_of_cited_refs <- function(query_id, first_record, sid, ...) {
body <- paste0(
'<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Header/>
<soap:Body>
<woksearch:citedReferencesRetrieve xmlns:woksearch="http://woksearch.v3.wokmws.thomsonreuters.com">
<queryId>', query_id, '</queryId>
<retrieveParameters>
<firstRecord>', first_record, '</firstRecord>
<count>100</count>
</retrieveParameters>
</woksearch:citedReferencesRetrieve>
</soap:Body>
</soap:Envelope>'
)
response <- wok_search(body, sid, ...)
check_resp(response)
doc <- get_xml(response)
doc_list <- xml_find_all(doc, xpath = "//return")
xpath <- c(
doc_id = ".//docid[1]",
title = ".//citedtitle[1]",
journal = ".//citedwork[1]",
author = ".//citedauthor[1]",
tot_cites = ".//timescited[1]",
year = ".//year[1]",
page = ".//page[1]",
volume = ".//volume[1]"
)
parse_els_apply(doc_list, xpath = xpath)
}
cast_cited_ref_df <- function(df) {
df <- as.data.frame(df, stringsAsFactors = FALSE)
if (!nrow(df)) {
cols <- c(
"ut", "doc_id", "title", "journal", "author",
"tot_cites", "year", "page", "volume"
)
lst <- vector("list", length(cols))
names(lst) <- cols
df <- as.data.frame(lapply(lst, as.character), stringsAsFactors = FALSE)
}
df$tot_cites <- as.numeric(df$tot_cites)
df$year <- as.numeric(df$year)
df
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/pull-cited-refs.R
#' Pull data from the InCites API
#'
#' \strong{Important note:} The throttling limits on the InCites API are not
#' documented anywhere and are difficult to determine from experience. As such,
#' whenever \code{pull_incites} receives a throttling error from the server, it
#' uses exponential backoff (with a maximum wait time of 45 minutes) to determine
#' how long to wait before retrying.
#'
#' @param uts A vector of UTs whose InCites data you would like to download.
#' Each UT is a 15-digit identifier for a given publication. You
#' can specify the UT using only these 15 digits or you can append the 15 digits
#' with "WOS:" (e.g., "000346263300011" or "WOS:000346263300011").
#' @param key The developer key that the server will use for authentication.
#' @param as_raw Do you want the data frame that is returned by the API to be
#' returned to you in its raw form? This option can be useful if the API has
#' changed the format of the data that it is serving, in which case specifying
#' \code{as_raw = TRUE} may avoid an error that would otherwise occur during
#' \code{pull_incites}'s data processing step.
#' @param ... Arguments passed along to \code{\link[httr]{GET}}.
#'
#' @return A data frame where each row corresponds to a different publication.
#' The definitions for the columns in this data frame can be found online at
#' the API's documentation \href{http://about.incites.thomsonreuters.com/api/#/}{page}
#' (see the \code{DocumentLevelMetricsByUT} method details for definitions).
#' Note that the column names are all converted to lowercase by
#' \code{pull_incites} and the 0/1 flag variables are converted to booleans.
#' Also note that not all publications indexed in WoS are also indexed in
#' InCites, so you may not get data back for some UTs.
#'
#' @examples
#' \dontrun{
#'
#' uts <- c(
#' "WOS:000346263300011", "WOS:000362312600021", "WOS:000279885800004",
#' "WOS:000294667500003", "WOS:000294946900020", "WOS:000412659200006"
#' )
#' pull_incites(uts, key = "some_key")
#'
#' pull_incites(c("000346263300011", "000362312600021"), key = "some_key")
#'}
#' @export
pull_incites <- function(uts, key = Sys.getenv("INCITES_KEY"), as_raw = FALSE, ...) {
if (key == "") {
stop(
"You need to provide an InCites API key to use `pull_incites()`",
call. = FALSE
)
}
urls <- get_urls(trim_uts(uts))
out_list <- pbapply::pblapply(urls, try_incites_req, key = key, ... = ...)
unique(process_incites(do.call("rbind", out_list), as_raw))
}
get_urls <- function(uts) {
ut_list <- split_uts(uts)
lapply(ut_list, get_url)
}
split_uts <- function(uts) {
len <- seq_along(uts)
f <- ceiling(len / 100)
split(uts, f = f)
}
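# e.g., 250 UTs yield f = c(rep(1, 100), rep(2, 100), rep(3, 50)), so the UTs
# are requested in batches of at most 100 per API call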
get_url <- function(uts) {
paste0(
"https://api.clarivate.com/api/incites/DocumentLevelMetricsByUT/json?UT=",
paste0(uts, collapse = ",")
)
}
backoff_wait <- function(try) {
exp_backoff <- ceiling((2^try - 1) / 2)
ifelse(exp_backoff > 32, 45, exp_backoff)
}
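# The resulting wait schedule (in minutes) over the 10 retry attempts made by
# try_incites_req() is 1, 2, 4, 8, 16, 32, 45, 45, 45, 45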
try_incites_req <- function(url, key, ...) {
# Try making the HTTP request up to 10 times (spaced apart based on exponential backoff)
for (i in 1:10) {
maybe_data <- try(one_incites_req(url, key, ...), silent = TRUE)
if (!("try-error" %in% class(maybe_data))) {
Sys.sleep(2)
return(maybe_data)
} else {
if (grepl("limit", maybe_data[1])) {
minutes <- backoff_wait(i)
mins_txt <- if (minutes == 1) " minute." else " minutes."
message(
"\nRan into throttling limit. Retrying request in ",
minutes, mins_txt
)
Sys.sleep(60 * minutes)
} else {
stop(maybe_data[1])
}
}
}
stop("\n\nRan into throttling limit 10 times, stopping")
}
one_incites_req <- function(url, key, ...) {
response <- httr::GET(url, ua(), httr::add_headers(c("X-TR-API-APP-ID" = key)), ...)
raw_txt <- httr::content(response, as = "text", encoding = "UTF-8")
if (grepl("rate limit quota violation", raw_txt, ignore.case = TRUE))
stop("limit")
if (httr::http_error(response))
stop(httr::http_status(response))
json_resp <- jsonlite::fromJSON(raw_txt)
maybe_data_frame <- json_resp$api$rval[[1]]
if (is.data.frame(maybe_data_frame)) maybe_data_frame else NULL
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/pull-incites.R
#' Pull related records
#'
#' Pull the records that have at least one citation in common with a publication
#' of interest.
#'
#' @inheritParams query_wos
#' @param uts The documents whose related records you want to pull.
#' @param num_recs Number of related records to pull for each UT. This value
#' must be <= 100.
#'
#' @return A data frame with the following columns:
#' \describe{
#' \item{ut}{The publications that you passed into \code{pull_related_recs}.
#' If one of your publications doesn't have any related records, it won't
#' appear here.}
#'
#' \item{related_rec}{The publication that is related to \code{ut}.}
#'
#' \item{rec_num}{The related record's ordering in the result set returned
#' by the API. Records that share more citations with your UTs will have
#' smaller \code{rec_num}s.}
#' }
#'
#' @examples
#' \dontrun{
#'
#' sid <- auth("your_username", password = "your_password")
#' uts <- c("WOS:000272877700013", "WOS:000272366800025")
#' out <- pull_related_recs(uts, 5, sid = sid)
#'}
#' @export
pull_related_recs <- function(uts,
num_recs,
editions = c("SCI", "SSCI", "AHCI", "ISTP", "ISSHP",
"BSCI", "BHCI", "IC", "CCR", "ESCI"),
sid = auth(Sys.getenv("WOS_USERNAME"),
Sys.getenv("WOS_PASSWORD")),
...) {
if (num_recs > 100) {
stop("num_recs cannot be greater than 100", call. = FALSE)
}
uts <- trim_uts(uts)
out <- pbapply::pblapply(
uts, pull_one_ut_of_related_recs,
num_recs = num_recs,
editions = editions,
sid = sid,
... = ...
)
full_mat <- do.call(rbind, out)
cast_related_recs(full_mat)
}
pull_one_ut_of_related_recs <- function(ut, num_recs, editions, sid, ...) {
body <- get_rr_body(ut, num_recs, editions)
response <- retry_throttle(wok_search(body, sid, ...))
# if the record doesn't have any citations, the API will return an HTTP error
# starting with "Exception occurred processing request"
c_resp <- try(check_resp(response), silent = TRUE)
if ("try-error" %in% class(c_resp)) {
msg <- attributes(c_resp)$condition$message
if (grepl("Exception occurred processing request", msg, ignore.case = TRUE)) {
Sys.sleep(1)
return(NULL)
} else {
stop(msg)
}
}
doc <- get_xml(response)
rfound <- parse_el_txt(doc, "//recordsfound")
if (is.na(rfound) || rfound == "0") {
out <- NULL
} else {
uts <- parse_el_txt(doc, '//optionvalue/value')
ut <- paste0("WOS:", rep(ut, length(uts)))
out <- matrix(c(ut, uts, seq_along(uts)), ncol = 3)
}
Sys.sleep(1)
out
}
get_rr_body <- function(ut, num_recs, editions) {
paste0(
'<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:woksearch="http://woksearch.v3.wokmws.thomsonreuters.com">
<soapenv:Header/>
<soapenv:Body>
<woksearch:relatedRecords>
<databaseId>WOS</databaseId>
<uid>', ut, '</uid>',
paste_eds(editions),
'<queryLanguage>en</queryLanguage>
<retrieveParameters>
<firstRecord>1</firstRecord>
<count>', num_recs, '</count>
<option>
<key>RecordIDs</key>
<value>On</value>
</option>
</retrieveParameters>
</woksearch:relatedRecords>
</soapenv:Body>
</soapenv:Envelope>'
)
}
cast_related_recs <- function(full_mat) {
df <- as.data.frame(full_mat, stringsAsFactors = FALSE)
if (!nrow(df)) {
df <- data.frame(matrix(ncol = 3, nrow = 0), stringsAsFactors = FALSE)
}
colnames <- c("ut", "related_rec", "rec_num")
colnames(df) <- colnames
df$ut <- as.character(df$ut)
df$related_rec <- as.character(df$related_rec)
df$rec_num <- as.numeric(df$rec_num)
df
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/pull-related-recs.R
#' Pull data from the Web of Science
#'
#' \code{pull_wos} wraps the process of querying, downloading, parsing, and
#' processing Web of Science data.
#'
#' @inheritParams query_wos
#'
#' @return A list of the following data frames:
#' \describe{
#' \item{publication}{A data frame where each row corresponds to a different
#' publication. Note that each publication has a distinct \code{ut}. There is
#' a one-to-one relationship between a \code{ut} and each of the columns
#' in this table.}
#'
#' \item{author}{A data frame where each row corresponds to a different
#' publication/author pair (i.e., a \code{ut}/\code{author_no} pair). In
#' other words, each row corresponds to a different author on a publication.
#' You can link the authors in this table to the \code{address} and
#' \code{author_address} tables to get their addresses (if they exist). See
#' example in FAQs for details.}
#'
#' \item{address}{A data frame where each row corresponds to a different
#' publication/address pair (i.e., a \code{ut}/\code{addr_no} pair). In
#' other words, each row corresponds to a different address on a publication.
#' You can link the addresses in this table to the \code{author} and
#' \code{author_address} tables to see which authors correspond to which
#' addresses. See example in FAQs for details.}
#'
#' \item{author_address}{A data frame that specifies which authors correspond
#' to which addresses on a given publication. This data frame is meant to
#' be used to link the \code{author} and \code{address} tables together.}
#'
#' \item{jsc}{A data frame where each row corresponds to a different
#' publication/jsc (journal subject category) pair. There is a many-to-many
#' relationship between \code{ut}'s and \code{jsc}'s.}
#'
#' \item{keyword}{A data frame where each row corresponds to a different
#' publication/keyword pair. These are the author-assigned keywords.}
#'
#' \item{keywords_plus}{A data frame where each row corresponds to a different
#' publication/keywords_plus pair. These keywords are the keywords assigned
#' by Clarivate Analytics through an automated process.}
#'
#' \item{grant}{A data frame where each row corresponds to a different
#' publication/grant agency/grant ID triplet. Not all publications acknowledge
#' a specific grant number in the funding acknowledgement section, hence the
#' \code{grant_id} field can be \code{NA}.}
#'
#' \item{doc_type}{A data frame where each row corresponds to a different
#' publication/document type pair.}
#' }
#'
#' @examples
#' \dontrun{
#'
#' sid <- auth("your_username", password = "your_password")
#' pull_wos("TS = (dog welfare) AND PY = 2010", sid = sid)
#'
#' # Re-use session ID. This is best practice to avoid throttling limits:
#' pull_wos("TI = \"dog welfare\"", sid = sid)
#'
#' # Get fresh session ID:
#' pull_wos("TI = \"pet welfare\"", sid = auth("your_username", "your_password"))
#'
#' # It's best to see how many records your query matches before actually
#' # downloading the data. To do this, call query_wos before running pull_wos:
#' query <- "TS = ((cadmium AND gill*) NOT Pisces)"
#' query_wos(query, sid = sid) # shows that there are 1,611 matching publications
#' pull_wos(query, sid = sid)
#'}
#' @export
pull_wos <- function(query,
editions = c("SCI", "SSCI", "AHCI", "ISTP", "ISSHP",
"BSCI", "BHCI", "IC", "CCR", "ESCI"),
sid = auth(Sys.getenv("WOS_USERNAME"),
Sys.getenv("WOS_PASSWORD")),
...) {
# First send the query to the API and get back the metadata we'll need to set
# up the downloading of the data
qr_out <- query_wos(query, editions = editions, sid = sid, ...)
# Create empty list enforce_schema will fill with empty data frames
if (qr_out$rec_cnt == 0) {
dfs <- unique(schema$df)
wos_unenforced <- vector("list", length = length(dfs))
names(wos_unenforced) <- dfs
} else {
# Download the raw XML and put it in a list
message("Downloading data\n")
all_resps <- download_wos(qr_out, ...)
all_resps <- all_resps[vapply(all_resps, length, numeric(1)) > 1]
# Parse out various fields
message("\nParsing XML\n")
parse_list <- parse_wos(all_resps)
# Create data frames from list of parsed fields
df_list <- data_frame_wos(parse_list)
wos_unenforced <- process_wos_apply(df_list)
}
wos_data <- enforce_schema(wos_unenforced)
append_class(wos_data, "wos_data")
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/pull-wos.R
#' Query the Web of Science
#'
#' Returns the number of records that match a given query. It's best to call
#' this function before calling \code{\link{pull_wos}} so that you know how
#' many records you're trying to download before attempting to do so.
#'
#' @param query Query string. See the \href{https://images.webofknowledge.com/images/help/WOK/hs_search_operators.html#dsy863-TRS_search_operator_precedence}{WoS query documentation} page
#' for details on how to write a query as well as this list of \href{http://images.webofknowledge.com.ezproxy.lib.vt.edu/WOKRS527R13/help/WOS/hp_advanced_examples.html}{example queries}.
#' @param editions Web of Science editions to query. Possible values are listed
#' \href{http://ipscience-help.thomsonreuters.com/wosWebServicesLite/dbEditionsOptionsGroup/databaseEditionsWos.html}{here}.
#' @param sid Session identifier (SID). The default setting is to get a fresh
#' SID each time you query WoS via a call to \code{\link{auth}}. However,
#' you should try to reuse SIDs across queries so that you don't run into the
#' throttling limits placed on new sessions.
#' @param ... Arguments passed along to \code{\link[httr]{POST}}.
#'
#' @return An object of class \code{query_result}. This object contains the number
#' of publications that are returned by your query (\code{rec_cnt}), as well as
#' some info that \code{\link{pull_wos}} uses when it calls \code{query_wos}
#' internally.
#'
#' @examples
#' \dontrun{
#'
#' # Get session ID and reuse it across queries:
#' sid <- auth("some_username", password = "some_password")
#'
#' query_wos("TS = (\"dog welfare\") AND PY = (1990-2007)", sid = sid)
#'
#' # Finds records in which Max Planck appears in the address field.
#' query_wos("AD = Max Planck", sid = sid)
#'
#' # Finds records in which Max Planck appears in the same address as Mainz
#' query_wos("AD = (Max Planck SAME Mainz)", sid = sid)
#' }
#' @export
query_wos <- function(query,
editions = c("SCI", "SSCI", "AHCI", "ISTP", "ISSHP",
"BSCI", "BHCI", "IC", "CCR", "ESCI"),
sid = auth(Sys.getenv("WOS_USERNAME"),
Sys.getenv("WOS_PASSWORD")),
...) {
# Create XML body to POST to server
body <- paste0(
'<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:woksearch="http://woksearch.v3.wokmws.thomsonreuters.com">
<soapenv:Header/>
<soapenv:Body>
<woksearch:search>
<queryParameters>
<databaseId>WOS</databaseId>
<userQuery> ', escape_query(query), ' </userQuery>',
paste_eds(editions),
'<queryLanguage>en</queryLanguage>
</queryParameters>
<retrieveParameters>
<firstRecord>1</firstRecord>
<count>0</count>
</retrieveParameters>
</woksearch:search>
</soapenv:Body>
</soapenv:Envelope>'
)
# Send HTTP request
response <- httr::POST(
"http://search.webofknowledge.com/esti/wokmws/ws/WokSearch",
body = body,
httr::add_headers("cookie" = sprintf("SID=%s", sid)),
ua(),
...
)
# Confirm server didn't throw an error
check_resp(response)
# Pull out metadata from XML
doc <- get_xml(response)
query_id <- parse_el_txt(doc, xpath = "//queryid")
rec_cnt <- parse_el_txt(doc, xpath = "//recordsfound")
structure(
list(
query_id = as.numeric(query_id),
rec_cnt = as.numeric(rec_cnt),
sid = sid
),
class = "query_result"
)
}
# Create part of XML body that contains the WOS editions that should be searched
# for a given query
paste_eds <- function(editions) {
edition_vec <- sprintf(
"<editions>
<collection>WOS</collection>
<edition>%s</edition>
</editions>", editions
)
paste(edition_vec, collapse = " ")
}
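# e.g., paste_eds(c("SCI", "SSCI")) produces two <editions> blocks, one per
# edition, each listing the WOS collection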
escape_query <- function(query) gsub("&", "&amp;", query)
#' Create a vector of UT-based queries
#'
#' Use this function when you have a bunch of UTs whose data you want to pull
#' and you need to write a series of UT-based queries to do so (i.e., queries
#' in the form "UT = (WOS:000186387100005 OR WOS:000179260700001)").
#'
#' @param uts UTs that will be placed inside the UT-based queries.
#' @param uts_per_query Number of UTs to include in each query. Note, there is
#' a limit on how long your query can be, so you probably want to keep this set
#' to around 200.
#'
#' @return A vector of queries. You can feed these queries to
#' \code{\link{pull_wos_apply}} to download data for each query.
#'
#' @examples
#' \dontrun{
#'
#' data <- pull_wos('TS = ("animal welfare") AND PY = (2002-2003)')
#' queries <- create_ut_queries(data$publication$ut)
#' pull_wos_apply(queries)
#'}
#' @export
create_ut_queries <- function(uts, uts_per_query = 200) {
ut_list <- split(uts, ceiling(seq_along(uts) / uts_per_query))
vapply(
ut_list,
function(x) sprintf("UT = (%s)", paste0(x, collapse = " OR ")),
character(1)
)
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/query-wos.R
check_resp <- function(response) {
if (httr::http_error(response)) {
stop(parse_er(response), call. = FALSE)
}
}
parse_er <- function(response) {
doc <- get_xml(response)
parse_el_txt(doc, xpath = "//faultstring")
}
lapply2 <- function(...) sapply(..., simplify = FALSE, USE.NAMES = TRUE)
replace_if_0_rows <- function(x, replace = NULL) {
if (is.data.frame(x)) {
if (nrow(x) == 0) return(replace)
}
x
}
ua <- function() httr::user_agent("https://github.com/vt-arc/wosr")
format_num <- function(x) format(
x, big.mark = ",", scientific = FALSE, trim = TRUE
)
append_class <- function(x, class) structure(x, class = c(class(x), class))
trim_uts <- function(x) gsub("^WOS:", "", x, ignore.case = TRUE)
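# Retry an expression once after a throttling error. `expr` is a lazily
# evaluated promise: if forcing it inside tryCatch() throws, referencing
# `expr` again in the handler re-evaluates the original call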
retry_throttle <- function(expr) {
tryCatch(
expr = expr,
error = function(e) {
throt_er <- grepl(
"throttle|limit of [0-9] requests per period", e$message,
ignore.case = TRUE
)
if (throt_er) {
Sys.sleep(3)
message("\nRan into throttling error. Sleeping and trying again.")
expr
} else {
stop(e$message)
}
}
)
}
/scratch/gouwar.j/cran-all/cranData/wosr/R/utils.R
build_site <- function() {
# Render extra vignettes and move vignettes for pkgdown to pick up
extra_vigs <- list.files(
"inst/site/vignettes", full.names = TRUE, pattern = "\\.Rmd|\\.png"
)
to <- gsub("inst/site/vignettes", "vignettes/", extra_vigs)
on.exit(try(unlink(x = to, force = TRUE)))
file.copy(extra_vigs, to = to)
pkgdown::build_site()
}
/scratch/gouwar.j/cran-all/cranData/wosr/inst/site/build-site.R
---
title: "Frequently asked questions"
output: html_document
---
### Why does the WoS API sometimes return a different number of records than the WoS web interface?
The API does not conduct lemmatization before applying your query, while the web app does. This means that the API will typically return a smaller result set than the web app.
### What are the throttling limits on the WoS and InCites APIs?
There are two important limits that you should be aware of:
* For the WoS API, you can't request more than 5 session IDs (SIDs) in a 5 minute period (i.e., you can't call `auth()` more than 5 times in a five minute period). You can reuse a SID across queries, though, so this limit isn't a big deal (see the sketch below).
* According to the documentation, the InCites API limits users to 1,000 requests per 24-hour period, where each request can contain up to 100 UTs. That would suggest that you can download up to 100,000 publications-worth of data from the InCites API each day. However, based on my experience, there may be another (undocumented) throttling limit which restricts the user to requesting no more than ~1,500 publications per 30-minute period.[^1]
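For example, you can authenticate once and reuse the resulting SID across queries. A minimal sketch (substitute your own credentials):
```{r, eval = FALSE}
sid <- auth(username = "your_username", password = "your_password")
query_wos('TS = ("animal welfare")', sid = sid)
query_wos('TS = ("dog welfare")', sid = sid)
```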
### Why doesn't `pull_incites()` return data for all of my publications?
Not all publications that are indexed in the Web of Science database are also indexed in InCites.
### How do I link together the `author` and `address` data frames returned by `pull_wos()`?
You can join the data frames using the `author_address` linking table, like so:
```{r, eval = FALSE}
library(wosr)
library(dplyr)
data <- pull_wos("TS = \"dog welfare\"")
data$author %>%
left_join(data$author_address, by = c("ut", "author_no")) %>%
left_join(data$address, by = c("ut", "addr_no"))
```
### How do I download data for a query that returns more than 100,000 records?
The WoS API doesn't allow you to download data for a query that matches 100,000 or more publications. You can get around this by breaking your query into pieces using the publication year tag (`PY`). For example, if you have a broad query like `"TS = dog"` (which matches over 250,000 records), you could break it up into four sub-queries that have contiguous date ranges (and which return fewer than 100,000 records each). For example:
```{r, eval = FALSE}
queries <- c(
"TS = dog AND PY = 1900-1980",
"TS = dog AND PY = 1981-2000",
"TS = dog AND PY = 2001-2010",
"TS = dog AND PY = 2011-2018"
)
results <- pull_wos_apply(queries)
```
### There are some fields that I'm interested in that `pull_wos()` doesn't return. How do I get them?
Open up an issue on `wosr`'s [issue page](https://github.com/vt-arc/wosr/issues) describing the field(s) that you want.
[^1]: To accommodate this limit, `pull_incites()` sleeps for a given amount of time (determined by how many times it has received a throttling error for the request it is trying to make) before retrying the request.
|
/scratch/gouwar.j/cran-all/cranData/wosr/inst/site/vignettes/faqs.Rmd
|
---
title: "Getting started"
output: html_document
---
```{r, echo = FALSE, message = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
progress = FALSE,
error = FALSE,
message = FALSE
)
options(digits = 2)
```
1. The first step is to open up a session with the WoS API. `auth()` will authenticate your credentials with the API's server and return a session ID (SID).
```{r, eval = FALSE}
library(wosr)
sid <- auth(username = "your_username", password = "your_password")
```
```{r, echo = FALSE}
library(wosr)
sid <- auth()
```
2. Now we can query the Web of Science to see how many records match our query string.
```{r}
# Find all publications that contain "animal welfare" in their titles (TI tag)
# and have the words "dog" and "welfare" somewhere in their titles, abstracts, or
# list of keywords (TS tag).
query <- 'TI = ("animal welfare") AND TS = (dog welfare)'
query_wos(query, sid = sid)
```
3. Pull the data.
```{r}
data <- pull_wos(query, sid = sid)
data
```
4. `pull_wos()` returns a series of data frames that are like tables in a relational database. You can link these data frames together as needed, to answer whatever questions you have. For example:
- What are the top 5 journal subject categories (JSCs) in this set of publications and which publications are classified into these JSCs?
```{r}
library(dplyr)
top_jscs <-
data$jsc %>%
group_by(jsc) %>%
count() %>%
arrange(desc(n)) %>%
head()
top_jscs
```
```{r}
data$jsc %>%
inner_join(top_jscs, by = "jsc") %>%
inner_join(data$publication, by = "ut") %>%
select(title) %>%
distinct() %>%
head()
```
- Which publications have "cat" in their abstracts, who are the authors on those publications, and which organizations are those authors from?
```{r}
cat_pubs <-
data$publication %>%
filter(grepl("\\bcat\\b", abstract, ignore.case = TRUE)) %>%
select(ut)
cat_pubs
```
```{r}
cat_authors <-
data$author %>%
semi_join(cat_pubs, by = "ut") %>%
select(ut, author_no, display_name)
cat_authors
```
```{r}
cat_authors %>%
inner_join(data$author_address, by = c("ut", "author_no")) %>%
inner_join(data$address, by = c("ut", "addr_no")) %>%
select(ut, author_no, display_name, org)
```
- Which funding organizations were responsible for funding top-cited publications?
```{r}
data$grant %>%
inner_join(data$publication, by = "ut") %>%
select(grant_agency, ut, tot_cites) %>%
distinct() %>%
arrange(desc(tot_cites)) %>%
head()
```
5. Download more detailed citation data (from the InCites API) for the top-cited publications.
```{r, eval = FALSE}
top_100_pubs <-
data$publication %>%
arrange(desc(tot_cites)) %>%
slice(1:100) %>%
.$ut
head(pull_incites(top_100_pubs, key = "your_incites_key"))
```
```{r, echo = FALSE}
top_100_pubs <-
data$publication %>%
arrange(desc(tot_cites)) %>%
slice(1:100) %>%
.$ut
head(pull_incites(top_100_pubs))
```
|
/scratch/gouwar.j/cran-all/cranData/wosr/inst/site/vignettes/getting-started.Rmd
|
# wowa Package v4.0
# SEXP-based interface via Rcpp
library(Rcpp)
wowa <- function()
{
# This function outputs a list of all functions included in this toolbox.
print("The list of functions in wowa Tool Box:")
print("wowa.OWA([inputs],[OWA weights])")
print("wowa.WAM([inputs],[weights])")
print("wowa.weightedf([inputs], [ Weights], [dimension], [n-variate function to extend],[tree depth])")
print("wowa.weightedOWAQuantifierBuild([number of variables],[input weights], [Associated OWA Weights])")
print("wowa.weightedOWAQuantifier([number of variables],[input weights], [Associated OWA Weights],[quantifier] )")
print("wowa.ImplicitWOWA([inputs],[Weights], [Associated OWA Weights],[dimension])")
print("wowa.WAn([inputs], [Weights]Associated OWA Weights],[dimension],[Bivariate function],[tree depth])")
}
wowa.WAM <- function(n, x, w) {
return(.Call('WOWA_WAM', PACKAGE = 'wowa', n, x, w))
}
wowa.OWA <- function(n, x, w) {
return(.Call('WOWA_OWA', PACKAGE = 'wowa', n, x, w))
}
# WOWA_OWA is defined in RcppExports.cpp; the same applies to the following functions
wowa.weightedf <- function(x, p, w, n, Fn, L) {
return(.Call('WOWA_weightedf', PACKAGE = 'wowa', x, p, w, n, Fn, L ))
}
wowa.weightedOWAQuantifierBuild <- function(p, w, n) {
out<-.Call('WOWA_weightedOWAQuantifierBuild', PACKAGE = 'wowa', p, w, n)
return (out)
}
wowa.weightedOWAQuantifier <- function(x, p, w, n, spl) {
return( .Call('WOWA_weightedOWAQuantifier', PACKAGE = 'wowa', x, p, w, n, spl$spl, spl$Tnum))
}
wowa.ImplicitWOWA <- function(x, p, w, n) {
return(.Call('WOWA_ImplicitWOWA', PACKAGE = 'wowa', x, p, w, n))
}
wowa.WAn <- function(x, w, n, Fn, L) {
return(.Call('WOWA_WAn', PACKAGE = 'wowa', x, w, n, L, Fn))
}
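# Minimal usage sketch (illustrative; assumes the compiled 'wowa' routines
# are loaded):
# x <- c(0.2, 0.5, 0.8); w <- c(0.5, 0.3, 0.2)
# wowa.WAM(3, x, w) # weighted arithmetic mean, sum(w * x)
# wowa.OWA(3, x, w) # ordered weighted average: weights apply to sorted inputs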
|
/scratch/gouwar.j/cran-all/cranData/wowa/R/RcppExports.R
|
#' Simulated 6D data with a sine curve
#'
#' The data has 6 columns, labelled V1-V6,
#' where the sine curve is in V5, V6. The
#' other columns are normal samples.
#'
#' @name sine_curve
#' @aliases sine_curve
#' @docType data
#' @format A 500x6 data frame
#' @keywords datasets
#' @examples
#' library(woylier)
#' data(sine_curve)
#' plot(sine_curve$V5, sine_curve$V6)
NULL
|
/scratch/gouwar.j/cran-all/cranData/woylier/R/data.R
|
#' Build a 2d-dimensional pre-projection space by orthonormalizing Fz with respect to Fa
#' @keywords internal
#' @param Fa starting pxd frame
#' @param Fz ending pxd frame
#' @returns B pre-projection px2d matrix
preprojection <- function(Fa, Fz) {
  # check both matrices are the correct size
stopifnot("Your inputs do not have the same number of columns!" = ncol(Fa) == ncol(Fz))
stopifnot("Your inputs do not have the same number of row!" = nrow(Fa) == nrow(Fz))
# check each is orthonormal
stopifnot("The current frame must be orthonormal!" = tourr::is_orthonormal(Fa))
stopifnot("The target frame must be orthonormal!" = tourr::is_orthonormal(Fz))
Fz_star <- tourr::orthonormalise_by(Fz, Fa)
B <- cbind(Fa, Fz_star)
return(B)
}
#' Construct preprojected frames
#' @keywords internal
#' @param Fr Orthonormal frame
#' @param B pre-projection px2d matrix
#' @returns Preprojected 2dxd frame in the preprojection space (the first dxd block of this matrix is the identity matrix by construction)
construct_preframe <- function(Fr, B) {
W <- t(B) %*% Fr
return(W)
}
#' Compute the signed angle between two 2D vectors, covering the full 360 degrees
#' @keywords internal
#' @param x vector with length 2
#' @param y vector with length 2
#' @return angle in radians
angle2 <- function(x, y){
theta <- atan2(x[2], x[1]) - atan2(y[2], y[1])
return(theta)
}
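# e.g., angle2(c(0, 1), c(1, 0)) gives pi/2: the rotation taking (1, 0) to (0, 1)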
#' Take the i-th and k-th rows of a matrix and rotate them by angle theta
#' @keywords internal
#' @param a matrix
#' @param i row
#' @param k row that we want to zero the element
#' @param theta angle between them
#' @return rotated matrix a
#' Refer to Algorithm 5.1.6 of Matrix Computations (Golub & Van Loan)
row_rot <- function(a, i, k, theta) {
n <- ncol(a)
for (q in 1:n){
x = a[i, q]
y = a[k, q]
a[i, q] = cos(theta)*x - sin(theta)*y
a[k, q] = sin(theta)*x + cos(theta)*y
}
return(a)
}
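# e.g., rotating rows 1 and 2 of a unit column vector by -pi/4 zeroes the
# second element:
# a <- matrix(c(sqrt(2)/2, sqrt(2)/2), ncol = 1)
# row_rot(a, 1, 2, -pi/4) # approximately matrix(c(1, 0), ncol = 1)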
#' Calculate angles of required rotations to map Wz to Wa
#' @keywords internal
#' @param Wa starting preprojected frame
#' @param Wz target preprojected frame
#' @return named list of angles
calculate_angles <- function(Wa, Wz) {
angles = list()
wi = Wz
for (col in 1:ncol(Wz)) {
for (row in col:(nrow(Wz)-1)){
# store angles in a named list
x <- as.matrix(c(Wa[col, col], Wa[row+1, col]))
y <- as.matrix(c(wi[col, col], wi[row+1, col]))
theta = angle2(x, y)
angles[paste0(col, row +1)] = theta
wi = row_rot(wi, col, row+1, theta)
}
}
return(angles)
}
#' Implement a series of Givens rotations that maps Wa to Wz
#' @keywords internal
#' @param Wa starting preprojected frame
#' @param angles angles of required rotations to map Wz to Wa
#' @param stepfraction for the interpolation of rotations
#' @return Givens path by stepfraction in pre-projected space
givens_rotation <- function(Wa, angles, stepfraction) {
w_i = Wa
for (col in ncol(Wa):1) {
for (row in (nrow(Wa)-1):col){
# rotating in reverse order
index = paste0(col, row+1)
theta = - as.numeric(angles[index])
w_i = row_rot(w_i, col, row+1, theta*stepfraction)
}
}
return(w_i)
}
#' Reconstruct interpolated frames using pre-projection
#' @keywords internal
#' @param B pre-projection px2d matrix
#' @param Wt A givens path by stepfraction
#' @returns The frame at the given step of the interpolation
construct_moving_frame <- function(Wt, B) {
Ft = B %*% Wt
return(Ft)
}
#' Construct full interpolated frames
#' @param nsteps number of steps of interpolation
#' @param Fa starting pxd frame
#' @param Fz target pxd frame
#' @returns An array of nsteps matrices. Each matrix is an interpolated frame between the starting and target frames.
#' @export
#' @examples
#' p <- 4
#' base1 <- tourr::orthonormalise(tourr::basis_random(p, d=1))
#' base2 <- tourr::orthonormalise(tourr::basis_random(p, d=1))
#' path <- givens_full_path(base1, base2, nsteps=10)
givens_full_path <- function(Fa, Fz, nsteps) {
B <- preprojection(Fa, Fz)
Wa <- construct_preframe(Fa, B)
Wz <- construct_preframe(Fz, B)
angles <- calculate_angles(Wa, Wz)
path <- array(dim = c(nrow(B), ncol(Wa), nsteps))
for (i in 1:nsteps) {
stepfraction <- i/nsteps
Wt = givens_rotation(Wa, angles, stepfraction)
Ft = construct_moving_frame(Wt, B)
path[,,i] <- Ft
}
return(path)
}
|
/scratch/gouwar.j/cran-all/cranData/woylier/R/givens.R
|
frozen <- NULL
#' Create a grand tour with Givens interpolation
#' @param d dimension of projection
#' @param ... additional parameters to pass through
#' @return creates grand tour
#' @export
#' @examples
#' data(sine_curve)
#' tourr::animate(sine_curve, grand_tour_givens(), tourr::display_xy())
grand_tour_givens <- function(d = 2, ...) {
generator <- function(current, data, ...) {
if (is.null(current)) {
return(tourr::basis_init(ncol(data), d))
}
target <- tourr::basis_random(ncol(data), d)
list(target = target)
}
new_givens_path("grand", generator)
}
#' Create a guided tour with Givens interpolation
#' @param index_f the index function to optimize.
#' @param d target dimensionality
#' @param alpha the initial size of the search window, in radians
#' @param cooling the amount the size of the search window should be adjusted
#' by after each step
#' @param optim character indicating the search strategy to use: \code{search_geodesic}, \code{search_better},
#' \code{search_better_random}, \code{search_polish}. Default is \code{search_geodesic}.
#' @param max.tries the maximum number of unsuccessful attempts to find
#' a better projection before giving up
#' @param max.i the maximum index value, stop search if a larger value is found
#' @param n_sample number of samples to generate if \code{optim} is \code{search_polish}
#' @param ... arguments sent to the search_f
#' @return creates guided tour
#' @export
#' @examples
#' data(sine_curve)
#' tourr::animate_xy(sine_curve, guided_tour_givens(tourr::splines2d()), sphere=FALSE)
guided_tour_givens <- function(index_f, d = 2, alpha = 0.5, cooling = 0.99, max.tries = 25,
max.i = Inf, optim = "search_geodesic", n_sample = 100, ...) {
generator <- function(current, data, tries, ...) {
index <- function(proj) {
index_f(as.matrix(data) %*% proj)
}
valid_fun <- c(
"search_geodesic", "search_better", "search_better_random",
"search_polish", "search_posse"
)
method <- valid_fun[vapply(valid_fun, function(x) {
identical(x, optim)
}, logical(1))]
search_f <- switch(method,
search_geodesic = tourr::search_geodesic,
search_better = tourr::search_better,
search_better_random = tourr::search_better_random,
search_polish = tourr::search_polish,
search_posse = tourr::search_posse
)
if (is.null(current)) {
current <- tourr::basis_random(ncol(data), d)
cur_index <- index(current)
tryCatch({
rcd_env <- parent.frame(n = 3)
rcd_env[["record"]] <- dplyr::add_row(
rcd_env[["record"]],
basis = list(current),
index_val = cur_index,
info = "new_basis",
method = method,
alpha = formals(tourr::guided_tour)$alpha,
tries = 1,
loop = 1
)
},
error = function(e){
assign("record",
tibble::tibble(basis = list(),
index_val = numeric(),
info = character(),
method = character(),
alpha = numeric(),
tries = numeric(),
loop = numeric()),
envir = parent.frame())
rcd_env[["record"]] <- tibble::tibble(
basis = list(current),
index_val = cur_index,
info = "new_basis",
method = method,
alpha = formals(tourr::guided_tour)$alpha,
tries = 1,
loop = 1)
}
)
return(current)
}
cur_index <- index(current)
if (cur_index > max.i) {
message("Found index ", cur_index, ", larger than selected maximum ", max.i, ". Stopping search.\n",
sep = ""
)
message("Final projection: \n")
if (ncol(current) == 1) {
for (i in 1:length(current)) {
message(sprintf("%.3f", current[i]), " ")
}
message("\n")
}
else {
for (i in 1:nrow(current)) {
for (j in 1:ncol(current)) {
message(sprintf("%.3f", current[i, j]), " ")
}
message("\n")
}
}
return(NULL)
}
# current, alpha = 1, index, max.tries = 5, n = 5, delta = 0.01, cur_index = NA, ..
basis <- search_f(current, alpha, index, tries, max.tries, cur_index = cur_index, frozen = frozen, n_sample = n_sample, ...)
if (method == "search_posse") {
if (!is.null(basis$h)) {
if (basis$h > 30) {
alpha <<- alpha * cooling
}
}
} else {
alpha <<- alpha * cooling
}
list(target = basis$target, index = index)
}
new_givens_path("guided", generator)
}
#' Path needed for tour with Givens interpolation
#' @param name name to give tour path
#' @param generator basis generator function
#' @param frozen matrix giving frozen variables, as described in
#' \code{freeze}
#' @return creates path for Givens interpolation
#' @keywords internal
new_givens_path <- function(name, generator, frozen = NULL, ...) {
tries <- 1 # Needed for guided
tour_path <- function(current, data, ...) {
if (is.null(current)) {
return(generator(NULL, data, tries, ...))
}
# Keep trying until we get a frame that's not too close to the
# current frame
dist <- 0
while (dist < 1e-3) {
if (name %in% c("guided", "frozen-guided")) tries <<- tries + 1
gen <- generator(current, data, tries, ...)
target <- gen$target
# generator has run out, so give up
if (is.null(target)) {
return(NULL)
}
givens_components <- givens_path(current, target, frozen, ...)
dist <- sum(abs(unlist(givens_components$tau)))
if (dist < 1e-2) {
return(NULL)
}
#message("generation: dist = ", dist, "\n")
}
list(ingred = givens_components, index = gen$index, tries = tries)
}
structure(
tour_path,
name = name,
class = c("tour_path", "function")
)
}
#' A planned tour path using frame-to-frame interpolation.
#'
#' The planned tour takes you from one basis to the next in a
#' set order. Once you have visited all the planned bases, you either stop
#' or start from the beginning once more (if \code{cycle = TRUE}).
#'
#' Usually, you will not call this function directly, but will pass it to
#' a method that works with tour paths like \code{\link{animate}},
#' \code{\link{save_history}} or \code{\link{render}}.
#'
#' @param basis_set the set of bases as a list of projection matrices
#' or a 3d array
#' @param cycle cycle through continuously (\code{TRUE}) or stop after
#' first pass (\code{FALSE})
#' @keywords hplot dynamic
#' @seealso The \code{\link{little_tour}}, a special type of planned tour
#' which cycles between all axis parallel projections.
#' @return creates planned tour path
#' @export
#' @examples
#' library(tourr)
#' twod <- save_history(flea[, 1:3], max = 5)
#' str(twod)
#' animate_xy(flea[, 1:3], planned_tour_givens(twod))
#' animate_xy(flea[, 1:3], planned_tour_givens(twod, TRUE))
#' oned <- save_history(flea[, 1:6], grand_tour(1), max = 3)
#' animate_dist(flea[, 1:6], planned_tour_givens(oned))
planned_tour_givens <- function(basis_set, cycle = FALSE) {
index <- 1
basis_set <- as.list(basis_set)
n <- length(basis_set)
if (cycle) {
generator <- function(current, data, ...) {
if (is.null(current)) {
return(basis_set[[1]])
}
index <<- (index %% n) + 1
target <- basis_set[[index]]
list(target = target)
}
} else {
generator <- function(current, data, ...) {
if (is.null(current)) {
return(basis_set[[1]])
}
index <<- index + 1
if (index > n) {
return(NULL)
}
target <- basis_set[[index]]
list(target = target)
}
}
new_givens_path("planned", generator)
}
|
/scratch/gouwar.j/cran-all/cranData/woylier/R/givens_path.R
|
#' Generate the background sphere or torus
#' @param n number of points on the sphere
#' @param p dimension of data
#' @param d dimension of projection
#' @return n number of points on the surface of sphere
#' @export
#' @examples
#' p <- 4
#' sp <- generate_space_view(p=p)
generate_space_view <- function(n=1000, p=3, d=1) {
# this will generate the background sphere or torus
if (d == 1) {
proj_space <- geozoo::sphere.hollow(n=n, p=p)$points
}
else {
# proj_space <- geozoo::torus(n, p=2*d)$points
}
proj_space <- as.data.frame(proj_space)
return(proj_space)
}
#' Overlay paths of interpolation on the sphere
#' @param proj_space n number of points on the surface of sphere
#' @param path interpolated path
#' @return data frame with interpolated path and points on sphere surface
#' @export
#' @examples
#' p <- 4
#' base1 <- tourr::basis_random(p, d=1)
#' base2 <- tourr::basis_random(p, d=1)
#' path <- givens_full_path(base1, base2, nsteps=10)
#' sp <- generate_space_view(p=p)
#' sp_path <- add_path(sp, path)
#' tourr::animate_xy(sp_path[,1:4], col=sp_path$type)
add_path <- function(proj_space, path) {
# overlay sequence of dots for the path
proj_space$type <- "proj_space"
path <- as.data.frame(t(apply(path, 3, c)))
path$type <- "path"
space_and_path <- rbind(proj_space, path)
return(space_and_path)
}
|
/scratch/gouwar.j/cran-all/cranData/woylier/R/plot.R
|
#' Interpolation format for tourr
#'
#' Generates the interpolation in the form needed for
#' tourr, modelled on geodesic_path()
#' @param current starting frame
#' @param target target frame
#' @param frozen indicator whether some dimensions fixed
#' @param ... arguments sent to later functions
#' @keywords internal
#' @export
#' @return
#' \item{interpolate}{A function with a single parameter in \[0, 1\] that
#' returns an interpolated frame between the current and target frames.
#' 0 gives the current frame, 1 gives the new target frame.}
#' \item{dist}{The distance, in radians, between the current and target
#' frames.}
#' \item{Fa}{The current frame.}
#' \item{Fz}{The new target frame.}
#' \item{tau}{The principal angles between the current and target frames.}
#' \item{Ga}{The current plane.}
#' \item{Gz}{The target plane.}
givens_path <- function (current, target, frozen = NULL, ...) {
if (is.null(frozen)) {
# Regular geodesic
givens <- givens_info(current, target)
interpolate <- function(pos) {
givens_step_fraction(givens, pos)
}
} else {
message("Givens path cannot handle frozen yet \n")
}
list(
interpolate = interpolate,
Fa = current,
Fz = target,
Ga = givens$Wa,
Gz = givens$Wz,
tau = givens$tau,
dist = tourr::proj_dist(current, target)
)
}
#' Calculate information needed for Givens interpolation
#' The methodology is outlined in
#' \url{http://www-stat.wharton.upenn.edu/~buja/PAPERS/paper-dyn-proj-algs.pdf}
#' @param Fa starting frame, will be orthonormalized if necessary
#' @param Fz target frame, will be orthonormalized if necessary
#' @keywords internal
givens_info <- function(Fa, Fz) {
if (!tourr::is_orthonormal(Fa)) {
# message("Orthonormalising Fa")
Fa <- tourr::orthonormalise(Fa)
}
if (!tourr::is_orthonormal(Fz)) {
# message("Orthonormalising Fz")
Fz <- tourr::orthonormalise(Fz)
}
B <- preprojection(Fa, Fz)
Wa <- construct_preframe(Fa, B)
Wz <- construct_preframe(Fz, B)
angles <- calculate_angles(Wa, Wz)
list(B = B, Wa = Wa, Wz = Wz, tau = angles)
}
#' Step along a Givens interpolated path by fraction of path length.
#' @param interp interpolated path
#' @param fraction fraction of distance between start and end frames
#' @keywords internal
givens_step_fraction <- function(interp, fraction) {
# Interpolate between starting and end frames
# - must multiply column wise (hence all the transposes)
Wt = givens_rotation(interp$Wa, interp$tau, fraction)
Ft = construct_moving_frame(Wt, interp$B)
return(Ft)
}
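# e.g., givens_step_fraction(interp, 0.5) gives the frame halfway along the
# Givens path: fraction = 0 returns Fa and fraction = 1 returns Fz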
|
/scratch/gouwar.j/cran-all/cranData/woylier/R/tour.R
|
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/woylier/R/woylier-package.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(woylier)
library(geozoo)
library(ggplot2)
library(dplyr)
library(purrr)
## -----------------------------------------------------------------------------
# Generate 1D example
set.seed(2022)
p <- 6
base1 <- tourr::basis_random(p, d=1)
base2 <- tourr::basis_random(p, d=1)
# First example
frames <- givens_full_path(base1, base2, nsteps = 10)
sp <- generate_space_view(p=p)
sp_path <- add_path(sp, frames)
point1 <- as.data.frame(t(base1))
point1$type <- "point1"
point2 <- as.data.frame(t(base2))
point2$type <- "point2"
sp_path <- rbind(sp_path, point1, point2)
p
tourr::animate_xy(sp_path[,1:p], col=sp_path$type,
axes="bottomleft")
## -----------------------------------------------------------------------------
# Generate 2D example
set.seed(2022)
n <- 1000
p <- 3
d <- 2
base1 <- tourr::basis_random(p, d)
base2 <- tourr::basis_random(p, d)
frames_2d <- givens_full_path(base1, base2, 10)
proj_2d <- map(1:n, ~tourr::basis_random(n = p, d=d)) %>%
purrr::flatten_dbl() %>%
matrix(ncol = p*2, byrow = TRUE) %>%
as_tibble()
# Path
path_2d <- t(apply(frames_2d, 3, c)) %>%
as.data.frame()
# Join
proj_2d <- proj_2d %>%
mutate(type="torus")
path_2d <- path_2d %>%
mutate(type="path")
proj_path <- bind_rows(proj_2d, path_2d)
tourr::animate_xy(proj_path[,1:6],
col=proj_path$type,
axes="bottomleft")
|
/scratch/gouwar.j/cran-all/cranData/woylier/inst/doc/Plotting_with_woylier.R
|
---
title: "Plotting with woylier"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Plotting with woylier}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
This vignette will demonstrate how to plot the interpolated path with the **woylier** package.
```{r setup}
library(woylier)
library(geozoo)
library(ggplot2)
library(dplyr)
library(purrr)
```
# 1D example
A 1D projection of high-dimensional data is a normalized linear combination of the data columns. Therefore, we can plot each projection as a point on the surface of a hypersphere. The plot below shows the Givens interpolation steps between two such points, that is, two 1D projections of 6D data.
```{r}
# Generate 1D example
set.seed(2022)
p <- 6
base1 <- tourr::basis_random(p, d=1)
base2 <- tourr::basis_random(p, d=1)
# First example
frames <- givens_full_path(base1, base2, nsteps = 10)
sp <- generate_space_view(p=p)
sp_path <- add_path(sp, frames)
point1 <- as.data.frame(t(base1))
point1$type <- "point1"
point2 <- as.data.frame(t(base2))
point2$type <- "point2"
sp_path <- rbind(sp_path, point1, point2)
p
tourr::animate_xy(sp_path[,1:p], col=sp_path$type,
axes="bottomleft")
```
# 2D example
In the case of 2D projections, we can plot the interpolated path between two frames on the surface of a torus. A torus can be seen as the product of two circles that are orthogonal to each other.
```{r}
# Generate 2D example
set.seed(2022)
n <- 1000
p <- 3
d <- 2
base1 <- tourr::basis_random(p, d)
base2 <- tourr::basis_random(p, d)
frames_2d <- givens_full_path(base1, base2, 10)
proj_2d <- map(1:n, ~tourr::basis_random(n = p, d=d)) %>%
purrr::flatten_dbl() %>%
matrix(ncol = p*2, byrow = TRUE) %>%
as_tibble()
# Path
path_2d <- t(apply(frames_2d, 3, c)) %>%
as.data.frame()
# Join
proj_2d <- proj_2d %>%
mutate(type="torus")
path_2d <- path_2d %>%
mutate(type="path")
proj_path <- bind_rows(proj_2d, path_2d)
tourr::animate_xy(proj_path[,1:6],
col=proj_path$type,
axes="bottomleft")
```
|
/scratch/gouwar.j/cran-all/cranData/woylier/inst/doc/Plotting_with_woylier.Rmd
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----eval=FALSE---------------------------------------------------------------
# # install.packages("remotes")
# remotes::install_github("numbats/woylier")
## ----setup--------------------------------------------------------------------
library(woylier)
library(geozoo)
library(ggplot2)
library(dplyr)
library(purrr)
## -----------------------------------------------------------------------------
# Generate 1D example
set.seed(2022)
p <- 6
base1 <- tourr::basis_random(p, d=1)
base2 <- tourr::basis_random(p, d=1)
base1
base2
givens_full_path(base1, base2, nsteps = 5)
## -----------------------------------------------------------------------------
# Generate 2D example
set.seed(2022)
p <- 6
base3 <- tourr::basis_random(p, d=2)
base4 <- tourr::basis_random(p, d=2)
base3
base4
givens_full_path(base3, base4, nsteps = 5)
|
/scratch/gouwar.j/cran-all/cranData/woylier/inst/doc/woylier.R
|
---
title: "woylier"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{woylier}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
The "woylier" package provides alternative method for generating a tour path by interpolating between d-D frames in p-D space rather than d-D planes. A tour path is a sequence of projection and we use interpolation method to produce the path. The \CRANpkg{tourr} package uses geodesic interpolation between planes. Geodesic interpolation path is the locally shortest path between planes with no within-plane spin. As a result of this method, the rendered target plane could be the rotated version of the target plane we wanted. This is not a problem when the structure we are looking can be identified without turning the axis around.
The "woylier" package implements the Givens interpolation paths method proposed by [Buja et al. (2005)](https://www.sciencedirect.com/science/article/abs/pii/S0169716104240147?via%3Dihub) in R. This algorithm adapts Given’s matrix decomposition technique which allows the interpolation to be between frames rather than planes.
# Installation
You can install the development version of woylier from GitHub with:
```{r eval=FALSE}
# install.packages("remotes")
remotes::install_github("numbats/woylier")
```
```{r setup}
library(woylier)
library(geozoo)
library(ggplot2)
library(dplyr)
library(purrr)
```
# Example 1
In this example, we have two random 1D bases in a 6D data space, and the `givens_full_path()` function returns the intermediate projections for a given number of interpolation steps. The code chunk below demonstrates the interpolation between two random bases in 5 steps.
```{r}
# Generate 1D example
set.seed(2022)
p <- 6
base1 <- tourr::basis_random(p, d=1)
base2 <- tourr::basis_random(p, d=1)
base1
base2
givens_full_path(base1, base2, nsteps = 5)
```
# Example 2
In this example, we have two random 2D bases in a 6D data space, and the `givens_full_path()` function returns the intermediate projections for a given number of interpolation steps. The code chunk below demonstrates the interpolation between two random bases in 5 steps.
```{r}
# Generate 2D example
set.seed(2022)
p <- 6
base3 <- tourr::basis_random(p, d=2)
base4 <- tourr::basis_random(p, d=2)
base3
base4
givens_full_path(base3, base4, nsteps = 5)
```
|
/scratch/gouwar.j/cran-all/cranData/woylier/inst/doc/woylier.Rmd
|
# This code is for examining the rotations
library(tourr)
library(tidyverse)
set.seed(1970)
path_geo_1d_1 <- save_history(flea[, 1:3], grand_tour(1), max = 3)
path_geo_1d_1 <- interpolate(path_geo_1d_1)
#path_geo_1d_1 <-
# cbind(path_geo_1d_1, group = rep("1", nrow(path_geo_1d_1)))
path_geo_1d_1_m <- t(apply(path_geo_1d_1, 3, c)) %>%
as.data.frame() %>%
mutate(id = "path")
# generating spheres
library(geozoo)
d <- sphere.hollow(p = 3, n = 1000)
#d <- sphere.hollow(p = 5, n = 1000)
d <- data.frame(d$points)
animate_xy(d, axes="off")
animate_slice(d, axes="off")
# Add path to sphere
d <- d %>%
rename(V1=X1, V2=X2, V3=X3) %>%
mutate(id = "sphere")
path_sphere <- bind_rows(path_geo_1d_1_m, d)
animate_xy(path_sphere[,1:3], col=path_sphere$id, axes="off")
|
/scratch/gouwar.j/cran-all/cranData/woylier/inst/examples/plot.r
|
# Exploring the currency cross-rates data
library(tourr)
library(tidyverse)
library(GGally)
library(woylier)
set.seed(202212)
# Read data
rates <- read_csv("data-raw/rates_Nov19_Mar20.csv")
rates <- rates %>% select(date, ARS, AUD, EUR, JPY, KRW, MYR)#, NZD, GBP)
rates_std <- apply(rates[,-1], 2, function(x) (x-mean(x))/sd(x))
# animate_xy(rates_std, tour_path = guided_tour(splines2d()))
#rates <- rates %>% select(date, ARS, AUD, EUR, JPY, MYR, ISK)#, NZD, GBP)
animate_xy(rates[,-1])
animate_xy(rates[,-1], tour_path = guided_tour(splines2d()))
pp_geo <- animate_xy(rates_std[,-1], tour_path = guided_tour(splines2d()))
pp_giv1 <- animate_xy(rates_std[,-1], tour_path = guided_tour_givens(splines2d(), search_f = search_better))
pp_giv2 <- animate_xy(rates_std[,-1], tour_path = guided_tour_givens(splines2d(), search_f = search_better_random))
basis1 <- as.matrix(pp_giv1[737,]$basis[[1]], ncol = 2, byrow=TRUE)
pp1x <- as_tibble(as.matrix(rates_std[,-1])%*%basis1)
ggplot(pp1x, aes(x=V1, y=V2))+
geom_point()
# Check PCA: why doesn't PCA detect correlation
# Correlation is between NZD and GBP
# Use PCA to remove linear dependence
rates_pca <- prcomp(rates[,-1], scale. = TRUE)
ggscatmat(rates_pca$x)
# standardise the pca
rates_pca_sd <- apply(rates_pca$x, 2, function(x) (x-mean(x))/sd(x))
summary(rates_pca$x)
animate_xy(rates_pca_sd)
# the index value of the givens does not go as high as geodesic
animate_xy(rates_pca_sd[,1:4], tour_path = guided_tour_givens(splines2d(), search_f = search_better))
animate_xy(rates_pca_sd[,1:4], tour_path = guided_tour_givens(splines2d(), search_f = search_better_random, max.tries = 100))
animate_xy(rates_pca_sd[,1:4], tour_path = guided_tour(splines2d()))
# modified the splines2d
new_splines2d <- function ()
{
function(mat) {
mat <- as.data.frame(mat)
colnames(mat) <- c("x", "y")
kx <- ifelse(length(unique(mat$x[!is.na(mat$x)])) < 20,
3, 10)
mgam1 <- mgcv::gam(y ~ s(x, bs = "cr", k = kx), data = mat)
measure <- 1 - var(residuals(mgam1), na.rm = T)/var(mat$y, na.rm = T)
return(measure)
}
}
# try modified splines index
animate_xy(rates_pca_sd[,1:4], tour_path = guided_tour_givens(new_splines2d(), search_f = search_better, max.tries = 1000))
animate_xy(rates_pca_sd[,1:4], tour_path = guided_tour_givens(new_splines2d(), search_f = search_better_random, max.tries = 1000))
animate_xy(rates_pca_sd[,1:4], tour_path = guided_tour(new_splines2d()))
set.seed(202212)
basis2 <- basis_random(n=4, d = 2)
#
r1 <- animate_xy(rates_pca_sd[,1:4], tour_path = guided_tour(new_splines2d(), current = basis2, search_f = search_better, max.tries = 1000), rescale=FALSE)
r1$basis[2619]
# index value 0.696
r2 <- animate_xy(rates_pca_sd[,1:4], tour_path = guided_tour_givens(new_splines2d(), current = basis2, search_f = search_better, max.tries = 1000), rescale=FALSE)
# index value 0.762859
r3 <- animate_xy(rates_pca_sd[,1:4], tour_path = guided_tour_givens(new_splines2d(), current = basis2, search_f = search_better_random, max.tries = 1000), rescale=FALSE)
# index value 0.808788
mat <- data.frame(rates_pca_sd[,2:1])
mat_idx <- round(new_splines2d()(mat), 2)
mat_idx
mat_rot <- data.frame(x = cos(pi/4) * mat$PC1 +
sin(pi/4) * mat$PC2 ,
y = -sin(pi/4) * mat$PC1 +
cos(pi/4) * mat$PC2)
mat_idx2 <- round(new_splines2d()(mat_rot), 2)
mat_idx2
ggplot(as_tibble(rates_pca_sd), aes(x = PC2, y= PC1)) +
geom_point()
|
/scratch/gouwar.j/cran-all/cranData/woylier/inst/examples/rates.R
|
---
title: "Plotting with woylier"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Plotting with woylier}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
This vignette will demonstrate how to plot the interpolated path with the **woylier** package.
```{r setup}
library(woylier)
library(geozoo)
library(ggplot2)
library(dplyr)
library(purrr)
```
# 1D example
A 1D projection of high-dimensional data is a normalized linear combination of the data columns. Therefore, we can plot each projection as a point on the surface of a hypersphere. The plot below shows the Givens interpolation steps between two such points, that is, two 1D projections of 6D data.
```{r}
# Generate 1D example
set.seed(2022)
p <- 6
base1 <- tourr::basis_random(p, d=1)
base2 <- tourr::basis_random(p, d=1)
# First example
frames <- givens_full_path(base1, base2, nsteps = 10)
sp <- generate_space_view(p=p)
sp_path <- add_path(sp, frames)
point1 <- as.data.frame(t(base1))
point1$type <- "point1"
point2 <- as.data.frame(t(base2))
point2$type <- "point2"
sp_path <- rbind(sp_path, point1, point2)
p
tourr::animate_xy(sp_path[,1:p], col=sp_path$type,
axes="bottomleft")
```
# 2D example
In the case of 2D projections, we can plot the interpolated path between two frames on the surface of a torus. A torus can be seen as the product of two circles that are orthogonal to each other.
```{r}
# Generate 2D example
set.seed(2022)
n <- 1000
p <- 3
d <- 2
base1 <- tourr::basis_random(p, d)
base2 <- tourr::basis_random(p, d)
frames_2d <- givens_full_path(base1, base2, 10)
proj_2d <- map(1:n, ~tourr::basis_random(n = p, d=d)) %>%
purrr::flatten_dbl() %>%
matrix(ncol = p*2, byrow = TRUE) %>%
as_tibble()
# Path
path_2d <- t(apply(frames_2d, 3, c)) %>%
as.data.frame()
# Join
proj_2d <- proj_2d %>%
mutate(type="torus")
path_2d <- path_2d %>%
mutate(type="path")
proj_path <- bind_rows(proj_2d, path_2d)
tourr::animate_xy(proj_path[,1:6],
col=proj_path$type,
axes="bottomleft")
```
|
/scratch/gouwar.j/cran-all/cranData/woylier/vignettes/Plotting_with_woylier.Rmd
|
---
title: "woylier"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{woylier}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
The "woylier" package provides alternative method for generating a tour path by interpolating between d-D frames in p-D space rather than d-D planes. A tour path is a sequence of projection and we use interpolation method to produce the path. The \CRANpkg{tourr} package uses geodesic interpolation between planes. Geodesic interpolation path is the locally shortest path between planes with no within-plane spin. As a result of this method, the rendered target plane could be the rotated version of the target plane we wanted. This is not a problem when the structure we are looking can be identified without turning the axis around.
The "woylier" package implements the Givens interpolation paths method proposed by [Buja et al. (2005)](https://www.sciencedirect.com/science/article/abs/pii/S0169716104240147?via%3Dihub) in R. This algorithm adapts Given’s matrix decomposition technique which allows the interpolation to be between frames rather than planes.
# Installation
You can install the development version of woylier from GitHub with:
```{r eval=FALSE}
# install.packages("remotes")
remotes::install_github("numbats/woylier")
```
```{r setup}
library(woylier)
library(geozoo)
library(ggplot2)
library(dplyr)
library(purrr)
```
# Example 1
In this example, we have two random 1D bases in a 6D data space, and the `givens_full_path()` function returns the intermediate projections for a given number of interpolation steps. The code chunk below demonstrates the interpolation between two random bases in 5 steps.
```{r}
# Generate 1D example
set.seed(2022)
p <- 6
base1 <- tourr::basis_random(p, d=1)
base2 <- tourr::basis_random(p, d=1)
base1
base2
givens_full_path(base1, base2, nsteps = 5)
```
# Example 2
In this example, we have two random 2D bases in a 6D data space, and the `givens_full_path()` function returns the intermediate projections for a given number of interpolation steps. The code chunk below demonstrates the interpolation between two random bases in 5 steps.
```{r}
# Generate 2D example
set.seed(2022)
p <- 6
base3 <- tourr::basis_random(p, d=2)
base4 <- tourr::basis_random(p, d=2)
base3
base4
givens_full_path(base3, base4, nsteps = 5)
```
|
/scratch/gouwar.j/cran-all/cranData/woylier/vignettes/woylier.Rmd
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Identify the WPA metrics that have the biggest change between two
#' periods.
#'
#' @description
#' `r lifecycle::badge('experimental')`
#'
#' This function uses the Information Value algorithm to predict
#' which Workplace Analytics metrics are most explained by the change in dates.
#'
#' @author Mark Powers <mark.powers@@microsoft.com>
#'
#' @param data Person Query as a dataframe including a date column named "Date".
#' This function assumes the data format is `MM/DD/YYYY` as is standard in a
#' Workplace Analytics query output.
#' @param before_start Start date of "before" time period in `YYYY-MM-DD`.
#' Defaults to earliest date in dataset.
#' @param before_end End date of "before" time period in `YYYY-MM-DD`
#' @param after_start Start date of "after" time period in `YYYY-MM-DD`.
#' Defaults to the day after `before_end`.
#' @param after_end End date of "after" time period in `YYYY-MM-DD`. Defaults to
#' latest date in dataset.
#' @param mybins Number of bins to cut the data into for Information Value
#' analysis. Defaults to 10.
#' @param return String specifying what to return. Currently the only valid
#' option is `"table"`.
#'
#' @return
#' data frame containing all the variables and the corresponding Information
#' Value.
#'
#' @import dplyr
#'
#' @family Variable Association
#' @family Information Value
#' @family Time-series
#'
#' @examples
#' \donttest{
#' # Returns a data frame
#' sq_data %>%
#' IV_by_period(
#' before_start = "2019-12-15",
#' before_end = "2019-12-29",
#' after_start = "2020-01-05",
#' after_end = "2020-01-26"
#' )
#' }
#' @export
IV_by_period <-
function(data,
before_start = min(as.Date(data$Date, "%m/%d/%Y")),
before_end,
after_start = as.Date(before_end) + 1,
after_end = max(as.Date(data$Date, "%m/%d/%Y")),
mybins = 10,
return = "table") {
## Check inputs
required_variables <- c("Date",
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
daterange_1_start <- as.Date(before_start)
daterange_1_end <- as.Date(before_end)
daterange_2_start <- as.Date(after_start)
daterange_2_end <- as.Date(after_end)
WpA_dataset <- data %>% mutate(Date = as.Date(Date, "%m/%d/%Y"))
# Check for dates in data file
if (daterange_1_start < min(WpA_dataset$Date) |
daterange_1_start > max(WpA_dataset$Date) |
daterange_1_end < min(WpA_dataset$Date) |
daterange_1_end > max(WpA_dataset$Date) |
daterange_2_start < min(WpA_dataset$Date) |
daterange_2_start > max(WpA_dataset$Date) |
daterange_2_end < min(WpA_dataset$Date) |
daterange_2_end > max(WpA_dataset$Date)) {
    stop('Dates not found in dataset')
}
# Create variable => Period
WpA_dataset_table <-
WpA_dataset %>%
mutate(
Period = case_when(
Date >= daterange_1_start &
Date <= daterange_1_end ~ "Before",
Date >= daterange_2_start &
Date <= daterange_2_end ~ "After"
)
) %>% filter(Period == "Before" | Period == "After")
WpA_dataset_table <-
WpA_dataset_table %>% mutate(outcome = case_when(Period == "Before" ~ "0",
Period == 'After' ~ "1"))
# De-select character columns
train <-
WpA_dataset_table %>%
transform(outcome = as.numeric(outcome)) %>%
select_if(is.numeric)
# Filter out NAs
train <- train %>%
filter(rowSums(is.na(.[, ])) < 1)
# Rename Outcome Variable
# train <- transform(train, outcome = as.numeric(outcome))
train <- rename(train, 'Outcome' = "outcome")
# Calculate Odds
odds <-
sum(train$Outcome) / (length(train$Outcome) - sum(train$Outcome))
lnodds <- log(odds)
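    # lnodds is used below to convert each bin's weight of evidence back to
    # odds (exp(WOE + lnodds)) and then to a probability (ODDS / (ODDS + 1))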
# IV Analysis
# IV <- create_infotables(data = train, y = "Outcome", bins = mybins)
IV <- map_IV(data = train,
outcome = "Outcome",
bins = mybins)
# if(return == "detailed"){
# # Ranking variables using IV
# wb <- createWorkbook()
# addWorksheet(wb, "Ranking")
# writeDataTable(wb, "Ranking", x = data.frame(IV$Summary))
#
# # Export Individual Tables
# for(i in names(IV$Tables)){
# print(i)
# addWorksheet(wb, substr(i, start = nchar(i) - 30, stop = nchar(i)))
# temp <- IV$Tables[[i]]
# temp$ODDS <- exp(temp$WOE + lnodds)
# temp$PROB <- (temp$ODDS / (temp$ODDS + 1))
# writeDataTable(wb, substr(i, start = nchar(i) - 30, stop = nchar(i)) , x = data.frame(temp))
# }
#
# # Save Workbook
# saveWorkbook(wb, "Output_IV_v2.xlsx", overwrite = TRUE)
#
# # Plot Graph
# pdf("Output_IV_v2.pdf")
# plot_infotables(IV, IV$Summary$Variable[], same_scale=TRUE)
# dev.off()
# } else
if (return == "table") {
# Store all individual dataframes
Tables <- c()
Summary <- data.frame(IV$Summary)
Tables$Summary <- Summary
for (i in names(IV$Tables)) {
temp <- IV$Tables[[i]]
temp$ODDS <- exp(temp$WOE + lnodds)
temp$PROB <- (temp$ODDS / (temp$ODDS + 1))
Tables[[i]] <- create_dt(temp, rounding = 2)
}
# Return ranking table
return(Tables$Summary)
# print("Access individual metrics via Outputs[[metric_name]], e.g., Outputs[[Workweek_span]]")
# # Store each variable's plot
# plots <- c()
# for (i in names(IV$Tables)) {
# plots[[i]] <- plot_infotables(IV, i)
# }
} else {
stop("Please enter a valid input for `return`, either detailed or table.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/IV_by_Period.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Generate a Information Value HTML Report
#'
#' @description
#' The function generates an interactive HTML report using Standard Person Query
#' data as an input. The report contains a full Information Value analysis, a
#' data exploration technique that helps determine which columns in a data set
#' have predictive power or influence on the value of a specified dependent
#' variable.
#'
#' @param data A Standard Person Query dataset in the form of a data frame.
#' @param predictors A character vector specifying the columns to be used as
#' predictors. Defaults to NULL, where all numeric vectors in the data will be
#' used as predictors.
#' @param outcome A string specifying a binary variable, i.e. can only contain
#' the values 1 or 0.
#' @param bins Number of bins to use in `Information::create_infotables()`,
#' defaults to 5.
#' @param max_var Numeric value to represent the maximum number of variables to
#' show on plots.
#' @param path Pass the file path and the desired file name, _excluding the file
#' extension_. For example, `"IV report"`.
#' @param timestamp Logical vector specifying whether to include a timestamp in
#' the file name. Defaults to TRUE.
#'
#' @section Creating a report:
#'
#' Below is an example on how to run the report.
#'
#' ```
#' library(dplyr)
#'
#' sq_data %>%
#' mutate(CH_binary = ifelse(Collaboration_hours > 12, 1, 0)) %>% # Simulate binary variable
#' IV_report(outcome = "CH_binary",
#' predictors = c("Email_hours", "Workweek_span"))
#' ```
#'
#' @family Reports
#' @family Variable Association
#' @family Information Value
#'
#' @inherit generate_report return
#'
#' @export
IV_report <- function(data,
predictors = NULL,
outcome,
bins = 5,
max_var = 9,
path = "IV report",
timestamp = TRUE){
# Create timestamped path (if applicable) -----------------------------------
if(timestamp == TRUE){
newpath <- paste(path, wpa::tstamp())
} else {
newpath <- path
}
# Return IV object directly -------------------------------------------------
# Call `calculate_IV()` only once
IV_obj <-
data %>%
create_IV(outcome = outcome,
predictors = predictors,
bins = bins,
return = "IV")
# IV_names
IV_names <- names(IV_obj$Tables)
# List of tables -----------------------------------------------------------
table_list <-
IV_names %>%
purrr::map(function(x){
IV_obj$Tables[[x]] %>%
mutate(ODDS = exp(WOE + IV_obj$lnodds),
PROB = ODDS / (ODDS + 1))
}) %>%
purrr::set_names(IV_names)
# List of ggplot objects ----------------------------------------------------
plot_list <-
IV_obj$Summary$Variable %>%
as.character() %>%
purrr::map(~plot_WOE(IV = IV_obj, predictor = .))
# Restrict maximum plots to `max_var` ---------------------------------------
if(length(plot_list) > max_var){
plot_list <- plot_list[1:max_var]
table_list <- table_list[1:max_var]
}
table_names <- gsub("_", " ", x = names(table_list))
# Output list ---------------------------------------------------------------
output_list <-
list(
data %>% check_query(return = "text"),
data %>% create_IV(outcome = outcome, predictors=predictors, bins= bins),
data %>% create_IV(outcome = outcome,
predictors = predictors,
bins = bins,
return="summary"),
read_preamble("blank.md") # Header for WOE Analysis
) %>%
c(plot_list) %>%
c(list(read_preamble("blank.md"))) %>% # Header for Summary Tables
c(table_list) %>%
purrr::map_if(is.data.frame, create_dt) %>%
purrr::map_if(is.character, md2html)
title_list <-
c("Data Overview",
"Top Predictors",
"",
"WOE Analysis",
rep("", length(plot_list)),
"Summary - Predictors",
table_names)
n_title <- length(title_list)
title_levels <-
c(
2,
2,
4,
2, # Header for WOE Analysis
rep(4, length(plot_list)),
      2, # Header for Summary Tables
rep(3, length(table_list))
)
generate_report(title = "Information Value Report",
filename = newpath,
outputs = output_list,
titles = title_list,
subheaders = rep("", n_title),
echos = rep(FALSE, n_title),
levels = title_levels,
theme = "cosmo",
preamble = read_preamble("IV_report.md"))
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/IV_report.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Ljung and Box Portmanteau Test
#'
#' @description The Ljung-Box (1978) modified portmanteau test. In the
#' multivariate time series, this test statistic is asymptotically equal to
#' `Hosking`.
#'
#' This method and the documentation below are taken directly from the
#' original 'portes' package.
#'
#' @param obj a univariate or multivariate series with class "numeric",
#' "matrix", "ts", or ("mts" "ts"). It can be also an object of fitted
#' time-series model with class "ar", "arima0", "Arima", ("ARIMA forecast
#' ARIMA Arima"), "lm", ("glm" "lm"), or "varest". obj may also an object with
#' class "list" (see details and following examples).
#'
#' @param lags vector of lag auto-cross correlation coefficients used for
#' `Hosking` test.
#'
#' @param order Default is zero for testing the randomness of a given sequence
#' with class "numeric", "matrix", "ts", or ("mts" "ts"). In general order
#' equals to the number of estimated parameters in the fitted model. If obj is
#' an object with class "ar", "arima0", "Arima", "varest", ("ARIMA forecast
#' ARIMA Arima"), or "list" then no need to enter the value of order as it
#' will be automatically determined. For obj with other classes, the order is
#' needed for degrees of freedom of asymptotic chi-square distribution.
#'
#' @param season seasonal periodicity for testing seasonality. Default is 1 for
#' testing the non seasonality cases.
#'
#' @param squared.residuals if `TRUE` then apply the test on the squared values.
#' This checks for Autoregressive Conditional Heteroscedastic, `ARCH`,
#' effects. When `squared.residuals = FALSE`, then apply the test on the usual
#' residuals.
#'
#' @details While the portmanteau test statistic can be applied directly to
#' the output objects from the built-in R functions ar(), ar.ols(), ar.burg(),
#' ar.yw(), ar.mle(), arima(), arima0(), Arima(), auto.arima(), lm(), glm(),
#' and VAR(), it also works with output objects from any fitted model. In this
#' case, users should write their own function to fit any model they want,
#' where they may use the built in R functions FitAR(), garch(), garchFit(),
#' fracdiff(), tar(), etc. The object obj represents the output of this
#' function. This output must be a list with at least two outcomes: the fitted
#' residual and the order of the fitted model (list(res = ..., order = ...)).
#' See the following example with the function FitModel().
#'
#' Note: In R's 'stats' package, the function Box.test was built to compute
#' the Box and Pierce (1970) and Ljung and Box (1978) test statistics only in
#' the univariate case, where one cannot use more than a single lag value at a
#' time. The functions BoxPierce and LjungBox are more accurate than Box.test
#' function and can be used in the univariate or multivariate time series at
#' vector of different lag values as well as they can be applied on an output
#' object from a fitted model described in the description of the function
#' BoxPierce.
#'
#' @return
#' The Ljung and Box test statistic with the associated p-values for different
#' lags based on the asymptotic chi-square distribution with `k^2(lags-order)`
#' degrees of freedom.
#'
#' @author
#' Esam Mahdi and A.I. McLeod
#'
#' @references
#' Ljung, G.M. and Box, G.E.P (1978). "On a Measure of Lack of Fit in Time
#' Series Models". Biometrika, 65, 297-303.
#'
#' @examples
#' x <- rnorm(100)
#' LjungBox(x) # univariate test
#'
#' x <- cbind(rnorm(100),rnorm(100))
#' LjungBox(x) # multivariate test
#'
#' @export
LjungBox <- function(
obj,
lags = seq(5, 30, 5),
order = 0,
season = 1,
squared.residuals = FALSE
){
class.obj <- class(obj)[1]
TestType <- "0"
if (class.obj == "ts" || class.obj == "numeric" || class.obj ==
"matrix" || class.obj == "mts")
TestType <- "1"
if (class.obj == "ar" || class.obj == "arima0" || class.obj ==
"Arima" || class.obj == "ARIMA" || class.obj == "varest" || class.obj == "lm"
|| class.obj == "glm" || class.obj == "list")
TestType <- "2"
if (TestType == "0")
stop("obj must be class ar, arima0, Arima, (ARIMA forecast_ARIMA Arima), varest, lm, (glm lm), ts, numeric, matrix, (mts ts), or list")
Maxlag <- max(lags)
if (TestType == "1")
res <- stats::as.ts(obj)
else {
GetResid <- GetResiduals(obj)
res <- GetResid$res
order <- GetResid$order
}
if (squared.residuals){
res <- res ^ 2
}
n <- NROW(res)
k <- NCOL(res)
if (Maxlag*season >= n){
stop("Maximum value of arguments lags * season can't exceed n!")
}
df <- k^2*(lags-order)
NegativeDF <- which(df<0)
df[NegativeDF] <- 0
Accmat <- stats::acf(res, lag.max = Maxlag*season, plot = FALSE, type = "correlation")$acf
inveseR0 <- solve(Accmat[1,,])
prodvec <- numeric(Maxlag*season)
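  # Multivariate Ljung-Box statistic: Q = n(n+2) * sum over lags l of
  # 1/(n-l) * vec(R_l)' (R_0^{-1} kronecker R_0^{-1}) vec(R_l), where R_l is
  # the lag-l auto-cross correlation matrix; this reduces to the univariate
  # statistic n(n+2) * sum_l r_l^2 / (n-l) when k = 1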
for(l in 1:Maxlag){
tvecR <- t(as.vector(Accmat[l*season+1,,]))
prodvec[l] <- 1/(n-l)*crossprod(t(tvecR),crossprod(t(kronecker(inveseR0,inveseR0)),t(tvecR)))
}
Q <- n*(n+2)*cumsum(prodvec)
STATISTIC <- Q[lags]
PVAL <- 1 - stats::pchisq(STATISTIC,df)
PVAL[NegativeDF] <- NA
summary <- matrix(c(lags,STATISTIC,df,PVAL),ncol=4)
dimnames(summary) <-
list(
rep("", length(STATISTIC)),
c("lags", "statistic", "df", "p-value")
)
return(summary)
}
#' @title
#' Extract Residuals from ARIMA, VAR, or any Simulated Fitted Time Series Model
#'
#' @description
#' This utility function is useful to use in the portmanteau functions,
#' BoxPierce, MahdiMcLeod, Hosking, LiMcLeod, LjungBox, and portest.
#' GetResiduals() function takes a fitted time-series object with class "ar",
#' "arima0", "Arima", ("ARIMA forecast ARIMA Arima"), "lm", ("glm" "lm"),
#' "varest", or "list". and returns the residuals and the order from the fitted
#' object.
#'
#' This method and the documentation below are taken directly from the original
#' 'portes' package.
#'
#' @param obj a fitted time-series model with class "ar", "arima0", "Arima",
#' ("ARIMA forecast ARIMA Arima"), "lm", ("glm" "lm"), "varest", or "list".
#'
#' @return
#' List of order of fitted time series model and residuals from this model.
#'
#' @author
#' Esam Mahdi and A.I. McLeod.
#'
#' @examples
#' fit <- arima(Nile, c(1, 0, 1))
#' GetResiduals(fit)
#'
#' @export
GetResiduals <- function(obj){
class.obj = class(obj)[1]
if (class.obj != "ar" && class.obj != "arima0" && class.obj != "Arima" && class.obj != "varest" &&
class.obj != "ARIMA" && class.obj != "lm"
&& class.obj != "glm" && class.obj != "list" )
stop("obj must be class ar, arima0, Arima, (ARIMA forecast_ARIMA Arima), varest, lm, (glm lm), or list")
if (all(class.obj=="ar")){
order <- obj$order
res <- stats::ts(as.matrix(obj$resid)[-(1:order),])
} else if (all(class.obj == "arima0") || all(class.obj == "Arima")|| all (class.obj == "ARIMA")) {
pdq <- obj$arma
p <- pdq[1]
q <- pdq[2]
ps <- pdq[3]
qs <- pdq[4]
order <- p+q+ps+qs
res <- stats::ts(obj$residuals)
} else if (all(class.obj=="varest")){
order <- obj$p
res <- stats::resid(obj)
} else if (all(class.obj == "list")){
order <- obj$order
if(is.null(order)){
order <- 0
}
res <- obj$res
}
if (all(class.obj=="lm") || all(class.obj == "glm")){
order <- 0
res <- obj$residuals
}
list(order = order, res = res)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/Ljungbox.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Distribution of After-hours Collaboration Hours as a 100% stacked bar
#' @name afterhours_dist
#'
#' @description Analyse the distribution of weekly after-hours collaboration time.
#' Returns a stacked bar plot by default.
#' Additional options available to return a table with distribution elements.
#'
#' @details
#' Uses the metric \code{After_hours_collaboration_hours}.
#' See `create_dist()` for applying the same analysis to a different metric.
#'
#' @inheritParams create_dist
#' @inherit create_dist return
#'
#' @param cut A vector specifying the cuts to use for the data,
#' accepting "default" or "range-cut" as character vector,
#' or a numeric value of length three to specify the exact breaks to use. e.g. c(1, 3, 5)
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#' @importFrom tidyr spread
#' @importFrom stats median
#' @importFrom stats sd
#'
#' @family Visualization
#' @family After-hours Collaboration
#'
#' @examples
#' # Return plot
#' afterhours_dist(sq_data, hrvar = "Organization")
#'
#' # Return summary table
#' afterhours_dist(sq_data, hrvar = "Organization", return = "table")
#'
#' # Return result with custom specified breaks
#' afterhours_dist(sq_data, hrvar = "LevelDesignation", cut = c(4, 7, 9))
#' @export
afterhours_dist <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot",
cut = c(1, 2, 3)) {
create_dist(data = data,
metric = "After_hours_collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return,
cut = cut)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/afterhours_dist.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Distribution of After-hours Collaboration Hours (Fizzy Drink plot)
#'
#' @description
#' Analyze the distribution of weekly after-hours collaboration hours, and return
#' a 'fizzy' scatter plot by default.
#' Additional options available to return a table with distribution elements.
#'
#' @details
#' Uses the metric `After_hours_collaboration_hours`.
#' See `create_fizz()` for applying the same analysis to a different metric.
#'
#' @inheritParams create_fizz
#' @inherit create_fizz return
#'
#' @family Visualization
#' @family After-hours Collaboration
#'
#' @examples
#' # Return plot
#' afterhours_fizz(sq_data, hrvar = "LevelDesignation", return = "plot")
#'
#' # Return summary table
#' afterhours_fizz(sq_data, hrvar = "Organization", return = "table")
#' @export
afterhours_fizz <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot"){
create_fizz(data = data,
metric = "After_hours_collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/afterhours_fizz.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title After-hours Collaboration Time Trend - Line Chart
#'
#' @description
#' Provides a week by week view of after-hours collaboration time, visualized as
#' line charts. By default returns a line chart for after-hours collaboration
#' hours, with a separate panel per value in the HR attribute. Additional
#' options available to return a summary table.
#'
#' @details
#' Uses the metric `After_hours_collaboration_hours`.
#'
#' @seealso [create_line()] for applying the same analysis to a different metric.
#'
#' @inheritParams create_line
#' @inherit create_line return
#'
#' @family Visualization
#' @family After-hours Collaboration
#'
#' @examples
#' # Return a line plot
#' afterhours_line(sq_data, hrvar = "LevelDesignation")
#'
#' # Return summary table
#' afterhours_line(sq_data, hrvar = "LevelDesignation", return = "table")
#'
#' @export
afterhours_line <- function(data,
hrvar = "Organization",
mingroup=5,
return = "plot"){
## Inherit arguments
create_line(data = data,
metric = "After_hours_collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/afterhours_line.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Rank groups with high After-Hours Collaboration Hours
#'
#' @description
#' This function scans a Standard Person Query for groups with high levels of
#' After-Hours Collaboration. Returns a plot by default, with an option to
#' return a table with all groups (across multiple HR attributes) ranked by
#' after-hours collaboration hours.
#'
#' @details
#' Uses the metric \code{After_hours_collaboration_hours}.
#' See `create_rank()` for applying the same analysis to a different metric.
#'
#' @inheritParams create_rank
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#' @importFrom stats reorder
#'
#' @family Visualization
#' @family After-hours Collaboration
#'
#' @return
#' Returns a 'ggplot' object by default, where 'plot' is passed in `return`.
#' When 'table' is passed, a summary table is returned as a data frame.
#'
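#' @examples
#' # A minimal usage sketch with the bundled demo dataset: rank all groups
#' # across the identified HR attributes by after-hours collaboration
#' afterhours_rank(sq_data, return = "table")
#'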
#' @export
afterhours_rank <- function(data,
hrvar = extract_hr(data),
mingroup = 5,
mode = "simple",
plot_mode = 1,
return = "plot"){
data %>%
create_rank(metric = "After_hours_collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
mode = mode,
plot_mode = plot_mode,
return = return)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/afterhours_rank.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Summary of After-Hours Collaboration Hours
#'
#' @description
#' Provides an overview analysis of after-hours collaboration time.
#' Returns a bar plot showing average weekly after-hours collaboration hours by default.
#' Additional options available to return a summary table.
#'
#' @details
#' Uses the metric \code{After_hours_collaboration_hours}.
#'
#' @inheritParams create_bar
#' @inherit create_bar return
#'
#' @family Visualization
#' @family After-hours Collaboration
#'
#' @examples
#' # Return a ggplot bar chart
#' afterhours_summary(sq_data, hrvar = "LevelDesignation")
#'
#' # Return a summary table
#' afterhours_summary(sq_data, hrvar = "LevelDesignation", return = "table")
#'
#' @export
afterhours_summary <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot"){
create_bar(data = data,
metric = "After_hours_collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return,
bar_colour = "alert")
}
#' @rdname afterhours_summary
#' @export
afterhours_sum <- afterhours_summary
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/afterhours_summary.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title After-Hours Time Trend
#'
#' @description
#' Provides a week by week view of after-hours collaboration time.
#' By default returns a week by week heatmap, highlighting the points in time with most activity.
#' Additional options available to return a summary table.
#'
#' @details
#' Uses the metric `After_hours_collaboration_hours`.
#'
#' @inheritParams create_trend
#'
#' @family Visualization
#' @family After-hours Collaboration
#'
#' @examples
#' # Run plot
#' afterhours_trend(sq_data)
#'
#' # Run table
#' afterhours_trend(sq_data, hrvar = "LevelDesignation", return = "table")
#'
#' @return
#' Returns a 'ggplot' object by default, where 'plot' is passed in `return`.
#' When 'table' is passed, a summary table is returned as a data frame.
#'
#' @export
afterhours_trend <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot"){
create_trend(data,
metric = "After_hours_collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/afterhours_trend.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Anonymise a categorical variable by replacing values
#'
#' @description
#' Anonymize categorical variables such as HR variables by replacing values with
#' dummy team names such as 'Team A'. The default behaviour is to make
#' one-to-one replacements, but there is an option to completely randomise
#' values in the categorical variable.
#'
#' @param x Character vector to be passed through.
#' @param scramble Logical value determining whether to randomise values in the
#' categorical variable.
#' @param replacement Character vector containing the values to replace original
#' values in the categorical variable. The length of the vector must be at
#' least as great as the number of unique values in the original variable.
#' Defaults to `NULL`, where the replacement would consist of `"Team A"`,
#' `"Team B"`, etc.
#'
#' @examples
#' unique(anonymise(sq_data$Organization))
#'
#' rep <- c("Manager+", "Manager", "IC")
#' unique(anonymise(sq_data$Layer, replacement = rep))
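#'
#' # Completely randomise the values instead of mapping one-to-one
#' unique(anonymise(sq_data$Organization, scramble = TRUE))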
#'
#' @seealso jitter
#' @export
anonymise <- function(x,
scramble = FALSE,
replacement = NULL){
n_to_rep <- length(x)
v_to_rep <- unique(x)
nd_to_rep <- length(v_to_rep)
if(is.null(replacement)){
# Default labels support up to 26 unique values ("Team A" to "Team Z")
replacement <- paste("Team", LETTERS[1:nd_to_rep])
} else {
# Truncate the supplied replacements to the number of unique values
replacement <- replacement[1:nd_to_rep]
}
if(scramble == TRUE){
sample(x = replacement,
size = n_to_rep,
replace = TRUE)
} else if(scramble == FALSE){
replacement[match(x, v_to_rep)]
}
}
#' @rdname anonymise
#' @export
anonymize <- anonymise
#' @title Jitter metrics in a data frame
#'
#' @description Convenience wrapper around `jitter()` to add a layer of
#' anonymity to a query. This can be used in combination with `anonymise()` to
#' produce a demo dataset from real data.
#'
#' @param data Data frame containing a query.
#' @param cols Character vector containing the metrics to jitter. When set to
#' `NULL` (default), all numeric columns in the data frame are jittered.
#' @param ... Additional arguments to pass to `jitter()`.
#'
#' @importFrom dplyr mutate
#' @importFrom dplyr across
#'
#' @examples
#' jittered <- jitter_metrics(sq_data, cols = "Collaboration_hours")
#' head(
#' data.frame(
#' original = sq_data$Collaboration_hours,
#' jittered = jittered$Collaboration_hours
#' )
#' )
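#'
#' # With `cols = NULL` (the default), all numeric columns are jittered
#' head(jitter_metrics(sq_data)$Collaboration_hours)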
#'
#' @seealso anonymise
#' @export
jitter_metrics <- function(data, cols = NULL, ...){
  if(!is.null(cols)){
    data %>%
      mutate(
        across(
          .cols = dplyr::all_of(cols), # Explicit selection of supplied columns
          .fns = ~abs(jitter(., ...))
        )
      )
  } else {
    # Default: jitter all numeric columns
    data %>%
      mutate(
        across(
          .cols = where(~is.numeric(.)),
          .fns = ~abs(jitter(., ...))
        )
      )
  }
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/anonymise.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title
#' Calculate Weight of Evidence (WOE) and Information Value (IV) between a
#' single predictor and a single outcome variable.
#'
#' @description
#' Calculates Weight of Evidence (WOE) and Information Value (IV) between a
#' single predictor and a single outcome variable. This function implements the
#' common Information Value calculations whilst minimising reliance
#' on external dependencies. Use `map_IV()` for the equivalent of
#' `Information::create_infotables()`, which performs calculations for multiple
#' predictors and a single outcome variable.
#'
#' @details
#' The approach used mirrors the one used in `Information::create_infotables()`.
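#'
#' For each bin \eqn{i}, Weight of Evidence is computed as
#' \eqn{WOE_i = \log((n_{1i}/N_1)/(n_{0i}/N_0))}, the log-ratio of the share of
#' events to the share of non-events falling in that bin, where \eqn{N_1} and
#' \eqn{N_0} are the total counts of events and non-events. The Information
#' Value is the cumulative sum of \eqn{(n_{1i}/N_1 - n_{0i}/N_0) \times WOE_i}
#' across bins.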
#'
#' @param data Data frame containing the data.
#' @param outcome String containing the name of the outcome variable.
#' @param predictor String containing the name of the predictor variable.
#' @param bins Numeric value representing the number of bins to use.
#'
#' @import dplyr
#'
#' @return A data frame is returned as an output.
#'
calculate_IV <- function(data,
outcome,
predictor,
bins){
pred_var <- data[[predictor]]
outc_var <- data[[outcome]]
# Check inputs
if(sum(is.na(outc_var)) > 0){
stop(
glue::glue(
"dependent variable {outcome} has missing values in the input training data frame"
)
)
}
# Compute q
q <- stats::quantile(
pred_var,
probs = c(1:(bins - 1) / bins),
na.rm = TRUE,
type = 3
)
# Compute cuts
cuts <- unique(q)
# Compute intervals
intervals <-
findInterval(
pred_var,
vec = cuts,
rightmost.closed = FALSE)
# Compute cut_table
cut_table <-
table(
intervals,
outc_var) %>%
as.data.frame.matrix()
## get min/max
cut_table_2 <-
data.frame(
var = pred_var,
intervals
) %>%
group_by(intervals) %>%
summarise(
min = min(var, na.rm = TRUE) %>% round(digits = 1),
max = max(var, na.rm = TRUE) %>% round(digits = 1),
n = n(),
.groups = "drop"
) %>%
mutate(!!sym(predictor) :=
glue::glue("[{round(min, digits = 1)},{round(max, digits = 1)}]")) %>%
mutate(percentage = n / sum(n)) %>%
select(!!sym(predictor), intervals, n, percentage)
# Event (`1`) and non-event (`0`) counts per bin, coerced to numeric
cut_table_1 <- as.numeric(cut_table$`1`)
cut_table_0 <- as.numeric(cut_table$`0`)
# Counts cross-scaled so that their ratio equals
# (% of events in bin) / (% of non-events in bin)
n_event_scaled <- cut_table_1 * sum(cut_table_0) # t$y_1 * sum_y_0
n_non_event_scaled <- cut_table_0 * sum(cut_table_1) # t$y_0 * sum_y_1
# Compute WOE
cut_table_2$WOE <-
ifelse(
cut_table$`1` > 0 & cut_table$`0` > 0, # Both positive
log(n_event_scaled / n_non_event_scaled), # % of events divided by % of non-events
0) # Otherwise impute 0
# Compute IV_weight
p1 <- cut_table$`1` / sum(cut_table$`1`)
p0 <- cut_table$`0` / sum(cut_table$`0`)
cut_table_2$IV_weight <- p1 - p0
cut_table_2$IV <- cut_table_2$WOE * cut_table_2$IV_weight
cut_table_2 %>%
mutate(IV = cumsum(IV)) %>%
# Maintain consistency with `Information::create_infotables()`
select(
!!sym(predictor),
N = "n",
Percent = "percentage",
WOE,
IV)
}
#' @title
#' Calculate Weight of Evidence (WOE) and Information Value (IV) between
#' multiple predictors and a single outcome variable, returning a list of
#' statistics.
#'
#' @description
#' This is a wrapper around `calculate_IV()` to loop through multiple predictors
#' and calculate their Weight of Evidence (WOE) and Information Value (IV) with
#' respect to an outcome variable.
#'
#' @details
#' The approach used mirrors the one used in `Information::create_infotables()`.
#'
#' @param data Data frame containing the data.
#' @param outcome String containing the name of the outcome variable.
#' @param predictors Character vector containing the names of the predictor
#' variables. If `NULL` (default) is supplied, all numeric variables in the
#' data will be used.
#' @param bins Numeric value representing the number of bins to use. Defaults to
#' 10.
#'
#' @import dplyr
#'
#' @return A list of data frames is returned as an output. The first layer of
#' the list contains `Tables` and `Summary`:
#' - `Tables` is a list of data frames containing the WOE and cumulative sum
#' IV for each predictor.
#' - `Summary` is a single data frame containing the IV for all predictors.
#'
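#' @examples
#' \dontrun{
#' # A minimal usage sketch for this internal function: derive a binary
#' # outcome from the demo dataset, then compute IV for all numeric predictors
#' sq_iv <- dplyr::mutate(sq_data, X = ifelse(Collaboration_hours > 15, 1, 0))
#' map_IV(data = sq_iv, outcome = "X", bins = 5)$Summary
#' }
#'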
map_IV <- function(data,
predictors = NULL,
outcome,
bins = 10){
if(is.null(predictors)){
predictors <-
data %>%
select(-!!sym(outcome)) %>%
select(
where(is.numeric)
) %>%
names()
}
# List of individual tables
Tables <-
predictors %>%
purrr::map(function(pred){
calculate_IV(
data = data,
outcome = outcome,
predictor = pred,
bins = bins
)
}) %>%
purrr::set_names(
nm = purrr::map(
.,
function(df){
names(df)[[1]]
}
)
)
# Compile Summary Table
Summary <-
list("df" = Tables,
"names" = names(Tables)) %>%
purrr::pmap(function(df, names){
IV_final <-
df %>%
slice(nrow(df)) %>%
pull(IV)
data.frame(
Variable = names,
IV = IV_final
)
}) %>%
bind_rows() %>%
arrange(desc(IV))
# Reorder and combine list
c(
list("Tables" = Tables[Summary$Variable]), # Reordered
list("Summary" = Summary)
)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/calculate_IV.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Generate a Capacity report in HTML
#'
#' @description
#' The function generates an interactive HTML report using the Standard Person
#' Query data as an input. The report contains a series of summary analysis and
#' visualisations relating to key **capacity** metrics in Viva
#' Insights, including length of week and time in after-hours collaboration.
#'
#' @template spq-params
#' @param path Pass the file path and the desired file name, _excluding the file extension_.
#' For example, `"capacity report"`.
#' @param timestamp Logical vector specifying whether to include a timestamp in the file name.
#' Defaults to TRUE.
#'
#' @importFrom purrr map_if
#' @importFrom dplyr `%>%`
#'
#' @family Reports
#'
#' @inherit generate_report return
#'
#' @export
capacity_report <- function(data,
hrvar = "Organization",
mingroup = 5,
path = "capacity report",
timestamp = TRUE){
## Create timestamped path (if applicable)
if(timestamp == TRUE){
newpath <- paste(path, wpa::tstamp())
} else {
newpath <- path
}
# Set outputs
output_list <-
list(data %>% check_query(return = "text", validation = TRUE),
read_preamble("workloads_section.md"), # Header
data %>% workloads_summary(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% workloads_summary(hrvar = hrvar, mingroup = mingroup, return = "table"),
data %>% workloads_dist(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% workloads_dist(hrvar = hrvar, mingroup = mingroup, return = "table"),
data %>% workloads_trend(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% workloads_trend(hrvar = hrvar, mingroup = mingroup, return = "table"),
data %>% workloads_line(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% workloads_line(hrvar = hrvar, mingroup = mingroup, return = "table"),
read_preamble("afterhours_section.md"), # Header
data %>% afterhours_summary(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% afterhours_summary(hrvar = hrvar, mingroup = mingroup, return = "table"),
data %>% afterhours_dist(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% afterhours_dist(hrvar = hrvar, mingroup = mingroup, return = "table"),
data %>% afterhours_trend(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% afterhours_trend(hrvar = hrvar, mingroup = mingroup, return = "table")) %>%
purrr::map_if(is.data.frame, create_dt) %>%
purrr::map_if(is.character, md2html)
# Set header titles
title_list <-
c("Data Overview",
"Workloads",
"Workloads Summary - Plot",
"Workloads Summary - Table",
"Workloads Distribution - Plot",
"Workloads Distribution - Table",
"Workloads Trend - Plot",
"Workloads Trend - Table",
"Workloads over time - Plot",
"Workloads over time - Table",
"After hours",
"After hours Summary - Plot",
"After hours Summary - Table",
"After hours Distribution - Plot",
"After hours Distribution - Table",
"After hours Trend - Plot",
"After hours Trend - Table")
# Set header levels
n_title <- length(title_list)
levels_list <- rep(3, n_title)
levels_list[c(1, 2, 11)] <- 2 # Section header
generate_report(title = "Capacity Report",
filename = newpath,
outputs = output_list,
titles = title_list,
subheaders = rep("", n_title),
echos = rep(FALSE, n_title),
levels = levels_list,
theme = "cosmo",
preamble = read_preamble("capacity_report.md"))
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/capacity_report.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Check a query to ensure that it is suitable for analysis
#'
#' @description Prints diagnostic data about the data query to the R console,
#' with information such as date range, number of employees, HR attributes
#' identified, etc.
#'
#' @details This can be used with any person-level query, such as the standard
#' person query, Ways of Working assessment query, and the hourly collaboration
#' query. When run, this prints diagnostic data to the R console.
#'
#' @param data A person-level query in the form of a data frame. This includes:
#' - Standard Person Query
#' - Ways of Working Assessment Query
#' - Hourly Collaboration Query
#'
#' All person-level query have a `PersonId` column and a `Date` column.
#'
#' @param return String specifying what to return. This must be one of the
#' following strings:
#' - `"message"` (default)
#' - `"text"`
#'
#' See `Value` for more information.
#'
#' @param validation Logical value specifying whether to show a summarised
#' version of the checks, as used in `validation_report()`. Defaults to
#' `FALSE`. Set `validation` to `TRUE` to hide the checks on variable names.
#'
#' @return
#' A different output is returned depending on the value passed to the `return`
#' argument:
#' - `"message"`: a message is returned to the console.
#' - `"text"`: string containing the diagnostic message.
#'
#' @examples
#' check_query(sq_data)
#'
#' @family Data Validation
#'
#' @export
check_query <- function(data, return = "message", validation = FALSE){
if(!is.data.frame(data)){
stop("Input is not a data frame.")
}
if("PersonId" %in% names(data)){
if(validation == FALSE){
check_person_query(data = data, return = return)
} else if(validation == TRUE){
# Different displays required for validation_report()
check_query_validation(data = data, return = return)
}
} else {
message("Note: checks are currently unavailable for a non-Person query")
}
}
#' @title Check a Person Query to ensure that it is suitable for analysis
#'
#' @description
#' Prints diagnostic data about the data query to the R console, with information
#' such as date range, number of employees, HR attributes identified, etc.
#'
#' @inheritParams check_query
#'
#' @details Used as part of `check_query()`.
#'
#' @noRd
#'
check_person_query <- function(data, return){
## Query Type - Uses `identify_query()`
main_chunk <- paste0("The data used is a ", identify_query(data))
## PersonId
if(!("PersonId" %in% names(data))){
stop("There is no `PersonId` variable in the input.")
} else {
new_chunk <- paste("There are", dplyr::n_distinct(data$PersonId), "employees in this dataset.")
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
}
## Date
if(!("Date" %in% names(data))){
stop("There is no `Date` variable in the input.")
} else if("Influence_rank" %in% names(data)){
# Omit date conversion
new_chunk <- paste0("Date ranges from ", min(data$Date), " to ", max(data$Date), ".")
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
} else {
data$Date <- as.Date(data$Date, "%m/%d/%Y")
new_chunk <- paste0("Date ranges from ", min(data$Date), " to ", max(data$Date), ".")
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
}
## Extract unique identifiers of query ------------------------------------
extracted_chr <-
data %>%
hrvar_count_all(return = "table") %>%
filter(`Unique values`==1) %>%
pull(Attributes)
if (length(extracted_chr)>1) {
extractHRValues <- function(data, hrvar){
data %>%
summarise(FirstValue = first(!!sym(hrvar))) %>%
mutate(HRAttribute = wrap(hrvar, wrapper = "`")) %>%
select(HRAttribute, FirstValue) %>%
mutate(FirstValue = as.character(FirstValue)) # Coerce type
}
result <-
extracted_chr %>%
purrr::map(function(x){ extractHRValues(data = data, hrvar = x)}) %>%
bind_rows()
new_chunk <- paste("Unique identifiers include:",
result %>%
mutate(identifier = paste(HRAttribute, "is", FirstValue)) %>%
pull(identifier) %>%
paste(collapse = "; "))
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
}
## HR Variables
hr_chr <- extract_hr(data, max_unique = 200) %>% wrap(wrapper = "`")
new_chunk <- paste("There are", length(hr_chr), "(estimated) HR attributes in the data:" )
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
new_chunk <- paste(hr_chr, collapse = ", ")
main_chunk <- paste(main_chunk, new_chunk, sep = "\n")
## `IsActive` flag
if(!("IsActive" %in% names(data))){
new_chunk <- "The `IsActive` flag is not present in the data."
main_chunk <- paste(main_chunk, new_chunk, sep = "\n")
} else {
data$IsActive <- as.logical(data$IsActive) # Force to logical
active_n <- dplyr::n_distinct(data[data$IsActive == TRUE, "PersonId"])
new_chunk <- paste0("There are ", active_n, " active employees out of all employees in the dataset.")
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
}
## Variable check header
new_chunk <- "Variable name check:"
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
## Collaboration_hours
if(!("Collaboration_hours" %in% names(data)) &
("Collaboration_hrs" %in% names(data))){
new_chunk <- "`Collaboration_hrs` is used instead of `Collaboration_hours` in the data."
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
} else if(!("Collaboration_hrs" %in% names(data)) &
("Collaboration_hours" %in% names(data))){
new_chunk <- "`Collaboration_hours` is used instead of `Collaboration_hrs` in the data."
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
} else if(!("Collaboration_hours" %in% names(data)) &
!("Collaboration_hrs" %in% names(data))){ # Neither variant present
new_chunk <- "No collaboration hour metric exists in the data."
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
}
## Instant_Message_hours
if(!("Instant_message_hours" %in% names(data)) &
("Instant_Message_hours" %in% names(data))){
new_chunk <- "`Instant_Message_hours` is used instead of `Instant_message_hours` in the data."
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
} else if(!("Instant_Message_hours" %in% names(data)) &
("Instant_message_hours" %in% names(data))){
new_chunk <- "`Instant_message_hours` is used instead of `Instant_Message_hours` in the data."
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
} else if(!("Instant_message_hours" %in% names(data)) &
!("Instant_Message_hours" %in% names(data))){ # Neither variant present
new_chunk <- "No instant message hour metric exists in the data."
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
}
## Return
if(return == "message"){
main_chunk <- paste("", main_chunk, sep = "\n")
message(main_chunk)
} else if(return == "text"){
main_chunk
} else {
stop("Please check inputs for `return`")
}
}
#' @title Perform a query check for the validation report
#'
#' @description
#' Prints diagnostic data about the data query to the R console, with information
#' such as date range, number of employees, HR attributes identified, etc.
#' Optimised for the `validation_report()`
#'
#' @inheritParams check_query
#'
#' @details Used as part of `check_query()`.
#'
#' @noRd
check_query_validation <- function(data, return){
## Query Type - Initialise
main_chunk <- ""
## PersonId
if(!("PersonId" %in% names(data))){
stop("There is no `PersonId` variable in the input.")
} else {
new_chunk <- paste("There are", dplyr::n_distinct(data$PersonId), "employees in this dataset.")
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
}
## Date
if(!("Date" %in% names(data))){
stop("There is no `Date` variable in the input.")
} else {
data$Date <- as.Date(data$Date, "%m/%d/%Y")
new_chunk <- paste0("Date ranges from ", min(data$Date), " to ", max(data$Date), ".")
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
}
## Extract unique identifiers of query ------------------------------------
extracted_chr <- data %>%
hrvar_count_all(return = "table") %>%
filter(`Unique values`==1) %>%
pull(Attributes)
if (length(extracted_chr) > 1) {
extractHRValues <- function(data, hrvar){
data %>%
summarise(FirstValue = first(!!sym(hrvar))) %>%
mutate(HRAttribute = wrap(hrvar, wrapper = "`")) %>%
select(HRAttribute, FirstValue) %>%
mutate(FirstValue = as.character(FirstValue)) # Coerce type
}
result <-
extracted_chr %>%
purrr::map(function(x){ extractHRValues(data = data, hrvar = x)}) %>%
bind_rows()
new_chunk <- paste("Unique identifiers include:",
result %>%
mutate(identifier = paste(HRAttribute, "is", FirstValue)) %>%
pull(identifier) %>%
paste(collapse = "; "))
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
}
## HR Variables
hr_chr <- extract_hr(data, max_unique = 200) %>% wrap(wrapper = "`")
new_chunk <- paste("There are", length(hr_chr), "(estimated) HR attributes in the data:" )
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
new_chunk <- paste(hr_chr, collapse = ", ")
main_chunk <- paste(main_chunk, new_chunk, sep = "\n")
## `IsActive` flag
if(!("IsActive" %in% names(data))){
new_chunk <- "The `IsActive` flag is not present in the data."
main_chunk <- paste(main_chunk, new_chunk, sep = "\n")
} else {
data$IsActive <- as.logical(data$IsActive) # Force to logical
active_n <- dplyr::n_distinct(data[data$IsActive == TRUE, "PersonId"])
new_chunk <- paste0("There are ", active_n, " active employees out of all employees in the dataset.")
main_chunk <- paste(main_chunk, new_chunk, sep = "\n\n")
}
## Return
if(return == "message"){
main_chunk <- paste("", main_chunk, sep = "\n")
message(main_chunk)
} else if(return == "text"){
main_chunk
} else {
stop("Please check inputs for `return`")
}
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/check_query.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Generate a Coaching report in HTML
#'
#' @description
#' The function generates an interactive HTML report using Standard Person Query
#' data as an input. The report contains a series of summary analysis and
#' visualisations relating to key **coaching** metrics in Viva Insights,
#' specifically relating to the time spent between managers and their direct
#' reports.
#'
#' @template spq-params
#' @param path Pass the file path and the desired file name, _excluding the file
#' extension_. For example, `"coaching report"`.
#' @param timestamp Logical vector specifying whether to include a timestamp in
#' the file name. Defaults to `TRUE`.
#'
#' @importFrom purrr map_if
#' @importFrom dplyr `%>%`
#'
#' @family Reports
#'
#' @inherit generate_report return
#'
#' @export
coaching_report <- function(data,
hrvar = "LevelDesignation",
mingroup = 5,
path = "coaching report",
timestamp = TRUE){
## Create timestamped path (if applicable)
if(timestamp == TRUE){
newpath <- paste(path, wpa::tstamp())
} else {
newpath <- path
}
output_list <-
list(data %>% check_query(return = "text", validation = TRUE),
data %>% mgrrel_matrix(hrvar = hrvar, return = "plot"), # no mingroup arg
data %>% mgrrel_matrix(hrvar = hrvar, return = "table"), # no mingroup arg
data %>% one2one_sum(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% one2one_sum(hrvar = hrvar, mingroup = mingroup, return = "table"),
data %>% one2one_dist(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% one2one_dist(hrvar = hrvar, mingroup = mingroup, return = "table"),
data %>% one2one_trend(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% one2one_trend(hrvar = hrvar, mingroup = mingroup, return = "table")) %>%
purrr::map_if(is.data.frame, create_dt) %>%
purrr::map_if(is.character, md2html)
title_list <-
c("Data Overview",
"Manager Relation Style - Plot",
"Manager Relation Style - Table",
"1-to-1 Summary - Plot",
"1-to-1 Summary - Table",
"1-to-1 Distribution - Plot",
"1-to-1 Distribution - Table",
"1-to-1 Trend - Plot",
"1-to-1 Trend - Table")
n_title <- length(title_list)
generate_report(title = "Coaching Report",
filename = newpath,
outputs = output_list,
titles = title_list,
subheaders = rep("", n_title),
echos = rep(FALSE, n_title),
levels = rep(3, n_title),
theme = "cosmo",
preamble = read_preamble("coaching_report.md"))
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/coaching_report.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Collaboration - Stacked Area Plot
#'
#' @description
#' Provides an overview analysis of Weekly Digital Collaboration.
#' Returns a stacked area plot of Email and Meeting Hours by default.
#' Additional options available to return a summary table.
#'
#' @details
#' Uses the metrics `Meeting_hours`, `Email_hours`, `Unscheduled_Call_hours`,
#' and `Instant_Message_hours`.
#'
#' @param data A Standard Person Query dataset in the form of a data frame.
#' A Ways of Working assessment dataset may also be provided, in which case
#' Unscheduled call hours are included in the output.
#' @param hrvar HR Variable by which to split metrics, defaults to `NULL`, but
#' accepts any character vector, e.g. "LevelDesignation". If `NULL` is passed,
#' the organizational attribute is automatically populated as "Total".
#' @param mingroup Numeric value setting the privacy threshold / minimum group
#' size. Defaults to 5.
#' @param return String specifying what to return. This must be one of the
#' following strings:
#' - `"plot"`
#' - `"table"`
#'
#' See `Value` for more information.
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#'
#' @family Visualization
#' @family Collaboration
#'
#' @examples
#' \donttest{
#' # Return plot with total (default)
#' collaboration_area(sq_data)
#'
#' # Return plot with hrvar split
#' collaboration_area(sq_data, hrvar = "Organization")
#'
#' # Return summary table
#' collaboration_area(sq_data, return = "table")
#' }
#'
#' @return
#' A different output is returned depending on the value passed to the `return` argument:
#' - `"plot"`: 'ggplot' object. A stacked area plot for the metric.
#' - `"table"`: data frame. A summary table for the metric.
#'
#' @export
collaboration_area <- function(data,
hrvar = NULL,
mingroup=5,
return = "plot"){
## Handle variable name consistency
data <- qui_stan_c(data)
data <- qui_stan_im(data)
## Handling NULL values passed to hrvar
if(is.null(hrvar)){
data <- totals_col(data)
hrvar <- "Total"
}
## Date cleaning
data$Date <- as.Date(data$Date, format = "%m/%d/%Y")
## Lower case version of column names
lnames <- tolower(names(data))
if("unscheduled_call_hours" %in% lnames){
names(data) <-
gsub(pattern = "unscheduled_call_hours",
replacement = "Unscheduled_Call_hours",
x = names(data),
ignore.case = TRUE) # Case-insensitive
}
## Exclude metrics if not available as a metric
check_chr <- c("^Meeting_hours$",
"^Email_hours$",
"^Instant_Message_hours$",
"^Unscheduled_Call_hours$")
main_vars <-
names(data)[
grepl(pattern = paste(check_chr, collapse = "|"),
x = lnames,
ignore.case = TRUE)
]
## Analysis table
myTable <-
data %>%
rename(group = !!sym(hrvar)) %>% # Rename HRvar to `group`
select(PersonId,
Date,
group,
main_vars) %>%
group_by(Date, group) %>%
summarise_at(vars(main_vars), ~mean(.)) %>%
left_join(hrvar_count(data, hrvar, return = "table"),
by = c("group" = hrvar)) %>%
rename(Employee_Count = "n") %>%
filter(Employee_Count >= mingroup) %>%
ungroup()
myTable_long <-
myTable %>%
select(Date, group, ends_with("_hours")) %>%
gather(Metric, Hours, -Date, -group) %>%
mutate(Metric = sub(pattern = "_hours", replacement = "", x = Metric))
## Levels
level_chr <- sub(pattern = "_hours", replacement = "", x = main_vars)
## Colour definitions
colour_defs <-
c("Meeting" = "#34b1e2",
"Email" = "#1d627e",
"Instant_Message" = "#adc0cb",
"Unscheduled_Call" = "#b4d5dd")
colour_defs <- colour_defs[names(colour_defs) %in% level_chr]
plot_object <-
myTable_long %>%
mutate(Metric = factor(Metric, levels = level_chr)) %>%
ggplot(aes(x = Date, y = Hours, colour = Metric)) +
geom_area(aes(fill = Metric), alpha = 1.0, position = 'stack') +
theme_wpa_basic() +
scale_y_continuous(labels = round) +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
scale_colour_manual(values = colour_defs) +
scale_fill_manual(values = colour_defs) +
facet_wrap(.~group) +
labs(title = "Total Collaboration Hours",
subtitle = paste("Weekly collaboration hours by", camel_clean(hrvar))) +
labs(caption = extract_date_range(data, return = "text"))
if(return == "table"){
myTable %>%
as_tibble() %>%
mutate(Collaboration_hours = select(., main_vars) %>%
apply(1, sum, na.rm = TRUE))
} else if(return == "plot"){
return(plot_object)
} else {
stop("Please enter a valid input for `return`.")
}
}
#' @rdname collaboration_area
#' @export
collab_area <- collaboration_area
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/collaboration_area.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Distribution of Collaboration Hours as a 100% stacked bar
#'
#' @description
#' Analyze the distribution of Collaboration Hours.
#' Returns a stacked bar plot by default.
#' Additional options available to return a table with distribution elements.
#'
#' @template ch
#'
#' @inheritParams create_dist
#' @inherit create_dist return
#'
#' @family Visualization
#' @family Collaboration
#'
#' @examples
#' # Return plot
#' collaboration_dist(sq_data, hrvar = "Organization")
#'
#' # Return summary table
#' collaboration_dist(sq_data, hrvar = "Organization", return = "table")
#' @export
collaboration_dist <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot",
cut = c(15, 20, 25)) {
## Handle variable name consistency
data <- qui_stan_c(data)
create_dist(data = data,
metric = "Collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return,
cut = cut)
}
#' @rdname collaboration_dist
#' @export
collab_dist <- collaboration_dist
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/collaboration_dist.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Distribution of Collaboration Hours (Fizzy Drink plot)
#'
#' @description
#' Analyze the distribution of weekly collaboration hours, and return
#' a 'fizzy' scatter plot by default.
#' Additional options available to return a table with distribution elements.
#'
#' @template ch
#'
#' @inheritParams create_fizz
#' @inherit create_fizz return
#'
#' @family Visualization
#' @family Collaboration
#'
#' @examples
#' # Return plot
#' collaboration_fizz(sq_data, hrvar = "Organization", return = "plot")
#'
#' # Return summary table
#' collaboration_fizz(sq_data, hrvar = "Organization", return = "table")
#'
#' @export
collaboration_fizz <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot"){
## Handle variable name consistency
data <- qui_stan_c(data)
create_fizz(data = data,
metric = "Collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return)
}
#' @rdname collaboration_fizz
#' @export
collab_fizz <- collaboration_fizz
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/collaboration_fizz.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Collaboration Time Trend - Line Chart
#'
#' @description
#' Provides a week by week view of collaboration time, visualised as line charts.
#' By default returns a line chart for collaboration hours,
#' with a separate panel per value in the HR attribute.
#' Additional options available to return a summary table.
#'
#' @template ch
#'
#' @inheritParams create_line
#' @inherit create_line return
#'
#' @family Visualization
#' @family Collaboration
#'
#' @examples
#' # Return a line plot
#' collaboration_line(sq_data, hrvar = "LevelDesignation")
#'
#' # Return summary table
#' collaboration_line(sq_data, hrvar = "LevelDesignation", return = "table")
#'
#' @export
collaboration_line <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot"){
## Handle variable name consistency
data <- qui_stan_c(data)
## Inherit arguments
create_line(data = data,
metric = "Collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return)
}
#' @rdname collaboration_line
#' @export
collab_line <- collaboration_line
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/collaboration_line.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Collaboration Ranking
#'
#' @description
#' This function scans a standard query output for groups with high levels of
#' 'Weekly Digital Collaboration'. Returns a plot by default, with an option to
#' return a table with all groups (across multiple HR attributes) ranked by
#' hours of digital collaboration.
#'
#' @details
#' Uses the metric `Collaboration_hours`.
#' See `create_rank()` for applying the same analysis to a different metric.
#'
#' @inheritParams create_rank
#' @inherit create_rank return
#'
#' @family Visualization
#' @family Collaboration
#'
#' @examples
#' # Return rank table
#' collaboration_rank(
#' data = sq_data,
#' return = "table"
#' )
#'
#' # Return plot
#' collaboration_rank(
#' data = sq_data,
#' return = "plot"
#' )
#'
#'
#' @export
collaboration_rank <- function(data,
hrvar = extract_hr(data),
mingroup = 5,
mode = "simple",
plot_mode = 1,
return = "plot"){
## Handle variable name consistency
data <- qui_stan_c(data)
create_rank(data,
metric = "Collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
mode = mode,
plot_mode = plot_mode,
return = return)
}
#' @rdname collaboration_rank
#' @export
collab_rank <- collaboration_rank
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/collaboration_rank.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Generate a Collaboration Report in HTML
#'
#' @description
#' The function generates an interactive HTML report using Standard Person Query
#' data as an input. The report contains a series of summary analysis and
#' visualisations relating to key **collaboration** metrics, including email and
#' meeting hours.
#'
#' @template spq-params
#' @param path Pass the file path and the desired file name, _excluding the file
#' extension_. For example, `"collaboration report"`.
#' @param timestamp Logical vector specifying whether to include a timestamp in
#' the file name. Defaults to `TRUE`.
#'
#' @importFrom purrr map_if
#' @importFrom dplyr `%>%`
#'
#' @family Reports
#'
#' @inherit generate_report return
#'
#' @export
collaboration_report <- function(data,
hrvar = "AUTO",
mingroup = 5,
path = "collaboration report",
timestamp = TRUE){
## Create timestamped path (if applicable)
if(timestamp == TRUE){
newpath <- paste(path, wpa::tstamp())
} else {
newpath <- path
}
if(hrvar %in% c("AUTO", "auto")){
myrank <- data %>% collaboration_rank(mingroup = mingroup, return = "table")
hrvar <- myrank[[1,1]]
}
## Placeholder for variables not there
if("Time_in_self_organized_meetings" %in% names(data)){
som_obj <- data %>%
mutate(Percentage_of_self_organized_meetings = tidyr::replace_na(Time_in_self_organized_meetings / Meeting_hours, 0)) %>%
create_bar(metric = "Percentage_of_self_organized_meetings", hrvar = hrvar, mingroup = mingroup, return = "plot")
} else {
som_obj <- "> [Note] Plot for `Time_in_self_organized_meetings` is not available due to missing variable."
}
# Set outputs
output_list <-
list(
data %>% check_query(return = "text", validation = TRUE),
paste("---"),
# Collaboration Header
md2html(text = read_preamble("collaboration_section.md")),
data %>% collaboration_rank(mingroup = mingroup, return = "plot"),
data %>% collaboration_rank(mingroup = mingroup, return = "table"),
data %>% keymetrics_scan(hrvar = hrvar, mingroup = mingroup,
metrics = c("Collaboration_hours",
"Meetings",
"Meeting_hours",
"Low_quality_meeting_hours",
"Time_in_self_organized_meetings",
"Emails_sent",
"Email_hours",
"Generated_workload_email_hours",
"Total_emails_sent_during_meeting",
"Total_focus_hours"),
textsize = 3,
return = "plot"),
data %>% collaboration_sum(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% collaboration_sum(hrvar = hrvar, mingroup = mingroup, return = "table"),
data %>% collab_area(hrvar = hrvar, mingroup = mingroup, return = "plot"),
paste("---"),
md2html(text = read_preamble("meeting_section.md")), # Meeting Header
data %>% meeting_rank(mingroup = mingroup, return = "plot"),
data %>% meeting_rank(mingroup = mingroup, return = "table"),
data %>% meeting_dist(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% meeting_dist(hrvar = hrvar, mingroup = mingroup, return = "table"),
som_obj,
data %>% meeting_quality(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% meeting_trend(hrvar = hrvar, mingroup = mingroup, return = "plot"),
paste("---"),
md2html(text = read_preamble("email_section.md")), # Email Header
data %>% email_rank(mingroup = mingroup, return = "plot"),
data %>% email_rank(mingroup = mingroup, return = "table"),
data %>% email_dist(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% email_dist(hrvar = hrvar, mingroup = mingroup, return = "table"),
data %>% email_trend(hrvar = hrvar, mingroup = mingroup, return = "plot"),
paste("---"),
paste(">", "[Note] This report was generated on ",
format(Sys.time(), "%b %d %Y"),
". Data is split by ", hrvar ,".")) %>%
purrr::map_if(is.data.frame, create_dt) %>%
purrr::map_if(is.character, md2html)
# Set header titles
title_list <-
c("Data Available",
"",
"Collaboration Time", # Section header
"",
"",
"",
"",
"",
"",
"",
"Deep Dive: Meeting Hours", # Section header
"",
"",
"",
"",
"",
"",
"",
"",
"Deep Dive: Email Hours", # Section header
"",
"",
"",
"",
"",
"",
"")
# Set header levels
n_title <- length(title_list)
levels_list <- rep(4, n_title)
levels_list[c(1, 3, 11, 20)] <- 2 # Section header
# Generate report
generate_report(title = "Collaboration Report",
filename = newpath,
outputs = output_list,
titles = title_list,
subheaders = rep("", n_title),
echos = rep(FALSE, n_title),
levels = levels_list,
theme = "cosmo",
preamble = read_preamble("collaboration_report.md"))
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/collaboration_report.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Collaboration Summary
#'
#' @description
#' Provides an overview analysis of 'Weekly Digital Collaboration'.
#' Returns a stacked bar plot of Email and Meeting Hours by default.
#' Additional options available to return a summary table.
#'
#' @details
#' Uses the metrics `Meeting_hours`, `Email_hours`, `Unscheduled_Call_hours`,
#' and `Instant_Message_hours`.
#'
#' @template spq-params
#' @param return Character vector specifying what to return, defaults to "plot".
#' Valid inputs are "plot" and "table".
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#' @importFrom stats reorder
#'
#' @family Visualization
#' @family Collaboration
#'
#' @return
#' Returns a 'ggplot' object by default, where 'plot' is passed in `return`.
#' When 'table' is passed, a summary table is returned as a data frame.
#'
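#' @examples
#' # Stacked bar summary of collaboration hours, using the demo dataset
#' collaboration_sum(sq_data, hrvar = "LevelDesignation")
#'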
#' @export
collaboration_sum <- function(data,
hrvar = "Organization",
mingroup=5,
return = "plot"){
if("Instant_message_hours" %in% names(data)){
data <- rename(data, Instant_Message_hours = "Instant_message_hours")
}
if("Unscheduled_Call_hours" %in% names(data)){
main_vars <- c("Meeting_hours",
"Email_hours",
"Instant_Message_hours",
"Unscheduled_Call_hours")
} else {
main_vars <- c("Meeting_hours",
"Email_hours")
}
create_stacked(data = data,
hrvar = hrvar,
metrics = main_vars,
mingroup = mingroup,
return = return)
}
#' @rdname collaboration_sum
#' @export
collab_sum <- collaboration_sum
#' @rdname collaboration_sum
#' @export
collaboration_summary <- collaboration_sum
#' @rdname collaboration_sum
#' @export
collab_summary <- collaboration_sum
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/collaboration_sum.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Collaboration Time Trend
#'
#' @description
#' Provides a week by week view of collaboration time.
#' By default returns a week by week heatmap, highlighting the points in time with most activity.
#' Additional options available to return a summary table.
#'
#' @template ch
#'
#' @inheritParams create_trend
#'
#' @family Visualization
#' @family Collaboration
#'
#' @return
#' Returns a 'ggplot' object by default, where 'plot' is passed in `return`.
#' When 'table' is passed, a summary table is returned as a data frame.
#'
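#' @examples
#' # Run the heatmap plot with the demo dataset
#' collaboration_trend(sq_data)
#'
#' # Return a summary table
#' collaboration_trend(sq_data, hrvar = "LevelDesignation", return = "table")
#'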
#' @export
collaboration_trend <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot"){
## Handle variable name consistency
data <- qui_stan_c(data)
create_trend(data,
metric = "Collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/collaboration_trend.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Combine signals from the Hourly Collaboration query
#'
#' @description
#' Takes in an Hourly Collaboration query, and for each hour sums and aggregates
#' the signals (e.g. `Emails_sent` and `IMs_sent`) into `Signals_sent`. This is an
#' internal function used in the Working Patterns functions.
#'
#' @param data Hourly Collaboration query containing signal variables (e.g.
#' `Emails_sent_00_01`)
#' @param hr Numeric value between 0 and 23 to iterate through
#' @param signals Character vector for specifying which signal types to combine.
#' Defaults to `c("Emails_sent", "IMs_sent")`. Other valid values include
#' `"Unscheduled_calls"` and `"Meetings"`.
#'
#' @return
#' Returns a single-column data frame containing the sum of signals sent for a
#' given hour.
#'
#' @details
#' `combine_signals` uses string matching to aggregate columns.
#'
#' @family Support
#'
#' @examples
#' # Demo using simulated variables
#' sim_data <-
#' data.frame(Emails_sent_09_10 = sample(1:5, size = 10, replace = TRUE),
#' Unscheduled_calls_09_10 = sample(1:5, size = 10, replace = TRUE))
#'
#' combine_signals(sim_data, hr = 9, signals = c("Emails_sent", "Unscheduled_calls"))
#'
#' @export
combine_signals <- function(data,
hr,
signals = c("Emails_sent", "IMs_sent")){
if(!is.numeric(hr) || hr < 0 || hr > 23){
stop("Check inputs for `hr` in `combine_signals()`")
}
# End hour
hr_two <- hr + 1
# String pad to two digits
hr1 <- ifelse(nchar(hr) == 1, paste0(0, hr), hr)
hr2 <- ifelse(nchar(hr_two) == 1, paste0(0, hr_two), hr_two)
# Create string vectors
# Use original supplied string if length of signals == 1
if(length(signals) == 1){
full_string <- paste0(signals, "_", hr1, "_", hr2)
} else {
full_string <- paste0("Signals_sent_", hr1, "_", hr2)
}
input_string <- paste0(signals, "_", hr1, "_", hr2) # One column name per signal type
# Sum columns and only return `Signals_sent_` prefixed column
data %>%
dplyr::transmute(!!sym(full_string) := select(., input_string) %>%
apply(1, sum, na.rm = TRUE))
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/combine_signals.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Generate a Connectivity report in HTML
#'
#' @description
#' The function generates an interactive HTML report using Standard Person Query
#' data as an input. The report contains a series of summary analysis and
#' visualisations relating to key **connectivity** metrics, including
#' external/internal network size vs breadth
#' (`Networking_outside_organization`, `Networking_outside_domain`).
#'
#' @template spq-params
#' @param path Pass the file path and the desired file name, _excluding the file
#' extension_. For example, `"connectivity report"`.
#' @param timestamp Logical vector specifying whether to include a timestamp in
#' the file name. Defaults to `TRUE`.
#'
#' @importFrom purrr map_if
#' @importFrom dplyr `%>%`
#'
#' @family Reports
#'
#' @inherit generate_report return
#'
#' @export
connectivity_report <- function(data,
hrvar = "LevelDesignation",
mingroup = 5,
path = "connectivity report",
timestamp = TRUE){
## Create timestamped path (if applicable)
if(timestamp == TRUE){
newpath <- paste(path, wpa::tstamp())
} else {
newpath <- path
}
output_list <-
list(data %>% check_query(return = "text", validation = TRUE),
data %>% external_network_plot(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% external_network_plot(hrvar = hrvar, mingroup = mingroup, return = "table"),
data %>% internal_network_plot(hrvar = hrvar, mingroup = mingroup, return = "plot"),
data %>% internal_network_plot(hrvar = hrvar, mingroup = mingroup, return = "table")) %>%
purrr::map_if(is.data.frame, create_dt) %>%
purrr::map_if(is.character, md2html)
title_list <-
c("Data Overview",
"External network - Plot",
"External network - Table",
"Internal network - Plot",
"Internal network - Table")
n_title <- length(title_list)
generate_report(title = "Connectivity Report",
filename = newpath,
outputs = output_list,
titles = title_list,
subheaders = rep("", n_title),
echos = rep(FALSE, n_title),
levels = rep(3, n_title),
theme = "cosmo",
preamble = read_preamble("connectivity_report.md"))
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/connectivity_report.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Copy a data frame to clipboard for pasting in Excel
#'
#' @description
#' This is a pipe-optimised function that feeds into `wpa::export()`,
#' but can be used as a stand-alone function. Note that the clipboard
#' connection used is Windows-specific.
#'
#' Based on the original function from
#' <https://github.com/martinctc/surveytoolbox>.
#'
#' @param x Data frame to be passed through. Cannot contain list-columns or
#' nested data frames.
#' @param row.names A logical vector for specifying whether to allow row names.
#' Defaults to `FALSE`.
#' @param col.names A logical vector for specifying whether to allow column
#' names. Defaults to `TRUE`.
#' @param quietly Set this to `TRUE` to suppress printing the data frame to the
#' console. Defaults to `FALSE`.
#' @param ... Additional arguments for `write.table()`.
#'
#' @importFrom utils write.table
#'
#' @family Import and Export
#'
#' @return
#' Copies a data frame to the clipboard with no return value.
#'
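#' @examples
#' \dontrun{
#' # Requires clipboard access; copies the demo data frame so it can be
#' # pasted into Excel
#' copy_df(head(sq_data))
#' }
#'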
#' @export
copy_df <- function(x,
                    row.names = FALSE,
                    col.names = TRUE,
                    quietly = FALSE, ...) {

  # Write to the Windows clipboard; the large buffer size avoids truncation
  # for bigger data frames
  utils::write.table(x, "clipboard-50000",
                     sep = "\t",
                     row.names = row.names,
                     col.names = col.names, ...)

  if(quietly == FALSE) print(x)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/wpa/R/copy_df.R
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title
#' Estimate an effect of intervention on every Viva Insights metric in input
#' file by applying single-group Interrupted Time-Series Analysis (ITSA)
#'
#' @author Aleksey Ashikhmin <alashi@@microsoft.com>
#'
#' @description
#' `r lifecycle::badge('experimental')`
#'
#' This function implements ITSA method described in the paper 'Conducting
#' interrupted time-series analysis for single- and multiple-group comparisons',
#' Ariel Linden, The Stata Journal (2015), 15, Number 2, pp. 480-500
#'
#' This function further requires the installation of 'sandwich' and 'lmtest'
#' in order to work. These packages can be installed from CRAN using
#' `install.packages()`.
#'
#' @details
#' This function uses the additional package dependencies 'sandwich' and
#' 'lmtest'. Please install these separately from CRAN prior to running the
#' function.
#'
#' As of May 2022, the 'portes' package was archived from CRAN. The dependency
#' has since been removed and dependent functions `Ljungbox()` incorporated into
#' the **wpa** package.
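#'
#' The single-group ITSA model estimated for each metric is:
#' \deqn{Y_t = \beta_0 + \beta_1 T_t + \beta_2 X_t + \beta_3 X_t T_t +
#' \epsilon_t}
#' where \eqn{T_t} is the time elapsed since the start of the study,
#' \eqn{X_t} is a dummy variable indicating the post-intervention period,
#' and \eqn{X_t T_t} is their interaction. \eqn{\beta_2} captures the
#' immediate treatment effect and \eqn{\beta_3} the treatment effect over
#' time.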
#'
#' @param data Person Query as a dataframe including date column named `Date`.
#' This function assumes the data format is MM/DD/YYYY as is standard in a
#' Viva Insights query output.
#' @param before_start Start date of 'before' time period in MM/DD/YYYY format
#' as character type. Before time period is the period before the intervention
#' (e.g. training program, re-org, shift to remote work) occurs and bounded by
#' `before_start` and `before_end` parameters. A longer period increases the
#' likelihood of achieving statistically significant results. Defaults to the
#' earliest date in the dataset.
#' @param before_end End date of 'before' time period in MM/DD/YYYY format as
#' character type.
#' @param after_start Start date of 'after' time period in MM/DD/YYYY format as
#' character type. After time period is the period after the intervention
#' occurs, bounded by the `after_start` and `after_end` parameters. A longer
#' period increases the likelihood of achieving statistically significant
#' results. Defaults to the date after `before_end`.
#' @param after_end End date of 'after' time period in MM/DD/YYYY format as
#' character type. Defaults to the latest date in the dataset.
#' @param ac_lags_max Maximum lag for the autocorrelation test. Defaults to 7.
#' @param return String specifying what output to return. Defaults to "table".
#' Valid return options include:
#' - `'plot'`: return a list of plots.
#' - `'table'`: return a data frame with the estimated models' coefficients
#' and their corresponding p-values. Look for significant p-values in
#' `beta_2` to indicate an immediate treatment effect, and/or in `beta_3` to
#' indicate a treatment effect over time.
#'
#' @import dplyr
#' @import ggplot2
#'
#' @family Flexible Input
#' @family Interrupted Time-Series Analysis
#'
#'
#' @examples
#' \donttest{
#' # Returns summary table
#' create_ITSA(
#' data = sq_data,
#' before_start = "12/15/2019",
#' before_end = "12/29/2019",
#' after_start = "1/5/2020",
#' after_end = "1/26/2020",
#' ac_lags_max = 7,
#' return = "table")
#'
#' # Returns list of plots
#'
#' plot_list <-
#' create_ITSA(
#' data = sq_data,
#' before_start = "12/15/2019",
#' before_end = "12/29/2019",
#' after_start = "1/5/2020",
#' after_end = "1/26/2020",
#' ac_lags_max = 7,
#' return = 'plot')
#'
#' # Extract a plot as an example
#' plot_list$Workweek_span
#' }
#'
#' @export
create_ITSA <-
function(data,
before_start = min(as.Date(data$Date, "%m/%d/%Y")),
before_end,
after_start,
after_end = max(as.Date(data$Date, "%m/%d/%Y")),
ac_lags_max = 7,
return = 'table') {
## Check inputs types
stopifnot(is.data.frame(data))
stopifnot(is.character(before_start)|inherits(before_start, "Date"))
stopifnot(is.character(before_end))
stopifnot(is.character(after_start))
stopifnot(is.character(after_end)|inherits(after_end, "Date"))
stopifnot(is.numeric(ac_lags_max))
stopifnot(is.character(return))
## Check if dependencies are installed
check_pkg_installed(pkgname = "sandwich")
check_pkg_installed(pkgname = "lmtest")
# check_pkg_installed(pkgname = "portes") # Removed from CRAN
## Check required columns in data
required_variables <- c("Date",
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
before_start <- as.Date(before_start, "%m/%d/%Y")
before_end <- as.Date(before_end, "%m/%d/%Y")
after_start <- as.Date(after_start, "%m/%d/%Y")
after_end <- as.Date(after_end, "%m/%d/%Y")
dateranges <- c(before_start, before_end, after_start, after_end)
WpA_dataset <- data %>% mutate(Date = as.Date(Date, "%m/%d/%Y"))
  # Check that all supplied dates fall within the data's date range
  all_dates_in_data_range_flag <-
    all(min(WpA_dataset$Date) <= dateranges &
          dateranges <= max(WpA_dataset$Date))

  if (!all_dates_in_data_range_flag) {
    stop("Not all dates are found in the dataset")
  }
# Create variable => Period
WpA_dataset_table <-
WpA_dataset %>%
mutate(
Period = case_when(
(before_start <= Date) & (Date <= before_end) ~ "Before",
(after_start <= Date) & (Date <= after_end) ~ "After"
)
) %>% filter(Period == "Before" | Period == "After")
WpA_dataset_table <-
WpA_dataset_table %>% mutate(outcome = case_when(Period == "Before" ~ "0",
Period == "After" ~ "1"))
# Create "train" data with metrics and Date columns
train <-
WpA_dataset_table %>%
    transform(outcome = as.numeric(outcome))
date_column <- train[, "Date"]
train <- train %>% select_if(is.numeric) %>% mutate(Date = date_column)
# Filter out rows with missing values
  train <- train %>% filter(rowSums(is.na(.)) == 0)
# Aggregate metric values at Date level
grouped_by_Date_train <- train %>% group_by(Date)
# Get metric names
metric_names <- setdiff(colnames(grouped_by_Date_train), c("Date", "outcome"))
# Create empty data.frame to save results (e.g. coefficients, p-values etc) for each metric
results <- data.frame(metric_name=character(),
beta_2=double(),
beta_3=double(),
beta_2_pvalue=double(),
beta_3_pvalue=double(),
AR_flag=logical(),
error_warning=character())
# Create empty list to save plots
results_plot <- list()
# Perform ITSA for every metric in metric_names
for(metric_name in metric_names){
    # If error_flag is TRUE then no plot is generated when return = 'plot'
    error_flag <- FALSE
    # AR_flag = FALSE indicates that the lag is equal to 0; otherwise it is
    # set to TRUE
    AR_flag <- FALSE
    # lm_train_success_flag indicates whether model estimation succeeded
    lm_train_success_flag <- FALSE
buf_trycatch <- tryCatch({
# Create a metric time-series by averaging metric values across users
metric_data <- grouped_by_Date_train %>% summarise_at(c(metric_name, "outcome"), mean, na.rm = TRUE)
metric_data <- as.data.frame(metric_data)
# Transform metric_data into ITSA format described in the paper page 485
Date <- metric_data[,"Date"]
Y <- metric_data[, metric_name]
      T <- seq_len(nrow(metric_data))
# X is dummy variable, 0 indicates pre-intervention period and 1 indicates post-intervention period
X <- metric_data[, "outcome"]
      # num_Zeros equals the number of pre-intervention periods plus one, so
      # that XT counts the periods elapsed since the intervention point
      num_Zeros <- length(X) - sum(X) + 1
      XT <- X*(T - num_Zeros)
data_OLS <- data.frame(Date, Y, T, X, XT, stringsAsFactors=FALSE)
      single_itsa <- stats::lm(Y ~ T + X + XT, data = data_OLS)
# Newey-West variance estimator produces consistent estimates when there
# is autocorrelation in addition to possible Heteroscedasticity
coeff_pvalues <- lmtest::coeftest(single_itsa,
vcov = sandwich::NeweyWest(single_itsa, lag = 0, prewhite = FALSE))
beta_2 <- round(single_itsa$coefficients[3], 3)
beta_3 <- round(single_itsa$coefficients[4], 3)
beta_2_pvalue <- round(coeff_pvalues[3,4], 3)
beta_3_pvalue <- round(coeff_pvalues[4,4], 3)
lm_train_success_flag <- TRUE
# It is important to test for the presence of autocorrelated errors
# when using regression-based time-series methods, because such tests
# provide critical diagnostic information regarding the adequacy of the time-series model
residuals <- single_itsa$residuals
N <- length(residuals)
# Run Ljung and Box Test to test for autocorrelation
lb_test <- wpa::LjungBox(
single_itsa,
lags = seq(1, ac_lags_max),
order = 4,
season = 1,
squared.residuals = FALSE
)
ind_stat_significant_coeff <- which(lb_test[,'p-value'] <= 0.05)
      # If the Ljung-Box test identifies statistically significant lags, use
      # the Newey-West method (heteroscedasticity and autocorrelation
      # consistent (HAC) covariance matrix estimators) with the maximum
      # significant lag to estimate the model coefficients' standard errors
      # and p-values
      if(length(ind_stat_significant_coeff) > 0){
# The Newey & West (1987) estimator requires specification
# of the lag and suppression of prewhitening
coeff_pvalues <- lmtest::coeftest(single_itsa,
vcov = sandwich::NeweyWest(single_itsa,
lag = max(ind_stat_significant_coeff),
prewhite = FALSE))
AR_flag <- TRUE
}
# Look for significant p-values in beta_2 to indicate an immediate treatment
# effect, or in beta_3 to indicate a treatment effect over time
# Yt = Beta0 + beta_1*Tt + beta_2*Xt + beta_3*Xt*Tt + epst
beta_2 <- round(single_itsa$coefficients[3], 3)
beta_3 <- round(single_itsa$coefficients[4], 3)
beta_2_pvalue <- round(coeff_pvalues[3,4], 3)
beta_3_pvalue <- round(coeff_pvalues[4,4], 3)
buf <- data.frame(metric_name = metric_name,
beta_2 = beta_2,
beta_3 = beta_3,
beta_2_pvalue = beta_2_pvalue,
beta_3_pvalue = beta_3_pvalue,
AR_flag = AR_flag,
error_warning = "")
},
    error = function(c){
      # Assign with `<<-` so the flag is updated in the enclosing scope,
      # not just inside the handler
      error_flag <<- TRUE
buf <- data.frame(metric_name = metric_name,
beta_2 = ifelse(exists("beta_2", inherits = FALSE), beta_2, -1),
beta_3 = ifelse(exists("beta_3", inherits = FALSE), beta_3, -1),
beta_2_pvalue = ifelse(exists("beta_2_pvalue", inherits = FALSE), beta_2_pvalue, -1),
beta_3_pvalue = ifelse(exists("beta_3_pvalue", inherits = FALSE), beta_3_pvalue, -1),
AR_flag = FALSE,
error_warning = paste0('Error: ',c$message, "; lm_train_success=", lm_train_success_flag, collapse = " "))
},
warning = function(c){
buf <- data.frame(metric_name = metric_name,
beta_2 = ifelse(exists("beta_2", inherits = FALSE), beta_2, -1),
beta_3 = ifelse(exists("beta_3", inherits = FALSE), beta_3, -1),
beta_2_pvalue = ifelse(exists("beta_2_pvalue", inherits = FALSE), beta_2_pvalue, -1),
beta_3_pvalue = ifelse(exists("beta_3_pvalue", inherits = FALSE), beta_3_pvalue, -1),
AR_flag = AR_flag,
error_warning = paste0('Warning: ',c$message, "; lm_train_success=", lm_train_success_flag, collapse = " "))
}
)
results <- rbind(results, buf_trycatch)
    # Create a metric plot with its estimated ITSA model and save it in the
    # "results_plot" list. If error_flag is TRUE then save "buf_trycatch"
    # instead, for logging the error message
    if(return == 'plot' && !error_flag){
event_T <- which.max(data_OLS[, "X"] == 1 )
hat_Y <- single_itsa$fitted.values
before_intervention_df <- data_OLS[1:event_T,]
before_intervention_df[event_T,"X"] <- 0
      hat_Y_before_and_at_intervention <-
        data.frame(DateTime = data_OLS[1:event_T, "Date"],
                   T = data_OLS[1:event_T, "T"],
                   Y = stats::predict(single_itsa, before_intervention_df))
      hat_Y_after_and_at_intervention <-
        data.frame(DateTime = data_OLS[event_T:nrow(data_OLS), "Date"],
                   T = data_OLS[event_T:nrow(data_OLS), "T"],
                   Y = hat_Y[event_T:nrow(data_OLS)])
# Create basic graph
p <- ggplot(data_OLS, aes(x = T, y = Y))+
geom_point(aes(y = Y), color = "blue")+
geom_line(data=hat_Y_before_and_at_intervention, aes(x=T, y=Y), size=1)+
geom_line(data=hat_Y_after_and_at_intervention, aes(x=T, y=Y), size=1)
# Calculate plotting area range and scale
dY <- (max(data_OLS[,"Y"]) - min(data_OLS[,"Y"]))/10
dX <- 1
# Calculate annotation positions on the graph
Y_at_intervention_when_no_intervention_happened <- hat_Y_before_and_at_intervention[dim(hat_Y_before_and_at_intervention)[1] ,"Y"]
Y_at_intervention_when_intervention_happened <- hat_Y_after_and_at_intervention[1,"Y"]
pos_y_end_beta_2 <- (Y_at_intervention_when_no_intervention_happened + Y_at_intervention_when_intervention_happened)/2
pos_y_start_beta_2 <- pos_y_end_beta_2 + dY
pos_x_end_beta_2 <- event_T
pos_x_start_beta_2 <- pos_x_end_beta_2 - dX
# Create data.frame with all the annotation info
annotation <- data.frame(
x = pos_x_start_beta_2 - dX,
y = Y_at_intervention_when_intervention_happened + dY,
label = c(paste0("beta_2=", round(beta_2, 3), collapse=" "))
)
# Create final plot
p_final <- p + ggtitle(metric_name) +
geom_vline(xintercept=event_T, color="red", size=1) +
annotate("segment", x = pos_x_start_beta_2, xend = pos_x_end_beta_2, y = pos_y_start_beta_2, yend = pos_y_end_beta_2, colour = "black", size=0.5, alpha=0.6, arrow=arrow())+
annotate("segment", x = pos_x_end_beta_2, xend = pos_x_end_beta_2, y = Y_at_intervention_when_no_intervention_happened, yend = Y_at_intervention_when_intervention_happened, colour = "purple", size=2, alpha=1)+
geom_text(data=annotation, aes( x=x, y=y, label=label), color="orange", size=5, angle=0, fontface="bold" )
# Change the color, the size and the face of
# the main title
p_final <- p_final + theme(
plot.title = element_text(color="blue", size=14, face="bold.italic"))
# Save plot in list
results_plot[[metric_name]] <- p_final
}else{
results_plot[[metric_name]] <- buf_trycatch
}
}
if(return == 'plot'){
return(results_plot)
}
# Remove rownames from results
rownames(results) <- c()
# Return ranking table
return(results)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/wpa/R/create_ITSA.R
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Calculate Information Value for a selected outcome variable
#'
#' @description
#' Specify an outcome variable and return IV outputs.
#' All numeric variables in the dataset are used as predictor variables,
#' unless a subset is specified via `predictors`.
#'
#' @param data A Person Query dataset in the form of a data frame.
#' @param predictors A character vector specifying the columns to be used as
#' predictors. Defaults to NULL, where all numeric vectors in the data will be
#' used as predictors.
#' @param outcome A string specifying a binary variable, i.e. can only contain
#' the values 1 or 0.
#' @param bins Number of bins to use, defaults to 5.
#' @param siglevel Significance level to use in comparing populations for the
#' outcomes, defaults to 0.05
#' @param exc_sig Logical value determining whether to exclude predictors
#' where the p-value lies above what is set at `siglevel`. Defaults to
#' `FALSE`, in which case p-values are not calculated at all.
#' @param return String specifying what to return. This must be one of the
#' following strings:
#' - `"plot"`
#' - `"summary"`
#' - `"list"`
#' - `"plot-WOE"`
#' - `"IV"`
#'
#' See `Value` for more information.
#'
#' @return
#' A different output is returned depending on the value passed to the `return`
#' argument:
#' - `"plot"`: 'ggplot' object. A bar plot showing the IV value of the top
#' (maximum 12) variables.
#' - `"summary"`: data frame. A summary table for the metric.
#' - `"list"`: list. A list of outputs for all the input variables.
#' - `"plot-WOE"`: A list of 'ggplot' objects that show the WOE for each
#' predictor used in the model.
#' - `"IV"` returns a list object which mirrors the return
#' in `Information::create_infotables()`.
#'
#' @import dplyr
#'
#' @family Variable Association
#' @family Information Value
#'
#' @examples
#' # Return a bar plot of IV by predictor
#' sq_data %>%
#' dplyr::mutate(X = ifelse(Workweek_span > 40, 1, 0)) %>%
#' create_IV(outcome = "X",
#' predictors = c("Email_hours",
#' "Meeting_hours",
#' "Instant_Message_hours"),
#' return = "plot")
#'
#'
#' # Return summary
#' sq_data %>%
#' dplyr::mutate(X = ifelse(Collaboration_hours > 10, 1, 0)) %>%
#' create_IV(outcome = "X",
#' predictors = c("Email_hours", "Meeting_hours"),
#' return = "summary")
#'
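#' # Return WOE plots for each predictor used; a usage sketch
#' \donttest{
#' sq_data %>%
#'   dplyr::mutate(X = ifelse(Workweek_span > 40, 1, 0)) %>%
#'   create_IV(outcome = "X",
#'             predictors = c("Email_hours", "Meeting_hours"),
#'             return = "plot-WOE")
#' }
#'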
#' @export
create_IV <- function(data,
predictors = NULL,
outcome,
bins = 5,
siglevel = 0.05,
exc_sig = FALSE,
return = "plot"){
  # Preserve predictor names as strings ---------------------------------------
  pred_chr <- predictors

  # Select training dataset ---------------------------------------------------
  if(is.null(predictors)){
train <-
data %>%
rename(outcome = outcome) %>%
select_if(is.numeric) %>%
tidyr::drop_na()
} else {
train <-
data %>%
rename(outcome = outcome) %>%
select(tidyselect::all_of(predictors), outcome) %>%
tidyr::drop_na()
}
# Calculate odds -----------------------------------------------------------
odds <- sum(train$outcome) / (length(train$outcome) - sum(train$outcome))
lnodds <- log(odds)
# Assert -------------------------------------------------------------------
# Must be logical for `exc_sig`
if(!(is.logical(exc_sig))){
stop("invalid input to `exc_sig`")
}
# Calculate p-value --------------------------------------------------------
predictors <-
data.frame(Variable = unlist(names(train))) %>%
dplyr::filter(Variable != "outcome") %>%
mutate(Variable = as.character(Variable)) # Ensure not factor
if(exc_sig == TRUE){
    for (i in seq_len(nrow(predictors))){
predictors$pval[i] <-
p_test(train,
outcome = "outcome",
behavior = predictors$Variable[i])
}
# Filter out variables whose p-value is above the significance level ------
predictors <- predictors %>% dplyr::filter(pval <= siglevel)
if(nrow(predictors) == 0){
stop("There are no predictors where the p-value lies below the significance level.",
"You may set `exc_sig == FALSE` or increase the threshold on `siglevel`.")
}
}
train <- train %>% select(predictors$Variable, outcome)
# IV Analysis -------------------------------------------------------------
## Following section is equivalent to:
# IV <- Information::create_infotables(data = train, y = "outcome", bins = bins)
IV <- map_IV(data = train,
predictors = predictors$Variable, # filtered set
outcome = "outcome", # string not variable
bins = bins)
IV_names <- names(IV$Tables)
IV_summary <- inner_join(IV$Summary, predictors, by = c("Variable"))
if(return == "summary"){
IV_summary
} else if(return == "IV"){
c(
IV,
list("lnodds" = lnodds)
)
} else if(return == "plot"){
top_n <-
min(
c(12, nrow(IV_summary))
)
IV_summary %>%
utils::head(top_n) %>%
create_bar_asis(group_var = "Variable",
bar_var = "IV",
title = "Information Value (IV)",
subtitle =
paste("Showing top",
top_n,
"predictors"))
} else if(return == "plot-WOE"){
## Return list of ggplots
IV$Summary$Variable %>%
as.character() %>%
purrr::map(~plot_WOE(IV = IV, predictor = .))
} else if(return == "list"){
# Output list
output_list <-
IV_names %>%
purrr::map(function(x){
IV$Tables[[x]] %>%
mutate(ODDS = exp(WOE + lnodds),
PROB = ODDS / (ODDS + 1))
}) %>%
purrr::set_names(IV_names)
output_list
} else {
stop("Please enter a valid input for `return`.")
}
}
#' @title Plot WOE graphs with an IV object
#'
#' @description
#' Internal function within `create_IV()` that plots WOE graphs using an IV
#' object. Can also be used for plotting individual WOE graphs.
#'
#' @param IV IV object created with 'Information'.
#' @param predictor String with the name of the predictor variable.
#'
#' @return
#' 'ggplot' object. Bar plot with 'WOE' as the y-axis and bins of the predictor
#' variable as the horizontal axis.
#'
#' @family Support
#' @family Variable Association
#' @family Information Value
#'
#' @import dplyr
#' @import ggplot2
#'
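#' @examples
#' # A minimal usage sketch; builds an IV object first via
#' # `create_IV(return = "IV")`
#' \donttest{
#' IV <- sq_data %>%
#'   dplyr::mutate(X = ifelse(Workweek_span > 40, 1, 0)) %>%
#'   create_IV(outcome = "X",
#'             predictors = c("Email_hours", "Meeting_hours"),
#'             return = "IV")
#'
#' plot_WOE(IV, predictor = "Email_hours")
#' }
#'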
#' @export
plot_WOE <- function(IV, predictor){
# Identify right table
plot_table <-
IV$Tables[[predictor]] %>%
mutate(labelpos = ifelse(WOE <= 0, 1.2, -1))
# Get range
WOE_range <-
IV$Tables %>%
purrr::map(~pull(., WOE)) %>%
unlist() %>%
range()
# Plot
plot_table %>%
mutate(!!sym(predictor) :=
factor(!!sym(predictor),
levels =
pull(
plot_table,
predictor
)
)) %>%
ggplot(aes(x = !!sym(predictor),
y = WOE)) +
geom_col(fill = rgb2hex(49,97,124)) +
geom_text(aes(label = round(WOE, 1),
vjust = labelpos)) +
labs(title = us_to_space(predictor),
subtitle = "Weight of Evidence",
x = us_to_space(predictor),
y = "Weight of Evidence (WOE)") +
theme_wpa_basic() +
ylim(WOE_range[1] * 1.1, WOE_range[2] * 1.1)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/wpa/R/create_IV.R
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Mean Bar Plot for any metric
#'
#' @description
#' Provides an overview analysis of a selected metric, first computing a
#' mean per person and then averaging by group.
#' Returns a bar plot showing the average of a selected metric by default.
#' Additional options available to return a summary table.
#'
#' @template spq-params
#' @param mingroup Numeric value setting the privacy threshold / minimum group
#' size. Defaults to 5.
#' @param metric Character string containing the name of the metric,
#' e.g. "Collaboration_hours"
#' @param return String specifying what to return. This must be one of the
#' following strings:
#' - `"plot"`
#' - `"table"`
#'
#' See `Value` for more information.
#'
#' @param bar_colour String to specify colour to use for bars.
#' In-built accepted values include `"default"` (default), `"alert"` (red), and
#' `"darkblue"`. Otherwise, hex codes are also accepted. You can also supply
#' RGB values via `rgb2hex()`.
#' @param na.rm A logical value indicating whether `NA` should be stripped
#' before the computation proceeds. Defaults to `FALSE`.
#' @param percent Logical value to determine whether to show labels as
#' percentage signs. Defaults to `FALSE`.
#' @param plot_title An option to override plot title.
#' @param plot_subtitle An option to override plot subtitle.
#' @param legend_lab String. Option to override legend title/label. Defaults to
#' `NULL`, where the metric name will be populated instead.
#' @param rank String specifying how to rank the bars. Valid inputs are:
#' - `"descending"` - ranked highest to lowest from top to bottom (default).
#' - `"ascending"` - ranked lowest to highest from top to bottom.
#' - `NULL` - uses the original levels of the HR attribute.
#' @param xlim An option to set max value in x axis.
#' @param text_just `r lifecycle::badge('experimental')` A numeric value
#' controlling for the horizontal position of the text labels. Defaults to
#' 0.5.
#' @param text_colour `r lifecycle::badge('experimental')` String to specify
#' colour to use for the text labels. Defaults to `"#FFFFFF"`.
#'
#'
#' @return
#' A different output is returned depending on the value passed to the `return` argument:
#' - `"plot"`: 'ggplot' object. A bar plot for the metric.
#' - `"table"`: data frame. A summary table for the metric.
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @importFrom scales wrap_format
#' @importFrom stats reorder
#'
#' @family Visualization
#' @family Flexible
#'
#' @examples
#' # Return a ggplot bar chart
#' create_bar(sq_data, metric = "Collaboration_hours", hrvar = "LevelDesignation")
#'
#' # Change bar colour
#' create_bar(sq_data,
#' metric = "After_hours_collaboration_hours",
#' bar_colour = "alert")
#'
#' # Custom data label positions and formatting
#' sq_data %>%
#' create_bar(
#' metric = "Meetings",
#' text_just = 1.1,
#' text_colour = "black",
#' xlim = 20)
#'
#' # Return a summary table
#' create_bar(sq_data,
#' metric = "Collaboration_hours",
#' hrvar = "LevelDesignation",
#' return = "table")
#' @export
create_bar <- function(data,
metric,
hrvar = "Organization",
mingroup = 5,
return = "plot",
bar_colour = "default",
na.rm = FALSE,
percent = FALSE,
plot_title = us_to_space(metric),
plot_subtitle = paste("Average by", tolower(camel_clean(hrvar))),
legend_lab = NULL,
rank = "descending",
xlim = NULL,
text_just = 0.5,
text_colour = "#FFFFFF"){
## Check inputs
required_variables <- c("Date",
metric,
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
## Handle `legend_lab`
if(is.null(legend_lab)){
legend_lab <- gsub("_", " ", metric)
}
## Handling NULL values passed to hrvar
if(is.null(hrvar)){
data <- totals_col(data)
hrvar <- "Total"
}
## Clean metric name
clean_nm <- us_to_space(metric)
## Data for bar plot
plot_data <-
data %>%
rename(group = !!sym(hrvar)) %>%
group_by(PersonId, group) %>%
summarise(!!sym(metric) := mean(!!sym(metric), na.rm = na.rm)) %>%
ungroup() %>%
left_join(data %>%
rename(group = !!sym(hrvar)) %>%
group_by(group) %>%
summarise(Employee_Count = n_distinct(PersonId)),
by = "group") %>%
filter(Employee_Count >= mingroup)
## Colour bar override
if(bar_colour == "default"){
bar_colour <- "#34b1e2"
} else if(bar_colour == "alert"){
bar_colour <- "#FE7F4F"
} else if(bar_colour == "darkblue"){
bar_colour <- "#1d627e"
}
## Bar plot
plot_object <- data %>%
create_stacked(
metrics = metric,
hrvar = hrvar,
mingroup = mingroup,
stack_colours = bar_colour,
percent = percent,
plot_title = plot_title,
plot_subtitle = plot_subtitle,
legend_lab = legend_lab,
return = "plot",
rank = rank,
xlim = xlim,
text_just = text_just,
text_colour = text_colour
)
summary_table <-
plot_data %>%
select(group, !!sym(metric)) %>%
group_by(group) %>%
summarise(!!sym(metric) := mean(!!sym(metric)),
n = n())
if(return == "table"){
return(summary_table)
} else if(return == "plot"){
return(plot_object)
} else {
stop("Please enter a valid input for `return`.")
}
}
# End of file: /scratch/gouwar.j/cran-all/cranData/wpa/R/create_bar.R
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Create a bar chart without aggregation for any metric
#'
#' @description
#' This function creates a bar chart directly from the aggregated / summarised
#' data. Unlike `create_bar()` which performs a person-level aggregation, there
#' is no calculation for `create_bar_asis()` and the values are rendered as they
#' are passed into the function.
#'
#' @param data Plotting data as a data frame.
#' @param group_var String containing name of variable for the group.
#' @param bar_var String containing name of variable representing the value of
#' the bars.
#' @param title Title of the plot.
#' @param subtitle Subtitle of the plot.
#' @param caption Caption of the plot.
#' @param ylab Y-axis label for the plot (group axis)
#' @param xlab X-axis label of the plot (bar axis).
#' @param percent Logical value to determine whether to show labels as
#' percentage signs. Defaults to `FALSE`.
#' @param bar_colour String to specify colour to use for bars.
#' In-built accepted values include "default" (default), "alert" (red), and
#' "darkblue". Otherwise, hex codes are also accepted. You can also supply
#' RGB values via `rgb2hex()`.
#' @param rounding Numeric value to specify number of digits to show in data
#' labels
#'
#' @return
#' 'ggplot' object. A horizontal bar plot.
#'
#' @examples
#' # Creating a custom bar plot without mean aggregation
#' library(dplyr)
#'
#' sq_data %>%
#' group_by(Organization) %>%
#' summarise(across(.cols = Meeting_hours,
#' .fns = ~sum(., na.rm = TRUE))) %>%
#' create_bar_asis(group_var = "Organization",
#' bar_var = "Meeting_hours",
#' title = "Total Meeting Hours over period",
#' subtitle = "By Organization",
#' caption = extract_date_range(sq_data, return = "text"),
#' bar_colour = "darkblue",
#' rounding = 0)
#'
#' # Summarise the non-person-averaged median of `Emails_sent`
#' med_df <-
#'   sq_data %>%
#'   group_by(Organization) %>%
#'   summarise(Emails_sent_median = median(Emails_sent))
#'
#' med_df %>%
#'   create_bar_asis(
#'     group_var = "Organization",
#'     bar_var = "Emails_sent_median",
#'     title = "Median Emails Sent by Organization",
#'     subtitle = "Person Averaging Not Applied",
#'     bar_colour = "darkblue",
#'     caption = extract_date_range(sq_data, return = "text")
#'   )
#'
#' @import ggplot2
#' @import dplyr
#'
#' @family Visualization
#' @family Flexible
#'
#' @export
create_bar_asis <- function(data,
group_var,
bar_var,
title = NULL,
subtitle = NULL,
caption = NULL,
ylab = group_var,
xlab = bar_var,
percent = FALSE,
bar_colour = "default",
rounding = 1){
## Colour bar override
if(bar_colour == "default"){
bar_colour <- "#34b1e2"
} else if(bar_colour == "alert"){
bar_colour <- "#FE7F4F"
} else if(bar_colour == "darkblue"){
bar_colour <- "#1d627e"
}
up_break <- max(data[[bar_var]], na.rm = TRUE) * 1.3
if(percent == FALSE){
returnPlot <-
data %>%
ggplot(aes(x = reorder(!!sym(group_var), !!sym(bar_var)), y = !!sym(bar_var))) +
geom_col(fill = bar_colour) +
geom_text(aes(label = round(!!sym(bar_var), digits = rounding)),
position = position_stack(vjust = 0.5),
color = "#FFFFFF",
fontface = "bold",
size = 4)
} else if(percent == TRUE){
returnPlot <-
data %>%
ggplot(aes(x = reorder(!!sym(group_var), !!sym(bar_var)), y = !!sym(bar_var))) +
geom_col(fill = bar_colour) +
geom_text(aes(label = scales::percent(!!sym(bar_var),
accuracy = 10 ^ -rounding)),
position = position_stack(vjust = 0.5),
color = "#FFFFFF",
fontface = "bold",
size = 4)
}
returnPlot +
scale_y_continuous(expand = c(.01, 0), limits = c(0, up_break)) +
coord_flip() +
labs(title = title,
subtitle = subtitle,
caption = caption,
y = camel_clean(xlab),
x = ylab) +
theme_wpa_basic() +
theme(
axis.line = element_blank(),
axis.ticks = element_blank(),
axis.text.x = element_blank(),
axis.title = element_blank()
)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/wpa/R/create_bar_asis.R
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Box Plot for any metric
#'
#' @description
#' Analyzes a selected metric and returns a box plot by default.
#' Additional options available to return a table with distribution elements.
#'
#' @details
#' This is a general purpose function that powers all the functions
#' in the package that produce box plots.
#'
#' @template spq-params
#' @param metric Character string containing the name of the metric,
#' e.g. "Collaboration_hours"
#'
#' @param return String specifying what to return. This must be one of the
#' following strings:
#' - `"plot"`
#' - `"table"`
#' - `"data"`
#'
#' See `Value` for more information.
#'
#' @return
#' A different output is returned depending on the value passed to the `return` argument:
#' - `"plot"`: 'ggplot' object. A box plot for the metric.
#' - `"table"`: data frame. A summary table for the metric.
#' - `"data"`: data frame. Person-averaged data underlying the plot, with the
#' grouping variable renamed to `group`.
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#' @importFrom stats median
#' @importFrom stats sd
#'
#' @family Visualization
#' @family Flexible
#'
#' @examples
#' # Create a box plot for Work Week Span by Level Designation
#' create_boxplot(sq_data,
#' metric = "Workweek_span",
#' hrvar = "LevelDesignation",
#' return = "plot")
#'
#' # Create a summary statistics table for Work Week Span by Organization
#' create_boxplot(sq_data,
#' metric = "Workweek_span",
#' hrvar = "Organization",
#' return = "table")
#'
#' # Create a box plot for Collaboration Hours by Level Designation
#' create_boxplot(sq_data,
#' metric = "Collaboration_hours",
#' hrvar = "LevelDesignation",
#' return = "plot")
#' @export
create_boxplot <- function(data,
metric,
hrvar = "Organization",
mingroup = 5,
return = "plot") {
## Check inputs
required_variables <- c("Date",
metric,
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
## Handling NULL values passed to hrvar
if(is.null(hrvar)){
data <- totals_col(data)
hrvar <- "Total"
}
## Clean metric name
clean_nm <- us_to_space(metric)
plot_data <-
data %>%
rename(group = !!sym(hrvar)) %>% # Rename HRvar to `group`
group_by(PersonId, group) %>%
summarise(!!sym(metric) := mean(!!sym(metric))) %>%
ungroup() %>%
left_join(data %>%
rename(group = !!sym(hrvar)) %>%
group_by(group) %>%
summarise(Employee_Count = n_distinct(PersonId)),
by = "group") %>%
filter(Employee_Count >= mingroup)
## Get max value
max_point <- max(plot_data[[metric]]) * 1.2
plot_legend <-
plot_data %>%
group_by(group) %>%
summarize(Employee_Count = first(Employee_Count)) %>%
mutate(Employee_Count = paste("n=",Employee_Count))
## summary table
summary_table <-
plot_data %>%
select(group, tidyselect::all_of(metric)) %>%
group_by(group) %>%
summarise(mean = mean(!!sym(metric)),
median = median(!!sym(metric)),
sd = sd(!!sym(metric)),
min = min(!!sym(metric)),
max = max(!!sym(metric)),
range = max - min,
n = n())
## group order
group_ord <-
summary_table %>%
arrange(desc(mean)) %>%
pull(group)
plot_object <-
plot_data %>%
mutate(group = factor(group, levels = group_ord)) %>%
ggplot(aes(x = group, y = !!sym(metric))) +
geom_boxplot(color = "#578DB8") +
ylim(0, max_point) +
annotate("text", x = plot_legend$group, y = 0, label = plot_legend$Employee_Count) +
scale_x_discrete(labels = scales::wrap_format(10)) +
theme_wpa_basic() +
theme(axis.text=element_text(size=12),
axis.text.x = element_text(angle = 30, hjust = 1),
plot.title = element_text(color="grey40", face="bold", size=18),
plot.subtitle = element_text(size=14),
legend.position = "top",
legend.justification = "right",
legend.title=element_text(size=14),
legend.text=element_text(size=14)) +
labs(title = clean_nm,
subtitle = paste("Distribution of",
tolower(clean_nm),
"by",
tolower(camel_clean(hrvar)))) +
xlab(hrvar) +
ylab(paste("Average", clean_nm)) +
labs(caption = extract_date_range(data, return = "text"))
if(return == "table"){
summary_table %>%
as_tibble() %>%
return()
} else if(return == "plot"){
return(plot_object)
} else if(return == "data"){
plot_data %>%
mutate(group = factor(group, levels = group_ord)) %>%
arrange(desc(group))
} else {
stop("Please enter a valid input for `return`.")
}
}
# End of file: /scratch/gouwar.j/cran-all/cranData/wpa/R/create_boxplot.R
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Create a bubble plot with two selected Viva Insights metrics (General
#' Purpose), with size representing the number of employees in the group.
#'
#' @description Returns a bubble plot of two selected metrics, using size to map
#' the number of employees.
#'
#' @details This is a general purpose function that powers all the functions in
#' the package that produce bubble plots.
#'
#' @param data A Standard Person Query dataset in the form of a data frame.
#' @param metric_x Character string containing the name of the metric, e.g.
#' "Collaboration_hours"
#' @param metric_y Character string containing the name of the metric, e.g.
#' "Collaboration_hours"
#' @param hrvar HR Variable by which to split metrics, defaults to
#' "Organization" but accepts any character vector, e.g. "LevelDesignation"
#' @param mingroup Numeric value setting the privacy threshold / minimum group
#' size. Defaults to 5.
#'
#' @param return String specifying what to return. This must be one of the
#' following strings:
#' - `"plot"`
#' - `"table"`
#'
#' @param bubble_size A numeric vector of length two to specify the size range
#' of the bubbles
#'
#' @import dplyr
#' @import ggplot2
#' @import scales
#'
#' @family Visualization
#' @family Flexible
#'
#' @examples
#' \donttest{
#' create_bubble(sq_data,
#' "Internal_network_size",
#' "External_network_size",
#' "Organization")
#'
#' create_bubble(
#' sq_data,
#' "Generated_workload_call_hours",
#' "Generated_workload_email_hours",
#' "Organization",
#' mingroup = 100,
#' return = "plot"
#' )
#' }
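#'
#' # Adjust the bubble size range; a usage sketch
#' \donttest{
#' create_bubble(sq_data,
#'               "Internal_network_size",
#'               "External_network_size",
#'               "Organization",
#'               bubble_size = c(2, 15))
#' }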
#' @return A different output is returned depending on the value passed to the
#' `return` argument:
#' - `"plot"`: 'ggplot' object. A bubble plot for the metric.
#' - `"table"`: data frame. A summary table for the metric.
#'
#' @export
create_bubble <- function(data,
metric_x,
metric_y,
hrvar = "Organization",
mingroup = 5,
return = "plot",
bubble_size = c(1, 10)){
## Check inputs
required_variables <- c(hrvar,
metric_x,
metric_y,
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
## Handling NULL values passed to hrvar
if(is.null(hrvar)){
data <- totals_col(data)
hrvar <- "Total"
}
## Clean metric names
clean_x <- us_to_space(metric_x)
clean_y <- us_to_space(metric_y)
myTable <-
data %>%
group_by(PersonId, !!sym(hrvar)) %>%
summarise_at(vars(!!sym(metric_x), !!sym(metric_y)), ~mean(., na.rm = TRUE)) %>%
group_by(!!sym(hrvar)) %>%
summarise_at(vars(!!sym(metric_x), !!sym(metric_y)), ~mean(., na.rm = TRUE)) %>%
ungroup() %>%
left_join(hrvar_count(data, hrvar = hrvar, return = "table"),
by = hrvar) %>%
filter(n >= mingroup)
plot_object <-
myTable %>%
ggplot(aes(x = !!sym(metric_x),
y = !!sym(metric_y),
label = !!sym(hrvar))) +
geom_point(alpha = 0.5, color = rgb2hex(0, 120, 212), aes(size = n)) +
ggrepel::geom_text_repel(size = 3) +
labs(title = paste0(clean_x, " and ", clean_y),
subtitle = paste("By", camel_clean(hrvar)),
caption = paste("Total employees =", sum(myTable$n), "|",
extract_date_range(data, return = "text"))) +
xlab(clean_x) +
ylab(clean_y) +
scale_size(range = bubble_size) +
theme_wpa_basic()
if(return == "table"){
return(myTable)
} else if(return == "plot"){
return(plot_object)
} else {
stop("Please enter a valid input for `return`.")
}
}
# End of file: /scratch/gouwar.j/cran-all/cranData/wpa/R/create_bubble.R
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Create a density plot for any metric
#'
#' @description
#' Provides an analysis of the distribution of a selected metric.
#' Returns a faceted density plot by default.
#' Additional options available to return the underlying frequency table.
#'
#' @template spq-params
#' @param metric String containing the name of the metric,
#' e.g. "Collaboration_hours"
#'
#' @param ncol Numeric value setting the number of columns on the plot. Defaults
#' to `NULL` (automatic).
#'
#' @param return String specifying what to return. This must be one of the
#' following strings:
#' - `"plot"`
#' - `"table"`
#' - `"data"`
#' - `"frequency"`
#'
#' See `Value` for more information.
#'
#' @return
#' A different output is returned depending on the value passed to the `return`
#' argument:
#' - `"plot"`: 'ggplot' object. A faceted density plot for the metric.
#' - `"table"`: data frame. A summary table for the metric.
#' - `"data"`: data frame. Data with calculated person averages.
#' - `"frequency`: list of data frames. Each data frame contains the
#' frequencies used in each panel of the plotted histogram.
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#' @importFrom tidyr spread
#' @importFrom stats median
#' @importFrom stats sd
#'
#' @family Flexible
#'
#' @examples
#' # Return plot for whole organization
#' create_density(sq_data, metric = "Collaboration_hours", hrvar = NULL)
#'
#' # Return plot
#' create_density(sq_data, metric = "Collaboration_hours", hrvar = "Organization")
#'
#' # Return plot but coerce plot to two columns
#' create_density(sq_data, metric = "Collaboration_hours", hrvar = "Organization", ncol = 2)
#'
#' # Return summary table
#' create_density(sq_data,
#' metric = "Collaboration_hours",
#' hrvar = "Organization",
#' return = "table")
#' @export
create_density <- function(data,
metric,
hrvar = "Organization",
mingroup = 5,
ncol = NULL,
return = "plot") {
## Check inputs
required_variables <- c("Date",
metric,
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
## Create NULL variables
density <- scaled <- ndensity <- NULL
## Clean metric name
clean_nm <- us_to_space(metric)
## Handling NULL values passed to hrvar
if(is.null(hrvar)){
data <- totals_col(data)
hrvar <- "Total"
}
## Basic Data for bar plot
## Calculate person-averages
plot_data <-
data %>%
rename(group = !!sym(hrvar)) %>%
group_by(PersonId, group) %>%
summarise(!!sym(metric) := mean(!!sym(metric))) %>%
ungroup() %>%
left_join(data %>%
rename(group = !!sym(hrvar)) %>%
group_by(group) %>%
summarise(Employee_Count = n_distinct(PersonId)),
by = "group") %>%
filter(Employee_Count >= mingroup)
## Employee count / base size table
plot_legend <-
plot_data %>%
group_by(group) %>%
summarize(Employee_Count = first(Employee_Count)) %>%
mutate(Employee_Count = paste("n=",Employee_Count))
if(return == "table"){
## Table to return
plot_data %>%
group_by(group) %>%
summarise(
mean = mean(!!sym(metric), na.rm = TRUE),
median = median(!!sym(metric), na.rm = TRUE),
max = max(!!sym(metric), na.rm = TRUE),
min = min(!!sym(metric), na.rm = TRUE)
) %>%
left_join(data %>%
rename(group = !!sym(hrvar)) %>%
group_by(group) %>%
summarise(Employee_Count = n_distinct(PersonId)),
by = "group")
} else if(return == "plot"){
## Density plot
plot_data %>%
ggplot(aes(x = !!sym(metric))) +
geom_density(lwd = 1, colour = 4, fill = 4, alpha = 0.25) +
facet_wrap(group ~ ., ncol = ncol) +
theme_wpa_basic() +
theme(strip.background = element_rect(color = "#1d627e",
fill = "#1d627e"),
strip.text = element_text(size = 10,
colour = "#FFFFFF",
face = "bold")) +
labs(title = clean_nm,
subtitle = paste("Distribution of", tolower(clean_nm), "by", tolower(camel_clean(hrvar)))) +
xlab(clean_nm) +
ylab("Density") +
labs(caption = extract_date_range(data, return = "text"))
} else if(return == "frequency"){
hist_obj <-
plot_data %>%
ggplot(aes(x = !!sym(metric))) +
geom_density() +
facet_wrap(group ~ ., ncol = ncol)
ggplot2::ggplot_build(hist_obj)$data[[1]] %>%
select(
group,
PANEL,
y,
x,
density,
scaled,
ndensity,
count,
n
) %>%
group_split(PANEL)
} else if(return == "data"){
plot_data
} else {
stop("Please enter a valid input for `return`.")
}
}
# End of file: /scratch/gouwar.j/cran-all/cranData/wpa/R/create_density.R
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Horizontal 100 percent stacked bar plot for any metric
#'
#' @description
#' Provides an analysis of the distribution of a selected metric.
#' Returns a stacked bar plot by default.
#' Additional options available to return a table with distribution elements.
#'
#' @template spq-params
#' @param metric String containing the name of the metric,
#' e.g. "Collaboration_hours"
#'
#' @param return String specifying what to return. This must be one of the
#' following strings:
#' - `"plot"`
#' - `"table"`
#'
#' See `Value` for more information.
#'
#' @param cut A numeric vector of length three to specify the breaks for the
#' distribution,
#' e.g. c(10, 15, 20)
#' @param dist_colours A character vector of length four to specify colour
#' codes for the stacked bars.
#' @param unit String to specify what unit to use. This defaults to `"hours"`
#' but can accept any custom string. See `cut_hour()` for more details.
#' @inheritParams cut_hour
#' @param sort_by String to specify the bucket label to sort by. Defaults to
#' `NULL` (no sorting).
#' @param labels Character vector to override labels for the created
#' categorical variables. Must be a named vector - see examples.
#'
#' @return
#' A different output is returned depending on the value passed to the `return` argument:
#' - `"plot"`: 'ggplot' object. A stacked bar plot for the metric.
#' - `"table"`: data frame. A summary table for the metric.
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#' @importFrom tidyr spread
#' @importFrom stats median
#' @importFrom stats sd
#'
#' @family Visualization
#' @family Flexible
#'
#' @examples
#' # Return plot
#' create_dist(sq_data, metric = "Collaboration_hours", hrvar = "Organization")
#'
#' # Return summary table
#' create_dist(sq_data, metric = "Collaboration_hours", hrvar = "Organization", return = "table")
#'
#' # Use custom labels by providing a label vector
#' eh_labels <- c(
#' "Fewer than fifteen" = "< 15 hours",
#' "Between fifteen and twenty" = "15 - 20 hours",
#' "Between twenty and twenty-five" = "20 - 25 hours",
#' "More than twenty-five" = "25+ hours"
#' )
#'
#' sq_data %>%
#' create_dist(metric = "Email_hours",
#' labels = eh_labels, return = "plot")
#'
#' # Sort by a category
#' sq_data %>%
#' create_dist(metric = "Collaboration_hours",
#' sort_by = "25+ hours")
#' @export
create_dist <- function(data,
metric,
hrvar = "Organization",
mingroup = 5,
return = "plot",
cut = c(15, 20, 25),
dist_colours = c("#facebc",
"#fcf0eb",
"#b4d5dd",
"#bfe5ee"),
unit = "hours",
lbound = 0,
ubound = 100,
sort_by = NULL,
labels = NULL) {
## Check inputs -----------------------------------------------------------
required_variables <- c("Date",
metric,
"PersonId")
## Error message if variables are not present -----------------------------
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
## Clean metric name ------------------------------------------------------
clean_nm <- us_to_space(metric)
## Replace labels ---------------------------------------------------------
replace_labels <- function(x, labels){
ifelse(
is.na(names(labels[match(x, labels)])),
x,
names(labels[match(x, labels)])
)
}
## Handling NULL values passed to hrvar -----------------------------------
if(is.null(hrvar)){
data <- totals_col(data)
hrvar <- "Total"
}
## Basic Data for bar plot ------------------------------------------------
plot_data <-
data %>%
rename(group = !!sym(hrvar)) %>%
group_by(PersonId, group) %>%
summarise(!!sym(metric) := mean(!!sym(metric))) %>%
ungroup() %>%
left_join(data %>%
rename(group = !!sym(hrvar)) %>%
group_by(group) %>%
summarise(Employee_Count = n_distinct(PersonId)),
by = "group") %>%
filter(Employee_Count >= mingroup)
## Create buckets of collaboration hours ---------------------------------
plot_data <-
plot_data %>%
mutate(bucket_hours = cut_hour(!!sym(metric),
cuts = cut,
unit = unit,
lbound = lbound,
ubound = ubound))
## Employee count / base size table --------------------------------------
plot_legend <-
plot_data %>%
group_by(group) %>%
summarize(Employee_Count = first(Employee_Count)) %>%
mutate(Employee_Count = paste("n=",Employee_Count))
## Data for bar plot
plot_table <-
plot_data %>%
group_by(group, bucket_hours) %>%
summarize(Employees = n(),
Employee_Count = first(Employee_Count),
percent = Employees / Employee_Count ) %>%
arrange(group, desc(bucket_hours))
## Table for annotation --------------------------------------------------
annot_table <-
plot_legend %>%
dplyr::left_join(plot_table, by = "group")
  ## Remove the max value from the axis labels, and add a % sign
  max_blank <- function(x){
    as.character(
      c(
        scales::percent(x[-length(x)]),
        ""
      )
    )
  }
## Replace dist_colours --------------------------------------------------
if((length(dist_colours) - length(cut)) < 1){
dist_colours <- heat_colours(n = length(cut) + 1)
message("Insufficient colours supplied to `dist_colours` - using default colouring palette instead.",
"Please supply a vector of colours of length n + 1 where n is the length of vector supplied to `cut`.")
}
## Table to return -------------------------------------------------------
return_table <-
plot_table %>%
select(group, bucket_hours, percent) %>%
{if(is.null(labels)){
.
} else if(is.function(labels)){
mutate(., bucket_hours = do.call(what = labels, args = list(bucket_hours)))
} else {
mutate(., bucket_hours = replace_labels(x = bucket_hours, labels = labels))
}} %>%
spread(bucket_hours, percent) %>%
left_join(data %>%
rename(group = !!sym(hrvar)) %>%
group_by(group) %>%
summarise(Employee_Count = n_distinct(PersonId)),
by = "group") %>%
ungroup() %>%
{ if(is.null(sort_by)){
.
} else {
arrange(., desc(!!sym(sort_by)))
}} %>%
mutate(group = factor(group, levels = unique(group)))
## Bar plot -------------------------------------------------------------
plot_object <-
plot_table %>%
mutate(group = factor(group, levels = levels(return_table$group))) %>%
ggplot(aes(x = group,
y = Employees,
fill = bucket_hours)) +
geom_bar(stat = "identity", position = position_fill(reverse = TRUE)) +
scale_y_continuous(expand = c(.01, 0), labels = max_blank, position = "right") +
coord_flip() +
annotate("text", x = plot_legend$group, y = 1.15, label = plot_legend$Employee_Count, size = 3) +
annotate("rect", xmin = 0.5, xmax = length(plot_legend$group) + 0.5, ymin = 1.05, ymax = 1.25, alpha = .2) +
annotate(x = length(plot_legend$group) + 0.8,
xend = length(plot_legend$group) + 0.8,
y = 0,
yend = 1,
colour = "black",
lwd = 0.75,
geom = "segment") +
# Option to override labels ---------------------------------------------
{if(is.null(labels)){
scale_fill_manual(name = "", values = rev(dist_colours))
} else if(is.function(labels)){
scale_fill_manual(name = "", labels = labels, values = rev(dist_colours))
} else {
      # Match with values, replace with names;
      # flip names and values to be used for `scale_fill_manual()`
      flip <- function(x){ setNames(object = names(x), nm = x) }
scale_fill_manual(name = "",
labels = flip(labels),
values = rev(dist_colours))
}} +
theme_wpa_basic() +
theme(axis.line = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()) +
labs(
title = clean_nm,
subtitle = paste("Percentage of employees by", tolower(camel_clean(hrvar))),
x = camel_clean(hrvar),
caption = extract_date_range(data, return = "text")
)
# Return options ---------------------------------------------------------
if(return == "table"){
return_table
} else if(return == "plot"){
return(plot_object)
} else {
stop("Please enter a valid input for `return`.")
}
}
# End of file: /scratch/gouwar.j/cran-all/cranData/wpa/R/create_dist.R
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Create interactive tables in HTML with 'download' buttons.
#'
#' @description
#' See
#' <https://martinctc.github.io/blog/vignette-downloadable-tables-in-rmarkdown-with-the-dt-package/>
#' for more.
#'
#' @param x Data frame to be passed through.
#' @param rounding Numeric value specifying the number of decimal places to
#' display.
#' @param freeze Number of columns from the left to 'freeze'. Defaults to 2,
#' which includes the row number column.
#' @param percent Logical value specifying whether to display numeric columns
#' as percentages.
#'
#' @family Import and Export
#'
#' @examples
#' out_tb <- hrvar_count(sq_data, hrvar = "Organization", return = "table")
#' create_dt(out_tb)
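#'
#' # Freeze only the row-number column; a usage sketch
#' create_dt(out_tb, freeze = 1)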
#'
#' @return
#' Returns an HTML widget displaying rectangular data.
#'
#' @export
create_dt <- function(x, rounding = 1, freeze = 2, percent = FALSE){

  # Identify numeric columns to be rounded to `rounding` decimal places
  num_cols <- dplyr::select_if(x, is.numeric) %>% names()

  out <-
    DT::datatable(
      x,
      extensions = c('Buttons',
                     'FixedColumns'),
      options = list(
        dom = 'Blfrtip',
        fixedColumns = list(leftColumns = freeze),
        scrollX = TRUE,
        buttons = c('copy', 'csv', 'excel', 'pdf', 'print'),
        lengthMenu = list(c(10, 25, 50, -1),
                          c(10, 25, 50, "All"))
      )
    )

  # Apply number formatting only when numeric columns are present
  if(length(num_cols) > 0){

    out <- DT::formatRound(out, columns = num_cols, digits = rounding)

    if(percent == TRUE){

      out <- DT::formatPercentage(out, columns = num_cols, digits = rounding)
    }
  }

  out
}
# End of file: /scratch/gouwar.j/cran-all/cranData/wpa/R/create_dt.R
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Fizzy Drink / Jittered Scatter Plot for any metric
#'
#' @description
#' Analyzes a selected metric and returns a 'fizzy' scatter plot by default.
#' Additional options available to return a table with distribution elements.
#'
#' @details
#' This is a general purpose function that powers all the functions
#' in the package that produce 'fizzy drink' / jittered scatter plots.
#'
#' @template spq-params
#' @param metric Character string containing the name of the metric,
#' e.g. `"Collaboration_hours"`
#' @param return String specifying what to return. This must be one of the following strings:
#' - `"plot"`
#' - `"table"`
#'
#' See `Value` for more information.
#'
#' @return
#' A different output is returned depending on the value passed to the `return` argument:
#' - `"plot"`: 'ggplot' object. A jittered scatter plot for the metric.
#' - `"table"`: data frame. A summary table for the metric.
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#' @importFrom stats median
#' @importFrom stats sd
#'
#' @family Visualization
#' @family Flexible
#'
#' @examples
#' # Create a fizzy plot for Work Week Span by Level Designation
#' create_fizz(sq_data, metric = "Workweek_span", hrvar = "LevelDesignation", return = "plot")
#'
#' # Create a summary statistics table for Work Week Span by Organization
#' create_fizz(sq_data, metric = "Workweek_span", hrvar = "Organization", return = "table")
#'
#' # Create a fizzy plot for Collaboration Hours by Level Designation
#' create_fizz(sq_data, metric = "Collaboration_hours", hrvar = "LevelDesignation", return = "plot")
#' @export
create_fizz <- function(data,
metric,
hrvar = "Organization",
mingroup = 5,
return = "plot") {
## Check inputs
required_variables <- c("Date",
metric,
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
## Handling NULL values passed to hrvar
if(is.null(hrvar)){
data <- totals_col(data)
hrvar <- "Total"
}
## Clean metric name
clean_nm <- us_to_space(metric)
## Plot data
plot_data <-
data %>%
rename(group = !!sym(hrvar)) %>% # Rename HRvar to `group`
group_by(PersonId, group) %>%
summarise(!!sym(metric) := mean(!!sym(metric))) %>%
ungroup() %>%
left_join(data %>%
rename(group = !!sym(hrvar)) %>%
group_by(group) %>%
summarise(Employee_Count = n_distinct(PersonId)),
by = "group") %>%
filter(Employee_Count >= mingroup)
## Get max value
max_point <- max(plot_data[[metric]]) * 1.2
plot_legend <-
plot_data %>%
group_by(group) %>%
summarize(Employee_Count = first(Employee_Count)) %>%
mutate(Employee_Count = paste("n=",Employee_Count))
plot_object <-
plot_data %>%
ggplot(aes(x = group, y = !!sym(metric))) +
geom_point(size = 1,
alpha = 1/5,
color = "#578DB8",
position = position_jitter(width = 0.1, height = 0.1)) +
theme_wpa_basic() +
theme(
axis.line = element_blank(),
panel.grid.major.x = element_line(colour = "grey80"),
axis.ticks = element_blank(),
axis.title = element_blank()
) +
annotate("text",
x = plot_legend$group,
y = max_point,
label = plot_legend$Employee_Count,
size = 3) +
annotate("rect",
xmin = 0.5,
xmax = length(plot_legend$group) + 0.5,
ymin = max_point*0.95,
ymax = max_point*1.05,
alpha = .2) +
scale_y_continuous(
position = "right",
limits = c(0, max_point * 1.1)) +
coord_flip() +
labs(title = clean_nm,
subtitle = paste("Distribution of",
tolower(clean_nm),
"by",
tolower(camel_clean(hrvar))),
caption = extract_date_range(data, return = "text"),
x = hrvar,
y = paste("Average", clean_nm))
summary_table <-
plot_data %>%
select(group, tidyselect::all_of(metric)) %>%
group_by(group) %>%
summarise(mean = mean(!!sym(metric)),
median = median(!!sym(metric)),
sd = sd(!!sym(metric)),
min = min(!!sym(metric)),
max = max(!!sym(metric)),
range = max - min,
n = n())
if(return == "table"){
summary_table %>%
as_tibble() %>%
return()
} else if(return == "plot"){
return(plot_object)
} else {
stop("Please enter a valid input for `return`.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/create_fizz.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Create a histogram plot for any metric
#'
#' @description
#' Provides an analysis of the distribution of a selected metric.
#' Returns a faceted histogram by default.
#' Additional options available to return the underlying frequency table.
#'
#' @template spq-params
#' @param metric String containing the name of the metric,
#' e.g. "Collaboration_hours"
#'
#' @param binwidth Numeric value for setting `binwidth` argument within
#' `ggplot2::geom_histogram()`. Defaults to 1.
#'
#' @param ncol Numeric value setting the number of columns on the plot. Defaults
#' to `NULL` (automatic).
#'
#' @param return String specifying what to return. This must be one of the
#' following strings:
#' - `"plot"`
#' - `"table"`
#' - `"data"`
#' - `"frequency"`
#'
#' See `Value` for more information.
#'
#' @return
#' A different output is returned depending on the value passed to the `return`
#' argument:
#' - `"plot"`: 'ggplot' object. A faceted histogram for the metric.
#' - `"table"`: data frame. A summary table for the metric.
#' - `"data"`: data frame. Data with calculated person averages.
#' - `"frequency`: list of data frames. Each data frame contains the
#' frequencies used in each panel of the plotted histogram.
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#' @importFrom tidyr spread
#' @importFrom stats median
#' @importFrom stats sd
#'
#' @family Flexible
#'
#' @examples
#' # Return plot for whole organization
#' create_hist(sq_data, metric = "Collaboration_hours", hrvar = NULL)
#'
#' # Return plot
#' create_hist(sq_data, metric = "Collaboration_hours", hrvar = "Organization")
#'
#' # Return plot but coerce plot to two columns
#' create_hist(sq_data, metric = "Collaboration_hours", hrvar = "Organization", ncol = 2)
#'
#' # Return summary table
#' create_hist(sq_data,
#' metric = "Collaboration_hours",
#' hrvar = "Organization",
#' return = "table")
#' @export
create_hist <- function(data,
metric,
hrvar = "Organization",
mingroup = 5,
binwidth = 1,
ncol = NULL,
return = "plot") {
## Check inputs
required_variables <- c("Date",
metric,
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
## Clean metric name
clean_nm <- us_to_space(metric)
## Handling NULL values passed to hrvar
if(is.null(hrvar)){
data <- totals_col(data)
hrvar <- "Total"
}
## Basic Data for bar plot
## Calculate person-averages
plot_data <-
data %>%
rename(group = !!sym(hrvar)) %>%
group_by(PersonId, group) %>%
summarise(!!sym(metric) := mean(!!sym(metric))) %>%
ungroup() %>%
left_join(data %>%
rename(group = !!sym(hrvar)) %>%
group_by(group) %>%
summarise(Employee_Count = n_distinct(PersonId)),
by = "group") %>%
filter(Employee_Count >= mingroup)
## Employee count / base size table
plot_legend <-
plot_data %>%
group_by(group) %>%
summarize(Employee_Count = first(Employee_Count)) %>%
mutate(Employee_Count = paste("n=",Employee_Count))
## Bar plot
plot_object <-
plot_data %>%
ggplot(aes(x = !!sym(metric))) +
geom_histogram(binwidth = binwidth, colour = "white", fill="#34b1e2") +
facet_wrap(group ~ ., ncol = ncol) +
theme_wpa_basic() +
theme(strip.background = element_rect(color = "#1d627e",
fill = "#1d627e"),
strip.text = element_text(size = 10,
colour = "#FFFFFF",
face = "bold")) +
labs(title = clean_nm,
subtitle = paste("Distribution of", tolower(clean_nm), "by", tolower(camel_clean(hrvar)))) +
xlab(clean_nm) +
ylab("Number of employees") +
labs(caption = extract_date_range(data, return = "text"))
## Table to return
return_table <-
plot_data %>%
group_by(group) %>%
summarise(
mean = mean(!!sym(metric), na.rm = TRUE),
median = median(!!sym(metric), na.rm = TRUE),
max = max(!!sym(metric), na.rm = TRUE),
min = min(!!sym(metric), na.rm = TRUE),
.groups = "drop"
) %>%
left_join(data %>%
rename(group = !!sym(hrvar)) %>%
group_by(group) %>%
summarise(Employee_Count = n_distinct(PersonId)),
by = "group")
if(return == "table"){
return_table
} else if(return == "plot"){
return(plot_object)
} else if(return == "frequency"){
ggplot2::ggplot_build(plot_object)$data[[1]] %>%
select(group,
PANEL,
x,
xmin,
xmax,
y) %>%
group_split(PANEL)
} else if(return == "data"){
plot_data
} else {
stop("Please enter a valid input for `return`.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/create_hist.R
|
#' @title
#' Create an incidence analysis reflecting proportion of population scoring above
#' or below a threshold for a metric
#'
#' @description
#' An incidence analysis is generated, with each value in the table reflecting
#' the proportion of the population that is above or below a threshold for a
#' specified metric. When a single `hrvar` is provided, a bar plot is
#' generated; when two `hrvar` values are provided, an incidence table
#' (heatmap) is generated.
#'
#'
#' @param data A Standard Person Query dataset in the form of a data frame.
#' @param metric Character string containing the name of the metric,
#' e.g. "Collaboration_hours"
#' @param hrvar Character vector of at most length 2 containing the name of the
#' HR Variable by which to split metrics.
#' @param mingroup Numeric value setting the privacy threshold / minimum group
#' size. Defaults to 5.
#' @param threshold Numeric value specifying the threshold.
#' @param position String specifying whether to measure incidence above or
#'   below the threshold. Valid values are:
#' - `"above"`: show incidence of those equal to or above the threshold
#' - `"below"`: show incidence of those equal to or below the threshold
#' @param return String specifying what to return. This must be one of the
#' following strings:
#' - `"plot"`
#' - `"table"`
#'
#' See `Value` for more information.
#'
#' @return
#' A different output is returned depending on the value passed to the `return` argument:
#' - `"plot"`: 'ggplot' object. A heat map.
#' - `"table"`: data frame. A summary table.
#'
#' @import dplyr
#' @import ggplot2
#' @importFrom scales percent
#'
#' @family Visualization
#' @family Flexible
#'
#' @examples
#' # Only a single HR attribute
#' create_inc(
#' data = sq_data,
#' metric = "After_hours_collaboration_hours",
#' hrvar = "Organization",
#' threshold = 4,
#' position = "above"
#' )
#'
#' # Two HR attributes
#' create_inc(
#' data = sq_data,
#' metric = "Collaboration_hours",
#' hrvar = c("LevelDesignation", "Organization"),
#' threshold = 20,
#' position = "below"
#' )
#'
#' @export
create_inc <- function(
data,
metric,
hrvar,
mingroup = 5,
threshold,
position,
return = "plot"
){
if(length(hrvar) == 1){
create_inc_bar(
data = data,
metric = metric,
hrvar = hrvar,
mingroup = mingroup,
threshold = threshold,
position = position,
return = return
)
} else if(length(hrvar) == 2){
create_inc_grid(
data = data,
metric = metric,
hrvar = hrvar,
mingroup = mingroup,
threshold = threshold,
position = position,
return = return
)
} else {
stop("`hrvar` can only accept a character vector of length 2.")
}
}
#' @rdname create_inc
#' @export
create_incidence <- create_inc
#' Run `create_inc` with only single `hrvar`
#' Returning a bar chart
#'
#' @noRd
create_inc_bar <- function(
data,
metric,
hrvar,
mingroup = 5,
threshold,
position,
return = "plot"
){
# Transform data so that metrics become proportions
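  # Wrapping the pipe step in braces allows an if/else inside the chain; the
  # metric is recoded to TRUE/FALSE per row and later averaged into a proportion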
data_t <-
data %>%
{ if (position == "above"){
mutate(., !!sym(metric) := !!sym(metric) >= threshold)
} else if (position == "below"){
mutate(., !!sym(metric) := !!sym(metric) <= threshold)
}
}
# Set title text
title_text <-
paste(
"Incidence of",
tolower(us_to_space(metric)),
position,
threshold
)
# Set subtitle text
subtitle_text <-
paste(
"Percentage and number of employees by",
hrvar
)
# Pipe result to `create_bar()`
create_bar(
data = data_t,
metric = metric,
hrvar = hrvar,
mingroup = mingroup,
return = return,
plot_title = title_text,
plot_subtitle = subtitle_text,
legend_lab = paste("% with",
tolower(us_to_space(metric)),
position,
threshold),
percent = TRUE
)
}
#' Run `create_inc` with only two `hrvar`
#' Returning a heatmap
#'
#' @noRd
create_inc_grid <- function(
data,
metric,
hrvar,
mingroup = 5,
threshold,
position,
return = "plot"
){
# Create table of proportions
myTable <-
data %>%
{ if (position == "above"){
mutate(., !!sym(metric) := !!sym(metric) >= threshold)
} else if (position == "below"){
mutate(., !!sym(metric) := !!sym(metric) <= threshold)
}
} %>%
group_by(!!sym(hrvar[1]), !!sym(hrvar[2]), PersonId) %>%
summarise(
!!sym(metric) := mean(!!sym(metric), na.rm = TRUE)
) %>%
group_by(!!sym(hrvar[1]), !!sym(hrvar[2])) %>%
summarise(
!!sym(metric) := mean(!!sym(metric), na.rm = TRUE),
n = n_distinct(PersonId),
.groups = "drop"
) %>%
filter(n >= mingroup) %>%
arrange(desc(!!sym(metric)))
if(return == "table"){
myTable
} else if(return == "plot"){
# Set title text
title_text <-
paste(
"Incidence of",
tolower(us_to_space(metric)),
position,
threshold
)
# Set subtitle text
subtitle_text <-
paste(
"Percentage and number of employees by",
hrvar[1],
"and",
hrvar[2]
)
metric_text <- NULL
myTable %>%
mutate(metric_text = paste0(
scales::percent(!!sym(metric), accuracy = 1),
" (", n, ")")) %>%
ggplot(aes(x = !!sym(hrvar[1]),
y = !!sym(hrvar[2]),
fill = !!sym(metric))) +
geom_tile() +
geom_text(aes(label = metric_text),
colour = "black",
size = 3)+
scale_fill_gradient2(low = rgb2hex(7, 111, 161),
mid = rgb2hex(241, 204, 158),
high = rgb2hex(216, 24, 42),
midpoint = 0.5,
breaks = c(0, 0.5, 1),
labels = c("0%", "", "100%"),
limits = c(0, 1)) +
scale_x_discrete(position = "top", labels = us_to_space) +
scale_y_discrete(labels = us_to_space) +
theme_wpa_basic() +
labs(
title = title_text,
subtitle = subtitle_text,
caption = paste(
extract_date_range(data, return = "text"),
"\n",
"Percentages reflect incidence with respect to population in cell."),
fill = "Incidence"
)
}
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/create_inc.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Time Trend - Line Chart for any metric
#'
#' @description
#' Provides a week by week view of a selected metric, visualised as line charts.
#' By default returns a line chart for the defined metric,
#' with a separate panel per value in the HR attribute.
#' Additional options available to return a summary table.
#'
#' @details
#' This is a general purpose function that powers all the functions
#' in the package that produce faceted line plots.
#'
#' @template spq-params
#' @param metric Character string containing the name of the metric,
#' e.g. "Collaboration_hours"
#'
#' @param ncol Numeric value setting the number of columns on the plot. Defaults
#' to `NULL` (automatic).
#'
#' @param return String specifying what to return. This must be one of the following strings:
#' - `"plot"`
#' - `"table"`
#'
#' See `Value` for more information.
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#' @importFrom tidyselect all_of
#'
#' @family Visualization
#' @family Flexible
#' @family Time-series
#'
#' @examples
#' # Return plot of Email Hours
#' sq_data %>% create_line(metric = "Email_hours", return = "plot")
#'
#' # Return plot of Collaboration Hours
#' sq_data %>% create_line(metric = "Collaboration_hours", return = "plot")
#'
#' # Return plot but coerce plot to two columns
#' sq_data %>%
#' create_line(
#' metric = "Collaboration_hours",
#' hrvar = "Organization",
#' ncol = 2
#' )
#'
#' # Return plot of Work week span and cut by `LevelDesignation`
#' sq_data %>% create_line(metric = "Workweek_span", hrvar = "LevelDesignation")
#'
#' @return
#' A different output is returned depending on the value passed to the `return` argument:
#' - `"plot"`: 'ggplot' object. A faceted line plot for the metric.
#' - `"table"`: data frame. A summary table for the metric.
#'
#' @export
create_line <- function(data,
metric,
hrvar = "Organization",
mingroup = 5,
ncol = NULL,
return = "plot"){
## Check inputs
required_variables <- c("Date",
metric,
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
## Handling NULL values passed to hrvar
if(is.null(hrvar)){
data <- totals_col(data)
hrvar <- "Total"
}
## Clean metric name
clean_nm <- us_to_space(metric)
myTable <-
data %>%
mutate(Date = as.Date(Date, "%m/%d/%Y")) %>%
rename(group = !!sym(hrvar)) %>% # Rename HRvar to `group`
select(PersonId, Date, group, all_of(metric)) %>%
group_by(group) %>%
mutate(Employee_Count = n_distinct(PersonId)) %>%
filter(Employee_Count >= mingroup) # Keep only groups above privacy threshold
myTable <-
myTable %>%
group_by(Date, group) %>%
summarize(Employee_Count = mean(Employee_Count),
!!sym(metric) := mean(!!sym(metric)))
## Data frame to return
myTable_return <-
myTable %>%
select(Date, group, all_of(metric)) %>%
spread(Date, !!sym(metric))
## Data frame for creating plot
myTable_plot <-
myTable %>%
select(Date, group, all_of(metric)) %>%
group_by(Date, group) %>%
summarise_at(vars(all_of(metric)), ~mean(., na.rm = TRUE)) %>%
ungroup()
return_plot <- function(){
myTable_plot %>%
ggplot(aes(x = Date, y = !!sym(metric))) +
geom_line(colour = "#1d627e") +
facet_wrap(.~group, ncol = ncol) +
scale_fill_gradient(name="Hours", low = "white", high = "red") +
theme_wpa_basic() +
theme(strip.background = element_rect(color = "#1d627e",
fill = "#1d627e"),
strip.text = element_text(size = 10,
colour = "#FFFFFF",
face = "bold")) +
labs(title = clean_nm,
subtitle = paste("Total",
tolower(clean_nm),
"by",
tolower(camel_clean(hrvar))),
x = "Date",
y = "Weekly hours",
caption = extract_date_range(data, return = "text")) +
ylim(0, NA) # Set origin to zero
}
if(return == "table"){
myTable_return
} else if(return == "plot"){
return_plot()
} else {
stop("Please enter a valid input for `return`.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/create_line.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Create a line chart without aggregation for any metric
#'
#' @description
#' This function creates a line chart directly from aggregated / summarised data.
#' Unlike `create_line()`, which performs a person-level aggregation,
#' `create_line_asis()` performs no calculation and renders the values exactly as
#' they are passed in. The only requirement is that a `date_var` is provided for the x-axis.
#'
#' @param data Plotting data as a data frame.
#' @param date_var String containing name of variable for the horizontal axis.
#' @param metric String containing name of variable representing the line.
#' @param title Title of the plot.
#' @param subtitle Subtitle of the plot.
#' @param caption Caption of the plot.
#' @param ylab Y-axis label for the plot. Defaults to the metric name.
#' @param xlab X-axis label for the plot. Defaults to the date variable name.
#' @param line_colour String to specify colour to use for the line.
#' Hex codes are accepted. You can also supply
#' RGB values via `rgb2hex()`.
#'
#' @import ggplot2
#' @import dplyr
#'
#' @family Visualization
#' @family Flexible
#' @family Time-series
#'
#' @return
#' Returns a 'ggplot' object representing a line plot.
#'
#' @examples
#' library(dplyr)
#'
#' # Median `Emails_sent` grouped by `Date`
#' # Without Person Averaging
#' med_df <-
#' sq_data %>%
#' group_by(Date) %>%
#' summarise(Emails_sent_median = median(Emails_sent))
#'
#' med_df %>%
#' create_line_asis(
#' date_var = "Date",
#' metric = "Emails_sent_median",
#' title = "Median Emails Sent",
#' subtitle = "Person Averaging Not Applied",
#' caption = extract_date_range(sq_data, return = "text")
#' )
#'
#' @export
create_line_asis <- function(data,
date_var = "Date",
metric,
title = NULL,
subtitle = NULL,
caption = NULL,
                             ylab = metric,
                             xlab = date_var,
line_colour = rgb2hex(0, 120, 212)){
returnPlot <-
data %>%
mutate_at(vars(date_var), ~as.Date(., format = "%m/%d/%Y")) %>%
ggplot(aes(x = !!sym(date_var), y = !!sym(metric))) +
geom_line(colour = line_colour)
returnPlot +
labs(title = title,
subtitle = subtitle,
caption = caption,
         y = ylab,
         x = xlab) +
theme_wpa_basic()
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/create_line_asis.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Period comparison scatter plot for any two metrics
#'
#' @description
#' Returns two side-by-side scatter plots representing two selected metrics,
#' using colour to map an HR attribute and size to represent number of employees.
#' Returns a faceted scatter plot by default, with additional options
#' to return a summary table.
#'
#' @details
#' This is a general purpose function that powers all the functions
#' in the package that produce faceted scatter plots.
#'
#' @param data A Standard Person Query dataset in the form of a data frame.
#' @param hrvar HR Variable by which to split metrics. Defaults to
#'   "Organization" but accepts any character vector, e.g. "LevelDesignation"
#' @param metric_x Character string containing the name of the metric,
#' e.g. "Collaboration_hours"
#' @param metric_y Character string containing the name of the metric,
#' e.g. "Collaboration_hours"
#' @param before_start Start date of "before" time period in YYYY-MM-DD
#' @param before_end End date of "before" time period in YYYY-MM-DD
#' @param after_start Start date of "after" time period in YYYY-MM-DD
#' @param after_end End date of "after" time period in YYYY-MM-DD
#' @param before_label String to specify a label for the "before" period. Defaults to "Period 1".
#' @param after_label String to specify a label for the "after" period. Defaults to "Period 2".
#' @param mingroup Numeric value setting the privacy threshold / minimum group size.
#' Defaults to 5.
#' @param return Character vector specifying what to return, defaults to "plot".
#' Valid inputs are "plot" and "table".
#'
#' @import dplyr
#' @import ggplot2
#'
#' @family Visualization
#' @family Flexible
#' @family Time-series
#'
#' @return
#' Returns a 'ggplot' object showing two scatter plots side by side representing
#' the two periods.
#'
#' @examples
#' # Return plot
#' create_period_scatter(sq_data,
#' hrvar = "LevelDesignation",
#' before_start = "2019-12-15",
#' before_end = "2019-12-29",
#' after_start = "2020-01-05",
#' after_end = "2020-01-26")
#'
#' # Return a summary table
#' create_period_scatter(sq_data, before_end = "2019-12-31", return = "table")
#'
#'
#' @export
create_period_scatter <- function(data,
hrvar = "Organization",
metric_x = "Multitasking_meeting_hours",
metric_y = "Meeting_hours",
before_start = min(as.Date(data$Date, "%m/%d/%Y")),
before_end,
after_start = as.Date(before_end) + 1,
after_end = max(as.Date(data$Date, "%m/%d/%Y")),
before_label = "Period 1",
after_label = "Period 2",
mingroup = 5,
return = "plot"){
## Check inputs
## Update these column names as per appropriate
required_variables <- c("Date",
hrvar,
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
daterange_1_start <- as.Date(before_start)
daterange_1_end <- as.Date(before_end)
daterange_2_start <- as.Date(after_start)
daterange_2_end <- as.Date(after_end)
# Fix dates format for WpA Queries
WpA_dataset <- data %>% mutate(Date = as.Date(Date, "%m/%d/%Y"))
# Check for dates in data file
if (daterange_1_start < min(WpA_dataset$Date) |
daterange_1_start > max(WpA_dataset$Date) |
daterange_1_end < min(WpA_dataset$Date) |
daterange_1_end > max(WpA_dataset$Date) |
daterange_2_start < min(WpA_dataset$Date) |
daterange_2_start > max(WpA_dataset$Date) |
daterange_2_end < min(WpA_dataset$Date) |
daterange_2_end > max(WpA_dataset$Date)) {
    stop('Dates not found in dataset')
}
## Employee count
emp_count <-
WpA_dataset %>%
group_by(!!sym(hrvar)) %>%
summarise(n = n_distinct(PersonId))
data_p1 <-
WpA_dataset %>%
    rename(group = !!sym(hrvar)) %>%
filter(between(Date, daterange_1_start, daterange_1_end)) %>%
group_by(PersonId, group) %>%
summarise_at(vars(!!sym(metric_x), !!sym(metric_y)), ~mean(.)) %>%
ungroup() %>%
group_by(group) %>%
summarise_at(vars(!!sym(metric_x), !!sym(metric_y)), ~mean(., na.rm = TRUE)) %>%
mutate(Period = before_label) %>%
left_join(emp_count, by = c(group = hrvar)) %>%
filter(n >= mingroup)
data_p2 <-
WpA_dataset %>%
    rename(group = !!sym(hrvar)) %>%
filter(between(Date, daterange_2_start, daterange_2_end)) %>%
group_by(PersonId, group) %>%
summarise_at(vars(!!sym(metric_x), !!sym(metric_y)), ~mean(.)) %>%
ungroup() %>%
group_by(group) %>%
summarise_at(vars(!!sym(metric_x), !!sym(metric_y)), ~mean(., na.rm = TRUE)) %>%
mutate(Period = after_label) %>%
left_join(emp_count, by = c(group = hrvar)) %>%
filter(n >= mingroup)
## bind data
data_both <- rbind(data_p1, data_p2)
date_range_str <-
paste("Data from",
daterange_1_start,
"to",
daterange_1_end,
"and",
daterange_2_start,
"to",
daterange_2_end)
clean_x <- us_to_space(metric_x)
clean_y <- us_to_space(metric_y)
plot_title <-
paste(clean_x, "and", clean_y)
plot_object <-
data_both %>%
ggplot(aes(x = !!sym(metric_x),
y = !!sym(metric_y),
colour = group,
size = n)) +
geom_point(alpha = 0.5) +
scale_size(range = c(1, 20)) +
facet_wrap(.~Period) +
    guides(size = "none") +
theme_wpa_basic() +
theme(legend.position = "bottom",
strip.background = element_rect(color = "#1d627e",
fill = "#1d627e"),
strip.text = element_text(size = 10,
colour = "#FFFFFF",
face = "bold")) +
ggtitle(plot_title,
subtitle = paste("Comparison of weekly averages by ", tolower(camel_clean(hrvar)))) +
ylab(clean_y) +
xlab(clean_x) +
labs(caption = date_range_str)
if(return == "table"){
# return(myTable_return)
return(data_both)
} else if(return == "plot"){
return(plot_object)
} else {
stop("Please enter a valid input for `return`.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/create_period_scatter.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title
#' Rank all groups across HR attributes on a selected Viva Insights metric
#'
#' @description
#' This function scans a standard Person query output for groups with high
#' levels of a given Viva Insights Metric. Returns a plot by default, with an
#' option to return a table with all groups (across multiple HR attributes)
#' ranked by the specified metric.
#'
#' @author Carlos Morales Torrado <carlos.morales@@microsoft.com>
#' @author Martin Chan <martin.chan@@microsoft.com>
#'
#' @template spq-params
#' @param metric Character string containing the name of the metric,
#' e.g. "Collaboration_hours"
#'
#' @param return String specifying what to return. This must be one of the
#' following strings:
#' - `"plot"` (default)
#' - `"table"`
#'
#' See `Value` for more information.
#'
#' @param mode String to specify calculation mode. Must be either:
#' - `"simple"`
#' - `"combine"`
#'
#' @param plot_mode Numeric value to determine which plot mode to return. Must
#'   be either `1` or `2`, and is only used when `return = "plot"`.
#' - `1`: Top and bottom five groups across the data population are highlighted
#' - `2`: Top and bottom groups _per_ organizational attribute are highlighted
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#' @importFrom stats reorder
#'
#' @family Visualization
#' @family Flexible
#'
#' @examples
#' # Use a small sample for faster runtime
#' sq_data_small <- dplyr::slice_sample(sq_data, prop = 0.1)
#'
#' # Plot mode 1 - show top and bottom five groups
#' create_rank(
#' data = sq_data_small,
#' hrvar = c("FunctionType", "LevelDesignation"),
#' metric = "Emails_sent",
#' return = "plot",
#' plot_mode = 1
#' )
#'
#' # Plot mode 2 - show top and bottom groups per HR variable
#' create_rank(
#' data = sq_data_small,
#' hrvar = c("FunctionType", "LevelDesignation"),
#' metric = "Emails_sent",
#' return = "plot",
#' plot_mode = 2
#' )
#'
#' # Return a table
#' create_rank(
#' data = sq_data_small,
#' metric = "Emails_sent",
#' return = "table"
#' )
#'
#' \donttest{
#' # Return a table - combination mode
#' create_rank(
#' data = sq_data_small,
#' metric = "Emails_sent",
#' mode = "combine",
#' return = "table"
#' )
#' }
#'
#' @return
#' A different output is returned depending on the value passed to the `return`
#' argument:
#' - `"plot"`: 'ggplot' object. A bubble plot where the x-axis represents the
#' metric, the y-axis represents the HR attributes, and the size of the
#' bubbles represent the size of the organizations. Note that there is no
#' plot output if `mode` is set to `"combine"`.
#' - `"table"`: data frame. A summary table for the metric.
#'
#' @export
create_rank <- function(data,
metric,
hrvar = extract_hr(data, exclude_constants = TRUE),
mingroup = 5,
return = "table",
mode = "simple",
plot_mode = 1){
if(mode == "simple"){
results <-
create_bar(data,
metric = metric,
hrvar = hrvar[1],
mingroup = mingroup,
return = "table")
## Create a blank column
results$hrvar <- ""
## Empty table
results <- results[0,]
## Loop through each HR attribute supplied in argument
for (p in hrvar) {
table1 <-
data %>%
create_bar(metric = metric,
hrvar = p,
mingroup = mingroup,
return = "table")
table1$hrvar <- p
results <- rbind(results,table1)
}
output <-
results %>%
arrange(desc(get(metric))) %>%
select(hrvar, everything()) %>%
    mutate(group = as.character(group)) # convert to character; text labels fail otherwise
if(return == "table"){
output
} else if(return == "plot"){
# Company average
avg_ch <-
data %>%
create_bar(hrvar = NULL, metric = metric, return = "table") %>%
pull(metric)
if(plot_mode == 1){
# Main plot
output %>%
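        # `output` is already sorted descending by the metric, so reversing
        # the ranks makes Rank 1 the highest-scoring group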
mutate(Rank = rev(rank(!!sym(metric), ties.method = "max"))) %>%
mutate(Group =
case_when(Rank %in% 1:5 ~ "Top 5",
Rank %in% nrow(.):(nrow(.) - 5) ~ "Bottom 5",
TRUE ~ "Middle")) %>%
group_by(hrvar) %>%
mutate(OrgGroup =
case_when(!!sym(metric) == max(!!sym(metric), na.rm = TRUE) ~ "Top",
!!sym(metric) == min(!!sym(metric), na.rm = TRUE) ~ "Bottom",
TRUE ~ "Middle")) %>%
mutate(top_group = max(!!sym(metric), na.rm = TRUE)) %>%
ungroup() %>%
ggplot(aes(x = !!sym(metric),
y = reorder(hrvar, top_group))) + # Sort by top group
geom_point(aes(fill = Group,
size = n),
colour = "black",
pch = 21,
alpha = 0.8) +
labs(title = us_to_space(metric),
subtitle = "Lowest and highest group averages, by org. attribute",
y = "",
x = "") +
ggrepel::geom_text_repel(
aes(x = !!sym(metric),
y = hrvar,
label = ifelse(Group %in% c("Top 5", "Bottom 5"), group, "")),
size = 3) +
scale_x_continuous(position = "top") +
scale_fill_manual(name = "Group",
values = c(rgb2hex(68,151,169),
"white",
"#FE7F4F"),
guide = "legend") +
theme_wpa_basic() +
scale_size(guide = "none", range = c(1, 15)) +
theme(
axis.line=element_blank(),
panel.grid.major.x = element_blank(),
panel.grid.major.y = element_line(colour = "#D9E7F7", size = 3), # lightblue bar
panel.grid.minor.x = element_line(color="gray"),
strip.placement = "outside",
strip.background = element_blank(),
strip.text = element_blank()
) +
geom_vline(xintercept = avg_ch, colour = "red")
} else if(plot_mode == 2){
output %>%
group_by(hrvar) %>%
mutate(OrgGroup =
case_when(!!sym(metric) == max(!!sym(metric), na.rm = TRUE) ~ "Top",
!!sym(metric) == min(!!sym(metric), na.rm = TRUE) ~ "Bottom",
TRUE ~ "Middle")) %>%
mutate(top_group = max(!!sym(metric), na.rm = TRUE)) %>%
ungroup() %>%
ggplot(aes(x = !!sym(metric),
y = reorder(hrvar, top_group))) + # Sort by top group
geom_point(aes(fill = OrgGroup,
size = n),
colour = "black",
pch = 21,
alpha = 0.8) +
labs(title = us_to_space(metric),
subtitle = "Group averages by organizational attribute",
y = "Organizational attributes",
x = us_to_space(metric)) +
ggrepel::geom_text_repel(aes(x = !!sym(metric),
y = hrvar,
label = ifelse(OrgGroup %in% c("Top", "Bottom"), group, "")),
size = 3) +
scale_x_continuous(position = "top") +
scale_fill_manual(name = "Group",
values = c(rgb2hex(68,151,169),
"white",
"#FE7F4F"),
guide = "legend") +
theme_wpa_basic() +
scale_size(guide = "none", range = c(1, 8)) +
theme(
panel.grid.major.x = element_blank(),
panel.grid.major.y = element_line(colour = "#D9E7F7", size = 3), # lightblue bar
strip.placement = "outside",
strip.background = element_blank(),
strip.text = element_blank()
) +
geom_vline(xintercept = avg_ch, colour = "red")
} else {
stop("Invalid plot_mode argument.")
}
} else {
stop("Invalid `return` argument.")
}
} else if(mode == "combine"){
create_rank_combine(
data = data,
hrvar = hrvar,
metric = metric,
mingroup = mingroup
)
} else {
stop("Invalid `mode` argument.")
}
}
#' @title Create combination pairs of HR variables and run 'create_rank()'
#'
#' @description Create pairwise combinations of HR variables and compute an
#' average of a specified advanced insights metric.
#'
#' @details
#' This function is called when the `mode` argument in `create_rank()` is
#' specified as `"combine"`.
#'
#' @inheritParams create_rank
#'
#' @examples
#' # Use a small sample for faster runtime
#' sq_data_small <- dplyr::slice_sample(sq_data, prop = 0.1)
#'
#' create_rank_combine(
#' data = sq_data_small,
#' metric = "Email_hours"
#' )
#'
#' @return Data frame containing the following variables:
#' - `hrvar`: placeholder column that denotes the output as `"Combined"`.
#' - `group`: pairwise combinations of HR attributes with the HR attribute
#' in square brackets followed by the value of the HR attribute.
#' - Name of the metric (as passed to `metric`)
#' - `n`
#'
#' @export
create_rank_combine <- function(data,
hrvar = extract_hr(data),
metric,
mingroup = 5){
hrvar_iter_grid <-
tidyr::expand_grid(var1 = hrvar,
var2 = hrvar) %>%
dplyr::filter(var1 != var2)
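  # Ordered pairs are kept, so both [A] x [B] and [B] x [A] combinations are computed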
hrvar_iter_grid %>%
purrr::pmap(function(var1, var2){
data %>%
dplyr::mutate(Combined =
paste0(
"[",var1, "] ",
!!sym(var1),
" [",var2, "] ",
!!sym(var2))) %>%
create_rank(
metric = metric,
hrvar = "Combined",
mode = "simple",
mingroup = mingroup
)
}) %>%
dplyr::bind_rows()
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/create_rank.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Create a sankey chart from a two-column count table
#'
#' @description
#' Create a 'networkD3' style sankey chart based on a long count table
#' with two variables. The input data should have three columns, where
#' each row is a unique group:
#' 1. Variable 1
#' 2. Variable 2
#' 3. Count
#'
#' @param data Data frame of the long count table.
#' @param var1 String containing the name of the variable to be shown on the
#' left.
#' @param var2 String containing the name of the variable to be shown on the
#' right.
#' @param count String containing the name of the count variable.
#'
#' @import dplyr
#'
#' @return A 'sankeyNetwork' and 'htmlwidget' object containing a two-tier
#' sankey plot. The output can be saved locally with
#' `htmlwidgets::saveWidget()`.
#'
#' @examples
#' \donttest{
#' sq_data %>%
#' dplyr::count(Organization, FunctionType) %>%
#' create_sankey(var1 = "Organization", var2 = "FunctionType")
#' }
#'
#' @family Visualization
#' @family Flexible
#'
#' @export
create_sankey <- function(data, var1, var2, count = "n"){
## Rename
data$pre_group <- data[[var1]]
data$group <- data[[var2]]
## Set up `nodes`
group_source <- unique(data$pre_group)
group_target <- paste0(unique(data$group), " ")
groups <- c(group_source, group_target)
nodes_source <- tibble(name = group_source)
nodes_target <- tibble(name = group_target)
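  # networkD3 requires zero-based node indices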
nodes <- rbind(nodes_source, nodes_target) %>% mutate(node = 0:(nrow(.) - 1))
## Set up `links`
links <-
data %>%
mutate(group = paste0(group, " ")) %>%
select(source = "pre_group",
target = "group",
value = count)
links <-
links %>%
left_join(nodes %>% rename(IDsource = "node"), by = c("source" = "name")) %>%
left_join(nodes %>% rename(IDtarget = "node"), by = c("target" = "name"))
networkD3::sankeyNetwork(Links = as.data.frame(links),
Nodes = as.data.frame(nodes),
Source = 'IDsource', # Change reference to IDsource
Target = 'IDtarget', # Change reference to IDtarget
Value = 'value',
NodeID = 'name',
units="count",
sinksRight = FALSE)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/create_sankey.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title
#' Create a Scatter plot with two selected Viva Insights metrics (General Purpose)
#'
#' @description
#' Returns a scatter plot of two selected metrics, using colour to map
#' an HR attribute.
#' Returns a scatter plot by default, with additional options
#' to return a summary table.
#'
#' @details
#' This is a general purpose function that powers all the functions
#' in the package that produce scatter plots.
#'
#' @param data A Standard Person Query dataset in the form of a data frame.
#' @param metric_x Character string containing the name of the metric,
#' e.g. "Collaboration_hours"
#' @param metric_y Character string containing the name of the metric,
#' e.g. "Collaboration_hours"
#' @param hrvar HR Variable by which to split metrics, defaults to "Organization"
#' but accepts any character vector, e.g. "LevelDesignation"
#' @param mingroup Numeric value setting the privacy threshold / minimum group size. Defaults to 5.
#' @param return Character vector specifying what to return, defaults to "plot".
#' Valid inputs are "plot" and "table".
#'
#' @import dplyr
#' @import ggplot2
#' @import scales
#'
#' @family Visualization
#' @family Flexible
#'
#' @examples
#' create_scatter(sq_data,
#' "Internal_network_size",
#' "External_network_size",
#' "Organization")
#'
#' create_scatter(sq_data,
#' "Generated_workload_call_hours",
#' "Generated_workload_email_hours",
#' "Organization", mingroup = 100, return = "plot")
#'
#' @return
#' Returns a 'ggplot' object by default, where 'plot' is passed in `return`.
#' When 'table' is passed, a summary table is returned as a data frame.
#'
#' @export
create_scatter <- function(data,
metric_x,
metric_y,
hrvar = "Organization",
mingroup = 5,
return = "plot"){
## Check inputs
required_variables <- c(hrvar,
metric_x,
metric_y,
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
## Extract values violating privacy threshold
violate_thres_chr <-
data %>%
group_by(!!sym(hrvar)) %>%
summarise(n = n_distinct(PersonId)) %>%
filter(n < mingroup) %>%
pull(!!sym(hrvar))
## Clean metric names
clean_x <- us_to_space(metric_x)
clean_y <- us_to_space(metric_y)
myTable <-
data %>%
filter(!(!!sym(hrvar) %in% violate_thres_chr)) %>%
group_by(PersonId, !!sym(hrvar)) %>%
summarise_at(vars(!!sym(metric_x),
!!sym(metric_y)),
~mean(.)) %>%
ungroup()
plot_object <-
myTable %>%
ggplot(aes(x = !!sym(metric_x),
y = !!sym(metric_y),
colour = !!sym(hrvar))) +
geom_point(alpha = 0.5) +
labs(title = paste0(clean_x, " and ", clean_y),
subtitle = paste("Distribution of employees by", tolower(camel_clean(hrvar))),
caption = extract_date_range(data, return = "text")) +
xlab(clean_x) +
ylab(clean_y) +
theme_wpa_basic()
myTable_return <-
myTable %>%
group_by(!!sym(hrvar)) %>%
summarise_at(vars(!!sym(metric_x),
!!sym(metric_y)),
~mean(.))
if(return == "table"){
return(myTable_return)
} else if(return == "plot"){
return(plot_object)
} else {
stop("Please enter a valid input for `return`.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/create_scatter.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Horizontal stacked bar plot for any metric
#'
#' @description
#' Creates a sum total calculation using selected metrics,
#' where the typical use case is to create different definitions of
#' collaboration hours.
#' Returns a stacked bar plot by default.
#' Additional options available to return a summary table.
#'
#' @template spq-params
#' @param metrics A character vector to specify variables to be used
#' in calculating the "Total" value, e.g. c("Meeting_hours", "Email_hours").
#' The order of the variable names supplied determine the order in which they
#' appear on the stacked plot.
#' @param return Character vector specifying what to return, defaults to "plot".
#' Valid inputs are "plot" and "table".
#' @param stack_colours
#' A character vector to specify the colour codes for the stacked bar charts.
#' @param percent Logical value to determine whether to show labels as
#' percentage signs. Defaults to `FALSE`.
#' @param plot_title String. Option to override plot title.
#' @param plot_subtitle String. Option to override plot subtitle.
#' @param legend_lab String. Option to override legend title/label. Defaults to
#' `NULL`, where the metric name will be populated instead.
#' @param rank String specifying how to rank the bars. Valid inputs are:
#' - `"descending"` - ranked highest to lowest from top to bottom (default).
#' - `"ascending"` - ranked lowest to highest from top to bottom.
#' - `NULL` - uses the original levels of the HR attribute.
#' @param xlim An option to set max value in x axis.
#' @param text_just `r lifecycle::badge('experimental')` A numeric value
#' controlling for the horizontal position of the text labels. Defaults to
#' 0.5.
#' @param text_colour `r lifecycle::badge('experimental')` String to specify
#' colour to use for the text labels. Defaults to `"#FFFFFF"`.
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#' @importFrom stats reorder
#'
#' @family Visualization
#' @family Flexible
#'
#' @return
#' Returns a 'ggplot' object by default, where 'plot' is passed in `return`.
#' When 'table' is passed, a summary table is returned as a data frame.
#'
#' @examples
#' sq_data %>%
#' create_stacked(hrvar = "LevelDesignation",
#' metrics = c("Meeting_hours", "Email_hours"),
#' return = "plot")
#'
#' sq_data %>%
#' create_stacked(hrvar = "FunctionType",
#' metrics = c("Meeting_hours",
#' "Email_hours",
#' "Call_hours",
#' "Instant_Message_hours"),
#' return = "plot",
#' rank = "ascending")
#'
#' sq_data %>%
#' create_stacked(hrvar = "FunctionType",
#' metrics = c("Meeting_hours",
#' "Email_hours",
#' "Call_hours",
#' "Instant_Message_hours"),
#' return = "table")
#'
#' @export
create_stacked <- function(data,
hrvar = "Organization",
metrics = c("Meeting_hours",
"Email_hours"),
mingroup = 5,
return = "plot",
stack_colours = c("#1d627e",
"#34b1e2",
"#b4d5dd",
"#adc0cb"),
percent = FALSE,
plot_title = "Collaboration Hours",
plot_subtitle = paste("Average by", tolower(camel_clean(hrvar))),
legend_lab = NULL,
rank = "descending",
xlim = NULL,
text_just = 0.5,
text_colour = "#FFFFFF"
){
## Check inputs
required_variables <- c("Date",
metrics,
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
## Handle `legend_lab`
if(is.null(legend_lab)){
legend_lab <- gsub("_", " ", metrics)
}
## Handling NULL values passed to hrvar
if(is.null(hrvar)){
data <- totals_col(data)
hrvar <- "Total"
}
n_count <-
data %>%
rename(group = !!sym(hrvar)) %>% # Rename HRvar to `group`
group_by(group) %>%
summarise(Employee_Count = n_distinct(PersonId))
## Person level table
myTable <-
data %>%
rename(group = !!sym(hrvar)) %>% # Rename HRvar to `group`
select(PersonId, group, metrics) %>%
group_by(PersonId, group) %>%
summarise_at(vars(metrics), ~mean(.)) %>%
ungroup() %>%
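    # Row-wise sum of the selected metrics gives each person's total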
mutate(Total = select(., metrics) %>% apply(1, sum)) %>%
left_join(n_count, by = "group") %>%
# Keep only groups above privacy threshold
filter(Employee_Count >= mingroup)
myTableReturn <-
myTable %>%
group_by(group) %>%
summarise_at(vars(metrics, Total), ~mean(.)) %>%
left_join(n_count, by = "group")
plot_table <-
myTable %>%
select(PersonId, group, metrics, Total) %>%
gather(Metric, Value, -PersonId, -group)
totalTable <-
plot_table %>%
filter(Metric == "Total") %>%
group_by(group) %>%
summarise(Total = mean(Value))
myTable_legends <-
n_count %>%
filter(Employee_Count >= mingroup) %>%
mutate(Employee_Count = paste("n=",Employee_Count)) %>%
left_join(totalTable, by = "group")
## Get maximum value
if (is.null(xlim)) {
location <- max(myTable_legends$Total)
}
else if(is.numeric(xlim)) {
location <- xlim
}
else {
stop("Invalid return to `xlim`")
}
## Remove max from axis labels ------------------------------------------
  max_blank <- function(x){
    as.character(c(x[seq_len(length(x) - 1)], ""))
  }
## Remove max from axis labels, but with percentages ---------------------
  max_blank_percent <- function(x){
    x <- scales::percent(x)
    as.character(c(x[seq_len(length(x) - 1)], ""))
  }
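  ## Negate the mean so that `reorder()` ranks ascending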
invert_mean <- function(x){
mean(x) * -1
}
## Create plot -----------------------------------------------------------
plot_object <-
plot_table %>%
filter(Metric != "Total") %>%
mutate(Metric = factor(Metric, levels = rev(metrics))) %>%
group_by(group, Metric) %>%
summarise_at(vars(Value), ~mean(.)) %>%
# Conditional ranking based on `rank` argument
{ if(is.null(rank)){
ggplot(., aes(x = group, y = Value, fill = Metric))
} else if(rank == "descending"){
ggplot(., aes(x = stats::reorder(group, Value, mean), y = Value, fill = Metric))
} else if(rank == "ascending"){
ggplot(., aes(x = stats::reorder(group, Value, invert_mean), y = Value, fill = Metric))
} else {
stop("Invalid return to `rank`")
}
} +
geom_bar(position = "stack", stat = "identity") +
{ if(percent == FALSE){
geom_text(aes(label = round(Value, 1)),
position = position_stack(vjust = text_just),
color = text_colour,
fontface = "bold")
} else if(percent == TRUE){
geom_text(aes(label = scales::percent(Value, accuracy = 0.1)),
position = position_stack(vjust = text_just),
color = text_colour,
fontface = "bold")
}
} +
{ if(percent == FALSE){
scale_y_continuous(expand = c(.01, 0),
limits = c(0, location * 1.3),
labels = max_blank,
position = "right")
} else if(percent == TRUE){
scale_y_continuous(expand = c(.01, 0),
limits = c(0, location * 1.3),
labels = max_blank_percent,
position = "right")
}
} +
annotate("text",
x = myTable_legends$group,
y = location * 1.15,
label = myTable_legends$Employee_Count,
size = 3) +
annotate("rect",
xmin = 0.5,
xmax = length(myTable_legends$group) + 0.5,
ymin = location * 1.05,
ymax = location * 1.25,
alpha = .2) +
annotate(x=length(myTable_legends$group) + 0.8,
xend=length(myTable_legends$group) + 0.8,
y = 0,
yend = location* 1.04,
colour = "black",
lwd = 0.75,
geom = "segment") +
scale_fill_manual(name="",
values = stack_colours,
breaks = metrics,
labels = legend_lab) +
coord_flip() +
theme_wpa_basic() +
theme(axis.line = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()) +
labs(title = plot_title,
subtitle = plot_subtitle,
x = hrvar,
y = "Average weekly hours",
caption = extract_date_range(data, return = "text"))
# Return options ---------------------------------------------------------
if(return == "table"){
myTableReturn
} else if(return == "plot"){
return(plot_object)
} else {
stop("Please enter a valid input for `return`.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/create_stacked.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Create a line chart that tracks metrics over time with a 4-week
#' rolling average
#'
#' @description
#' `r lifecycle::badge('experimental')`
#'
#' Create a two-series line chart that visualizes a selected metric over time for
#' the selected population, with one of the series being a four-week rolling
#' average.
#'
#' @param data A Standard Person Query dataset in the form of a data frame.
#' @param metric Character string containing the name of the metric,
#' e.g. "Collaboration_hours"
#' @param plot_title An option to override plot title.
#' @param plot_subtitle An option to override plot subtitle.
#' @param percent Logical value to determine whether to show labels as
#' percentage signs. Defaults to `FALSE`.
#'
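#' @return
#' A 'ggplot' object showing the weekly average of the selected metric plotted
#' alongside its four-week rolling average.
#'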
#' @examples
#' sq_data %>%
#' create_tracking(
#' metric = "Collaboration_hours",
#' percent = FALSE
#' )
#'
#' @family Visualization
#' @family Flexible
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @importFrom tidyr pivot_longer
#' @importFrom tidyr drop_na
#'
#' @export
create_tracking <- function(data,
metric,
plot_title = us_to_space(metric),
plot_subtitle = "Measure over time",
percent = FALSE){
data$Date <- as.Date(data$Date, "%m/%d/%Y")
min_date <- data %>% extract_date_range() %>% pull(Start)
max_date <- data %>% extract_date_range() %>% pull(End)
  # Declare variables to avoid R CMD check 'no visible binding' notes
metrics <- NULL
`4 week rolling average` <- NULL
`Weekly average` <- NULL
data %>%
group_by(Date) %>%
summarise(across(.cols = metric,
.fns = ~mean(., na.rm = TRUE)),
.groups = "drop") %>%
mutate(
lag0 = lag(!!sym(metric), 0),
lag1 = lag(!!sym(metric), 1),
lag2 = lag(!!sym(metric), 2),
lag3 = lag(!!sym(metric), 3)
) %>%
mutate(`4 week rolling average` = select(., paste0("lag", 0:3)) %>%
apply(1, function(x) mean(x, na.rm = TRUE))) %>% # Use all available data
select(-paste0("lag", 0:3)) %>%
rename(`Weekly average` = metric) %>%
pivot_longer(cols = c(`Weekly average`, `4 week rolling average`),
names_to = "metrics",
values_to = "value") %>%
drop_na(value) %>%
ggplot(aes(x = Date,
y = value,
colour = metrics)) +
geom_line(size = 1) +
scale_colour_manual(
values = c(
"Weekly average" = rgb2hex(67, 189, 211),
"4 week rolling average" = rgb2hex(0, 82, 101)),
labels = us_to_space,
guide = guide_legend(reverse = TRUE)
) +
{ if(percent == FALSE){
scale_y_continuous(
limits = c(0, NA)
)
} else if(percent == TRUE){
scale_y_continuous(
limits = c(0, 1),
labels = scales::percent
)
}} +
scale_x_date(position = "top",
limits = c(min_date, max_date),
date_breaks = "2 weeks") +
theme_wpa_basic() +
theme(axis.line = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank(),
panel.grid.major.x = element_line(color="gray"),
panel.grid.major.y = element_line(colour = "#D9E7F7", size = 5)) +
labs(
title = plot_title,
subtitle = plot_subtitle,
caption = extract_date_range(data, return = "text")
)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/create_tracking.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Heat mapped horizontal bar plot over time for any metric
#'
#' @description
#' Provides a week by week view of a selected Viva Insights metric. By
#' default returns a week by week heatmap bar plot, highlighting the points in
#' time with most activity. Additional options available to return a summary
#' table.
#'
#' @template spq-params
#' @param metric Character string containing the name of the metric,
#' e.g. "Collaboration_hours"
#' @param palette Character vector containing colour codes, ranked from the
#' lowest value to the highest value. This is passed directly to
#' `ggplot2::scale_fill_gradientn()`.
#' @param return Character vector specifying what to return, defaults to
#' `"plot"`.
#' Valid inputs are "plot" and "table".
#' @param legend_title String to be used as the title of the legend. Defaults to
#' `"Hours"`.
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#'
#' @family Visualization
#' @family Flexible
#' @family Time-series
#'
#' @examples
#' create_trend(sq_data, metric = "Collaboration_hours", hrvar = "LevelDesignation")
#'
#' # custom colours
#' create_trend(
#' sq_data,
#' metric = "Collaboration_hours",
#' hrvar = "LevelDesignation",
#' palette = c(
#' "#FB6107",
#' "#F3DE2C",
#' "#7CB518",
#' "#5C8001"
#' )
#' )
#'
#' @return
#' Returns a 'ggplot' object by default, where 'plot' is passed in `return`.
#' When 'table' is passed, a summary table is returned as a data frame.
#'
#' @export
create_trend <- function(data,
metric,
hrvar = "Organization",
mingroup = 5,
palette = c("steelblue4",
"aliceblue",
"white",
"mistyrose1",
"tomato1"),
return = "plot",
legend_title = "Hours"){
## Check inputs
required_variables <- c("Date",
metric,
"PersonId")
## Error message if variables are not present
## Nothing happens if all present
data %>%
check_inputs(requirements = required_variables)
## Handling NULL values passed to hrvar
if(is.null(hrvar)){
data <- totals_col(data)
hrvar <- "Total"
}
## Clean metric name
clean_nm <- us_to_space(metric)
myTable <-
data %>%
mutate(Date = as.Date(Date, "%m/%d/%Y")) %>%
rename(group = !!sym(hrvar)) %>% # Rename HRvar to `group`
select(PersonId, Date, group, !!sym(metric)) %>%
group_by(group) %>%
mutate(Employee_Count = n_distinct(PersonId)) %>%
filter(Employee_Count >= mingroup) # Keep only groups above privacy threshold
myTable <-
myTable %>%
group_by(Date, group) %>%
summarize(Employee_Count = mean(Employee_Count, na.rm = TRUE),
!!sym(metric) := mean(!!sym(metric), na.rm = TRUE))
myTable_plot <- myTable %>% select(Date, group, !!sym(metric))
myTable_return <- myTable_plot %>% tidyr::spread(Date, !!sym(metric))
plot_object <-
myTable_plot %>%
ggplot(aes(x = Date , y = group , fill = !!sym(metric))) +
geom_tile(height=.5) +
scale_x_date(position = "top") +
scale_fill_gradientn(name = legend_title,
colours = palette) +
theme_wpa_basic() +
theme(axis.line.y = element_blank(), axis.title.y = element_blank()) +
labs(title = clean_nm,
subtitle = paste("Hotspots by", tolower(camel_clean(hrvar)))) +
xlab("Date") +
ylab(hrvar) +
labs(caption = extract_date_range(data, return = "text"))
if(return == "table"){
myTable_return
} else if(return == "plot"){
plot_object
} else {
stop("Please enter a valid input for `return`.")
}
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/create_trend.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Convert a numeric variable for hours into categorical
#'
#' @description
#' Supply a numeric variable, e.g. `Collaboration_hours`, and return a character
#' vector.
#'
#' @details
#' This is used within `create_dist()` for numeric to categorical conversion.
#'
#' @param metric A numeric variable representing hours.
#' @param cuts A numeric vector of minimum length 2 to represent the
#' cut points required. The minimum and maximum values provided in the vector
#' are inclusive.
#' @param unit String to specify the unit of the labels. Defaults to "hours".
#' @param lbound Numeric. Specifies the lower bound (inclusive) value for the
#' minimum label. Defaults to 0.
#' @param ubound Numeric. Specifies the upper bound (inclusive) value for the
#' maximum label. Defaults to 100.
#'
#' @family Support
#'
#' @return
#' Character vector representing a converted categorical variable, appended
#' with the label of the unit. See `examples` for more information.
#'
#' @examples
#' # Direct use
#' cut_hour(1:30, cuts = c(15, 20, 25))
#'
#' # Use on a query
#' cut_hour(sq_data$Collaboration_hours, cuts = c(10, 15, 20))
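#'
#' # Illustrative sketch: custom unit label and explicit bounds
#' # (the cut values here are arbitrary)
#' cut_hour(1:30, cuts = c(10, 20), unit = "hrs", lbound = 0, ubound = 40)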
#'
#' @export
cut_hour <- function(metric,
cuts,
unit = "hours",
lbound = 0,
ubound = 100){
cuts <- unique(cuts) # No duplicates allowed
ncuts <- length(cuts)
if(ncuts < 2){
stop("Please provide a numeric vector of at least length 2 to `cuts`")
}
# Extract min, max, and middle values
mincut <- min(cuts, na.rm = TRUE)
maxcut <- max(cuts, na.rm = TRUE)
midcut <- cuts[!cuts %in% mincut] # Excludes mincut only
midcut_min_1 <- cuts[match(midcut, cuts) - 1] # one value smaller
mincut_2 <- midcut_min_1[[1]] # second smallest cut
# Min and max values of `metric`
minval <- min(metric, na.rm = TRUE)
maxval <- max(metric, na.rm = TRUE)
# Warn if smaller lbound or larger ubound
if(minval < lbound){
warning("`lbound` does not capture the smallest value in `metric`. ",
"Values smaller than `lbound` will be classified as NA. ",
"Adjusting `lbound` is recommended.")
}
if(maxval > ubound){
warning("`ubound` does not capture the largest value in `metric`. ",
"Values larger than `ubound` will be classified as NA. ",
"Adjusting `ubound` is recommended.")
}
# Take smallest or largest of both values
lbound <- min(c(mincut, lbound), na.rm = TRUE)
ubound <- max(c(maxcut, ubound), na.rm = TRUE)
# Individual labels
label_mincut <- paste0("< ", mincut, " ", unit)
label_maxcut <- paste0(maxcut, "+ ", unit)
label_midcut <- paste0(midcut_min_1, " - ", midcut, " ", unit)
# All labels
all_labels <- unique(c(label_mincut, label_midcut, label_maxcut))
# If `lbound` or `ubound` conflict with cuts
if(lbound == mincut){
all_labels <- all_labels[all_labels != label_mincut]
}
if(ubound == maxcut){
all_labels <- all_labels[all_labels != label_maxcut]
}
# Debugging chunk ---------------------------------------------------------
# list(
# breaks = unique(c(lbound, cuts, ubound)),
# lbound,
# ubound,
# all_labels
# )
# Return result
cut(metric,
breaks = unique(c(lbound, cuts, ubound)),
include.lowest = TRUE,
labels = all_labels)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/cut_hour.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Sample Standard Person Query dataset for Data Validation
#'
#' @description
#' A dataset generated from a Standard Person Query from advanced insights in
#' Viva Insights. Note that this is largely interchangeable with a **Ways of
#' Working Assessment** query, with the exception of some additional variables
#' and the different variable names used for `Collaboration_hours` and
#' `Instant_Message_hours`.
#'
#' @family Data
#'
#' @return data frame.
#'
#' @format A data frame with 897 rows and 69 variables:
#' \describe{
#' \item{PersonId}{ }
#' \item{Date}{ }
#' \item{Workweek_span}{ }
#' \item{Meetings_with_skip_level}{ }
#' \item{Meeting_hours_with_skip_level}{ }
#' \item{Generated_workload_email_hours}{ }
#' \item{Generated_workload_email_recipients}{ }
#' \item{Generated_workload_instant_messages_hours}{ }
#' \item{Generated_workload_instant_messages_recipients}{ }
#' \item{Generated_workload_call_hours}{ }
#' \item{Generated_workload_call_participants}{ }
#' \item{Generated_workload_calls_organized}{ }
#' \item{External_network_size}{ }
#' \item{Internal_network_size}{ }
#' \item{Networking_outside_company}{ }
#' \item{Networking_outside_organization}{ }
#' \item{After_hours_meeting_hours}{ }
#' \item{Open_1_hour_block}{ }
#' \item{Open_2_hour_blocks}{ }
#' \item{Total_focus_hours}{ }
#' \item{Low_quality_meeting_hours}{ }
#' \item{Total_emails_sent_during_meeting}{ }
#' \item{Meetings}{ }
#' \item{Meeting_hours}{ }
#' \item{Conflicting_meeting_hours}{ }
#' \item{Multitasking_meeting_hours}{ }
#' \item{Redundant_meeting_hours__lower_level_}{ }
#' \item{Redundant_meeting_hours__organizational_}{ }
#' \item{Time_in_self_organized_meetings}{ }
#' \item{Meeting_hours_during_working_hours}{ }
#' \item{Generated_workload_meeting_attendees}{ }
#' \item{Generated_workload_meeting_hours}{ }
#' \item{Generated_workload_meetings_organized}{ }
#' \item{Manager_coaching_hours_1_on_1}{ }
#' \item{Meetings_with_manager}{ }
#' \item{Meeting_hours_with_manager}{ }
#' \item{Meetings_with_manager_1_on_1}{ }
#' \item{Meeting_hours_with_manager_1_on_1}{ }
#' \item{After_hours_email_hours}{ }
#' \item{Emails_sent}{ }
#' \item{Email_hours}{ }
#' \item{Working_hours_email_hours}{ }
#' \item{After_hours_instant_messages}{ }
#' \item{Instant_messages_sent}{ }
#' \item{Instant_Message_hours}{ }
#' \item{Working_hours_instant_messages}{ }
#' \item{After_hours_collaboration_hours}{ }
#' \item{Collaboration_hours}{ }
#' \item{Collaboration_hours_external}{ }
#' \item{Working_hours_collaboration_hours}{ }
#' \item{After_hours_in_calls}{ }
#' \item{Total_calls}{ }
#' \item{Call_hours}{ }
#' \item{Working_hours_in_calls}{ }
#' \item{Domain}{ }
#' \item{FunctionType}{ }
#' \item{LevelDesignation}{ }
#' \item{Layer}{ }
#' \item{Region}{ }
#' \item{Organization}{ }
#' \item{zId}{ }
#' \item{attainment}{ }
#' \item{TimeZone}{ }
#' \item{HourlyRate}{ }
#' \item{IsInternal}{ }
#' \item{IsActive}{ }
#' \item{HireDate}{ }
#' \item{WorkingStartTimeSetInOutlook}{ }
#' \item{WorkingEndTimeSetInOutlook}{ }
#'
#' ...
#' }
"dv_data"
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/dv_data.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Sample Hourly Collaboration data
#'
#' @description
#' A sample dataset representing an Hourly Collaboration query. The data is
#' grouped by week and contains columns for unscheduled calls, IMs sent, emails
#' sent, and meetings. There are 24 columns per collaboration signal,
#' representing each hour of the day.
#'
#' @family Data
#'
#' @return data frame.
#'
#' @format A data frame with 2000 rows and 105 variables:
#' \describe{
#' \item{PersonId}{ }
#' \item{Date}{ }
#' \item{Unscheduled_calls_23_24}{ }
#' \item{Unscheduled_calls_22_23}{ }
#' \item{Unscheduled_calls_21_22}{ }
#' \item{Unscheduled_calls_20_21}{ }
#' \item{Unscheduled_calls_19_20}{ }
#' \item{Unscheduled_calls_18_19}{ }
#' \item{Unscheduled_calls_17_18}{ }
#' \item{Unscheduled_calls_16_17}{ }
#' \item{Unscheduled_calls_15_16}{ }
#' \item{Unscheduled_calls_14_15}{ }
#' \item{Unscheduled_calls_13_14}{ }
#' \item{Unscheduled_calls_12_13}{ }
#' \item{Unscheduled_calls_11_12}{ }
#' \item{Unscheduled_calls_10_11}{ }
#' \item{Unscheduled_calls_09_10}{ }
#' \item{Unscheduled_calls_08_09}{ }
#' \item{Unscheduled_calls_07_08}{ }
#' \item{Unscheduled_calls_06_07}{ }
#' \item{Unscheduled_calls_05_06}{ }
#' \item{Unscheduled_calls_04_05}{ }
#' \item{Unscheduled_calls_03_04}{ }
#' \item{Unscheduled_calls_02_03}{ }
#' \item{Unscheduled_calls_01_02}{ }
#' \item{Unscheduled_calls_00_01}{ }
#' \item{IMs_sent_23_24}{ }
#' \item{IMs_sent_22_23}{ }
#' \item{IMs_sent_21_22}{ }
#' \item{IMs_sent_20_21}{ }
#' \item{IMs_sent_19_20}{ }
#' \item{IMs_sent_18_19}{ }
#' \item{IMs_sent_17_18}{ }
#' \item{IMs_sent_16_17}{ }
#' \item{IMs_sent_15_16}{ }
#' \item{IMs_sent_14_15}{ }
#' \item{IMs_sent_13_14}{ }
#' \item{IMs_sent_12_13}{ }
#' \item{IMs_sent_11_12}{ }
#' \item{IMs_sent_10_11}{ }
#' \item{IMs_sent_09_10}{ }
#' \item{IMs_sent_08_09}{ }
#' \item{IMs_sent_07_08}{ }
#' \item{IMs_sent_06_07}{ }
#' \item{IMs_sent_05_06}{ }
#' \item{IMs_sent_04_05}{ }
#' \item{IMs_sent_03_04}{ }
#' \item{IMs_sent_02_03}{ }
#' \item{IMs_sent_01_02}{ }
#' \item{IMs_sent_00_01}{ }
#' \item{Emails_sent_23_24}{ }
#' \item{Emails_sent_22_23}{ }
#' \item{Emails_sent_21_22}{ }
#' \item{Emails_sent_20_21}{ }
#' \item{Emails_sent_19_20}{ }
#' \item{Emails_sent_18_19}{ }
#' \item{Emails_sent_17_18}{ }
#' \item{Emails_sent_16_17}{ }
#' \item{Emails_sent_15_16}{ }
#' \item{Emails_sent_14_15}{ }
#' \item{Emails_sent_13_14}{ }
#' \item{Emails_sent_12_13}{ }
#' \item{Emails_sent_11_12}{ }
#' \item{Emails_sent_10_11}{ }
#' \item{Emails_sent_09_10}{ }
#' \item{Emails_sent_08_09}{ }
#' \item{Emails_sent_07_08}{ }
#' \item{Emails_sent_06_07}{ }
#' \item{Emails_sent_05_06}{ }
#' \item{Emails_sent_04_05}{ }
#' \item{Emails_sent_03_04}{ }
#' \item{Emails_sent_02_03}{ }
#' \item{Emails_sent_01_02}{ }
#' \item{Emails_sent_00_01}{ }
#' \item{Meetings_23_24}{ }
#' \item{Meetings_22_23}{ }
#' \item{Meetings_21_22}{ }
#' \item{Meetings_20_21}{ }
#' \item{Meetings_19_20}{ }
#' \item{Meetings_18_19}{ }
#' \item{Meetings_17_18}{ }
#' \item{Meetings_16_17}{ }
#' \item{Meetings_15_16}{ }
#' \item{Meetings_14_15}{ }
#' \item{Meetings_13_14}{ }
#' \item{Meetings_12_13}{ }
#' \item{Meetings_11_12}{ }
#' \item{Meetings_10_11}{ }
#' \item{Meetings_09_10}{ }
#' \item{Meetings_08_09}{ }
#' \item{Meetings_07_08}{ }
#' \item{Meetings_06_07}{ }
#' \item{Meetings_05_06}{ }
#' \item{Meetings_04_05}{ }
#' \item{Meetings_03_04}{ }
#' \item{Meetings_02_03}{ }
#' \item{Meetings_01_02}{ }
#' \item{Meetings_00_01}{ }
#' \item{LevelDesignation}{ }
#' \item{Organization}{ }
#' \item{TimeZone}{ }
#' \item{IsActive}{ }
#' \item{WorkingStartTimeSetInOutlook}{ }
#' \item{WorkingEndTimeSetInOutlook}{ }
#' \item{WorkingDaysSetInOutlook}{ }
#'
#' ...
#' }
"em_data"
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/em_data.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Distribution of Email Hours as a 100% stacked bar
#'
#' @description
#' Analyze Email Hours distribution.
#' Returns a stacked bar plot by default.
#' Additional options available to return a table with distribution elements.
#'
#' @inheritParams create_dist
#' @inherit create_dist return
#'
#' @family Visualization
#' @family Emails
#'
#' @examples
#' # Return plot
#' email_dist(sq_data, hrvar = "Organization")
#'
#' # Return summary table
#' email_dist(sq_data, hrvar = "Organization", return = "table")
#'
#' # Return result with custom breaks
#' email_dist(sq_data, hrvar = "LevelDesignation", cut = c(4, 7, 9))
#'
#' @export
email_dist <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot",
cut = c(5, 10, 15)) {
create_dist(data = data,
metric = "Email_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return,
cut = cut)
}
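
# Sketch (illustrative): email_dist() is a thin wrapper, equivalent to calling
# create_dist() directly with metric = "Email_hours":
# create_dist(sq_data, metric = "Email_hours", hrvar = "Organization",
#             mingroup = 5, return = "plot", cut = c(5, 10, 15))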
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/email_dist.R
|
#' @title Distribution of Email Hours (Fizzy Drink plot)
#'
#' @description
#' Analyze weekly email hours distribution, and return
#' a 'fizzy' scatter plot by default.
#' Additional options available to return a table with distribution elements.
#'
#' @inheritParams create_fizz
#' @inherit create_fizz return
#'
#' @family Visualization
#' @family Emails
#'
#' @examples
#'
#' # Return plot
#' email_fizz(sq_data, hrvar = "Organization", return = "plot")
#'
#' # Return summary table
#' email_fizz(sq_data, hrvar = "Organization", return = "table")
#'
#' @export
email_fizz <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot"){
create_fizz(data = data,
metric = "Email_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/email_fizz.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Email Time Trend - Line Chart
#'
#' @description
#' Provides a week by week view of email time, visualised as line charts.
#' By default returns a line chart for email hours,
#' with a separate panel per value in the HR attribute.
#' Additional options available to return a summary table.
#'
#' @inheritParams create_line
#' @inherit create_line return
#'
#' @family Visualization
#' @family Emails
#'
#' @examples
#' # Return a line plot
#' email_line(sq_data, hrvar = "LevelDesignation")
#'
#' # Return summary table
#' email_line(sq_data, hrvar = "LevelDesignation", return = "table")
#'
#' @export
email_line <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot"){
## Inherit arguments
create_line(data = data,
metric = "Email_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/email_line.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Email Hours Ranking
#'
#' @description
#' This function scans a standard query output for groups with high levels of
#' 'Weekly Email Collaboration'. Returns a plot by default, with an option to
#' return a table with all groups (across multiple HR attributes) ranked by
#' hours of email collaboration.
#'
#' @details
#' Uses the metric `Email_hours`.
#' See `create_rank()` for applying the same analysis to a different metric.
#'
#' @inheritParams create_rank
#' @inherit create_rank return
#'
#' @family Visualization
#' @family Emails
#'
#' @examples
#' # Return rank table
#' email_rank(
#' data = sq_data,
#' return = "table"
#' )
#'
#' # Return plot
#' email_rank(
#' data = sq_data,
#' return = "plot"
#' )
#'
#' @export
email_rank <- function(data,
hrvar = extract_hr(data),
mingroup = 5,
mode = "simple",
plot_mode = 1,
return = "plot"){
data %>%
create_rank(metric = "Email_hours",
hrvar = hrvar,
mingroup = mingroup,
mode = mode,
plot_mode = plot_mode,
return = return)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/email_rank.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Email Summary
#'
#' @description
#' Provides an overview analysis of weekly email hours.
#' Returns a bar plot showing average weekly email hours by default.
#' Additional options available to return a summary table.
#'
#' @inheritParams create_bar
#' @inherit create_bar return
#'
#' @family Visualization
#' @family Emails
#'
#' @examples
#' # Return a ggplot bar chart
#' email_summary(sq_data, hrvar = "LevelDesignation")
#'
#' # Return a summary table
#' email_summary(sq_data, hrvar = "LevelDesignation", return = "table")
#'
#' @export
email_summary <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot"){
create_bar(data = data,
metric = "Email_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return,
bar_colour = "darkblue")
}
#' @rdname email_summary
#' @export
email_sum <- email_summary
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/email_summary.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Email Hours Time Trend
#'
#' @description Provides a week by week view of email time.
#' By default returns a week by week heatmap, highlighting the points in time
#' with most activity.
#' Additional options available to return a summary table.
#'
#' @details
#' Uses the metric `Email_hours`.
#'
#' @inheritParams create_trend
#' @inherit create_trend return
#'
#' @family Visualization
#' @family Emails
#'
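#' @examples
#' # Example usage, following the pattern of the other email_* functions:
#' email_trend(sq_data, hrvar = "LevelDesignation")
#'
#' # Return a summary table instead of the default heatmap
#' email_trend(sq_data, hrvar = "LevelDesignation", return = "table")
#'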
#' @export
email_trend <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot"){
create_trend(data,
metric = "Email_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/email_trend.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Export 'wpa' outputs to CSV, clipboard, or save as images
#'
#' @description
#' A general use function to export 'wpa' outputs to CSV, clipboard, or save as
#' images. By default, `export()` copies a data frame to the clipboard. If the
#' input is a 'ggplot' object, the default behaviour is to export a PNG.
#'
#' @author Martin Chan <martin.chan@@microsoft.com>
#'
#' @param x Data frame or 'ggplot' object to be passed through.
#' @param method Character string specifying the method of export.
#' Valid inputs include:
#' - `"clipboard"` (default if input is data frame)
#' - `"csv"`
#' - `"png"` (default if input is 'ggplot' object)
#' - `"svg"`
#' - `"jpeg"`
#' - `"pdf"`
#' @param path If exporting a file, enter the path and the desired file name,
#' _excluding the file extension_. For example, `"Analysis/SQ Overview"`.
#' @param timestamp Logical value specifying whether to include a timestamp in
#' the file name. Defaults to `TRUE`.
#' @param width Width of the plot in inches. Defaults to 12.
#' @param height Height of the plot in inches. Defaults to 9.
#'
#' @return
#' A different output is returned depending on the value passed to the `method`
#' argument:
#' - `"clipboard"`: no return - data frame is saved to clipboard.
#' - `"csv"`: CSV file containing data frame is saved to specified path.
#' - `"png"`: PNG file containing 'ggplot' object is saved to specified path.
#' - `"svg"`: SVG file containing 'ggplot' object is saved to specified path.
#' - `"jpeg"`: JPEG file containing 'ggplot' object is saved to specified path.
#' - `"pdf"`: PDF file containing 'ggplot' object is saved to specified path.
#'
#' @importFrom utils write.csv
#'
#' @family Import and Export
#'
#' @export
export <- function(x,
method = "clipboard",
path = "wpa export",
timestamp = TRUE,
width = 12,
height = 9){
## Create timestamped path (if applicable)
if(timestamp == TRUE){
newpath <- paste(path, wpa::tstamp())
} else {
newpath <- path
}
## Force method to png if is.ggplot and method not appropriate
  if(is.ggplot(x) && method %in% c("clipboard", "csv")){
message("Input is a 'ggplot' object. Defaulted to exporting as PNG...")
method <- "png"
}
## Main export function
if(method == "clipboard"){
copy_df(x)
message(c("Data frame copied to clipboard.\n",
"You may paste the contents directly to Excel."))
## Export option: CSV
} else if(method == "csv"){
newpath <- paste0(newpath, ".csv")
write.csv(x = x, file = newpath)
## Export option: any ggsave methods
} else if(method %in% c("png", "svg", "jpeg", "pdf")){
newpath <- paste0(newpath, ".", method)
ggsave(filename = newpath, plot = x, width = width, height = height)
} else {
stop("Please check inputs. Enter `?export` for more details.")
}
}
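
# Usage sketch (assumes 'wpa' is loaded; paths are illustrative):
# p <- email_summary(sq_data)                       # a 'ggplot' object
# export(p, method = "png", path = "email summary") # saves a timestamped PNG
# export(sq_data, method = "csv", path = "sq data",
#        timestamp = FALSE)                         # saves "sq data.csv"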
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/export.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title External Collaboration Hours Time Trend - Line Chart
#'
#' @description
#' Provides a week by week view of external collaboration time, visualised as a
#' line chart. By default returns a separate panel per value in the HR
#' attribute. Additional options available to return a summary table.
#'
#' @details
#' Uses the metric `Collaboration_hours_external`.
#'
#' @seealso [create_line()] for applying the same analysis to a different metric.
#'
#' @inheritParams create_line
#' @inherit create_line return
#'
#' @family Visualization
#' @family External Collaboration
#'
#' @examples
#' # Return a line plot
#' external_line(sq_data, hrvar = "LevelDesignation")
#'
#' # Return summary table
#' external_line(sq_data, hrvar = "LevelDesignation", return = "table")
#'
#' @export
external_line <- function(data,
hrvar = "Organization",
                          mingroup = 5,
return = "plot"){
  # Rename metric
  plot_data <-
    data %>%
    mutate(External_collaboration_hours = Collaboration_hours_external)
  ## Inherit arguments
create_line(data = plot_data,
metric = "External_collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return)
}
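
# Note: external_line(), external_dist(), external_fizz() and external_rank()
# all share the same pattern: rename `Collaboration_hours_external` to
# `External_collaboration_hours` for friendlier axis labels, then delegate to
# the matching create_*() function.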
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/extermal_line.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Distribution of External Collaboration Hours as a 100% stacked bar
#'
#' @description
#' Analyze the distribution of External Collaboration Hours.
#' Returns a stacked bar plot by default.
#' Additional options available to return a table with distribution elements.
#'
#' @details
#' Uses the metric `External_collaboration_hours`.
#' See `create_dist()` for applying the same analysis to a different metric.
#'
#' @inheritParams create_dist
#' @inherit create_dist return
#'
#' @family Visualization
#' @family External Collaboration
#'
#' @examples
#' # Return plot
#' external_dist(sq_data, hrvar = "Organization")
#'
#' # Return summary table
#' external_dist(sq_data, hrvar = "Organization", return = "table")
#'
#' # Return result with custom breaks
#' external_dist(sq_data, hrvar = "LevelDesignation", cut = c(4, 7, 9))
#'
#' @export
external_dist <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot",
cut = c(5, 10, 15)) {
# Rename metric
plot_data <-
data %>%
mutate(External_collaboration_hours = Collaboration_hours_external)
plot_data %>%
create_dist(
metric = "External_collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return,
cut = cut,
dist_colours = c("#3F7066", "#64B4A4", "#B1EDE1","#CBF3EB")
)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/external_dist.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Distribution of External Collaboration Hours (Fizzy Drink plot)
#'
#' @description
#' Analyze weekly External Collaboration hours distribution, and return
#' a 'fizzy' scatter plot by default.
#' Additional options available to return a table with distribution elements.
#'
#' @details
#' Uses the metric `Collaboration_hours_external`.
#' See `create_fizz()` for applying the same analysis to a different metric.
#'
#' @inheritParams create_fizz
#' @inherit create_fizz return
#'
#' @family Visualization
#' @family External Collaboration
#'
#' @examples
#' # Return plot
#' external_fizz(sq_data, hrvar = "LevelDesignation", return = "plot")
#'
#' # Return summary table
#' external_fizz(sq_data, hrvar = "Organization", return = "table")
#' @export
external_fizz <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot"){
  # Rename metric
  plot_data <-
    data %>%
    mutate(External_collaboration_hours = Collaboration_hours_external)
create_fizz(data = plot_data,
metric = "External_collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
return = return)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/external_fizz.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Plot External Network Breadth and Size as a scatter plot
#'
#' @description
#' Plot the external network metrics for an HR variable as a scatter plot,
#' showing 'External Network Breadth' as the vertical axis and 'External Network
#' Size' as the horizontal axis.
#'
#' @details
#' Uses the metrics `External_network_size` and `Networking_outside_company`.
#'
#' @inheritParams create_bubble
#'
#' @examples
#' # Return plot
#' external_network_plot(sq_data, return = "plot")
#'
#' @return
#' 'ggplot' object showing a bubble plot with external network size as the
#' x-axis and external network breadth as the y-axis. The size of each bubble
#' represents the number of unique employees in each group.
#'
#' @family Visualization
#' @family Network
#'
#' @export
external_network_plot <- function(data,
hrvar = "Organization",
mingroup = 5,
return = "plot",
bubble_size = c(1, 8)){
data %>%
rename(`External Network Size` = "External_network_size",
`External Network Breadth` = "Networking_outside_company") %>%
create_bubble(hrvar = hrvar,
mingroup = mingroup,
metric_x = "External Network Size",
metric_y = "External Network Breadth",
return = return,
bubble_size = bubble_size)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/external_network_plot.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Rank groups with high External Collaboration Hours
#'
#' @description
#' This function scans a Standard Person Query for groups with high levels of
#' External Collaboration. Returns a plot by default, with an option to
#' return a table with all groups (across multiple HR attributes) ranked by
#' hours of External Collaboration.
#'
#' @details
#' Uses the metric `Collaboration_hours_external`.
#' See `create_rank()` for applying the same analysis to a different metric.
#'
#' @inheritParams create_rank
#'
#' @import dplyr
#' @import ggplot2
#' @import reshape2
#' @import scales
#' @importFrom stats reorder
#'
#' @family Visualization
#' @family External Collaboration
#'
#' @return
#' When 'plot' is passed in `return`, a plot is returned. When 'table' is
#' passed, a summary table is returned as a data frame.
#'
#' @export
external_rank <- function(data,
hrvar = extract_hr(data),
mingroup = 5,
mode = "simple",
plot_mode = 1,
return = "plot"){
  # Rename metric
  plot_data <-
    data %>%
    mutate(External_collaboration_hours = Collaboration_hours_external)
plot_data %>%
create_rank(metric = "External_collaboration_hours",
hrvar = hrvar,
mingroup = mingroup,
mode = mode,
plot_mode = plot_mode,
return = return)
}
|
/scratch/gouwar.j/cran-all/cranData/wpa/R/external_rank.R
|