#' @title Major Adverse Cardiovascular Event
#'
#' @description An artificial network meta-analysis dataset comparing the effectiveness of a number of interventions for major
#' adverse cardiovascular events.
#'
#' @format A \code{data.frame} with the following columns
#' \describe{
#' \item{Study}{The study name of the trial}
#' \item{treat1, treat2, treat3, treat4}{Treatment names of arms}
#' \item{n1, n2, n3, n4}{Total number of participants in arms}
#' \item{event1, event2, event3, event4}{Total number of events in arms}
#' }
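#'
#' @examples
#' # Illustrative: load and preview the data set
#' data(MACE)
#' head(MACE)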
"MACE"
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/MACE.R
|
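# Internal helper: fills the tracks of the rank heat plot. It labels each outcome
# track in the "Outcomes" sector and, for every component/outcome cell, draws a
# sector colored according to the interval in which the p-score falls (a light-grey
# bordered, empty sector when the value is NA) and prints the rounded value.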
addColorBackground <- function(outcomeNames, components, data, intervals, mycolors, cex,
cex_values = NULL, cex_outcomes = NULL) {
if (!is.null(cex_values)) {
cex_v <- cex_values
} else {
cex_v <- cex
}
if (!is.null(cex_outcomes)) {
cex_o <- cex_outcomes
} else {
cex_o <- cex
}
for (k in 1:length(outcomeNames)) {
start <- circlize::get.cell.meta.data("cell.start.degree", "Outcomes", k + 1)
end <- circlize::get.cell.meta.data("cell.end.degree", "Outcomes", k + 1)
top <- circlize::get.cell.meta.data("cell.top.radius", "Outcomes", k + 1)
bottom <- circlize::get.cell.meta.data("cell.bottom.radius", "Outcomes", k + 1)
circlize::draw.sector(start.degree = start, end.degree = end, rou1 = top, rou2 = bottom, border = NA)
circlize::circos.text(5, 50, sector.index = "Outcomes", facing = "downward", track.index = k + 1, labels = outcomeNames[k], cex = cex_o)
}
for (i in 1:dim(data)[1]) {
for (j in 1:length(outcomeNames)) {
start <- circlize::get.cell.meta.data("cell.start.degree", components[i], j + 1)
end <- circlize::get.cell.meta.data("cell.end.degree", components[i], j + 1)
top <- circlize::get.cell.meta.data("cell.top.radius", components[i], j + 1)
bottom <- circlize::get.cell.meta.data("cell.bottom.radius", components[i], j + 1)
if (is.na(data[i, j]) == TRUE) { # case data is NA: draw a white sector
circlize::draw.sector(start.degree = start, end.degree = end, rou1 = top, rou2 = bottom, border = "#f2f2f2")
} else {
for (k in 1:32) {
if (as.numeric(data[i, j]) >= intervals[k] && as.numeric(data[i, j]) < intervals[k + 1]) {
circlize::draw.sector(start.degree = start, end.degree = end, rou1 = top, rou2 = bottom, border = NA, col = mycolors[k])
}
}
circlize::circos.text(5, 50, sector.index = components[i], facing = "downward", track.index = j + 1, labels = round(data[i, j], 2), cex = cex_v)
}
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/addColorBackground.R
|
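# Internal helper: builds a components-by-outcomes data.frame of summarized P-scores.
# For each outcome the P-scores of all interventions containing a given component are
# pooled with the median (median = TRUE) or the mean, using the random- or fixed-effects
# ranking as requested, and the result is expressed on a 0-100 scale.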
build.data <- function(model, median, random, small.values, numOfOutcomes, components, outcomeNames, sep) {
# Dataframe to store estimates
df <- data.frame(matrix(nrow = length(components), ncol = numOfOutcomes))
rownames(df) <- components
colnames(df) <- outcomeNames
for (outcome in 1:numOfOutcomes) {
pscores <- netmeta::netrank(model[[outcome]], small.values = small.values[outcome])
# Components of each node
nodes <- names(pscores$ranking.fixed)
nodes <- gsub(" ", "", nodes)
components_node <- strsplit(nodes, split = paste("[", sep, "]", sep = ""), perl = TRUE)
# select effect
if (random[outcome] == TRUE) {
pscoresvalues <- pscores$Pscore.random
} else {
pscoresvalues <- pscores$Pscore.fixed
}
for (i in 1:dim(df)[1]) {
result <- NA
val <- c()
for (j in 1:length(components_node)) { # from pscores table
if (grepl(rownames(df)[i], components_node[j])) {
val <- c(val, pscoresvalues[j])
}
}
# select median OR mean
if (!is.null(val)) {
if (median == TRUE) {
result <- median(val)
} else {
result <- mean(val)
}
} else {
result <- NULL
}
if (is.null(result)) {
df[i, outcome] <- NA
} else {
df[i, outcome] <- round(result, digits = 2) * 100
}
}
}
dat <- list(df = df, components = components)
dat
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/build.data.R
|
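# Internal helper: validates the arguments of the rank heat plot (a list of netmeta
# models, the median/random flags, the outcome names and the cex_* font sizes) and
# returns the random flag recycled to one value per outcome.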
check.arguments <- function(model, median, random, outcomeNames, cex_components, cex_values, cex_outcomes) {
if (inherits(model, "list") == FALSE) {
stop("The class of model is not list", call. = FALSE)
} else if (length(model) < 2) {
stop("The length of model must be at least two", call. = FALSE)
}
numOfOutcomes <- length(model)
# Check random argument
if (length(random) == 1) {
if (inherits(random, "logical") == FALSE) {
stop("The class of random is not logical", call. = FALSE)
} else {
random <- rep(random, numOfOutcomes)
}
} else if (length(random) != numOfOutcomes) {
stop("The length of random must be equal with the number of the outcomes", call. = FALSE)
}
# Check cex arguments
if (!is.null(cex_components)) {
if (inherits(cex_components, "numeric") == FALSE) {
stop("The class of cex_components is not numeric", call. = FALSE)
} else if (length(cex_components) > 1) {
stop("The length of cex_components must be one", call. = FALSE)
} else if (cex_components < 0) {
stop("Argument cex_components must be a positive number", call. = FALSE)
}
}
if (!is.null(cex_values)) {
if (inherits(cex_values, "numeric") == FALSE) {
stop("The class of cex_values is not numeric", call. = FALSE)
} else if (length(cex_values) > 1) {
stop("The length of cex_values must be one", call. = FALSE)
} else if (cex_values < 0) {
stop("Argument cex_values must be a positive number", call. = FALSE)
}
}
if (!is.null(cex_outcomes)) {
if (inherits(cex_outcomes, "numeric") == FALSE) {
stop("The class of cex_outcomes is not numeric", call. = FALSE)
} else if (length(cex_outcomes) > 1) {
stop("The length of cex_outcomes must be one", call. = FALSE)
} else if (cex_outcomes < 0) {
stop("Argument cex_outcomes must be a positive number", call. = FALSE)
}
}
# Check length of outcome names
if (length(outcomeNames) != numOfOutcomes) {
stop("The length of outcome names must be equal with the number of the outcomes", call. = FALSE)
}
# Check model and median argument
for (outcome in 1:numOfOutcomes) {
if (inherits(model[[outcome]], "netmeta") == FALSE) {
stop(paste("The class of model", outcome, "is not of netmeta"), call. = FALSE)
} else if (inherits(median, "logical") == FALSE) {
stop("The class of median is not logical", call. = FALSE)
} else if (length(median) > 1) {
stop("The length of median must be one", call. = FALSE)
} else if (inherits(random[[outcome]], "logical") == FALSE) {
stop(paste("The class of random", outcome, "is not logical"), call. = FALSE)
}
}
random
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/check.arguments.R
|
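# Internal helper: checks that every combination in x, once split on the separator,
# consists only of components present in y; otherwise it stops with an error.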
check.combinations <- function(x, y, sep) {
elements <- strsplit(x, split = paste0("[", sep, "]"), perl = TRUE)
included <- sapply(elements,
FUN = function(z) {
ifelse(sum(z %in% y) == length(z), TRUE, FALSE)
}
)
if (sum(included) != length(included)) {
stop("Argument combination must includes network's components combinations", call. = FALSE)
}
included
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/check.combinations.R
|
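# Internal helper: validates group specifications, which may be single integers
# (e.g. "2"), integer ranges (e.g. "2-4") or "a number and above" (e.g. "5+");
# the quoted values are illustrative. Returns TRUE only when every element matches
# one of these forms.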
check.groups <- function(x) {
pass <- TRUE
# Numeric elements
num <- Hmisc::numeric.string(x) & grepl("+", x, fixed = T) == FALSE
if (sum(as.numeric(x[num]) %% 1 != 0) > 0) {
stop("Argument group must contains integer values", call. = FALSE)
}
if (sum(num) == length(x)) {
pass
} else {
x <- x[which(!num)]
# Range elements
ranges <- strsplit(x, split = "-")
ranges_check <- sapply(ranges,
FUN = function(r) {
Hmisc::all.is.numeric(r) & length(r) == 2 & sum(grepl("+", r, fixed = TRUE)) == 0
}
)
if (sum(ranges_check) > 0) {
range_integer <- sapply(ranges[ranges_check],
FUN = function(r) {
sum(as.numeric(r) %% 1 != 0) > 0
}
)
if (sum(range_integer) > 0) {
stop("Range must be between integer numbers", call. = FALSE)
}
range_x1x2 <- sapply(ranges[ranges_check],
FUN = function(r) {
r[1] > r[2]
}
)
if (sum(range_x1x2) > 0) {
stop("The first element of range must be smaller than the last one", call. = FALSE)
}
}
x <- x[!ranges_check]
# Plus check
if (length(x) > 0) {
plus <- strsplit(x, split = "+", fixed = TRUE)
plus_check <- sapply(plus,
FUN = function(p) {
if (length(p) != 1) {
FALSE
} else {
Hmisc::all.is.numeric(p)
}
}
)
if (sum(plus_check) > 0) {
plus_integer <- sapply(plus[plus_check],
FUN = function(r) {
as.numeric(r[1]) %% 1 != 0
}
)
if (sum(plus_integer) > 0) {
stop("Integer number must be used for the the above a number", call. = FALSE)
}
}
x <- x[!plus_check]
}
if (length(x) > 0) {
pass <- FALSE
}
pass
}
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/check.groups.R
|
#' Components Network Graph
#'
#' @description
#' The Components Network Graph is meant to visualize the frequency of components’ combinations found in the network.
#'
#' @details
#' The function resembles a network plot where nodes represent the individual components found in the network
#' and edges represent the combination of components found in at least one treatment arm of the trials included in the
#' network meta-analysis model. Each edge’s color represents one of the unique interventions (components’ combination)
#' found in the network of interventions. Edges’ thickness indicates the frequency by which each intervention
#' (combination of components) was observed in the network (number of arms in which the combination was assigned).
#' The number of the most frequent combinations can be modified via the argument \code{mostF}. The function by
#' default plots the five most frequent components' combinations found in the network.
#'
#' @note
#' The function can be applied only in network meta-analysis models that contain multi-component interventions.
#'
#' @param model An object of class \code{\link[netmeta]{netmeta}}.
#' @param sep A single character that defines the separator between interventions components.
#' @param mostF Number of most frequent combinations of the network.
#' @param excl A character vector that specifies the combinations to be excluded from the plot.
#' @param title A single character that specifies the overall title of the plot.
#' @param print_legend \code{logical}. If \code{TRUE} the legend is printed.
#' @param size_legend size of the legend.
#'
#' @importFrom qgraph qgraph
#' @importFrom graphics par plot legend
#'
#' @return Returns (invisibly) a \code{\link[qgraph]{qgraph}} object.
#' @export
#'
#' @examples
#' data(nmaMACE)
#' compGraph(model = nmaMACE)
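#' # Illustrative: restrict the plot to the three most frequent combinations
#' compGraph(model = nmaMACE, mostF = 3)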
#'
compGraph <- function(model, sep = "+", mostF = 5, excl = NULL, title = "Most frequent combinations of components",
print_legend = TRUE, size_legend = 0.825) {
##
# Check arguments
##
if (inherits(model, "netmeta") == FALSE) {
stop("The class of model is not of netmeta", call. = FALSE)
} else if (model$reference.group == "") {
stop("The netmeta model must have a reference group", call. = FALSE)
} else if (inherits(sep, "character") == FALSE) {
stop("The class of sep is not character", call. = FALSE)
} else if (length(sep) > 1) {
stop("The length of sep must be one", call. = FALSE)
} else if (sep == "") {
stop("Argument sep must be diffent than ''", call. = FALSE)
} else if (inherits(mostF, c("numeric", "integer")) == FALSE) {
stop("The class of mostF must be numeric or integer", call. = FALSE)
} else if (length(mostF) > 1) {
stop("The length of mostF must be one", call. = FALSE)
} else if (mostF <= 0) {
stop("Argument mostF must be positive number", call. = FALSE)
} else if (mostF %% 1 != 0) {
stop("Argument mostF must be an interger number", call. = FALSE)
} else if (!is.null(excl)) {
if (inherits(excl, "character") == FALSE) {
stop("The class of excl is not character", call. = FALSE)
}
} else if (inherits(title, "character") == FALSE) {
stop("The class of title is not character", call. = FALSE)
} else if (length(title) > 1) {
stop("The length of title must be one", call. = FALSE)
} else if (inherits(print_legend, "logical") == FALSE) {
stop("The class of print_legend is not logical", call. = FALSE)
} else if (length(print_legend) > 1) {
stop("The length of print_legend must be one", call. = FALSE)
} else if (inherits(size_legend, c("numeric", "integer")) == FALSE) {
stop("The class of size_legend must be numeric or integer", call. = FALSE)
} else if (length(size_legend) > 1) {
stop("The length of size_legend must be one", call. = FALSE)
} else if (size_legend <= 0) {
stop("size_legend must be a positive number", call. = FALSE)
}
##
# Construct the data of the plot
##
data <- unique(data.frame("t" = c(model$treat1, model$treat2), "study" = c(model$studlab, model$studlab)))
comp.freq <- table(data$t)
if (!is.null(excl)) {
excl_true <- excl %in% names(comp.freq)
if (sum(!excl_true) > 0) {
excl_f <- excl[!excl_true]
excl <- excl[excl_true]
if (length(excl_f) == 1) {
warning(paste("Combination", excl_f, "was excluded since it was not observed in the network"))
} else {
warning(paste("Combinations", paste(excl_f, collapse = ", "), "were excluded since they were not observed in the network"))
}
if (length(excl) == 0) {
excl <- NULL
}
}
if (length(excl) == length(comp.freq)) {
stop("The length of excl is equal to the total number of observed combinations in the network", call. = FALSE)
}
}
if (mostF > length(comp.freq)) {
stop(paste("mostF must be smaller than the number of treatments in the network"), .call = FALSE)
}
ntwrk <- sort(comp.freq[!(names(comp.freq) %in% excl)], decreasing = TRUE)[1:mostF]
Combs <- gsub(" ", "", names(ntwrk))
Weights <- as.numeric(ntwrk)
res1 <- strsplit(Combs, split = paste("[", sep, "]", sep = ""), perl = TRUE)
if (sum(sapply(res1, FUN = function(x) {
length(x) > 1
})) == 0) {
stop("No additive treatments are included in the selected most frequent combinations", call. = FALSE)
}
# tables to be merged
res4 <- lapply(res1, FromTo)
# merge the tables
FrToMat <- do.call("rbind", res4)
if (sum(sapply(res4, length) <= 2) == length(res4)) {
groups <- rep(1, length(res4))
} else {
groups <- sapply(sapply(res4, matrix, ncol = 2), nrow)
}
# Weights vector
Wghts <- unlist(mapply(rep, x = Weights, each = groups))
E <- (data.frame(from = FrToMat[, 1], to = FrToMat[, 2], width = Wghts))
# Colors vector
if (mostF > 10) {
clrs <- 1:mostF
} else {
clrs <- grDevices::palette.colors(n = mostF, palette = "Tableau")
}
CLRS <- unlist(mapply(rep, x = clrs, each = groups))
##
# plot
##
oldpar <- graphics::par(no.readonly = TRUE)
on.exit(graphics::par(oldpar))
graphics::par(cex = 0.75, mai = c(0.1, 0.1, 1, 0.1) + 1)
graphics::par(fig = c(0, 0.75, 0, 1))
qgraph::qgraph(E,
mode = "direct", edge.color = CLRS, fade = FALSE, arrows = FALSE, layout = "circle",
title = title
)
graphics::par(fig = c(0.7, 1, 0, 1), new = TRUE, cex = 1)
graphics::par(mar = c(1, 1, 1, 1))
graphics::plot(c(0, 1), c(0, 1), ann = FALSE, bty = "n", type = "n", xaxt = "n", yaxt = "n")
if (print_legend) {
graphics::legend(
x = "left", legend = c("Combination", as.character(Combs), "# of arms", Weights),
ncol = 2, bty = "n", cex = size_legend
)
}
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/compGraph.R
|
#' Components descriptive analysis
#'
#' @description
#' The function performs a descriptive analysis regarding the frequency of the components in the network meta-analysis model.
#'
#' @note
#' The function can be applied only in network meta-analysis models that contain multi-component interventions.
#'
#'
#' @param model An object of class \code{\link[netmeta]{netmeta}}.
#' @param sep A single character that defines the separator between interventions components.
#' @param heatmap \code{logical}. If \code{TRUE} a heat matrix of the component's frequency is plotted.
#' @param percentage \code{logical}. If \code{TRUE} combinations' percentages are printed as numbers instead of fractions in the \code{heatmap}.
#' @param digits A single integer value that specifies the percentages' decimal places in the \code{heatmap}.
#'
#' @importFrom tibble rownames_to_column
#' @importFrom tidyr pivot_longer
#' @importFrom dplyr %>%
#' @importFrom ggplot2 ggplot aes geom_tile scale_fill_gradient2 geom_text coord_fixed xlab ylab guides guide_legend
#'
#' @return A list containing three items
#' \item{crosstable}{A cross-table containing the frequency of the components. Each cell represents the number of arms where the corresponding component combination was observed.}
#' \item{frequency}{A \code{data.frame} that contains the component's frequency. Columns
#' \itemize{ \item{ \code{Component}} {denotes the name of each component}
#' \item{\code{Frequency}} {denotes the number of arms where the component was observed}
#' \item{\code{A}} {denotes the number of studies in which the corresponding component was included in all arms}
#' \item{\code{A_percent}} {denotes the percentage of studies in which the corresponding component was included in all arms}
#' \item{\code{B}} {denotes the number of studies in which the corresponding component was included in at least one arm}
#' \item{\code{B_percent}} {denotes the percentage of studies in which the corresponding component was included in at least one arm}
#' \item{\code{C}} {denotes the number of studies in which the corresponding component was not included in any arm}
#' \item{\code{C_percent}} {denotes the percentage of studies in which the corresponding component was not included in any arm}
#' \item{\code{A.B}} {denotes the ratio of columns \code{A} and \code{B}}.
#' }}
#' \item{heatmat}{An object of class \code{ggplot} that visualizes item \code{crosstable}. Diagonal elements refer to the components, with the proportion of study
#' arms including that component given in parentheses; off-diagonal elements refer to the frequency of the components' combinations, with the proportion of study arms containing both components
#' (out of those study arms that contain the component in the row) given in parentheses. The intensity of the color is proportional to the relative frequency of the component combination.}
#'
#' @export
#'
#' @examples
#' data(nmaMACE)
#' compdesc(model = nmaMACE)
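#' # Illustrative: skip the heat plot and inspect the component frequency table
#' res <- compdesc(model = nmaMACE, heatmap = FALSE)
#' res$frequency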
#'
compdesc <- function(model, sep = "+", heatmap = TRUE, percentage = TRUE, digits = 2) {
if (inherits(model, "netmeta") == FALSE) {
stop("The class of model is not of netmeta", call. = FALSE)
} else if (inherits(sep, "character") == FALSE) {
stop("The class of sep is not character", call. = FALSE)
} else if (length(sep) > 1) {
stop("The length of sep must be one", call. = FALSE)
} else if (sep == "") {
stop("Argument sep must be diffent than ''", call. = FALSE)
} else if (inherits(heatmap, "logical") == FALSE) {
stop("The class of heatmap is not logical", call. = FALSE)
} else if (length(heatmap) > 1) {
stop("The length of heatmap must be one", call. = FALSE)
} else if (inherits(percentage, "logical") == FALSE) {
stop("The class of percentage is not logical", call. = FALSE)
} else if (length(percentage) > 1) {
stop("The length of percentage must be one", call. = FALSE)
} else if (inherits(digits, c("numeric", "integer")) == FALSE) {
stop("The class of digits is not integer", call. = FALSE)
} else if (length(digits) > 1) {
stop("The length of digits must be one", call. = FALSE)
} else if (digits < 0) {
stop("Argument digits must be a non-negative number", call. = FALSE)
} else if (digits %% 1 != 0) {
stop("Argument digits must be an interger number", call. = FALSE)
}
# Get the NMA-CNMA data
data <- model$data[, c(".studlab", ".treat1", ".treat2")]
names(data) <- c("studlab", "treat1", "treat2")
# Find all components of the data
nodes <- unique(c(model$treat1, model$treat2))
nodes <- gsub(" ", "", nodes)
components <- strsplit(nodes, split = paste("[", sep, "]", sep = ""), perl = TRUE)
if (sum(sapply(components, FUN = function(x) {
length(x) > 1
})) == 0) {
stop("No additive treatments are included in the NMA model", call. = FALSE)
} else {
components <- unique(unlist(components))
}
# Wide to long format
data_long <- reshape2::melt(data = data, id.vars = "studlab", value.name = "Node")
# Keep unique interventions
data_long <- unique(data_long[, c("studlab", "Node")])
data_long$Node <- gsub(" ", "", data_long$Node)
# Calculate components frequency
dum <- dummies(data_long, components, sep)
# Frequency of each component
comp_freq <- apply(dum[, -c(1:2)], 2, sum)
# Additive for each component
comp_add <- data.frame(
"Component" = labels(comp_freq), "Frequency" = comp_freq, "A" = NA,
"A_percent" = NA, "B" = NA, "B_percent" = NA, "C" = NA,
"C_percent" = NA, "A/B" = NA
)
# cross table
crosstab <- cross_ratio <- as.data.frame(matrix(ncol = length(components), nrow = length(components)))
names(crosstab) <- names(cross_ratio) <- row.names(cross_ratio) <- row.names(crosstab) <- components
studies <- split(dum, dum$studlab)
for (i in components) {
comp_add[which(comp_add$Component == i), "A"] <- sum(sapply(studies, FUN = function(x) {
sum(x[, i]) == dim(x)[1]
})) # component included in each arm
comp_add[which(comp_add$Component == i), "B"] <- sum(sapply(studies, FUN = function(x) {
sum(x[, i]) > 0
})) # component included in at least one arm
comp_add[which(comp_add$Component == i), "C"] <- sum(sapply(studies, FUN = function(x) {
sum(x[, i]) == 0
})) # component not included in any arm
}
comp_add$A_percent <- comp_add$A / length(studies)
comp_add$B_percent <- comp_add$B / length(studies)
comp_add$C_percent <- comp_add$C / length(studies)
comp_add$A.B <- comp_add$A / comp_add$B
# Cross-tabulations for each component
m_i <- 0
m_j <- 0
for (i in components) {
m_i <- m_i + 1
for (j in components) {
m_j <- m_j + 1
crosstab[i, j] <- length(which(dum[, i] + dum[, j] == 2))
if (m_i > m_j) {
cross_ratio[i, j] <- paste0(length(which(dum[, i] + dum[, j] == 2)), "/", length(which(dum[, i] + dum[, i] == 2)))
} else if (m_i < m_j) {
cross_ratio[i, j] <- paste0(length(which(dum[, i] + dum[, j] == 2)), "/", length(which(dum[, i] + dum[, i] == 2)))
} else {
cross_ratio[i, j] <- paste0(length(which(dum[, i] + dum[, j] == 2)), "/", dim(data_long)[1])
}
}
m_j <- 0
}
exp <- list("crosstable" = crosstab, "frequency" = comp_add)
if (heatmap == TRUE) {
data_heat <- t(crosstab) %>%
as.data.frame() %>%
tibble::rownames_to_column("f_id") %>%
tidyr::pivot_longer(-c("f_id"), names_to = "samples", values_to = "counts")
ratios_heat <- t(cross_ratio) %>%
as.data.frame() %>%
tibble::rownames_to_column("f_id") %>%
tidyr::pivot_longer(-c("f_id"), names_to = "samples", values_to = "counts")
# Calculate percentage
data_heat$perc <- round(100 * sapply(ratios_heat$counts, FUN = function(x) {
eval(parse(text = x))
}), digits = digits)
if (percentage == TRUE) {
ratios_heat$label <- paste(data_heat$counts, "\n", paste0("(", data_heat$perc, "%)"))
cap <- paste0("Total number of study arms: ", dim(data_long)[1])
} else {
ratios_heat$label <- paste(data_heat$counts, "\n", paste0("(", ratios_heat$counts, ")"))
cap <- NULL
}
p <- ggplot2::ggplot(data = NULL, ggplot2::aes(x = data_heat$f_id, y = data_heat$samples, fill = data_heat$perc)) +
ggplot2::geom_tile() +
ggplot2::scale_fill_gradient(low = "white", high = "red", limit = c(0, 100)) +
ggplot2::geom_text(ggplot2::aes(label = ratios_heat$label), color = "black", size = 4) +
ggplot2::coord_fixed() +
ggplot2::xlab("") +
ggplot2::ylab("") +
ggplot2::labs(caption = cap) +
ggplot2::guides(fill = ggplot2::guide_legend("% arms")) +
ggplot2::theme(
axis.text = ggplot2::element_text(size = 12, face = "bold", color = "black"),
plot.caption = ggplot2::element_text(size = 14),
legend.title = ggplot2::element_text(size = 12, face = "bold")
)
exp[["heatmat"]] <- p
}
exp
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/compdesc.R
|
#' Components Density Plot
#'
#' @description
#' The function creates density plots in order to explore the efficacy of the components.
#'
#'
#' @details
#' If the length of the argument \code{combination} is 1, the function creates two density plots. The first is produced based on the
#' interventions that include the component combination of interest (which is specified by the argument \code{combination}),
#' while the second on the interventions that do not include the underlying component combination.
#'
#' If the argument \code{combination} includes more than one element, the number of densities equals the length of
#' the argument \code{combination}, and each density is based on the interventions that include the corresponding component combination.
#' For example, if \code{combination = c("A + B", "B + C", "A")} the function will produce 3 density plots based on
#' the interventions that include components \code{"A"} and \code{"B"}, the interventions that include components \code{"B"} and \code{"C"}, and the
#' interventions that include component \code{"A"}, respectively.
#'
#' The function by default uses the intervention's relative effects (\code{z_value = FALSE}) obtained from the random-effects network
#' meta-analysis (NMA) model (\code{random = TRUE}). It can also be adjusted to use the intervention's z-values
#' instead of the relative effects, by setting \code{z_value = TRUE}.
#'
#' @note
#' The efficacy of the components could be explored via violin plots instead of density plots, by setting \code{violin = TRUE}.
#' Also, in the case of dichotomous outcomes, the log-scale is used.
#'
#' The function can be applied only in network meta-analysis models that contain multi-component interventions.
#'
#'
#' @param model An object of class \code{\link[netmeta]{netmeta}}.
#' @param sep A single character that defines the separator between interventions components.
#' @param combination A character vector that contains the component combinations of interest.
#' @param violin \code{logical}. If \code{TRUE} the density is visualized via violins instead of density plots.
#' @param random \code{logical}. If \code{TRUE} the random-effects NMA model is used, instead of the fixed-effect NMA model.
#' @param z_value \code{logical}. If \code{TRUE} z-values are used instead of intervention effects.
#'
#'
#' @return An object of class \code{ggplot}.
#'
#' @export
#'
#' @importFrom ggplot2 ggplot aes `%+%` geom_density theme_classic xlab ylab xlim labs scale_fill_discrete scale_x_log10
#' @importFrom stats density
#' @importFrom plyr mapvalues
#'
#'
#' @examples
#' data(nmaMACE)
#' denscomp(model = nmaMACE, combination = "C")
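#' # Illustrative: violin plots for two components; "B" and "C" are assumed to be
#' # components of nmaMACE, as in the other package examples
#' denscomp(model = nmaMACE, combination = c("B", "C"), violin = TRUE)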
#'
denscomp <- function(model, sep = "+", combination, violin = FALSE, random = TRUE, z_value = FALSE) {
##
# Check arguments
##
if (inherits(model, "netmeta") == FALSE) {
stop("The class of model is not of netmeta", call. = FALSE)
} else if (model$reference.group == "") {
stop("The netmeta model must have a reference group", call. = FALSE)
} else if (inherits(sep, "character") == FALSE) {
stop("The class of sep is not character", call. = FALSE)
} else if (length(sep) > 1) {
stop("The length of sep must be one", call. = FALSE)
} else if (sep == "") {
stop("Argument sep must be diffent than ''", call. = FALSE)
} else if (inherits(combination, "character") == FALSE) {
stop("The class of combination is not character", call. = FALSE)
} else if (inherits(violin, "logical") == FALSE) {
stop("The class of violin is not logical", call. = FALSE)
} else if (length(violin) > 1) {
stop("The length of violin must be one", call. = FALSE)
} else if (inherits(random, "logical") == FALSE) {
stop("The class of random is not logical", call. = FALSE)
} else if (length(random) > 1) {
stop("The length of random must be one", call. = FALSE)
} else if (inherits(z_value, "logical") == FALSE) {
stop("The class of z_value is not logical", call. = FALSE)
} else if (length(z_value) > 1) {
stop("The length of z_value must be one", call. = FALSE)
}
combination <- gsub(" ", "", combination)
if (length(combination) > 1) {
combination <- unique.combinations(combination, sep)
}
##
# Find the components of the network
##
ifelse(random, type <- "random", type <- "fixed")
if (z_value) {
sm <- "statistic"
xlabel <- "z-value"
} else {
sm <- "TE"
xlabel <- "Intervention Effect"
}
ref <- as.character(model$reference.group) # Reference category
# Get networks treatment effects
nma_sm <- data.frame("SM" = model[[paste(sm, type, sep = ".")]][, ref])
nma_sm$Node <- rownames(nma_sm) <- gsub(" ", "", as.character(rownames(nma_sm)))
if (sum(is.na(as.numeric(nma_sm$SM))) != 0) {
non_num <- which(is.na(as.numeric(nma_sm$SM)))
ref_exc <- which(nma_sm$Node[non_num] == ref)
if (length(ref_exc) != 0) { # reference included
if (length(non_num) > 1) {
warning(paste0("Nodes ", paste0(nma_sm$Node[non_num[-ref_exc]], collapse = ", "), " were excluded since the ", tolower(xlabel), " could not be determined for these nodes"))
}
} else { # reference not included
if (length(non_num) == 1) {
warning(paste0("Node ", nma_sm$Node[non_num], " was excluded since the ", tolower(xlabel), " could not be determined"))
} else {
warning(paste0("Nodes ", paste0(nma_sm$Node[non_num], collapse = ", "), " were excluded since the ", tolower(xlabel), " could not be determined for these nodes"))
}
}
nma_sm <- nma_sm[-non_num, ]
}
# Components of the network
comp_network <- strsplit(nma_sm$Node, split = paste("[", sep, "]", sep = ""), perl = TRUE)
if (sum(sapply(comp_network, FUN = function(x) {
length(x) > 1
})) == 0) {
stop("No additive treatments are included in the NMA model", call. = FALSE)
} else {
comp_network <- unique(unlist(comp_network))
}
# Check if combination's components are included in network's components
component_elements <- strsplit(combination, split = paste("[", sep, "]", sep = ""), perl = TRUE)
included <- lapply(component_elements,
FUN = function(x) {
sum(x %in% comp_network) == length(x)
}
)
if (sum(sapply(included, sum)) != length(included)) {
stop(paste("Argument combination must includes network's components"), call. = FALSE)
}
##
# Write the network's nodes as a combination of components dummy variables
##
dummy <- dummies(nma_sm, comp_network, sep)
# Check if the combinations can be obtained
combination_exist <- sapply(component_elements,
FUN = function(x) {
sum(apply(as.matrix(dummy[, x]), 1, sum) == length(x)) > 1 # two data-points required for the density
}
)
if (sum(combination_exist) == 0) {
stop("At least two datapoints required for the density, which were not found", call. = FALSE)
}
if (sum(!combination_exist) > 0) {
if (sum(!combination_exist) == 1) {
warning(paste(
paste(combination[!combination_exist]),
"is excluded since it was not included in at least two nodes"
),
call. = FALSE
)
} else {
warning(paste(
paste(combination[!combination_exist], collapse = ", "),
"are excluded since they were not included in at least two nodes"
),
call. = FALSE
)
}
combination <- combination[combination_exist]
}
##
# Make plot data
##
if (length(combination) > 1) {
# More than one combination
select_comp <- NULL
n <- NULL
index <- NULL
for (i in 1:length(combination)) {
combination_components <- unlist(strsplit(combination[i], split = paste("[", sep, "]", sep = ""), perl = TRUE)[[1]])
if (length(combination_components) > 1) {
rows <- which(apply(dummy[, combination_components], 1, sum) == length(combination_components))
} else {
rows <- which(dummy[, combination_components] == 1)
}
##
select_comp_i <- dummy[rows, "SM"]
n <- c(n, length(select_comp_i))
index <- c(index, rep(combination[i], length(select_comp_i)))
##
select_comp <- c(select_comp, select_comp_i)
}
plot.data <- data.frame(
"SM" = c(select_comp),
"combination" = rep(combination, times = n)
)
lab <- paste("Interventions including", combination)
} else {
# One component
combination_components <- unlist(strsplit(combination, split = paste("[", sep, "]", sep = ""), perl = TRUE)[[1]])
if (length(combination_components) > 1) {
rows_include <- which(apply(dummy[, combination_components], 1, sum) == length(combination_components))
rows_not_include <- which(apply(dummy[, combination_components], 1, sum) != length(combination_components))
} else {
rows_include <- which(dummy[, combination_components] == 1)
rows_not_include <- which(dummy[, combination_components] == 0)
}
##
select_comp_incl <- dummy[rows_include, "SM"]
select_comp_not_incl <- dummy[rows_not_include, "SM"]
select_comp <- c(select_comp_incl, select_comp_not_incl)
##
xl <- paste("Treatment Effect with and without", combination)
lab <- c(
paste("Interventions including", combination),
paste("Interventions not including", combination)
)
plot.data <- data.frame(
"SM" = c(select_comp_incl, select_comp_not_incl),
"combination" = rep(c(lab[1], lab[2]), times = c(length(select_comp_incl), length(select_comp_not_incl)))
)
}
##
# Plot
##
if (model$sm %in% c("OR", "RR") & z_value == FALSE) { # dichotomous outcomes
plot.data$SM <- exp(plot.data$SM)
}
if (violin) {
p <- ggplot2::ggplot(
data = NULL,
ggplot2::aes(x = plot.data$combination, y = plot.data$SM)
) +
ggplot2::geom_violin(trim = TRUE, fill = "lightblue") +
ggplot2::geom_boxplot(width = 0.2, ggplot2::aes(fill = plot.data$combination)) +
ggplot2::geom_jitter(
shape = 16,
position = ggplot2::position_jitter(0.01)
) +
ggplot2::ylab(xlabel) +
ggplot2::xlab("") +
ggplot2::ylim(
min(select_comp) - 0.5,
max(select_comp) + 0.5
) +
ggplot2::theme(legend.position = "none")
} else {
p <- ggplot2::ggplot(
data = NULL,
ggplot2::aes(
x = plot.data$SM,
fill = plot.data$combination
)
) +
ggplot2::geom_density(alpha = 0.7, color = NA) +
ggplot2::xlab(xlabel) +
ggplot2::ylab("Density") +
ggplot2::scale_fill_discrete(labels = lab) +
ggplot2::theme_classic() +
ggplot2::labs(fill = "") +
ggplot2::theme(legend.position = "bottom")
}
if (model$sm %in% c("OR", "RR") & z_value == FALSE) {
p <- p + ggplot2::scale_x_log10()
}
p
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/denscomp.R
|
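# Internal helper: given the 0/1 component matrix M, returns the pairs of rows
# (interventions) that differ by exactly one component or, when a combination is
# supplied, the pairs that differ only by that specific component (or component
# combination), with duplicate pairs removed.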
differ.by.one <- function(M, combination = NULL, nodes_elements = NULL) {
pos1 <- pos2 <- NULL
k <- 1
if (is.null(combination)) {
# Differ by one component
for (i in 1:dim(M)[1]) {
for (j in 1:dim(M)[1]) {
if (sum(abs(M[i, ] - M[j, ])) == 1) {
pos1[k] <- i
pos2[k] <- j
k <- k + 1
}
}
}
} else {
if (length(combination) == 1) {
# Differ by one specific component
for (i in 1:dim(M)[1]) {
for (j in 1:dim(M)[1]) {
if (sum(abs(M[i, ] - M[j, ])) == 1 & abs(M[i, combination] - M[j, combination]) == 1) {
pos1[k] <- i
pos2[k] <- j
k <- k + 1
}
}
}
} else {
# Differ by one specific component combination
pairs <- differ.combination(nodes_elements, combination)
pos1 <- pairs[, 1]
pos2 <- pairs[, 2]
}
}
if (is.null(pos1) | is.null(pos2)) {
if (is.null(combination)) {
stop("Comparisons that differ by one component were not identified", call. = FALSE)
} else {
stop("Comparisons that differ by the component ", paste(combination, collapse = ", "), " were not identified", call. = FALSE)
}
}
pos <- cbind(pos1, pos2)
# Exclude duplicate cases
pos <- as.data.frame(matrix(pos[!duplicated(t(apply(pos, 1, sort))), ], ncol = 2))
names(pos) <- c("pos1", "pos2")
pos
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/differ.by.one.R
|
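# Internal helper: identifies pairs of nodes in which one node contains the full
# component combination, the other contains none of its components, and the
# remaining components of the two nodes are identical.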
differ.combination <- function(nodes_elements, combination_components) {
pair <- NULL
for (i in 1:length(nodes_elements)) {
elements_i <- nodes_elements[[i]]
differ <- sapply(nodes_elements, FUN = function(x) {
A <- sum(combination_components %in% elements_i) == length(combination_components) # element i includes the combination
A_none <- sum(combination_components %in% elements_i) == 0
# x includes the combination
B <- sum(combination_components %in% x) == length(combination_components) # element i includes the combination
B_none <- sum(combination_components %in% x) == 0
# components of element of i except the combination
if (A) {
A_rest <- elements_i[-which(elements_i %in% combination_components)]
} else if (A_none) {
A_rest <- elements_i
}
if (B) {
B_rest <- x[-which(x %in% combination_components)]
} else if (B_none) {
B_rest <- x
}
# same rest components
if (((A & B_none) | (B & A_none))) {
if (length(A_rest) == length(B_rest) & length(A_rest) != 0) {
C <- sum(A_rest[order(A_rest)] == B_rest[order(B_rest)]) == length(A_rest)
} else {
C <- FALSE
}
} else {
C <- FALSE
}
C
})
if (sum(differ) > 0) {
if (sum(differ) == 1) {
pair <- c(pair, i, which(differ == TRUE))
} else {
differ_pos <- which(differ == TRUE)
for (j in 1:length(differ_pos)) {
pair <- c(pair, i, differ_pos[j])
}
}
}
}
if (!is.null(pair)) {
pair <- matrix(pair, ncol = 2, byrow = TRUE)
colnames(pair) <- c("pos1", "pos2")
} else {
stop("Nodes that differs by this combination was not found", call. = FALSE)
}
pair
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/differ.combination.R
|
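# Internal helper: initializes the circular layout of the rank heat plot, creating
# one sector per component plus an "Outcomes" sector, writing the sector labels,
# adding one track per outcome, and returning a text size (cex) scaled to the
# number of outcomes.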
drawDonughts <- function(outcomeNames, components, cex_components = NULL) {
if (!is.null(cex_components)) {
cex_c <- cex_components
} else {
cex_c <- 0.65
}
circlize::circos.clear()
no <- length(outcomeNames)
addComponent <- c("Outcomes", components)
circlize::circos.par(points.overflow.warning = FALSE, track.margin = c(0, 0), start.degree = 100)
circlize::circos.initialize(factors = addComponent, xlim = c(0, 10))
circlize::circos.trackPlotRegion(addComponent, ylim = c(0, 100), bg.border = NA, track.height = 0.05, panel.fun = function(x, y) {
circlize::circos.text(5, 100, facing = "bending", cex = cex_c, circlize::get.cell.meta.data("sector.index"))
})
if (no == 1 | no == 2) {
trHigh <- 0.3
cex <- 0.5
} else if (no == 3) {
trHigh <- 0.17
cex <- 0.5
} else if (no == 4) {
trHigh <- 0.13
cex <- 0.4
} else if (no == 5) {
trHigh <- 0.09
cex <- 0.4
} else if (no >= 6) {
trHigh <- 0.05
cex <- 0.4
}
for (i in 1:no) {
circlize::circos.trackPlotRegion(addComponent, ylim = c(0, 100), bg.border = NA, track.height = trHigh)
}
cex
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/drawDonughts.R
|
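# Internal helper: expands each node name in data$Node into 0/1 dummy columns, one
# per element of `components`, by splitting the name on the separator. Illustrative
# (hypothetical input): Node "A+B" with components c("A", "B", "C") gives A = 1,
# B = 1, C = 0.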
dummies <- function(data, components, sep) {
# Create dummy variables for each component
for (i in 1:length(components)) {
data[, components[i]] <- 0
}
# Write each node as a combination of components
for (i in 1:dim(data)[1]) {
# Components of each intervention
dummy_comp_i <- strsplit(data$Node[i], split = paste0("[", sep, "]"), perl = TRUE)[[1]]
for (j in 1:length(dummy_comp_i)) {
if (dummy_comp_i[j] %in% components) {
data[i, dummy_comp_i[j]] <- 1
}
}
}
data
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/dummies.R
|
generateIntervals <- function(data) {
intervals <- c(0, 3.1, 6.2, 9.3, 12.4, 15.5, 18.6, 21.7, 24.8, 27.9, 31, 34.1, 37.2, 40.3, 43.4, 46.5, 49.6, 52.7, 55.8, 58.9, 62, 65.1, 68.2, 71.3, 74.4, 77.5, 80.6, 83.7, 86.8, 89.9, 93, 96.1, 100)
mycolors <- c("#e00000", "#ef0000", "#FF0000", "#FF3000", "#FF4000", "#FF5000", "#FF6000", "#FF7000", "#FF8000", "#FF9000", "#FFA000", "#FFB000", "#FFC000", "#FFD000", "#FFE000", "#FFF000", "#FFFF00", "#F0FF00", "#d0ff00", "#C0FF00", "#B0FF00", "#A0FF00", "#90FF00", "#70FF00", "#50FF00", "#0ffc00", "#0ff200", "#0ee800", "#0ee000", "#0ed800", "#0ecc00", "#0dc100")
gen_intervals <- list(intervals = intervals, mycolors = mycolors)
gen_intervals
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/generateIntervals.R
|
#' Components Heat Plot
#'
#' @description
#' The function creates a heat plot based on the two-by-two component combinations, obtained from the
#' network meta-analysis (NMA) model.
#'
#' @details
#' Diagonal elements refer to components, while off-diagonal to components' combinations. Each element summarizes by default
#' the NMA relative effects (\code{z_value = FALSE}) of the interventions that include the corresponding
#' component combination. Combinations that were not observed in the NMA model are denoted by the letter "X".
#' Frequency of component combinations observed in the NMA is printed by default (\code{freq = TRUE}). As a summary measure,
#' the median is used by default (\code{median = TRUE}). The magnitude of each relative effect is reflected by the color's intensity.
#' Estimates close to zero are denoted by white color and indicate a small magnitude of the corresponding component combination, while
#' deep green and red colors indicate a large magnitude of the corresponding component combination.
#' The outcome's nature (beneficial or harmful) is defined in the \code{netmeta} model.
#'
#' The function can also be adjusted to include z-scores by setting the argument \code{z_value = TRUE}.
#' Z-scores quantify the strength of statistical evidence. Thus, dark green (or red) indicates strong statistical evidence that
#' the corresponding component (or combination of components) performs better (or worse) than the reference intervention.
#'
#' @note
#' In the case where the NMA relative effects are used, the uncertainty of the NMA estimates is reflected by the size of the grey boxes.
#' The bigger the box, the more precise the estimate.
#'
#' By setting \code{median = FALSE}, the mean is used instead of the median as a summary measure.
#'
#' The function can be applied only in network meta-analysis models that contain multi-component interventions.
#'
#'
#' @param model An object of class \code{\link[netmeta]{netmeta}}.
#' @param sep A single character that defines the separator between interventions components.
#' @param median \code{logical}. If \code{TRUE} the median is used instead of the mean as a summary measure.
#' @param random \code{logical}. If \code{TRUE} the random-effects NMA model is used instead of the fixed-effect NMA model.
#' @param z_value \code{logical}. If \code{TRUE} z-values are used instead of interventions effects.
#' @param freq \code{logical}. If \code{TRUE} the frequencies of the component combinations are printed.
#' @param legend_name A single character that specifies the title of the legend.
#'
#' @import netmeta
#' @importFrom reshape2 melt
#' @importFrom MASS mvrnorm
#' @importFrom stats median quantile sd
#' @importFrom ggplot2 ggplot aes geom_tile geom_text scale_fill_gradient2 theme_minimal theme theme element_blank element_text
#' @importFrom ggnewscale new_scale_fill
#'
#' @return An object of class \code{ggplot}.
#' @export
#'
#' @examples
#' data(nmaMACE)
#' heatcomp(model = nmaMACE)
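#' # Illustrative: summarize z-scores instead of relative effects
#' heatcomp(model = nmaMACE, z_value = TRUE)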
#'
heatcomp <-
function(model, sep = "+", median = TRUE, random = TRUE, z_value = FALSE, freq = TRUE, legend_name = NULL) {
##
# Check arguments
##
if (inherits(model, "netmeta") == FALSE) {
stop("The class of model is not of netmeta", call. = FALSE)
} else if (model$reference.group == "") {
stop("The netmeta model must have a reference group", call. = FALSE)
} else if (inherits(sep, "character") == FALSE) {
stop("The class of sep is not character", call. = FALSE)
} else if (length(sep) > 1) {
stop("The length of sep must be one", call. = FALSE)
} else if (sep == "") {
stop("Argument sep must be diffent than ''", call. = FALSE)
} else if (inherits(median, "logical") == FALSE) {
stop("The class of median is not logical", call. = FALSE)
} else if (length(median) > 1) {
stop("The length of median must be one", call. = FALSE)
} else if (inherits(random, "logical") == FALSE) {
stop("The class of random is not logical", call. = FALSE)
} else if (length(random) > 1) {
stop("The length of random must be one", call. = FALSE)
} else if (inherits(z_value, "logical") == FALSE) {
stop("The class of z_value is not logical", call. = FALSE)
} else if (length(z_value) > 1) {
stop("The length of z_value must be one", call. = FALSE)
} else if (inherits(freq, "logical") == FALSE) {
stop("The class of freq is not logical", call. = FALSE)
} else if (length(freq) > 1) {
stop("The length of freq must be one", call. = FALSE)
} else if (!is.null(legend_name)) {
if (!is.vector(legend_name)) {
stop("legend_name should be a vector of length one", call. = FALSE)
} else if (length(legend_name) != 1) {
stop("legend_name should be a vector of length one", call. = FALSE)
}
}
##
# NMA data
##
summodel <- summary(model) # summary of NMA model
ref <- model$reference.group # reference group
# Set NMA type and measurement unit
ifelse(random, type <- "random", type <- "fixed")
ifelse(z_value, o <- "statistic", o <- "TE")
# Mean effects vs reference category
mean_eff <- summodel[[type]][[o]][-which(colnames(summodel[[type]][[o]]) == ref), ref]
names(mean_eff) <- gsub(" ", "", labels(mean_eff))
# Find all components of the data. Components must be separated by " + "
nodes <- unique(c(model$treat1, model$treat2))
nodes <- nodes[-which(nodes == ref)]
nodes <- gsub(" ", "", nodes)
# Components of the network
components <- strsplit(nodes, split = paste("[", sep, "]", sep = ""), perl = TRUE)
if (sum(sapply(components, FUN = function(x) {
length(x) > 1
})) == 0) {
stop("No additive treatments are included in the NMA model", call. = FALSE)
} else {
components <- sort(unique(unlist(components)))
}
if (model$sm %in% c("OR", "RR") & z_value == FALSE) {
exponen <- TRUE
} else {
exponen <- FALSE
}
Heatdata <- phd(components, mean_eff, median, sep, freq, exponen)
melted_data <- Heatdata$data
txt <- Heatdata$text
# Data are in upper triangle form. So exclude NAs in order to not get warnings in the ggplot
keep <- which(is.nan(melted_data$value) | !is.na(melted_data$value))
melted_data <- melted_data[keep, ]
txt <- txt[which(!is.na(txt))]
# calculate se.TE for each combination
if (z_value == FALSE) {
se.TE <- summodel[[type]][["seTE"]][-which(colnames(summodel[[type]][["seTE"]]) == ref), ref]
names(se.TE) <- gsub(" ", "", labels(se.TE))
weights <- phd(components, 1 / se.TE, median, sep, freq, exponen = FALSE)$data
weights <- weights[keep, ]
}
##
# Heat Plot
##
# Set legend title
if (is.null(legend_name)) {
if (z_value == TRUE) {
if (median == TRUE) {
titleg <- "Median Z-value"
} else {
titleg <- "Mean Z-value"
}
} else {
if (median == TRUE) {
titleg <- "Median TE"
} else {
titleg <- "Mean TE"
}
}
} else {
titleg <- as.character(legend_name)
}
small_val <- model$small.values
if (small_val == "bad") { # beneficial
low_col <- "red"
high_col <- "green"
} else {
low_col <- "green"
high_col <- "red"
}
if (model$sm %in% c("OR", "RR") & z_value == FALSE) {
mid <- 1
lim <- c(0, max(melted_data$value, na.rm = TRUE))
} else {
mid <- 0
lim <- c(min(melted_data$value, na.rm = TRUE), max(melted_data$value, na.rm = TRUE))
}
p <- ggplot2::ggplot(data = NULL, ggplot2::aes(x = melted_data$Var1, y = melted_data$Var2, fill = melted_data$value)) +
ggplot2::geom_tile(color = "white") +
ggplot2::geom_text(ggplot2::aes(label = txt)) +
ggplot2::labs(x = "", y = "") +
ggplot2::scale_fill_gradient2(
low = low_col, high = high_col, mid = "white",
midpoint = mid, na.value = "white",
limit = lim,
space = "Lab",
name = titleg
) +
ggplot2::theme_minimal() +
ggplot2::theme(
panel.grid.major = ggplot2::element_blank(),
axis.text = ggplot2::element_text(size = 12, face = "bold", color = "black")
)
if (z_value == FALSE) {
weights$size <- (weights$value - 0.1) / (10 - 0.1) # scale 0.1 - 10
over1 <- which(weights$size >= 1)
if (length(over1) > 0) {
weights$size[over1] <- 0.98
}
zeros <- which(round(weights$size, 1) == 0)
if (length(zeros) > 0) {
weights$size[zeros] <- 0.1
}
w_Nas <- which(is.na(weights$size) | is.nan(weights$size))
if (length(w_Nas) > 0) {
weights[w_Nas, c("value", "size")] <- 0
}
p <- p + ggnewscale::new_scale_fill() +
ggplot2::geom_tile(
data = NULL, ggplot2::aes(x = weights$Var1, y = weights$Var2, height = weights$size, width = weights$size),
alpha = 0.1
)
}
p
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/heatcomp.R
|
#' Leaving One Component Combination Out Scatter plot
#'
#' @description
#' The function based on the network meta-analysis (NMA) estimates explores if a set of components has a
#' positive or a negative impact on the outcome, by creating a scatter plot based on the set of
#' interventions that differ by a specific set of components.
#'
#' @details
#' Axis y represents the intervention's effect when the component combination is not included in the
#' intervention, while axis x represents the intervention's effect when it is included.
#' The line \eqn{y = x} splits the plot in two parts. For a beneficial outcome, dots above the line
#' indicate that the inclusion of the component combination hampers the intervention's efficacy, while
#' dots below the line indicate that the inclusion of the component combination increases the intervention's efficacy.
#' The opposite holds for harmful outcomes.
#'
#' The component combination of interest is specified by the argument \code{combination}. For example, if \code{combination = "A"}, the
#' function plots all the interventions that differ by the component \code{"A"}. If \code{combination = NULL}, all interventions
#' that differ by one component are plotted.
#'
#' The function by default uses the NMA relative effects estimates, but it can be adjusted to use the z-values by setting the argument \code{z_value = TRUE}.
#' Histograms for the nodes that include and not include the component combination can be added to the scatter plot,
#' by setting the argument \code{histogram = TRUE}.
#'
#' @note
#' In the case of dichotomous outcomes, the log-scale is used for both axis. Also, the function can be applied
#' only in network meta-analysis models that contain multi-component interventions.
#'
#'
#' @param model An object of class \code{\link[netmeta]{netmeta}}.
#' @param sep A single character that defines the separator between interventions components.
#' @param combination A single character that specifies the component combination of interest.
#' @param random \code{logical}. If \code{TRUE} the random-effects NMA model is used instead of the fixed-effect NMA model.
#' @param z_value \code{logical}. If \code{TRUE} z-values are used instead of interventions effects.
#' @param histogram \code{logical}. If \code{TRUE} histograms are added to the plot.
#' @param histogram.color A single character that specifies the color of the histogram. See \code{\link[ggExtra]{ggMarginal}} for more details.
#'
#' @importFrom ggplot2 ggplot aes geom_hline labs `%+%` geom_point geom_line theme_minimal scale_y_log10 scale_x_log10 theme
#' @importFrom ggExtra ggMarginal
#'
#' @return An object of class \code{\link[ggplot2]{ggplot}}.
#' @export
#'
#' @examples
#' data(nmaMACE)
#' loccos(model = nmaMACE, combination = c("B"))
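#' # Illustrative: with the default combination = NULL all interventions that
#' # differ by a single component are plotted
#' loccos(model = nmaMACE, combination = NULL, histogram = FALSE)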
#'
loccos <- function(model, sep = "+", combination = NULL, random = TRUE, z_value = FALSE, histogram = TRUE, histogram.color = "blue") {
##
# Check arguments
##
if (inherits(model, "netmeta") == FALSE) {
stop("The class of model is not of netmeta", call. = FALSE)
} else if (model$reference.group == "") {
stop("The netmeta model must have a reference group", call. = FALSE)
} else if (inherits(sep, "character") == FALSE) {
stop("The class of sep is not character", call. = FALSE)
} else if (length(sep) > 1) {
stop("The length of sep must be one", call. = FALSE)
} else if (sep == "") {
stop("Argument sep must be diffent than ''", call. = FALSE)
} else if (!is.null(combination) & inherits(combination, "character") == FALSE) {
stop("The class of combination is not character", call. = FALSE)
} else if (!is.null(combination) & length(combination) > 1) {
stop("The length of combination must be one", call. = FALSE)
} else if (inherits(random, "logical") == FALSE) {
stop("The class of random is not logical", call. = FALSE)
} else if (length(random) > 1) {
stop("The length of random must be one", call. = FALSE)
} else if (inherits(z_value, "logical") == FALSE) {
stop("The class of z_value is not logical", call. = FALSE)
} else if (length(z_value) > 1) {
stop("The length of z_value must be one", call. = FALSE)
} else if (inherits(histogram, "logical") == FALSE) {
stop("The class of histogram is not logical", call. = FALSE)
} else if (length(histogram) > 1) {
stop("The length of histogram must be one", call. = FALSE)
} else if (inherits(histogram.color, c("character", "numeric")) == FALSE) {
stop("The class of histogram.color must be character or numeric", call. = FALSE)
} else if (length(histogram.color) != 1) {
stop("The length of histogram.color must be one", call. = FALSE)
}
##
# NMA estimates and characteristics
##
# Get NMA z-scores
z_nma <- nmares(model, random)
z_nma$Node <- row.names(z_nma) <- gsub(" ", "", z_nma$Node)
if (z_value) {
z_nma <- z_nma[, c("Node", "z_stat")] # z_values
} else {
z_nma <- z_nma[, c("Node", "TE")] # TE estimates
}
# Find the components of the network
comp_network <- strsplit(z_nma$Node, split = paste("[", sep, "]", sep = ""), perl = TRUE)
if (sum(sapply(comp_network, FUN = function(x) {
length(x) > 1
})) == 0) {
stop("No additive treatments are included in the NMA model", call. = FALSE)
} else {
comp_network <- unique(unlist(comp_network))
}
##
# Writing nodes as a combination of component's dummy variables
##
dummy <- dummies(z_nma, comp_network, sep)
dummy <- dummy[, -c(1, 2)]
# Check if the combination exist
if (!is.null(combination)) {
combination <- gsub(" ", "", combination)
check.combinations(combination, comp_network, sep)
combination_components <- strsplit(combination, split = paste("[", sep, "]", sep = ""), perl = TRUE)[[1]]
} else {
combination_components <- NULL
}
# Number of components for each node
z_nma$n_comp <- apply(dummy, 1, sum)
##
# Find the set of nodes that differ by one component
##
if (length(combination_components) > 1) {
nodes_elements <- strsplit(z_nma$Node, split = paste("[", sep, "]", sep = ""), perl = TRUE)
} else {
nodes_elements <- NULL
}
pos <- differ.by.one(M = dummy, combination = combination_components, nodes_elements = nodes_elements)
##
# Construct plot data
##
data <- as.data.frame(cbind(z_nma$Node[pos$pos1], z_nma$Node[pos$pos2]))
# Add Node's z scores
data <- merge(data, z_nma, by.x = c("V1"), by.y = c("Node"), all.x = TRUE)
colnames(data)[c(3, 4)] <- c("z_V1", "n_comp_V1")
data <- merge(data, z_nma, by.x = c("V2"), by.y = c("Node"), all.x = TRUE)
colnames(data)[c(5, 6)] <- c("z_V2", "n_comp_V2")
data_plot <- data
t <- which(!data$n_comp_V1 > data$n_comp_V2)
if (length(t) > 0) {
data_plot[t, "V1"] <- data$V2[t]
data_plot[t, "z_V1"] <- data$z_V2[t]
data_plot[t, "n_comp_V1"] <- data$n_comp_V2[t]
data_plot[t, "V2"] <- data$V1[t]
data_plot[t, "z_V2"] <- data$z_V1[t]
data_plot[t, "n_comp_V2"] <- data$n_comp_V1[t]
}
# Network comparisons
data_plot$comparison <- paste(data_plot$V1, data_plot$V2, sep = "vs")
if (sum(is.nan(data_plot$z_V1)) > 0 | sum(is.nan(data_plot$z_V2)) > 0) {
not_a_number <- c(which(is.nan(data_plot$z_V1) == TRUE), which(is.nan(data_plot$z_V2) == TRUE))
excluded <- paste(data_plot$V2[not_a_number], data_plot$V1[not_a_number], sep = " and ", collapse = " , ")
if (length(not_a_number) > 1) {
dot <- "Dots"
was_were <- "were"
} else {
dot <- "Dot"
was_were <- "was"
}
ref <- model$reference.group # reference treatment
if (dim(data_plot)[1] - length(not_a_number) == 0) {
stop(paste(dot, "between", excluded, was_were, "excluded since the z-value for the", ref, "vs", ref, "is NaN"), call. = FALSE)
} else {
warning(paste(dot, "between", excluded, was_were, "excluded since the z-value for the", ref, "vs", ref, "is NaN"), call. = FALSE)
data_plot <- data_plot[-not_a_number, ]
}
}
##
# Plot
##
# Limits of the plot
lim <- data.frame(
y = c(max(data_plot$z_V1, data_plot$z_V2), min(data_plot$z_V1, data_plot$z_V2)),
x = c(max(data_plot$z_V1, data_plot$z_V2), min(data_plot$z_V1, data_plot$z_V2))
)
if (model$sm %in% c("OR", "RR") & z_value == FALSE) {
lim <- exp(lim)
data_plot$z_V1 <- exp(data_plot$z_V1)
data_plot$z_V2 <- exp(data_plot$z_V2)
}
p <- ggplot2::ggplot(
data = NULL
) +
ggplot2::geom_point(ggplot2::aes(
x = data_plot$z_V1,
y = data_plot$z_V2
)) +
ggplot2::geom_line(ggplot2::aes(
y = lim$y,
x = lim$x
)) +
ggplot2::labs(
x = paste("Nodes including", combination),
y = paste("Nodes not including", combination)
) +
ggplot2::theme(aspect.ratio = 1)
if (model$sm %in% c("OR", "RR") & z_value == FALSE) {
p <- p + ggplot2::scale_y_log10() + ggplot2::scale_x_log10()
}
if (histogram) {
p <- ggExtra::ggMarginal(p, type = "histogram", fill = histogram.color)
}
p
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/loccos.R
|
#' @title Network Meta-Analysis of Major Adverse Cardiovascular Event
#'
#' @description An artificial network meta-analysis (of class \code{\link[netmeta]{netmeta}}) comparing the effectiveness of a number of interventions for major
#' adverse cardiovascular events.
#'
#'
"nmaMACE"
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/nmaMACE.R
|
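# Internal helper: extracts the NMA results versus the reference group (TE, seTE,
# confidence bounds and z-statistic) from the netmeta summary, for the random- or
# fixed-effects model, ordered by decreasing absolute treatment effect.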
nmares <- function(model, random) {
ref <- as.character(model$reference.group) # Reference category
summ_NMA <- summary(model) # Summary of NMA model
# Set NMA type
ifelse(random, type <- "random", type <- "fixed")
# data.frame with the NMA results
results <- data.frame(
"Node" = as.character(rownames(summ_NMA[[type]][["TE"]])),
"TE" = summ_NMA[[type]][["TE"]][, ref],
"seTE" = summ_NMA[[type]][["seTE"]][, ref],
"lb" = summ_NMA[[type]][["lower"]][, ref],
"ub" = summ_NMA[[type]][["upper"]][, ref],
"z_stat" = summ_NMA[[type]][["statistic"]][, ref]
)
# Order based on the absolute treatment effect
results <- results[order(abs(results$TE), decreasing = TRUE), ]
results
}
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/nmares.R
|
# Prepare Heat Data
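# Internal helper: for every pair of components it pools (median or mean) the effects
# of the interventions that contain both components and counts how often the pair is
# observed; the lower-triangle results are melted and returned together with the cell
# labels, where "X" marks combinations that were not observed.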
phd <- function(components, mean_eff, median, sep, freq, exponen) {
# Dataframes to store estimates
df <- freq_tb <- data.frame(matrix(nrow = length(components), ncol = length(components)))
colnames(df) <- rownames(df) <- components
dat <- matrix(mean_eff, nrow = 1)
# Components of each node
nodes <- labels(mean_eff)
components_node <- strsplit(nodes, split = paste("[", sep, "]", sep = ""), perl = TRUE)
# Set measurement unit
ifelse(median, funct <- "stats::median", funct <- "mean")
for (i in 1:dim(df)[1]) {
for (j in 1:dim(df)[1]) {
# Find the nodes that include components i and j
ind <- unlist(lapply(components_node, FUN = function(x) {
sum(c(colnames(df)[i], colnames(df)[j]) %in% x) == 2
}))
# Calculate the mean estimate
if (length(as.vector(dat[, ind])) == 0) { # The comparison was not observed
df[i, j] <- NaN
freq_tb[i, j] <- NaN
} else { # The comparison was observed
df[i, j] <- apply(matrix(dat[, ind], ncol = 1), 2, FUN = eval(parse(text = funct)))
freq_tb[i, j] <- length(matrix(dat[, ind], ncol = 1))
}
}
}
# Upper triangle form
df[upper.tri(df)] <- NA
melted_data <- reshape2::melt(as.matrix(df))
freq_tb[upper.tri(freq_tb)] <- NA
melted_freq <- reshape2::melt(as.matrix(freq_tb))
if (exponen == TRUE) {
melted_data$value <- exp(melted_data$value)
}
# Text to be printed
if (freq) {
txt <- paste(
paste(format(round(melted_data$value, 2), nsmall = 2)), "\n",
paste0("(", melted_freq$value, ")")
)
} else {
txt <- paste(paste(format(round(melted_data$value, 2), nsmall = 2)))
}
# For the non observed comparisons print "X"
txt[is.na(round(melted_data$value))] <- NA
txt[is.nan(melted_data$value)] <- "X"
# Rename the columns of the melted dataset
colnames(melted_data) <- c("Var1", "Var2", "value")
  list("data" = melted_data, "text" = txt)
}
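# Usage sketch (illustrative, not run): summarize the two-by-two component
# combinations from a named vector of NMA effects versus the reference.
# The effect values below are made up purely for illustration.
# eff <- c("A" = 0.20, "B" = -0.10, "A+B" = 0.40)
# phd(components = c("A", "B"), mean_eff = eff, median = TRUE,
#     sep = "+", freq = TRUE, exponen = FALSE)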
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/phd.R
|
#' Components Rank Heat Plot
#'
#' @description
#' Rank heat plot summarizes the components' p-scores for multiple outcomes.
#'
#' @details
#' The function creates a rank heat plot, where the number of circles depends on the number of outcomes.
#' Each circle is divided into as many sectors as the total number of components, and each sector is colored according to
#' the corresponding component's p-score. Components' p-scores are summarized by using either the median (\code{median = TRUE})
#' or the mean (\code{median = FALSE}) of the p-scores obtained from the interventions that include the corresponding component.
#' The sectors' colors reflect the magnitude of the components' p-scores. Red indicates a low p-score (close to zero),
#' while green indicates values close to 1. Interventions' p-scores are obtained from the network meta-analysis (NMA) model.
#' By default the random-effects NMA model is used for each outcome (\code{random = TRUE}).
#'
#' @note
#' The function can be applied only to network meta-analysis models that contain multi-component interventions.
#'
#' @param model A list of \code{\link[netmeta]{netmeta}} models.
#' @param sep A single character that defines the separator between interventions components.
#' @param median \code{logical}. If \code{TRUE} the median is used as a summary measure instead of the mean.
#' @param random A \code{logical} vector that specifies the NMA model for each outcome. If \code{TRUE} the random-effects NMA model is used instead of the fixed-effects NMA model.
#' @param outcomeNames A character vector that specifies the names of the outcomes.
#' @param cex_components Font size of components' names.
#' @param cex_values Font size of p-scores.
#' @param cex_outcomes Font size of outcomes' names.
#'
#' @importFrom circlize circos.clear circos.par circos.initialize circos.trackPlotRegion circos.text get.cell.meta.data circos.trackPlotRegion draw.sector
#'
#' @return Returns (invisibly) a rank heat plot.
#' @export
#'
#' @examples
#' \donttest{
#' # Artificial data set
#'
#' t1 <- c("A", "B", "C", "A+B", "A+C", "B+C", "A")
#' t2 <- c("C", "A", "A+C", "B+C", "A", "B", "B+C")
#'
#' TE1 <- c(2.12, 3.24, 5.65, -0.60, 0.13, 0.66, 3.28)
#' TE2 <- c(4.69, 2.67, 2.73, -3.41, 1.79, 2.93, 2.51)
#'
#' seTE1 <- rep(0.1, 7)
#' seTE2 <- rep(0.2, 7)
#'
#' study <- paste0("study_", 1:7)
#'
#' data1 <- data.frame(
#' "TE" = TE1, "seTE" = seTE1, "treat1" = t1, "treat2" = t2, "studlab" = study,
#' stringsAsFactors = FALSE
#' )
#'
#' data2 <- data.frame(
#' "TE" = TE2, "seTE" = seTE2, "treat1" = t1, "treat2" = t2, "studlab" = study,
#' stringsAsFactors = FALSE
#' )
#'
#' # Network meta-analysis models
#'
#' net1 <- netmeta::netmeta(
#' TE = TE, seTE = seTE, studlab = studlab, treat1 = treat1,
#' treat2 = treat2, data = data1, ref = "A"
#' )
#'
#' net2 <- netmeta::netmeta(
#' TE = TE, seTE = seTE, studlab = studlab, treat1 = treat1,
#' treat2 = treat2, data = data2, ref = "A"
#' )
#'
#' # Rank heat plot
#'
#' rankheatplot(model = list(net1, net2))
#' }
#'
rankheatplot <- function(model, sep = "+", median = TRUE, random = TRUE, outcomeNames = NULL,
cex_components = NULL, cex_values = NULL, cex_outcomes = NULL) {
##
# Check arguments
##
numOfOutcomes <- length(model)
if (is.null(outcomeNames)) {
outcomeNames <- paste0("Outcome ", 1:numOfOutcomes)
}
random <- check.arguments(model, median, random, outcomeNames, cex_components, cex_values, cex_outcomes)
##
# Build all components of the network
##
small.values <- NULL
for (i in 1:numOfOutcomes) {
small.values <- c(small.values, model[[i]]$small.values)
}
listcomponents <- list()
stop_fun <- NULL
for (outcome in 1:numOfOutcomes) {
# Find all components of the data
nodes <- unique(c(model[[outcome]]$treat1, model[[outcome]]$treat2))
nodes <- gsub(" ", "", nodes)
# Components of the network
uniquecomponents <- strsplit(nodes, split = paste("[", sep, "]", sep = ""), perl = TRUE)
comp_find <- sum(sapply(uniquecomponents, FUN = function(x) {
length(x) > 1
}))
uniquecomponents <- unique(unlist(uniquecomponents))
# save components in list
listcomponents[outcome] <- list(uniquecomponents)
stop_fun <- c(stop_fun, comp_find)
}
components <- unique(do.call(c, listcomponents))
if (sum(stop_fun == 0) > 0) {
stop("No additive treatments are included in the NMA model", call. = FALSE)
}
results <- build.data(model, median, random, small.values, numOfOutcomes, components, outcomeNames, sep)
##
  # Draw the doughnuts of the rank heat plot
##
oldpar <- graphics::par(no.readonly = TRUE)
on.exit(graphics::par(oldpar))
cex <- drawDonughts(outcomeNames, results$components, cex_components)
##
# Generate intervals
##
gen_intervals <- generateIntervals(results$df)
##
  # Add the background colors of the sectors
##
addColorBackground(outcomeNames,
components = results$components, data = results$df,
gen_intervals$intervals, gen_intervals$mycolors,
cex = cex, cex_values, cex_outcomes
)
}
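# Usage sketch (illustrative, not run): label the outcomes and mix NMA models,
# e.g. random effects for the first outcome and fixed effect for the second.
# net1 and net2 are the artificial netmeta models from the examples above.
# rankheatplot(list(net1, net2),
#              outcomeNames = c("Outcome 1", "Outcome 2"),
#              random = c(TRUE, FALSE))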
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/rankheatplot.R
|
#' Specific Component Combination violin plots
#'
#' @description
#' Based on the network meta-analysis (NMA) estimates, the function produces violin plots from the interventions that include the
#' component combinations of interest.
#'
#' @details
#' By default the function creates a violin for each component of the network (\code{combination = NULL}). Each violin visualizes the
#' distribution of the effect estimates, obtained from the interventions that include the corresponding component.
#' Combinations of interest are specified from the argument \code{combination}. For example, if \code{combination = c("A", "A + B")},
#' two violin plots are produced. The first one is based on the interventions that contain the component "A", and the second one, based
#' on the interventions that contain both components A and B.
#'
#' By setting the argument \code{components_number = TRUE}, the behavior of the intervention effects as the number of components
#' increases is explored, by producing violins based on the number of components included in the interventions. If the number of
#' components included in an intervention ranges between 1 and 3, then 3 violins will be produced in total. The violins will be based on
#' the interventions that include one component, two components, and three components, respectively.
#' The number of components could also be categorized into groups by the argument \code{groups}. For
#' example, if \code{components_number = TRUE} and \code{groups = c("1-3", 4, "5+")}, 3 violins will be created: one for the
#' interventions that contain 1 to 3 components, one for the interventions that contain 4 components, and one for those
#' that contain 5 or more components.
#'
#' The function by default uses the NMA relative effects, but it could be adjusted to use the interventions' z-scores by setting \code{z_value = TRUE}.
#' When the NMA relative effects are used, the size of the dots reflects the precision of the estimates. Larger dots indicate
#' more precise NMA estimates.
#'
#' @note
#' In the case of dichotomous outcomes, the log-scale is used on the y-axis. Also, the function can be applied
#' only to network meta-analysis models that contain multi-component interventions.
#'
#'
#' @param model An object of class \code{\link[netmeta]{netmeta}}.
#' @param sep A single character that defines the separator between interventions components.
#' @param combination A character vector that specifies the component combinations of interest.
#' @param components_number \code{logical}. If \code{TRUE} the violins are created based on the number of components included in the interventions.
#' @param groups A character vector that contains the clusters of the number of components. Elements of the vector must be integer numbers (e.g. 5 or "5"), or range values (e.g. "3-4" ), or in the "xx+" format (e.g "5+").
#' @param random \code{logical}. If \code{TRUE} the random-effects NMA model is used instead of the fixed-effect NMA model.
#' @param z_value \code{logical}. If \code{TRUE} z-values are used instead of interventions effects.
#' @param prop_size \code{logical}. If \code{TRUE} in the case where \code{z_value == FALSE}, the size of the dots is proportional to the precision of the estimates.
#' @param fill_violin fill color of the violin. See \code{\link[ggplot2]{geom_violin}} for more details.
#' @param color_violin color of the violin. See \code{\link[ggplot2]{geom_violin}} for more details.
#' @param adj_violin adjustment of the violin. See \code{\link[ggplot2]{geom_violin}} for more details.
#' @param width_violin width of the violin. See \code{\link[ggplot2]{geom_violin}} for more details.
#' @param boxplot \code{logical}. If \code{TRUE} boxplots are plotted.
#' @param width_boxplot width of the boxplot. See \code{\link[ggplot2]{geom_boxplot}} for more details.
#' @param errorbar_type boxplot's line type. See \code{\link[ggplot2]{stat_boxplot}} for more details.
#' @param dots \code{logical}. If \code{TRUE} data points are plotted.
#' @param jitter_shape jitter shape. See \code{\link[ggplot2]{geom_jitter}} for more details.
#' @param jitter_position jitter position. See \code{\link[ggplot2]{geom_jitter}} for more details.
#' @param values \code{logical}. If \code{TRUE} median value of each violin is printed.
#'
#' @importFrom stats median
#' @importFrom ggplot2 ggplot aes `%+%` geom_violin geom_boxplot geom_jitter position_jitter geom_text stat_boxplot labs scale_x_discrete scale_y_log10 guides
#' @importFrom Hmisc all.is.numeric
#'
#' @return An object of class \code{ggplot}.
#' @export
#'
#' @examples
#' data(nmaMACE)
#' specc(model = nmaMACE, combination = c("B", "C", "B + C"))
#'
specc <- function(model, sep = "+", combination = NULL, components_number = FALSE, groups = NULL, random = TRUE, z_value = FALSE,
prop_size = TRUE, fill_violin = "lightblue", color_violin = "lightblue", adj_violin = 1, width_violin = 1,
boxplot = TRUE, width_boxplot = 0.5, errorbar_type = 5, dots = TRUE,
jitter_shape = 16, jitter_position = 0.01, values = TRUE) {
##
# Check arguments
##
if (inherits(model, "netmeta") == FALSE) {
stop("The class of model is not of netmeta", call. = FALSE)
} else if (model$reference.group == "") {
stop("The netmeta model must have a reference group", call. = FALSE)
} else if (inherits(sep, "character") == FALSE) {
stop("The class of sep is not character", call. = FALSE)
} else if (length(sep) > 1) {
stop("The length of sep must be one", call. = FALSE)
} else if (sep == "") {
stop("Argument sep must be diffent than ''", call. = FALSE)
} else if (!is.null(combination)) {
if (inherits(combination, "character") == FALSE) {
stop("The class of combination is not character", call. = FALSE)
}
combination <- unique.combinations(combination, sep) # unique combinations
} else if (inherits(components_number, "logical") == FALSE) {
stop("The class of components_number is not logical", call. = FALSE)
} else if (length(components_number) > 1) {
stop("The length of components_number must be one", call. = FALSE)
} else if (components_number == TRUE & is.null(groups) == FALSE) {
if (inherits(groups, "character") == FALSE) {
stop("The class of groups is not character", call. = FALSE)
}
if (sum(table(groups) > 1) > 0) {
stop("Argument groups must contain different elements", call. = FALSE)
}
if (check.groups(groups) == FALSE) {
stop("The elements of groups must be either integer (string) values, or either a range (e.g 1-2), or greiter than an integer number (e.g 5+)", call. = FALSE)
}
} else if (inherits(random, "logical") == FALSE) {
stop("The class of random is not logical", call. = FALSE)
} else if (length(random) > 1) {
stop("The length of random must be one", call. = FALSE)
} else if (inherits(z_value, "logical") == FALSE) {
stop("The class of z_value is not logical", call. = FALSE)
} else if (length(z_value) > 1) {
stop("The length of z_value must be one", call. = FALSE)
} else if (inherits(boxplot, "logical") == FALSE) {
stop("The class of boxplot is not logical", call. = FALSE)
} else if (length(boxplot) > 1) {
stop("The length of boxplot must be one", call. = FALSE)
} else if (inherits(dots, "logical") == FALSE) {
stop("The class of dots is not logical", call. = FALSE)
} else if (length(dots) > 1) {
stop("The length of dots must be one", call. = FALSE)
} else if (inherits(values, "logical") == FALSE) {
stop("The class of values is not logical", call. = FALSE)
} else if (length(values) > 1) {
stop("The length of values must be one", call. = FALSE)
}
##
# NMA estimates and characteristics
##
# Get NMA estimates
nma_est <- nmares(model, random)
nma_est$Node <- row.names(nma_est) <- gsub(" ", "", nma_est$Node)
# Components of the network
comp_network <- strsplit(nma_est$Node, split = paste("[", sep, "]", sep = ""), perl = TRUE)
if (sum(sapply(comp_network, FUN = function(x) {
length(x) > 1
})) == 0) {
stop("No additive treatments are included in the NMA model", call. = FALSE)
} else {
comp_network <- unique(unlist(comp_network))
}
##
# Write the network's nodes as a combination of components' dummy variables
##
dummy <- dummies(nma_est, comp_network, sep)
##
data_plot <- NULL # data for the plot
if (components_number) {
##
# Data for the number of components
##
dummy$n_comp <- apply(dummy[, (which(names(dummy) == "z_stat") + 1):(dim(dummy)[2])], 1, sum)
if (!is.null(groups)) {
# Exclude the groups that cannot be calculated
exclude_groups <- NULL
num_elements <- sapply(groups, Hmisc::all.is.numeric)
exclude_num <- !groups[num_elements] %in% dummy$n_comp
if (sum(exclude_num) > 0) {
exclude_groups <- groups[num_elements][exclude_num]
groups <- groups[-which(groups %in% exclude_groups)]
}
if (length(groups) > 0) {
ranges <- strsplit(groups, split = "-")
range_elements <- sapply(ranges,
FUN = function(x) {
Hmisc::all.is.numeric(x) & length(x) == 2
}
)
if (sum(range_elements) > 0) {
exclude_ranges <- sapply(ranges[range_elements],
FUN = function(x) {
inc <- as.numeric(x[1]):as.numeric(x[2]) %in% dummy$n_comp
sum(inc) == 0
}
)
if (sum(exclude_ranges) > 0) {
exclude_ranges <- groups[range_elements][exclude_ranges]
exclude_groups <- c(exclude_groups, exclude_ranges)
groups <- groups[-which(groups %in% exclude_groups)]
}
}
}
if (length(groups) > 0) {
plus <- strsplit(groups, split = "+")
plus_elements <- sapply(plus,
FUN = function(p) {
if (length(p) != 2) {
FALSE
} else if (Hmisc::all.is.numeric(p[1]) & p[2] == "+") {
TRUE
}
}
)
if (sum(plus_elements) > 0) {
exclude_plus <- sapply(plus[plus_elements],
FUN = function(p) {
p[1] > max(dummy$n_comp)
}
)
if (sum(exclude_plus) > 0) {
exclude_plus <- groups[plus_elements][exclude_plus]
exclude_groups <- c(exclude_groups, exclude_plus)
groups <- groups[-which(groups %in% exclude_groups)]
}
}
}
if (length(groups) == 0) {
stop("Argument groups contains classes that cannot obtained", call. = FALSE)
}
if (!is.null(exclude_groups)) {
warning(paste(paste(exclude_groups, collapse = ", "), ifelse(length(exclude_groups) == 1, paste("was"), paste("were")),
"excluded from the groups arguments since it cannot be obtained from the network geometry",
collapse = ""
),
call. = FALSE
)
}
# Data for each component number group
for (i in groups) {
if (grepl("-", i)) {
group_range <- strsplit(i, split = "-")[[1]]
data_j <- dummy[which(dummy$n_comp <= max(group_range) & dummy$n_comp >= min(group_range)), ]
} else if ("+" %in% strsplit(i, "+")[[1]]) {
data_j <- dummy[which(dummy$n_comp >= as.numeric(strsplit(i, "+")[[1]][1])), ]
} else {
data_j <- dummy[which(dummy$n_comp == as.numeric(i)), ]
}
data_j$Combination <- i
data_plot <- rbind(data_plot, data_j)
}
} else {
dummy$Combination <- dummy$n_comp
data_plot <- dummy
}
# Exclude the reference rows
ex_ref <- which(data_plot$Node == model$reference.group)
    if (length(ex_ref) > 0) {
      data_plot <- data_plot[-ex_ref, ]
}
axis_x <- "Number of components"
} else {
##
# Data for the combination of components
##
dummy$Combination <- NA
axis_x <- "Components"
if (!is.null(combination)) {
combination <- gsub(" ", "", combination)
check.combinations(combination, comp_network, sep)
# Specific component combinations
for (i in 1:length(combination)) {
dummy_comb_i <- strsplit(combination[i], split = paste0("[", sep, "]"), perl = TRUE)[[1]]
columns <- which(names(dummy) %in% dummy_comb_i) # corresponding columns
# Find the rows that have those components
data_j <- dummy
##
for (k in 1:length(columns)) {
data_j <- data_j[which(data_j[, columns[k]] == 1), ]
}
if (dim(data_j)[1] > 0) {
data_j$Combination <- combination[i]
data_plot <- rbind(data_plot, data_j)
} else {
warning(paste("Combination", combination[i], "cannot obtained due to network geometry"), call. = FALSE)
}
}
if (is.null(data_plot)) {
stop("Argument combinations containes component combinations that cannot be obtained", call. = FALSE)
}
} else {
# Network's components
for (i in comp_network) {
# Find the rows that have those components
data_j <- dummy[which(dummy[, i] == 1), ]
data_j$Combination <- i
data_plot <- rbind(data_plot, data_j)
}
# Exclude the reference category
ref_exc <- which(data_plot$Combination == model$reference.group)
if (length(ref_exc) > 0) {
data_plot <- data_plot[-ref_exc, ]
}
}
}
# Effect measure to be used
if (z_value == TRUE) {
mes <- "z_stat"
yax <- "Standardized effects"
# exclude NaN or NA
if (sum(is.nan(data_plot$z_stat)) > 0 | sum(is.na(data_plot$z_stat)) > 0) {
ex_z <- which(is.nan(data_plot$z_stat) == TRUE | is.na(data_plot$z_stat) == TRUE)
data_plot <- data_plot[-ex_z, ]
}
data_plot$size <- 1
a <- 1
} else {
mes <- "TE"
a <- 0.3
if (model$sm %in% c("OR", "RR")) { # dichotomous outcomes
data_plot[, mes] <- exp(data_plot[, mes])
}
yax <- "Treatment effects"
# dots size
if (prop_size == TRUE) {
data_plot$size <- (1 / data_plot$seTE - 0.1) / (10 - 0.1) # scale 0.1 - 10
over1 <- which(data_plot$size > 1)
if (length(over1) > 0) {
data_plot$size[over1] <- 1
}
zeros <- which(round(data_plot$size, 1) == 0)
if (length(zeros) > 0) {
data_plot$size[zeros] <- 0.2
}
data_plot$size <- 8 * data_plot$size
} else {
data_plot$size <- 1
a <- 1
}
}
##
# Plot
##
lees_2 <- table(data_plot$Combination) < 2
r <- 1:dim(data_plot)[1]
if (sum(lees_2) > 0) {
warning("Violin plot requires at least 2 data point", call. = FALSE)
no_violin <- labels(lees_2)[[1]][which(lees_2 == TRUE)]
r <- which(!data_plot$Combination %in% no_violin)
}
data_plot$Combination <- as.character(data_plot$Combination)
p <- ggplot2::ggplot(
data = NULL,
ggplot2::aes(
x = data_plot$Combination,
y = data_plot[, mes]
)
) +
ggplot2::geom_violin(
ggplot2::aes(
x = data_plot$Combination[r],
y = data_plot[r, mes]
),
trim = TRUE,
fill = fill_violin,
color = color_violin,
adjust = adj_violin,
width = width_violin
) +
ggplot2::labs(
y = yax,
fill = ""
) +
ggplot2::scale_x_discrete(name = axis_x) +
ggplot2::theme(
legend.position = "bottom",
axis.title = ggplot2::element_text(size = 12)
) +
    ggplot2::guides(fill = ggplot2::guide_legend(nrow = 1))
if (boxplot) {
p <- p + ggplot2::geom_boxplot(
width = width_boxplot,
outlier.shape = NA,
ggplot2::aes(fill = data_plot$Combination)
) +
ggplot2::stat_boxplot(
geom = "errorbar",
linetype = errorbar_type
)
}
if (dots) {
p <- p + ggplot2::geom_jitter(
size = data_plot$size, alpha = a,
shape = jitter_shape,
position = ggplot2::position_jitter(jitter_position)
)
}
if (values) {
medz <- tapply(data_plot[, mes], data_plot$Combination, stats::median)
p_meds <- data.frame(combination = names(medz), med = round(medz, 2))
p <- p + ggplot2::geom_text(
data = NULL,
ggplot2::aes(
x = p_meds$combination,
y = round(p_meds$med, 2),
          label = format(p_meds$med, nsmall = 2)
),
color = "red",
size = 5,
hjust = 2.2,
vjust = -0.6
)
}
if (model$sm %in% c("OR", "RR") & z_value == FALSE) {
p <- p + ggplot2::scale_y_log10()
}
p
}
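# Usage sketch (illustrative, not run): violins by the number of components,
# using z-scores instead of the NMA relative effects.
# specc(model = nmaMACE, components_number = TRUE, z_value = TRUE)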
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/specc.R
|
unique.combinations <- function(combination, sep) {
elements <- strsplit(combination, split = paste0("[", sep, "]"), perl = TRUE)
excluded <- NULL
i <- 1
while (i <= length(combination) & i <= length(elements)) {
elements_i <- elements[[i]]
same_i <- sapply(elements, FUN = function(x) {
sum(elements_i %in% x) == length(elements_i) & length(x) == length(elements_i)
})
if (sum(same_i) > 1) {
pos <- which(same_i == TRUE)
pos <- pos[!pos == i]
excluded <- c(excluded, sapply(elements[pos], FUN = function(x) {
(paste(x, collapse = " "))
}))
elements <- elements[-pos]
}
i <- i + 1
}
if (length(excluded) > 0) {
warning(paste(paste(excluded, collapse = ", "), "excluded from argument combination as duplicate"), call. = FALSE)
}
combination <- sapply(elements, FUN = function(x) {
(paste(x, collapse = sep))
})
combination
}
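# Illustration (not run): "B+A" contains the same components as "A+B", so it is
# dropped with a warning and only the first occurrence is kept.
# unique.combinations(c("A+B", "B+A", "C"), sep = "+")
# #> [1] "A+B" "C"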
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/unique.combinations.R
|
#' Waterfall plot
#'
#' @description
#' The function produces a waterfall plot based on the NMA estimates from the interventions that differ
#' by one specific component combination.
#'
#' @details
#' Based on the estimates obtained from the network meta-analysis (NMA) model (relative effects by default, or z-values if \code{z_value = TRUE}),
#' the function visualizes all the observed interventions that differ by one specific component
#' combination, in order to explore whether the extra component combination in each comparison
#' has a positive or negative impact. Bars above or below the \eqn{y = 0} line
#' indicate that the inclusion of the extra component combination has an impact on the
#' intervention. The direction of the impact (positive or negative) depends on the outcome's nature
#' (beneficial or harmful).
#'
#' The combination of interest is defined from the argument \code{combination}. By default the
#' function visualizes the interventions that differ by one component (\code{combination = NULL}).
#' If for example \code{combination = "A+B"}, the function plots the interventions that differ
#' by "A+B".
#'
#' @note
#' In the case of dichotomous outcomes, the log-scale is used on the y-axis. Also, the function can be applied
#' only to network meta-analysis models that contain multi-component interventions.
#'
#' @param model An object of class \code{\link[netmeta]{netmeta}}.
#' @param sep A single character that defines the separator between interventions components.
#' @param combination A single character that specifies the component combination of interest.
#' @param z_value \code{logical}. If \code{TRUE} z-values are used instead of interventions effects.
#' @param random \code{logical}. If \code{TRUE} z-values are obtained from the random-effects NMA model instead of the fixed-effect NMA model.
#'
#'
#' @return An object of class \code{ggplot}.
#' @export
#'
#' @importFrom ggplot2 ggplot aes `%+%` geom_bar position_dodge labs theme_classic
#'
#' @examples
#' data(nmaMACE)
#' watercomp(nmaMACE)
#'
watercomp <- function(model, sep = "+", combination = NULL, z_value = FALSE, random = TRUE) {
##
# Check arguments
##
if (inherits(model, "netmeta") == FALSE) {
stop("The class of model is not of netmeta", call. = FALSE)
} else if (model$reference.group == "") {
stop("The netmeta model must have a reference group", call. = FALSE)
} else if (inherits(sep, "character") == FALSE) {
stop("The class of sep is not character", call. = FALSE)
} else if (length(sep) > 1) {
stop("The length of sep must be one", call. = FALSE)
} else if (sep == "") {
stop("Argument sep must be diffent than ''", call. = FALSE)
} else if (!is.null(combination) & inherits(combination, "character") == FALSE) {
stop("The class of combination is not character", call. = FALSE)
} else if (!is.null(combination) & length(combination) > 1) {
stop("The length of combination must be one", call. = FALSE)
} else if (inherits(random, "logical") == FALSE) {
stop("The class of random is not logical", call. = FALSE)
} else if (length(random) > 1) {
stop("The length of random must be one", call. = FALSE)
}
##
# NMA estimates and characteristics
##
# Get NMA estimates
nma_est <- nmares(model, random)
nma_est <- nma_est[, -c(3:dim(nma_est)[2])]
nma_est$Node <- row.names(nma_est) <- gsub(" ", "", nma_est$Node)
# Reference category
ref <- as.character(model$reference.group)
# Components of the network
comp_network <- strsplit(nma_est$Node, split = paste("[", sep, "]", sep = ""), perl = TRUE)
if (sum(sapply(comp_network, FUN = function(x) {
length(x) > 1
})) == 0) {
stop("No additive treatments are included in the NMA model", call. = FALSE)
} else {
comp_network <- unique(unlist(comp_network))
}
##
# Writing nodes as a combination of component's dummy variables
##
dummy <- dummies(nma_est, comp_network, sep)
dummy <- dummy[, -c(1, 2)]
# Check if the combination exist
if (!is.null(combination)) {
combination <- gsub(" ", "", combination)
check.combinations(combination, comp_network, sep)
combination_components <- strsplit(combination, split = paste("[", sep, "]", sep = ""), perl = TRUE)[[1]]
} else {
combination_components <- NULL
}
# Number of components for each node
nma_est$n_comp <- apply(dummy, 1, sum)
##
# Find the set of nodes that differ one component
##
if (length(combination_components) > 1) {
nodes_elements <- strsplit(nma_est$Node, split = paste("[", sep, "]", sep = ""), perl = TRUE)
} else {
nodes_elements <- NULL
}
pos <- differ.by.one(M = dummy, combination = combination_components, nodes_elements = nodes_elements)
##
# Make Plot data
##
data_plot <- as.data.frame(cbind(nma_est$Node[pos$pos1], nma_est$Node[pos$pos2]))
# Add node's summary measures
data_plot <- merge(data_plot, nma_est, by.x = c("V1"), by.y = c("Node"), all.x = TRUE)
colnames(data_plot)[c(3, 4)] <- c("TE_V1", "n_comp_V1")
data_plot <- merge(data_plot, nma_est, by.x = c("V2"), by.y = c("Node"), all.x = TRUE)
colnames(data_plot)[c(5, 6)] <- c("TE_V2", "n_comp_V2")
# Calculate differences based on the number of components
data_plot$diff <- ifelse(data_plot$n_comp_V1 > data_plot$n_comp_V2,
data_plot$TE_V1 - data_plot$TE_V2,
data_plot$TE_V2 - data_plot$TE_V1
)
##
# Calculate variances
##
colm <- "diff" # column for TE
if (z_value == TRUE) {
colm <- "z" # column for z-values
    covmat <- if (random) model$Cov.random else model$Cov.fixed # Covariance matrix
colnames(covmat) <- rownames(covmat) <- gsub(" ", "", colnames(covmat))
v <- NULL
for (i in 1:dim(data_plot)[1]) {
# Calculate Covariance
a <- which(rownames(covmat) == paste(data_plot$V1[i], ref, sep = ":"))
b <- which(colnames(covmat) == paste(data_plot$V2[i], ref, sep = ":"))
##
if (length(a) == 0 | length(b) == 0) { # ref vs ref
r <- which(row.names(covmat) %in% c(
paste(data_plot$V1[i], data_plot$V2[i], sep = ":"),
paste(data_plot$V2[i], data_plot$V1[i], sep = ":")
))
v[i] <- covmat[r, r]
} else {
covariance <- covmat[a, b]
var_a <- which(colnames(covmat) == paste(data_plot$V1[i], ref, sep = ":"))
var_b <- which(colnames(covmat) == paste(data_plot$V2[i], ref, sep = ":"))
##
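        # Var(d1 - d2) = Var(d1) + Var(d2) - 2 * Cov(d1, d2), the variance of the difference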
v[i] <- covmat[var_a, var_a] + covmat[var_b, var_b] - 2 * covariance
}
}
    # Calculate z-values (difference divided by its standard error)
    data_plot$z <- data_plot$diff / sqrt(v)
}
data_plot$compar <- paste(data_plot$V2, data_plot$V1, sep = " vs ")
##
# Plot
##
if (z_value == TRUE) {
lab_y <- "Standardized change"
} else {
lab_y <- "TE change"
if (model$sm %in% c("OR", "RR")) {
data_plot$diff <- exp(data_plot$diff) # TE is on log scale
}
}
p <- ggplot2::ggplot(
data = NULL,
ggplot2::aes(
x = data_plot$compar,
y = data_plot[, colm]
)
) +
ggplot2::geom_bar(
stat = "identity",
width = 0.7,
position = ggplot2::position_dodge(width = 0.4),
color = "red"
) +
ggplot2::labs(
x = "Comparison",
y = lab_y
) +
ggplot2::theme_classic()
if (model$sm %in% c("OR", "RR") & z_value == FALSE) {
p <- p + ggplot2::scale_y_log10()
}
p
}
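# Usage sketch (illustrative, not run): impact of adding component "A", assuming
# "A" is a component of the nmaMACE network (as in the package vignette).
# watercomp(nmaMACE, combination = "A", z_value = TRUE)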
|
/scratch/gouwar.j/cran-all/cranData/viscomp/R/watercomp.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(viscomp)
data("MACE")
## ---- message = FALSE, warning=FALSE------------------------------------------
library(netmeta)
data_NMA <- pairwise(studlab = Study,
treat = list(treat1, treat2, treat3, treat4),
n = list(n1, n2, n3, n4),
event = list(event1, event2, event3, event4),
data = MACE,
sm = "OR" )
net <- netmeta(TE = TE,
seTE = seTE,
studlab = studlab,
treat1 = treat1,
treat2 = treat2,
data = data_NMA,
small.values = "good",
ref = "UC")
## ---- fig.width = 8.5, fig.height = 6, out.width="100%"-----------------------
compdesc(net)
## ---- fig.width = 7.5, fig.height = 6, out.width="100%"-----------------------
compGraph(net, mostF = 10, title = "")
## ---- fig.width = 7.5, fig.height = 6, out.width="100%"-----------------------
compGraph(net, mostF = 10, title = "", excl = "UC")
## ---- fig.width = 7.2, fig.height = 6-----------------------------------------
heatcomp(net)
## ---- fig.width=10, out.width="100%", fig.height = 7.5------------------------
specc(net)
## ---- fig.width = 8, out.width="100%", fig.height = 7.5-----------------------
specc(net, combination = c("A", "A + B", "A + B + C"))
## ---- fig.width = 7.2, fig.height = 7.5, out.width="100%"---------------------
specc(net, components_number = TRUE)
## ---- fig.width = 7.2, fig.height = 7.5, out.width="100%"---------------------
specc(net, components_number = TRUE, groups = c(1, 2, "1-2", "2+"))
## ---- fig.width = 7.2, fig.height = 6-----------------------------------------
denscomp(net, combination = "A+B")
## ---- fig.width = 7.2, fig.height = 6-----------------------------------------
denscomp(net, combination = c("A", "A + B", "A + B + C"))
## ---- fig.width = 7.2, fig.height = 6-----------------------------------------
loccos(net, combination = "A", histogram = FALSE)
## ---- fig.width = 7.2, fig.height = 6-----------------------------------------
watercomp(net, combination = "A")
## ---- eval = TRUE-------------------------------------------------------------
t1 <- c("A", "B", "C", "A+B", "A+C", "B+C", "A")
t2 <- c("C", "A", "A+C", "B+C", "A", "B", "B+C")
TE1 <- c(2.12, 3.24, 5.65, -0.60, 0.13, 0.66, 3.28)
TE2 <- c(4.69, 2.67, 2.73, -3.41, 1.79, 2.93, 2.51)
seTE1 <- rep(0.1, 7)
seTE2 <- rep(0.2, 7)
study <- paste0("study_", 1:7)
data1 <- data.frame("TE" = TE1,
"seTE" = seTE1,
"treat1" = t1,
"treat2" = t2,
"studlab" = study,
stringsAsFactors = FALSE)
data2 <- data.frame("TE" = TE2,
"seTE" = seTE2,
"treat1" = t1,
"treat2" = t2,
"studlab" = study,
stringsAsFactors = FALSE)
net1 <- netmeta(TE = TE,
seTE = seTE,
studlab = studlab,
treat1 = treat1,
treat2 = treat2,
data = data1,
ref = "A")
net2 <- netmeta::netmeta(TE = TE,
seTE = seTE,
studlab = studlab,
treat1 = treat1,
treat2 = treat2,
data = data2,
ref = "A")
## ---- fig.width = 7.2, fig.height = 6, out.width="100%"-----------------------
rankheatplot(list(net1, net2))
|
/scratch/gouwar.j/cran-all/cranData/viscomp/inst/doc/viscomp.R
|
---
title: "An introduction to network meta-analysis using the viscomp package"
author: "Georgios Seitidis"
output: rmarkdown::html_vignette
description: >
  This document introduces you to viscomp’s set of tools and presents how to apply them to network meta-analysis models when multi-component (complex) interventions are present.
vignette: >
%\VignetteIndexEntry{An introduction to network meta-analysis using the viscomp package}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
<style>
body {
text-align: justify}
</style>
When multi-component (complex) interventions are present in a network meta-analysis model, we are usually interested in:
* identifying the most efficacious components
* identifying which component combination works better
* understanding the behavior of the components
The **viscomp** package provides several visualization tools to address these issues. This document introduces viscomp’s set of tools and presents how to apply them to network meta-analysis when multi-component (complex) interventions are included.
\
# Single outcome
## Artificial network meta-analysis model
Load the MACE data of the viscomp package
```{r setup}
library(viscomp)
data("MACE")
```
The network meta-analysis (NMA) model is fitted using the R package `netmeta`. MACE is a dichotomous harmful outcome, and the NMA model is constructed using the odds ratio (OR) as the effect measure.
```{r, message = FALSE, warning=FALSE}
library(netmeta)
data_NMA <- pairwise(studlab = Study,
treat = list(treat1, treat2, treat3, treat4),
n = list(n1, n2, n3, n4),
event = list(event1, event2, event3, event4),
data = MACE,
sm = "OR" )
net <- netmeta(TE = TE,
seTE = seTE,
studlab = studlab,
treat1 = treat1,
treat2 = treat2,
data = data_NMA,
small.values = "good",
ref = "UC")
```
## Descriptive analysis of components with `compdesc`
`compdesc` performs a descriptive analysis for the components observed in the network. It provides 3 items as an output.
Item `crosstable` contains a cross-table with the frequency of the components. Diagonal elements refer to the components, while off-diagonal elements to the components combinations. Each cell represents the number of arms where the corresponding component (combination) was observed.
Item `heatmat` visualizes the item *crosstable*. Diagonal elements refer to the components and in parentheses the proportion of study arms including that component is provided, while off-diagonal elements to the frequency of component’s combinations and in parentheses the proportion of study arms with both components out of those study arms that have the component in the row is provided. Also, the intensity of the color is proportional to the relative frequency of the component combination.
Item `frequency` provides useful descriptive characteristics about the component's frequency. In addition, it reports:
* the number of arms where the component was observed (column *Frequency*)
* the number of studies in which the corresponding component was included in all arms (column *A*)
* the percentage of studies in which the corresponding component was included in all arms (column *A_percent*)
* the number of studies in which the corresponding component was included in at least one arm (column *B*)
* the percentage of studies in which the corresponding component was included in at least one arm (column *B_percent*)
* the number of studies in which the corresponding component was not included in any arm (column *C*)
* the percentage of studies in which the corresponding component was not included in any arm (column *C_percent*)
* the ratio of columns *A* and *B* (column *A.B*).
We can perform a descriptive analysis of the components with:
```{r, fig.width = 8.5, fig.height = 6, out.width="100%"}
compdesc(net)
```
## Explore components geometry with `compGraph()`
`compGraph()` is meant to visualize the frequency of components’ combinations found in the network. The function resembles a network plot where nodes represent the individual components found in the network, while edges represent the combination of components found in at least one treatment arm of the trials included in the NMA model. Each edge’s color represents one of the unique interventions (components’ combination) found in the network of interventions. Edges’ thickness indicates the frequency by which each intervention (combination of components) was observed in the network (number of arms in which the combination was assigned). The number of the most frequent combinations can be modified from the argument `mostF`.
\
We can visualize the 10 most frequent component combinations with:
```{r, fig.width = 7.5, fig.height = 6, out.width="100%"}
compGraph(net, mostF = 10, title = "")
```
In NMA we usually have interventions that are used as a potentially inactive reference intervention (e.g. placebo, usual care), and are not combined with other interventions. We can exclude these interventions through the argument `excl`. For example, we can exclude the usual care (UC) from the component network plot with:
```{r, fig.width = 7.5, fig.height = 6, out.width="100%"}
compGraph(net, mostF = 10, title = "", excl = "UC")
```
## Explore the efficacy of the two-by-two components combinations with `heatcomp()`
`heatcomp()` creates a heat plot comparing the two-by-two component combinations to the reference intervention. Diagonal elements refer to components, while off-diagonal to components' combinations. Each element summarizes the efficacy of the interventions (obtained from the NMA model) that include the corresponding component combination. The frequency of the component combinations found in the NMA model is printed by default (`freq = TRUE`). Combinations that were not observed in the NMA model are denoted by the letter "X". The function by default uses the relative effects and the median as a summary measure (`median = TRUE`). The uncertainty around the NMA estimates is reflected by the size of the grey boxes. The bigger the box, the more precise the estimate. Moreover, the magnitude of the evidence is reflected by the color's intensity. Dark green or red colors indicate a large impact on the outcome. The outcome's nature (beneficial or harmful) is defined in the *netmeta* model (argument *small.values*).
The function can be also adjusted to include z-scores by setting the argument `z_value = TRUE`.
Z-scores quantify the strength of statistical evidence. Thus, dark green (or red) indicates strong statistical evidence that the corresponding component (or combination of components) performs better (or worse) than the reference intervention.
\
We can visualize the efficacy of the components, with:
```{r, fig.width = 7.2, fig.height = 6}
heatcomp(net)
```
From the plot we see that the most intense colors are observed for components E, B, G, and the combination of components B and G. Thus, these combinations seem to be the most efficacious according to `heatcomp()`. We also see that the frequency of components B, G and the combination of B and G equals one, indicating that their corresponding estimates have been obtained from the same single intervention. Note also that the size of the majority of the grey boxes is large, indicating that the corresponding NMA estimates are precise.
## Explore the efficacy of the components with `specc()`
### Components
`specc()` works similarly to `heatcomp()` except that instead of visualizing the two-by-two component combinations, it can visualize combinations with more than two components. The function by default produces violin plots based on the components' relative effects (`z_value = FALSE`).
\
We can visualize the distribution of each component in the network with:
```{r, fig.width=10, out.width="100%", fig.height = 7.5}
specc(net)
```
Note that in the plot the median estimates of the components are equal to the diagonal elements of the `heatcomp()` function. Moreover, the size of the dots is proportional to the precision of the NMA estimates. Larger dots denote more precise estimates.
Note also that we receive a warning message because component G was included in a single intervention.
### Components combinations
We can visualize the distribution of component combinations (e.g. A, A+B, A+B+C) with:
```{r, fig.width = 8, out.width="100%", fig.height = 7.5}
specc(net, combination = c("A", "A + B", "A + B + C"))
```
### Number of components
Often, we are interested in exploring the behavior of the intervention effects as the number of components increases. We can do that with:
```{r, fig.width = 7.2, fig.height = 7.5, out.width="100%"}
specc(net, components_number = TRUE)
```
\
We can also group the violins in clusters based on the number of components. For example, we can create violins for the interventions that include 1 component, 2 components, 1-2 components, and more than 2 components, with:
```{r, fig.width = 7.2, fig.height = 7.5, out.width="100%"}
specc(net, components_number = TRUE, groups = c(1, 2, "1-2", "2+"))
```
## Explore the efficacy of the components with `denscomp()`
The efficacy of a component (or component combination) can be explored by comparing the corresponding densities. `denscomp()` compares the following densities: one density is constructed by the NMA results referring to the interventions including the component (combination) of interest, while the second density refers to the interventions, not including the underlying component (combination). The function by default uses the NMA relative effects, but it can be also adjusted to use z-scores by setting the argument `z_value = TRUE`.
\
For example, if we are interested in exploring the efficacy of the component combination A+B, we can visualize the densities of the NMA relative effect estimates from the interventions that include and do not include components A+B, respectively, with:
```{r, fig.width = 7.2, fig.height = 6}
denscomp(net, combination = "A+B")
```
\
`denscomp()` can also compare more than two densities. If, for example, we are interested in comparing the densities of the interventions that include component A, components A+B, and components A+B+C, we can do it with:
```{r, fig.width = 7.2, fig.height = 6}
denscomp(net, combination = c("A", "A + B", "A + B + C"))
```
## Leaving one component out scatter plot with `loccos()`
Exploring whether the inclusion or the exclusion of a component (combination) has a positive or negative impact on the efficacy of an intervention could be undertaken by looking at the interventions that differ by this specific component (combination). `loccos()` creates a scatter plot where the x-axis represents the NMA relative effect of the intervention that includes the underlying component (combination), while the y-axis represents the NMA relative effect of the intervention that consists of the same components just like the one in the x-axis with the only difference that it does not include the component (combination) of interest. A point on the line y = x indicates that the inclusion/exclusion of the underlying component does not affect the efficacy of the interventions. Dots above the y = x line for a beneficial outcome, indicate that the inclusion of a component hampers the treatment effect while dots below this line signify a component that increases efficacy. The opposite holds for a harmful outcome.
The function by default uses the NMA relative effects, but it can be also adjusted to use z-scores by setting the argument `z_value = TRUE`.
\
We can explore the impact of component A, with:
```{r, fig.width = 7.2, fig.height = 6}
loccos(net, combination = "A", histogram = FALSE)
```
Note that the estimates with or without component A fall below or above the y = x line. This indicates that
the additivity assumption might not hold for the Component NMA model. This is because additivity implies that the inclusion/exclusion of a component has the same impact on interventions that differ by this component. This is expressed visually in the scatter plot by a line parallel to y = x.
## Waterfall plot with `watercomp()`
`watercomp()` works similarly to `loccos()` with the sole difference that instead of visualizing the impact of a component (combination) in a scatter plot, the impact is now visualized in a waterfall plot. The horizontal y = 0 line represents zero impact on the intervention efficacy if an extra component is added. Bars indicate whether the inclusion of the extra component has an impact on the intervention. The interpretation of the direction of the underlying bars (positive or negative) depends on the nature of the outcome used (beneficial or harmful) which is obtained automatically from the *netmeta* model.
The function by default uses the NMA relative effects, but it can be also adjusted to use z-scores by setting the argument `z_value = TRUE`.
\
We can explore the impact of component A, with:
```{r, fig.width = 7.2, fig.height = 6}
watercomp(net, combination = "A")
```
\
# Multiple outcomes
## Artificial network meta-analysis model
```{r, eval = TRUE}
t1 <- c("A", "B", "C", "A+B", "A+C", "B+C", "A")
t2 <- c("C", "A", "A+C", "B+C", "A", "B", "B+C")
TE1 <- c(2.12, 3.24, 5.65, -0.60, 0.13, 0.66, 3.28)
TE2 <- c(4.69, 2.67, 2.73, -3.41, 1.79, 2.93, 2.51)
seTE1 <- rep(0.1, 7)
seTE2 <- rep(0.2, 7)
study <- paste0("study_", 1:7)
data1 <- data.frame("TE" = TE1,
"seTE" = seTE1,
"treat1" = t1,
"treat2" = t2,
"studlab" = study,
stringsAsFactors = FALSE)
data2 <- data.frame("TE" = TE2,
"seTE" = seTE2,
"treat1" = t1,
"treat2" = t2,
"studlab" = study,
stringsAsFactors = FALSE)
net1 <- netmeta(TE = TE,
seTE = seTE,
studlab = studlab,
treat1 = treat1,
treat2 = treat2,
data = data1,
ref = "A")
net2 <- netmeta::netmeta(TE = TE,
seTE = seTE,
studlab = studlab,
treat1 = treat1,
treat2 = treat2,
data = data2,
ref = "A")
```
## Visualize the components' ranking for multiple outcomes with `rankheatplot()`
`rankheatplot()` creates a rank heat plot where the ranking of interventions can be presented across multiple outcomes (Veroniki et al., 2016). Circles correspond to outcomes, while radial sectors to components. Sectors are coloured according to the ranking of the relevant components within the underlying outcomes. Ranking is calculated as the median (or the mean) of the P-scores of the interventions including the component of interest in the particular outcome. The coloured scale ranges between red (p-score = 0%) and green (p-score = 100%). Uncoloured sectors, if any, suggest that the underlying component was not included in any of the interventions in the NMA for the particular outcome.
\
We can visualize the p-scores for the two outcomes, with:
```{r, fig.width = 7.2, fig.height = 6, out.width="100%"}
rankheatplot(list(net1, net2))
```
### References
[Seitidis, G., Tsokani, S., Christogiannis, C., Kontouli, K.-M., Fyraridis, A., Nikolakopoulos, S., Veroniki, A.A. and Mavridis, D. (2023), Graphical tools for visualizing the results of network meta-analysis of multicomponent interventions. Research Synthesis Methods, 1-14.](https://doi.org/10.1002/jrsm.1617)
[Veroniki, A. A., Straus, S. E., Fyraridis, A., & Tricco, A. C. (2016). The rank-heat plot is a novel way to present the results from a network meta-analysis including multiple outcomes. Journal of Clinical Epidemiology, 76, 193–199.](https://doi.org/10.1016/j.jclinepi.2016.02.016)
|
/scratch/gouwar.j/cran-all/cranData/viscomp/inst/doc/viscomp.Rmd
|
---
title: "An introduction to network meta-analysis using the viscomp package"
author: "Georgios Seitidis"
output: rmarkdown::html_vignette
description: >
  This document introduces you to viscomp’s set of tools and presents how to apply them to network meta-analysis models when multi-component (complex) interventions are present.
vignette: >
%\VignetteIndexEntry{An introduction to network meta-analysis using the viscomp package}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
<style>
body {
text-align: justify}
</style>
When multi-component (complex) interventions are present in a network meta-analysis model, we are usually interested in:
* identifying the most efficacious components
* identifying which component combination works better
* understanding the behavior of the components
The **viscomp** package provides several visualization tools to address these issues. This document introduces viscomp’s set of tools and presents how to apply them to network meta-analysis when multi-component (complex) interventions are included.
\
# Single outcome
## Artificial network meta-analysis model
Load the MACE data of the viscomp package
```{r setup}
library(viscomp)
data("MACE")
```
The network meta-analysis (NMA) model is fitted using the R package `netmeta`. MACE is a dichotomous harmful outcome, and the NMA model is constructed using the odds ratio (OR) as the effect measure.
```{r, message = FALSE, warning=FALSE}
library(netmeta)
data_NMA <- pairwise(studlab = Study,
treat = list(treat1, treat2, treat3, treat4),
n = list(n1, n2, n3, n4),
event = list(event1, event2, event3, event4),
data = MACE,
sm = "OR" )
net <- netmeta(TE = TE,
seTE = seTE,
studlab = studlab,
treat1 = treat1,
treat2 = treat2,
data = data_NMA,
small.values = "good",
ref = "UC")
```
## Descriptive analysis of components with `compdesc`
`compdesc` performs a descriptive analysis for the components observed in the network. It provides 3 items as an output.
Item `crosstable` contains a cross-table with the frequency of the components. Diagonal elements refer to the components, while off-diagonal elements to the components combinations. Each cell represents the number of arms where the corresponding component (combination) was observed.
Item `heatmat` visualizes the item *crosstable*. Diagonal elements refer to the components and in parentheses the proportion of study arms including that component is provided, while off-diagonal elements to the frequency of component’s combinations and in parentheses the proportion of study arms with both components out of those study arms that have the component in the row is provided. Also, the intensity of the color is proportional to the relative frequency of the component combination.
Item `frequency` provides useful descriptive characteristics about the component's frequency. In addition, it reports:
* the number of arms where the component was observed (column *Frequency*)
* the number of studies in which the corresponding component was included in all arms (column *A*)
* the percentage of studies in which the corresponding component was included in all arms (column *A_percent*)
* the number of studies in which the corresponding component was included in at least one arm (column *B*)
* the percentage of studies in which the corresponding component was included in at least one arm (column *B_percent*)
* the number of studies in which the corresponding component was not included in any arm (column *C*)
* the percentage of studies in which the corresponding component was not included in any arm (column *C_percent*)
* the ratio of columns *A* and *B* (column *A.B*).
We can perform a descriptive analysis of the components with:
```{r, fig.width = 8.5, fig.height = 6, out.width="100%"}
compdesc(net)
```
## Explore components geometry with `compGraph()`
`compGraph()` is meant to visualize the frequency of components’ combinations found in the network. The function resembles a network plot where nodes represent the individual components found in the network, while edges represent the combination of components found in at least one treatment arm of the trials included in the NMA model. Each edge’s color represents one of the unique interventions (components’ combination) found in the network of interventions. Edges’ thickness indicates the frequency by which each intervention (combination of components) was observed in the network (number of arms in which the combination was assigned). The number of the most frequent combinations can be modified from the argument `mostF`.
\
We can visualize the 10 most frequent component combinations with:
```{r, fig.width = 7.5, fig.height = 6, out.width="100%"}
compGraph(net, mostF = 10, title = "")
```
In NMA we usually have interventions that are used as a potentially inactive reference intervention (e.g. placebo, usual care), and are not combined with other interventions. We can exclude these interventions through the argument `excl`. For example, we can exclude the usual care (UC) from the component network plot with:
```{r, fig.width = 7.5, fig.height = 6, out.width="100%"}
compGraph(net, mostF = 10, title = "", excl = "UC")
```
## Explore the efficacy of the two-by-two components combinations with `heatcomp()`
`heatcomp()` creates a heat plot comparing the two-by-two component combinations to the reference intervention. Diagonal elements refer to components, while off-diagonal to components' combinations. Each element summarizes the efficacy of the interventions (obtained from the NMA model) that include the corresponding component combination. The frequency of the component combinations found in the NMA model is printed by default (`freq = TRUE`). Combinations that were not observed in the NMA model are denoted by the letter "X". The function by default uses the relative effects and the median as a summary measure (`median = TRUE`). The uncertainty around the NMA estimates is reflected by the size of the grey boxes. The bigger the box, the more precise the estimate. Moreover, the magnitude of the evidence is reflected by the color's intensity. Dark green or red colors indicate a large impact on the outcome. The outcome's nature (beneficial or harmful) is defined in the *netmeta* model (argument *small.values*).
The function can be also adjusted to include z-scores by setting the argument `z_value = TRUE`.
Z-scores quantify the strength of statistical evidence. Thus, dark green (or red) indicates strong statistical evidence that the corresponding component (or combination of components) performs better (or worse) than the reference intervention.
\
We can visualize the efficacy of the components, with:
```{r, fig.width = 7.2, fig.height = 6}
heatcomp(net)
```
From the plot we see that the most intense colors are observed for components E, B, G, and the combination of components B and G. Thus, these combinations seem to be the most efficacious according to `heatcomp()`. We also see that the frequency of components B, G and the combination of B and G equals one, indicating that their corresponding estimates have been obtained from the same single intervention. Note also that the size of the majority of the grey boxes is large, indicating that the corresponding NMA estimates are precise.
## Explore the efficacy of the components with `specc()`
### Components
`specc()` works similarly to `heatcomp()` except that instead of visualizing the two-by-two component combinations, it can visualize combinations with more than two components. The function by default produces violin plots based on the components' relative effects (`z_value = FALSE`).
\
We can visualize the distribution of each component in the network with:
```{r, fig.width=10, out.width="100%", fig.height = 7.5}
specc(net)
```
Note that in the plot the median estimates of the components are equal to the diagonal elements of the `heatcomp()` function. Moreover, the size of the dots is proportional to the precision of the NMA estimates. Larger dots denote more precise estimates.
Note also that we receive a warning message because component G was included in a single intervention.
### Components combinations
We can visualize the distribution of component combinations (e.g. A, A+B, A+B+C) with:
```{r, fig.width = 8, out.width="100%", fig.height = 7.5}
specc(net, combination = c("A", "A + B", "A + B + C"))
```
### Number of components
Often, we are interested in exploring the behavior of the intervention effects as the number of components increases. We can do that with:
```{r, fig.width = 7.2, fig.height = 7.5, out.width="100%"}
specc(net, components_number = TRUE)
```
\
We can also group the violins in clusters based on the number of components. For example, we can create violins for the interventions that include 1 component, 2 components, 1-2 components, and more than 2 components, with:
```{r, fig.width = 7.2, fig.height = 7.5, out.width="100%"}
specc(net, components_number = TRUE, groups = c(1, 2, "1-2", "2+"))
```
## Explore the efficacy of the components with `denscomp()`
The efficacy of a component (or component combination) can be explored by comparing the corresponding densities. `denscomp()` compares the following densities: one density is constructed by the NMA results referring to the interventions including the component (combination) of interest, while the second density refers to the interventions, not including the underlying component (combination). The function by default uses the NMA relative effects, but it can be also adjusted to use z-scores by setting the argument `z_value = TRUE`.
\
For example, if we are interested in exploring the efficacy of the component combination A+B, we can visualize the densities of the NMA relative effect estimates from the interventions that do and do not include components A+B, respectively, with:
```{r, fig.width = 7.2, fig.height = 6}
denscomp(net, combination = "A+B")
```
\
`denscomp()` can also compare more than two densities. If, for example, we are interested in comparing the densities of the interventions that include component A, components A+B, and components A+B+C, we can do so with:
```{r, fig.width = 7.2, fig.height = 6}
denscomp(net, combination = c("A", "A + B", "A + B + C"))
```
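A density comparison on the z-score scale can, for example, be obtained in the same way by setting `z_value = TRUE`:
```{r, fig.width = 7.2, fig.height = 6}
denscomp(net, combination = "A+B", z_value = TRUE)
```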
## Leaving one component out scatter plot with `loccos()`
Exploring whether the inclusion or the exclusion of a component (combination) has a positive or negative impact on the efficacy of an intervention can be undertaken by looking at the interventions that differ by this specific component (combination). `loccos()` creates a scatter plot where the x-axis represents the NMA relative effect of the intervention that includes the underlying component (combination), while the y-axis represents the NMA relative effect of the intervention that consists of the same components as the one on the x-axis, with the only difference being that it does not include the component (combination) of interest. A point on the line y = x indicates that the inclusion/exclusion of the underlying component does not affect the efficacy of the interventions. For a beneficial outcome, dots above the y = x line indicate that the inclusion of a component hampers the treatment effect, while dots below this line signify a component that increases efficacy. The opposite holds for a harmful outcome.
The function by default uses the NMA relative effects, but it can also be adjusted to use z-scores by setting the argument `z_value = TRUE`.
\
We can explore the impact of component A, with:
```{r, fig.width = 7.2, fig.height = 6}
loccos(net, combination = "A", histogram = FALSE)
```
Note that the estimates with or without component A fall both below and above the y = x line. This indicates that the additivity assumption might not hold for the component NMA model, because additivity implies that the inclusion/exclusion of a component has the same impact on interventions that differ by this component, which would be expressed visually in the scatter plot by points lying on a line parallel to y = x.
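As noted above, the same scatter plot can, for example, be drawn on the z-score scale by setting `z_value = TRUE`:
```{r, fig.width = 7.2, fig.height = 6}
loccos(net, combination = "A", z_value = TRUE, histogram = FALSE)
```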
## Waterfall plot with `watercomp()`
`watercomp()` works similarly to `loccos()` with the sole difference that instead of visualizing the impact of a component (combination) in a scatter plot, the impact is now visualized in a waterfall plot. The horizontal y = 0 line represents zero impact on the intervention efficacy if an extra component is added. Bars indicate whether the inclusion of the extra component has an impact on the intervention. The interpretation of the direction of the underlying bars (positive or negative) depends on the nature of the outcome used (beneficial or harmful) which is obtained automatically from the *netmeta* model.
The function by default uses the NMA relative effects, but it can also be adjusted to use z-scores by setting the argument `z_value = TRUE`.
\
We can explore the impact of component A, with:
```{r, fig.width = 7.2, fig.height = 6}
watercomp(net, combination = "A")
```
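Again, the corresponding waterfall plot based on z-scores can, for example, be obtained with:
```{r, fig.width = 7.2, fig.height = 6}
watercomp(net, combination = "A", z_value = TRUE)
```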
\
# Multiple outcomes
## Artificial network meta-analysis model
```{r, eval = TRUE}
t1 <- c("A", "B", "C", "A+B", "A+C", "B+C", "A")
t2 <- c("C", "A", "A+C", "B+C", "A", "B", "B+C")
TE1 <- c(2.12, 3.24, 5.65, -0.60, 0.13, 0.66, 3.28)
TE2 <- c(4.69, 2.67, 2.73, -3.41, 1.79, 2.93, 2.51)
seTE1 <- rep(0.1, 7)
seTE2 <- rep(0.2, 7)
study <- paste0("study_", 1:7)
data1 <- data.frame("TE" = TE1,
"seTE" = seTE1,
"treat1" = t1,
"treat2" = t2,
"studlab" = study,
stringsAsFactors = FALSE)
data2 <- data.frame("TE" = TE2,
"seTE" = seTE2,
"treat1" = t1,
"treat2" = t2,
"studlab" = study,
stringsAsFactors = FALSE)
net1 <- netmeta::netmeta(TE = TE,
seTE = seTE,
studlab = studlab,
treat1 = treat1,
treat2 = treat2,
data = data1,
ref = "A")
net2 <- netmeta::netmeta(TE = TE,
seTE = seTE,
studlab = studlab,
treat1 = treat1,
treat2 = treat2,
data = data2,
ref = "A")
```
## Visualize the components' ranking for multiple outcomes with `rankheatplot()`
`rankheatplot()` creates a rank heat plot where the ranking of interventions can be presented across multiple outcomes (Veroniki et al., 2016). Circles correspond to outcomes, while radii correspond to components. Sectors are coloured according to the ranking of the relevant components within the underlying outcomes. Ranking is calculated as the median (or the mean) of the P-scores of the interventions that include the component of interest in the particular outcome. The coloured scale ranges between red (P-score = 0%) and green (P-score = 100%). Uncoloured sectors, if any, suggest that the underlying component was not included in any of the interventions in the NMA for the particular outcome.
\
We can visualize the p-scores for the two outcomes, with:
```{r, fig.width = 7.2, fig.height = 6, out.width="100%"}
rankheatplot(list(net1, net2))
```
### References
[Seitidis, G., Tsokani, S., Christogiannis, C., Kontouli, K.-M., Fyraridis, A., Nikolakopoulos, S., Veroniki, A.A. and Mavridis, D. (2023), Graphical tools for visualizing the results of network meta-analysis of multicomponent interventions. Research Synthesis Methods, 1-14.](https://doi.org/10.1002/jrsm.1617)
[Veroniki, A. A., Straus, S. E., Fyraridis, A., & Tricco, A. C. (2016). The rank-heat plot is a novel way to present the results from a network meta-analysis including multiple outcomes. Journal of Clinical Epidemiology, 76, 193–199.](https://doi.org/10.1016/j.jclinepi.2016.02.016)
|
/scratch/gouwar.j/cran-all/cranData/viscomp/vignettes/viscomp.Rmd
|
# R package viscomplexr - phase portraits of functions in the
# complex number plane
# Copyright (C) 2020 Peter Biber
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#' Adjust ylim to xlim
#'
#' This simple function is useful for adjusting x and y coordinate ranges
#' \code{xlim} and \code{ylim} in order to maintain a desired display ratio. The
#' former must be given, the latter will be adjusted.
#'
#' For certain purposes, e.g. producing a graph that exactly matches a screen,
#' the x and y coordinates must be adjusted to match a given display ratio. If
#' the horizontal range, \code{xlim}, the desired ratio, \code{x_to_y} and the
#' desired center of the y-range, \code{centerY} are provided, this function
#' returns an adapted vertical range, that can be used as \code{ylim} in any
#' plot including \code{\link{phasePortrait}}.
#'
#' @param xlim Numeric vector of length 2; the fixed lower and upper boundary
#' of the horizontal coordinate range
#'
#' @param centerY The vertical coordinate which the output range is to be
#' centered around (default = 0)
#'
#' @param x_to_y The desired ratio of the horizontal (x) to the vertical (y)
#' range. Default is 16/9, a display ratio frequently used for computer or
#' mobile screens
#'
#' @return A numeric vector of length 2; the lower and upper boundary of the
#' resulting vertical coordinate range
#'
#' @family helpers
#'
#' @export
#'
#' @examples
#' # Make a phase portrait of a Jacobi theta function that fully covers a
#' # plot with a display aspect ratio of 4/3.
#' # 10 inch wide window with 4/3 display ratio (x/y)
#' \donttest{
#' # x11(width = 10, height = 10 * 3/4) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' xlim <- c(-3, 3)
#' ylim <- ylimFromXlim(xlim, centerY = -0.3, x_to_y = 4/3)
#' op <- par(mar = c(0, 0, 0, 0), bg = "black") # Omit all plot margins
#' phasePortrait(jacobiTheta, moreArgs = list(tau = 1i/2 - 1/3),
#' xlim = xlim, ylim = ylim, # Apply the coordinate ranges
#' xlim = xlim, ylim = ylim, xaxs = "i", yaxs = "i", # Allow for no room between plot and axes
#' nCores = 1) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' par(op)
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
ylimFromXlim <- function(xlim, centerY = 0, x_to_y = 16/9) {
yRange <- abs(diff(xlim)) / x_to_y
ylim <- centerY + c(-1/2, 1/2) * yRange
return(ylim)
}
#' Adjust xlim to ylim
#'
#' This simple function is useful for adjusting x and y coordinate ranges
#' \code{xlim} and \code{ylim} in order to maintain a desired display ratio. The
#' latter must be given, the former will be adjusted.
#'
#' For certain purposes, e.g. producing a graph that exactly matches a screen,
#' the x and y coordinates must be adjusted to match a given display ratio. If
#' the vertical range, \code{ylim}, the desired ratio, \code{x_to_y} and the
#' desired center of the x-range, \code{centerX}, are provided, this function
#' returns an adapted horizontal range that can be used as \code{xlim} in any
#' plot including \code{\link{phasePortrait}}.
#'
#' @param ylim Numeric vector of length 2; the fixed lower and upper boundary
#' of the vertical coordinate range
#'
#' @param centerX The horizontal coordinate which the output range is to be
#' centered around (default = 0)
#'
#' @param x_to_y The desired ratio of the horizontal (x) to the vertical (y)
#' range. Default is 16/9, a display ratio frequently used for computer or
#' mobile screens
#'
#' @return A numeric vector of length 2; the lower and upper boundary of the
#' resulting horizontal coordinate range
#'
#' @family helpers
#'
#' @export
#'
#' @examples
#' # Make a phase portrait of a pretty function that fully covers a
#' # plot with a display aspect ratio of 5/4.
#'
#' # 9 inch wide window with 5/4 display ratio (x/y)
#' \donttest{
#' # x11(width = 9, height = 9 * 4/5) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' ylim <- c(-8, 7)
#' xlim <- xlimFromYlim(ylim, centerX = 0, x_to_y = 5/4)
#' op <- par(mar = c(0, 0, 0, 0), bg = "black") # Omit all plot margins
#' phasePortrait("exp(cosh(1/(z - 2i + 2)^2 * (1/2i - 1/4 + z)^3))", pType = "pm",
#' xlim = xlim, ylim = ylim, # Apply the coordinate ranges
#' xlim = xlim, ylim = ylim, xaxs = "i", yaxs = "i", # Allow for no room between plot and axes
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' par(op)
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
xlimFromYlim <- function(ylim, centerX = 0, x_to_y = 16/9) {
xRange <- abs(diff(ylim)) * x_to_y
xlim <- centerX + c(-1/2, 1/2) * xRange
return(xlim)
}
|
/scratch/gouwar.j/cran-all/cranData/viscomplexr/R/Helpers.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Mandelbrot iteration with a given number of steps
#'
#' This function is provided as a basis for visualizing the Mandelbrot set with
#' \code{\link{phasePortrait}}. While usual visualizations color the points
#' \emph{outside} the Mandelbrot set dependent on the velocity of divergence,
#' this function produces the information required for coloring the Mandelbrot
#' set itself. For numbers that can be identified as not being elements of the
#' Mandelbrot set, we obtain a \code{NaN+NaNi} value; for all other numbers,
#' the function gives back the value after a user-defined number of iterations.
#' The function has been implemented in C++; it runs fairly fast.
#'
#' The Mandelbrot set comprises all complex numbers \code{z} for which the
#' sequence \code{a[n+1] = a[n]^2 + z} starting with \code{a[0] = 0} remains
#' bounded for all \code{n > 0}. This condition is certainly not true, if, at
#' any time, \code{abs(a[]) >= 2}. The function \code{mandelbrot} performs the
#' iteration for \code{n = 0, ..., itDepth - 1} and permanently checks for
#' \code{abs(a[n+1]) >= 2}. If this is the case, it stops the iteration and
#' returns \code{NaN+NaNi}. In all other cases, it returns \code{a[itDepth]}.
#'
#' @param z Complex number; the point in the complex plane to which the output
#' of the function is mapped
#'
#' @param itDepth An integer which defines the depth of the iteration, i.e. the
#' maximum number of iteration (default: \code{itDepth = 500})
#'
#' @return Either \code{NaN+NaNi} or the complex number obtained after
#' \code{itDepth} iterations
#'
#' @family fractals
#' @family maths
#'
#' @examples
#' # This code shows the famous Mandelbrot figure in total, just in the
#' # opposite way as usual: the Mandelbrot set itself is colored, while the
#' # points outside are uniformly black.
#' # Adjust xlim and ylim to zoom in wherever you like.
#' \donttest{
#' phasePortrait(mandelbrot,
#' xlim = c(-2.3, 0.7),
#' ylim = c(-1.2, 1.2),
#' hsvNaN = c(0, 0, 0),
#' nCores = 1) # Max. two cores on CRAN, not a limit for your use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#' @export
mandelbrot <- function(z, itDepth = 500L) {
.Call(`_viscomplexr_mandelbrot`, z, itDepth)
}
#' Julia iteration with a given number of steps
#'
#' This function is designed as the basis for visualizing normal Julia sets
#' with \code{\link{phasePortrait}}. In contrast to usual visualizations of
#' Julia sets, this requires coloring the actual member points of the set and
#' not the points outside. Therefore, for numbers that can be identified as not
#' being parts of the Julia set, this function returns \code{NaN+NaNi}. All
#' other numbers are mapped to the complex value obtained after a user-defined
#' number of iterations. This function has been implemented in C++; therefore
#' it is fairly fast.
#'
#' Normal Julia sets are closely related to the Mandelbrot set. A normal Julia
#' set comprises all complex numbers \code{z} for which the following sequence
#' is bounded for all \code{n > 0}: \code{a[n+1] = a[n]^2 + c}, starting with
#' \code{a[0] = z}. The parameter \code{c} is a complex number, and the
#' sequence is certainly unbounded if \code{abs(a[]) >= R} with \code{R} being
#' an escape Radius which matches the inequality \code{R^2 - R >= abs(c)}. As
#' the visualization with this package gives interesting pictures (i.e. other
#' than a blank screen) only for \code{c} which are elements of the Mandelbrot
#' set, \code{R = 2} is a good choice. For the author's taste, the Julia
#' visualizations become most interesting for \code{c} located in the border
#' zone of the Mandelbrot set.
#'
#' @param z Complex number; the point in the complex plane to which the output
#' of the function is mapped
#'
#' @param c Complex number; a parameter whose choice has an enormous effect on
#' the shape of the Julia set. For obtaining useful results with
#' \code{\link{phasePortrait}}, \code{c} must be an element of the Mandelbrot
#' set.
#'
#' @param R_esc Real number; the escape radius. If the absolute value of a
#' number obtained during iteration attains or excels the value of
#' \code{R_esc}, \code{juliaNormal} will return \code{NaN+NaNi}. \code{R_esc
#' = 2} is a good choice for \code{c} being an element of the Mandelbrot set.
#' See Details for more information.
#'
#' @param itDepth An integer which defines the depth of the iteration, i.e. the
#' maximum number of iteration (default: \code{itDepth = 500})
#'
#' @return Either \code{NaN+NaNi} or the complex number obtained after
#' \code{itDepth} iterations
#'
#' @family fractals
#' @family maths
#'
#' @examples
#' # This code visualizes a Julia set with some appeal (for the author's
#' # taste). Zoom in as you like by adjusting xlim and ylim.
#' \donttest{
#' phasePortrait(juliaNormal,
#' moreArgs = list(c = -0.09 - 0.649i, R_esc = 2),
#' xlim = c(-2, 2),
#' ylim = c(-1.3, 1.3),
#' hsvNaN = c(0, 0, 0),
#' nCores = 1) # Max. two cores on CRAN, not a limit for your use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#' @export
juliaNormal <- function(z, c, R_esc, itDepth = 500L) {
.Call(`_viscomplexr_juliaNormal`, z, c, R_esc, itDepth)
}
#' Calculate Blaschke products
#'
#' This function calculates Blaschke products
#' (\url{https://en.wikipedia.org/wiki/Blaschke_product}) for a complex number
#' \code{z} given a sequence \code{a} of complex numbers inside the unit disk,
#' which are the zeroes of the Blaschke product.
#'
#' A sequence of points \code{a[n]} located inside the unit disk satisfies the
#' Blaschke condition, if \code{sum[1:n] (1 - abs(a[n])) < Inf}. For each
#' element \code{a != 0} of such a sequence, \code{B(a, z) = abs(a)/a * (a -
#' z)/(1 - conj(a) * z)} can be calculated. For \code{a = 0}, \code{B(a, z) =
#' z}. The Blaschke product \code{B(z)} results as \code{B(z) = prod[1:n]
#' (B(a[n], z))}.
#'
#' @param z Complex number; the point in the complex plane to which the output
#' of the function is mapped
#'
#' @param a Vector of complex numbers located inside the unit disk. At each
#' \code{a}, the Blaschke product will have a zero.
#'
#' @return The value of the Blaschke product at \code{z}.
#'
#' @family maths
#'
#' @examples
#' # Generate random vector of 17 zeroes inside the unit disk
#' n <- 17
#' a <- complex(modulus = runif(n, 0, 1), argument = runif(n, 0, 2*pi))
#' \donttest{
#' # Portrait the Blaschke product
#' phasePortrait(blaschkeProd, moreArgs = list(a = a),
#' xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2),
#' nCores = 1) # Max. two cores on CRAN, not a limit for your use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#' @export
blaschkeProd <- function(z, a) {
.Call(`_viscomplexr_blaschkeProd`, z, a)
}
#' Jacobi theta function
#'
#' Approximation of "the" Jacobi theta function using the first \code{nn}
#' factors in its triple product version
#'
#' This function approximates the Jacobi theta function theta(z; tau) which is
#' the sum of exp(pi*i*n^2*tau + 2*pi*i*n*z) for n in -Inf, Inf. It uses,
#' however, the function's triple product representation. See
#' \url{https://en.wikipedia.org/wiki/Theta_function} for details. This function
#' has been implemented in C++, but it is only slightly faster than well-crafted
#' R versions, because the calculation can be nicely vectorized in R.
#'
#' @param z Complex number; the point in the complex plane to which the output
#' of the function is mapped
#'
#' @param tau Complex number; the so-called half-period ratio, must have a
#' positive imaginary part
#'
#' @param nn Integer; number of factors to be used when approximating the
#' triple product (default = 30)
#'
#' @return The value of the function for \code{z} and \code{tau}.
#'
#' @family maths
#'
#'
#' @examples
#' \donttest{
#' phasePortrait(jacobiTheta, moreArgs = list(tau = 1i/2-1/4),
#' pType = "p", xlim = c(-2, 2), ylim = c(-2, 2),
#' nCores = 1) # Max. two cores on CRAN, not a limit for your use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#' \donttest{
#' phasePortrait(jacobiTheta, moreArgs = list(tau = 1i/2-1/2),
#' pType = "p", xlim = c(-2, 2), ylim = c(-2, 2),
#' nCores = 1)
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#' \donttest{
#' phasePortrait(jacobiTheta, moreArgs = list(tau = 1i/3+1/3),
#' pType = "p", xlim = c(-2, 2), ylim = c(-2, 2),
#' nCores = 1)
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#' \donttest{
#' phasePortrait(jacobiTheta, moreArgs = list(tau = 1i/4+1/2),
#' pType = "p", xlim = c(-2, 2), ylim = c(-2, 2),
#' nCores = 1)
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#' @export
jacobiTheta <- function(z, tau, nn = 30L) {
.Call(`_viscomplexr_jacobiTheta`, z, tau, nn)
}
|
/scratch/gouwar.j/cran-all/cranData/viscomplexr/R/RcppExports.R
|
# R package viscomplexr - phase portraits of functions in the
# complex number plane
# Copyright (C) 2020 Peter Biber
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#' Plot a Riemann sphere mask over a phase portrait
#'
#' The function \code{riemannMask} can be used for laying a circular mask over
#' an existing phasePortrait (as generated with the function
#' \code{\link{phasePortrait}}). This mask shades the plot region outside the
#' unit circle. The unshaded area is a projection on the southern or northern
#' Riemann hemisphere. The standard projection used by
#' \code{\link{phasePortrait}}, i.e. \code{invertFlip = FALSE} hereby
#' corresponds to the southern Riemann hemisphere with the origin being the
#' south pole. If \code{\link{phasePortrait}} was called with \code{invertFlip =
#' TRUE}, then the unit circle contains the northern Riemann hemisphere with the
#' point at infinity in the center (see the vignette for more details). Options
#' for adding annotation, landmark points are available
#' (\insertCite{@see @wegert_visualcpx_2012;textual}{viscomplexr}, p. 41).
#' Several parameters are on hand for adjusting the mask's transparency, color,
#' and similar features. some details, this function behaves less nicely under
#' Windows than under Linux (see Details).
#'
#' There is, unfortunately, a somewhat different behavior of this function under
#' Linux and Windows systems. Under Windows, the region outside the unit circle
#' is only shaded if the whole unit circle fits into the plot region. If only a
#' part of the unit circle is to be displayed, the shading is completely omitted
#' under Windows (annotation etc. works correctly, however), while it works
#' properly on Linux systems. Obviously, the function \code{\link{polypath}},
#' which we are using for creating the unit circle template, is interpreted
#' differently on both systems.
#'
#' @param colMask Color for the shaded area outside the unit circle. Defaults to
#' "white". Can be any kind of color definition R accepts. I recommend,
#' however, to use a color definition without a transparency value, because
#' this would be overridden by the parameter \code{alphaMask}.
#'
#' @param alphaMask Transparency value for the color defined with
#' \code{colMask}. Has to be a value between 0 (fully transparent) and 1
#' (totally opaque). Defaults to 0.5.
#'
#' @param circOutline Boolean - if \code{TRUE}, the outline of the unit circle
#' is drawn. Defaults to
#' \code{TRUE}.
#'
#' @param circLwd Line width of the unit circle outline. Obviously relevant
#' only when \code{circOutline == TRUE}. Defaults to 1.
#'
#' @param circleSteps Number of vertices to draw the circle. Defaults to 360
#' (one degree between two vertices).
#'
#' @param circleCol Color of the unit circle, default is the default foreground
#' color (\code{par("fg")}).
#'
#' @param gridCross Boolean - if \code{TRUE}, a horizontal and a vertical gray
#' line will be drawn over the plot region, intersection in the center of the
#' unit circle. Defaults to \code{FALSE}.
#'
#' @param annotSouth Boolean - add landmark points and annotation for a
#' \emph{southern} Riemann hemisphere, defaults to \code{FALSE}. This
#' annotation fits to an image that has been created with
#' \code{\link{phasePortrait}} and the option \code{invertFlip = FALSE}.
#'
#' @param annotNorth Boolean - add landmark points and annotation for a
#' \emph{northern} Riemann hemisphere, defaults to \code{FALSE}. This
#' annotation fits to an image that has been created with
#' \code{\link{phasePortrait}} and the option \code{invertFlip = TRUE}.
#'
#' @param xlim,ylim optional, if provided must by numeric vectors of length 2
#' defining plot limits as usual. They define the outer rectangle of the
#' Riemann mask. If \code{xlim} or \code{ylim} is not provided (the standard
#' case), the coordinates of the plot window as given by \code{par("usr")}
#' will be used for the missing component.
#'
#' @references
#' \insertAllCited{}
#'
#' @examples
#' # Tangent with fully annotated Riemann masks.
#' # The axis tick marks on the second diagram (Northern hemisphere)
#' # have to be interpreted as the real and imaginary parts of 1/z
#' # (see vignette). The axis labels in this example have been adapted
#' # accordingly.
#' \donttest{
#' # x11(width = 16, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' op <- par(mfrow = c(1, 2), mar = c(4.7, 4.7, 3.5, 3.5))
#' phasePortrait("tan(z)", pType = "pma",
#' main = "Southern Riemann Hemisphere",
#' xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2),
#' xlab = "real", ylab = "imaginary",
#' xaxs = "i", yaxs = "i",
#' nCores = 2) # Max. two cores on CRAN, not a limit for your use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' riemannMask(annotSouth = TRUE, gridCross = TRUE)
#'
#' phasePortrait("tan(z)", pType = "pma",
#' main = "Northern Riemann Hemisphere",
#' invertFlip = TRUE,
#' xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2),
#' xlab = "real (1/z)", ylab = "imaginary (1/z)",
#' xaxs = "i", yaxs = "i",
#' nCores = 2) # Max. two cores on CRAN, not a limit for your use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' riemannMask(annotNorth = TRUE, gridCross = TRUE)
#' par(op)
#' }
#'
#' # Rational function with Riemann masks without annotation.
#' # The axis tick marks on the second diagram (Northern hemisphere)
#' # have to be interpreted as the real and imaginary parts of 1/z
#' # (see vignette). The axis labels in this example have been adapted
#' # accordingly.
#' \donttest{
#' # x11(width = 16, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' op <- par(mfrow = c(1, 2), mar = c(4.7, 4.7, 3.5, 3.5))
#' phasePortrait("(-z^17 - z^15 - z^9 - z^7 - z^2 - z + 1)/(1i*z - 1)",
#' pType = "pma",
#' main = "Southern Riemann Hemisphere",
#' xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2),
#' xlab = "real", ylab = "imaginary",
#' xaxs = "i", yaxs = "i",
#' nCores = 2) # Max. two cores on CRAN, not a limit for your use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' riemannMask(annotSouth = FALSE, gridCross = FALSE, circOutline = FALSE,
#' alphaMask = 0.7)
#'
#' phasePortrait("(-z^17 - z^15 - z^9 - z^7 - z^2 - z + 1)/(1i*z - 1)",
#' pType = "pma",
#' main = "Northern Riemann Hemisphere",
#' invertFlip = TRUE,
#' xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2),
#' xlab = "real (1/z)", ylab = "imaginary (1/z)",
#' xaxs = "i", yaxs = "i",
#' nCores = 2) # Max. two cores on CRAN, not a limit for your use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' riemannMask(annotNorth = FALSE, gridCross = FALSE, circOutline = FALSE,
#' alphaMask = 0.7)
#' par(op)
#' }
#'
#'
#' @export
#'
#'
riemannMask <- function(colMask = "white",
alphaMask = 0.5,
circOutline = TRUE,
circLwd = 1,
circleSteps = 360,
circleCol = par("fg"),
gridCross = FALSE,
annotSouth = FALSE,
annotNorth = FALSE,
xlim = NULL,
ylim = NULL) {
# Get user plot coordinate extremes and calculate
# the widths in both directions (x and y)
coord <- par("usr")
xlmt <- c(coord[1], coord[2])
ylmt <- c(coord[3], coord[4])
# Overwrite if xlim and/or ylim are not NULL
if(!is.null(xlim)) xlmt <- xlim
if(!is.null(ylim)) ylmt <- ylim
# Define the outer frame of the mask
frame <- list(x = c(xlmt[1], xlmt[1], xlmt[2], xlmt[2]),
y = c(ylmt[1], ylmt[2], ylmt[2], ylmt[1]))
# Define the inner unit circle
innerCircle <- list(x = cos(seq(360/circleSteps, 360,
by = 360/circleSteps)*2*pi/360),
y = sin(seq(360/circleSteps, 360,
by = 360/circleSteps)*2*pi/360))
# Plot it
polypath(x = c(frame$x, NA, innerCircle$x),
y = c(frame$y, NA, innerCircle$y),
col = scales::alpha(colMask, alpha = alphaMask),
rule = "evenodd",
border = NA)
# Add circle
if(circOutline) {
plotrix::draw.circle(0, 0, 1, nv = circleSteps,
lwd = circLwd, border = circleCol)
}
# Add grid cross if desired
if(gridCross) {
abline(h = 0, col = "grey")
abline(v = 0, col = "grey")
}
# Add annotation if desired (for southern hemisphere)
if(annotSouth) {
pts <- list(x = c(-1, 0, 0, 0, 1), y = c(0, 1, 0, -1, 0))
points(pts$x, pts$y, pch = 21, col = "black",
bg = c("white", "white", "black", "white", "white"))
labels <- c("-1", "i", "0", "-i", "1")
i <- c(1:5)
lapply(i, function(i, pts, labels) {
adx <- switch(labels[i],
"-1" = 1.5, "i" = -2.5, "0" = -2.0, "-i" = -0.6, "1" = -1.8)
ady <- switch(labels[i],
"-1" = -1.2, "i" = -1.2, "0" = -1.2, "-i" = 2.2, "1" = -1.2)
text(pts$x[i], pts$y[i], labels[i], vfont = c("serif", "bold"),
adj = c(adx, ady))
}, pts = pts, labels = labels)
} # if annotSouth
# Add annotation if desired (for northern hemisphere)
if(annotNorth) {
pts <- list(x = c(-1, 0, 0, 0, 1), y = c(0, 1, 0, -1, 0))
points(pts$x, pts$y, pch = 21, col = "black",
bg = c("white"))
labels <- c("-1", "i", " ", "-i", "1")
i <- c(1:5)
lapply(i, function(i, pts, labels) {
adx <- switch(labels[i],
"-1" = -0.6, "i" = -2.5, " " = -2.0, "-i" = -0.6, "1" = 2.6)
ady <- switch(labels[i],
"-1" = -1.2, "i" = -1.2, " " = -1.2, "-i" = 2.2, "1" = -1.2)
text(pts$x[i], pts$y[i], labels[i], vfont = c("serif", "bold"),
adj = c(adx, ady))
}, pts = pts, labels = labels)
text(0, 0, expression(infinity), adj = c(-0.7, -1.0), cex = 1.7, font = 2)
} # if annotNorth
} # function riemannMask
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
|
/scratch/gouwar.j/cran-all/cranData/viscomplexr/R/RiemannMask.R
|
# R package viscomplexr - phase portraits of functions in the
# complex number plane
# Copyright (C) 2020 Peter Biber
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#' Convert a vector into a comma-separated string
#'
#' A simple utility function that transforms any vector into a single character
#' string, where the former vector elements are separated by commas. This can
#' be useful, in some circumstances, for feeding a series of constant numeric
#' values to \code{\link{phasePortrait}} (see examples). For most applications
#' we recommend, however, to use \code{\link{phasePortrait}}'s parameter
#' \code{moreArgs} instead.
#'
#' @param vec The (usually real or complex valued) vector to be converted.
#'
#' @return A string, where the former vector elements are separated by commas,
#' enclosed between "c(" and ")".
#'
#' @family helpers
#'
#' @export
#'
#' @examples
#' # Make a vector of 77 complex random numbers inside the unit circle
#' n <- 77
#' a <- complex(n, modulus = runif(n), argument = 2*pi*runif(n))
#' a <- vector2String(a)
#' print(a)
#'
#'
#' # Use this for portraying a Blaschke product
#' \donttest{
#' # x11(width = 9.45, height = 6.30) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' op <- par(mar = c(1, 1, 1, 1), bg = "black")
#' n <- 77
#' a <- complex(n, modulus = runif(n), argument = 2*pi*runif(n))
#' a <- vector2String(a)
#' FUN <- paste("vapply(z, function(z, a){
#' return(prod(abs(a)/a * (a-z)/(1-Conj(a)*z)))
#' }, a =", a,
#' ", FUN.VALUE = complex(1))", sep = "")
#' phasePortrait(FUN, pType = "p", axes = FALSE,
#' xlim = c(-3, 3), ylim = c(-2.0, 2.0),
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' par(op)
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
vector2String <- function(vec) {
n <- length(vec)
rVec <- paste(vec, collapse = ", ")
rVec <- paste("c(", rVec, ")", sep = "")
return(rVec)
}
|
/scratch/gouwar.j/cran-all/cranData/viscomplexr/R/Vector2String.R
|
# R package viscomplexr - phase portraits of functions in the
# complex number plane
# Copyright (C) 2020 Peter Biber
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
# ------------------------------------------------------------------------------
# Package viscomplexr
# Main R file
# ------------------------------------------------------------------------------
#' @importFrom grDevices as.raster hsv
#' @importFrom graphics par plot rasterImage abline polypath text points
#' @importFrom stats runif
#' @importFrom parallel detectCores
#' @importFrom doParallel registerDoParallel
#' @importFrom foreach foreach
#' @importFrom foreach getDoParWorkers
#' @importFrom foreach registerDoSEQ
#' @importFrom foreach %dopar%
#' @importFrom Rdpack reprompt
#' @importFrom grDevices col2rgb rgb
# in order to avoid package build warning for the i iterator
# in the foreach loops
utils::globalVariables("i")
# For including Rcpp
#' @useDynLib viscomplexr, .registration = TRUE
#' @importFrom Rcpp sourceCpp
NULL
# -------------------------------------------------------------------------------
# Pointer emulation after
# https://www.stat.berkeley.edu/~paciorek/computingTips/
# Pointers_passing_reference_.html
newPointer <- function(inputValue) {
object <- new.env(parent = emptyenv())
object$value <- inputValue
class(object) <- "pointer"
return(object)
} # newPointer
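# Illustration (not executed; a minimal sketch of the reference semantics this
# emulates - the names below are hypothetical):
#   p <- newPointer(1:3)
#   doubleIt <- function(ptr) { ptr$value <- ptr$value * 2; invisible(NULL) }
#   doubleIt(p)
#   p$value   # 2 4 6 - modified in place, nothing had to be returned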
# -------------------------------------------------------------------------------
# Function phaseColhsv
# Calculates a hsv color array based on an array of complex numbers, both arrays
# handed over as pointers.
# This function only takes into account the arguments of the complex numbers.
# It is called from phasePortrait when pType = "p".
# The checks for NaN values (obtained at definition gaps other than Inf,
# i.e. result NaN) are important in order to ensure proper execution of hsv.
# The color for NaN values is hsvNaN.
# Default is a neutral grey (h = 0, s = 0, v = 0.5).
phaseColhsv <- function(pCompArr, pHsvCol, stdSaturation = 0.8,
hsvNaN = c(0, 0, 0.5)) {
names(hsvNaN) <- c("h", "s", "v")
dims <- dim(pCompArr$value)
h <- Arg(pCompArr$value)
h <- ifelse(is.nan(h), hsvNaN["h"], ifelse(h < 0, h + 2*pi, h) / (2*pi))
v <- ifelse(is.nan(pCompArr$value), hsvNaN["v"], 1)
s <- ifelse(is.nan(pCompArr$value), hsvNaN["s"], stdSaturation)
pHsvCol$value <- array(hsv(h = h, v = v, s = s), dims)
return(pHsvCol)
} # phaseColhsv
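# Illustration (not executed): an argument of 0 maps to h = 0 (red) and 2*pi/3
# to h = 1/3 (green); negative arguments are shifted by 2*pi first, so -2*pi/3
# (i.e. 4*pi/3) maps to h = 2/3 (blue) and -pi/2 to h = 3/4.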
# ------------------------------------------------------------------------------
# Function phaseAngColhsv
# Works like phaseColhsv above, but additionally provides phase contour
# lines and shading. Called from phasePortrait when pType = "pa".
phaseAngColhsv <- function(pCompArr, pHsvCol, lambda = 7, pi2Div = 24,
argOffset = 0, stdSaturation = 0.8,
darkestShade = 0.1,
hsvNaN = c(0, 0, 0.5)) {
names(hsvNaN) <- c("h", "s", "v")
dims <- dim(pCompArr$value)
argmt <- Arg(pCompArr$value)
h <- ifelse(is.nan(argmt), hsvNaN["h"],
ifelse(argmt < 0, argmt + 2*pi, argmt)/ (2*pi))
v <- ifelse(is.nan(pCompArr$value), hsvNaN["v"],
darkestShade + (1 - darkestShade) *
(((argmt - argOffset) / (2*pi / pi2Div)) %% 1)^(1/lambda))
s <- ifelse(is.nan(pCompArr$value), hsvNaN["s"], stdSaturation)
pHsvCol$value <- array(hsv(h = h, v = v, s = s), dims)
return(pHsvCol)
} # phaseAngColhsv
# ------------------------------------------------------------------------------
# Function phaseModColhsv
# Works like phaseColhsv above, but additionally provides contour
# lines and shading for the modulus. Called from phasePortrait
# when pType = "pm".
# The checks for NaN values (obtained at definition gaps other than Inf,
# i.e. result NaN) are important in order to ensure proper execution of hsv.
# The color for Nan values is hsvNaN.
# Default is a neutral grey (h = 0, s = 0, v = 0.5).
# Complex numbers with infinite modulus have a valid argument, so infinite
# modulus gets no shading at all (v = 1).
phaseModColhsv <- function(pCompArr, pHsvCol, lambda = 7, logBase = 2,
stdSaturation = 0.8, darkestShade = 0.1,
hsvNaN = c(0, 0, 0.5)) {
names(hsvNaN) <- c("h", "s", "v")
dims <- dim(pCompArr$value)
h <- Arg(pCompArr$value)
h <- ifelse(is.nan(h), hsvNaN["h"], ifelse(h < 0, h + 2*pi, h)/ (2*pi))
v <- Mod(pCompArr$value)
v <- ifelse(is.nan(pCompArr$value), hsvNaN["v"],
ifelse(is.infinite(v), 1,
ifelse(v == 0, darkestShade,
darkestShade + (1 - darkestShade) *
(log(v, logBase) %% 1)^(1/lambda))))
s <- ifelse(is.nan(pCompArr$value), hsvNaN["s"], stdSaturation)
pHsvCol$value <- array(hsv(h = h, v = v, s = s), dims)
return(pHsvCol)
} # phaseModColhsv
# ------------------------------------------------------------------------------
# Function phaseModAngColhsv
# Works like phaseColhsv above, but additionally provides contour lines and
# shading for the modulus _and_ for the argument. Called from phasePortrait
# when pType = "pma".
# The checks for NaN values (obtained at definition gaps other than Inf,
# i.e. result NaN) are important in order to ensure proper execution of hsv.
# The color for NaN values is hsvNaN.
# Default is a neutral grey (h = 0, s = 0, v = 0.5).
# Complex numbers with infinite modulus have a valid argument, so infinite
# modulus gets no shading (v = 1) for modulus. But shading for the argument
# can still occur.
phaseModAngColhsv <- function(pCompArr, pHsvCol, lambda = 7, gamma = 9/10,
logBase = 2, pi2Div = 24, argOffset = 0,
stdSaturation = 0.8, darkestShade = 0.1,
hsvNaN = c(0, 0, 0.5)) {
names(hsvNaN) <- c("h", "s", "v")
dims <- dim(pCompArr$value)
argmt <- Arg(pCompArr$value)
h <- ifelse(is.nan(argmt), hsvNaN["h"],
ifelse(argmt < 0, argmt + 2*pi, argmt)/(2 * pi))
vMod <- Mod(pCompArr$value)
vMod <- ifelse(is.nan(pCompArr$value), hsvNaN["v"],
ifelse(is.infinite(vMod), 1,
ifelse(vMod == 0, 0,
(log(vMod, logBase) %% 1)^(1/lambda))))
vAng <- ifelse(is.nan(pCompArr$value), hsvNaN["v"],
(((argmt - argOffset) / (2 * pi / pi2Div)) %% 1)^(1/lambda))
v <- ifelse(is.nan(pCompArr$value), hsvNaN["v"],
darkestShade + (1 - darkestShade) *
(gamma * vMod * vAng +
(1 - gamma) * (1 - (1 - vMod) * (1 - vAng))))
s <- ifelse(is.nan(pCompArr$value), hsvNaN["s"], stdSaturation)
pHsvCol$value <- array(hsv(h = h, v = v, s = s), dims)
return(pHsvCol)
} # phaseModAngColhsv
# ------------------------------------------------------------------------------
# Function buildArray
# Build an array of complex numbers which represents the complex plane.
# Each array cell represents a pixel and contains the complex number z
# associated with this pixel. These are the z-values to be transformed
# later by the function of interest.
# Computing times for 10x10 inch at 150x150 pixels per inch (= 2250000 pixels) have been
# ok so far. Thus, we compose the array as a set of vertically arranged
# blocks of about that size. Each block is saved as a temporary file in the
# tempDir folder. The function gives back all information required to locate,
# identify and process the temporary files in the correct order.
# The blocks are built and saved in a parallel loop.
buildArray <- function(widthPx, heightPx, xlim, ylim,
blockSizePx = 2250000,
tempDir,
verbose) {
# How many blocks to build?
linesPerBlock <- blockSizePx %/% widthPx
nBlocks <- heightPx %/% linesPerBlock
linesRest <- heightPx %% linesPerBlock
nBlocks <- nBlocks + 1 * (linesRest > 0)
if(verbose) cat("\n.making", nBlocks, "blocks ... ")
# First and last line number covered by each block
iBlocks <- c(1:nBlocks)
lower <- 1 + (iBlocks - 1) * linesPerBlock
upper <- iBlocks * linesPerBlock
if(linesRest != 0) upper[nBlocks] <- lower[nBlocks] - 1 + linesRest
uplow <- cbind(lower, upper)
# Random Code for all files to be saved during this run
rndCode <- substr(format(round(runif(1), 10), nsmall = 10), 3, 12)
# Define z-Values of the Pixels
xPxValVec <- (xlim[2] - xlim[1])/(widthPx - 1) *
c(0:(widthPx - 1)) + xlim[1]
yPxValVec <- (ylim[2] - ylim[1])/(heightPx - 1) *
c((heightPx - 1):0) + ylim[1]
# Find the xlim and ylim values for each Block, combine all meta
# information about the blocks into one data.frame (metaZ).
fileNames <- paste(formatC(lower, width = trunc(log10(lower[nBlocks])) + 1,
flag = "0"), "zmat", rndCode, ".RData", sep = "")
# Define data.frame with meta information
metaZ <- cbind(data.frame(fileNames = fileNames),
uplow,
xlim1 = xPxValVec[1], ylim1 = yPxValVec[upper],
xlim2 = xPxValVec[length(xPxValVec)], ylim2 = yPxValVec[lower])
# Check for temporary directory, if it is not the tempdir() of the current
# R session, create it if it does not exist
if(tempDir != tempdir())
if(!dir.exists(tempDir)) { dir.create(tempDir, recursive = TRUE) }
# Parallel loop
if(verbose) cat("parallel loop starting ... ")
foreach(i = iBlocks) %dopar% {
yPxValVecBlock <- rep(yPxValVec[c(metaZ[i,"lower"]:metaZ[i,"upper"])],
widthPx)
xPxValVecBlock <- rep(xPxValVec,
each = metaZ[i,"upper"] - metaZ[i,"lower"] + 1)
compVec <- complex(real = xPxValVecBlock, imaginary = yPxValVecBlock)
compArr <- array(compVec, dim = c(metaZ[i,"upper"] - metaZ[i,"lower"] + 1,
widthPx))
save(compArr, file = paste(tempDir, metaZ[i,"fileNames"], sep = "/"))
rm(list = c("compArr", "compVec", "xPxValVecBlock", "yPxValVecBlock"))
} # foreach i
if(verbose) cat("done.")
# Arrange meta-information to be returned
metaBack <- list(tempDir = tempDir, rndCode = rndCode, metaZ = metaZ)
return(metaBack)
} # function buildArray
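# Illustration (not executed): with widthPx = 1500, heightPx = 2000 and the
# default blockSizePx = 2250000, linesPerBlock is 1500, so two blocks are
# written: one covering rows 1-1500 and a remainder block covering rows
# 1501-2000.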
# -----------------------------------------------------------------------------
# rbindArraysbyPointer
# Takes a list of pointers to arrays of the same width (and same type), rbinds
# the arrays in order and gives back the pointer to the combined array
# while deleting all others. For reasons of convenience this one return value
# is a one-element list.
rbindArraysbyPointer <- function(pArray) {
nn <- length(pArray)
if(nn > 1) {
for(i in (2:nn)) {
# Always append the next one
pArray[[1]]$value <- rbind(pArray[[1]]$value, pArray[[2]]$value)
# Remove just appended element from list. Next element becomes #2.
pArray[[2]]$value <- NULL
pArray[[2]] <- NULL
} # for i
} # if length(nn > 1)
return(pArray[[1]]) # Useful though looking strange
} # function rbindArraysbyPointer
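# Illustration (not executed): for two pointers pA and pB to arrays of equal
# width, rbindArraysbyPointer(list(pA, pB)) returns pA, whose value is now
# rbind(pA$value, pB$value); pB$value has been set to NULL to free memory.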
# ------------------------------------------------------------------------------
# verticalSplitIndex
# Returns the row indexes for splitting an array with nnRows as required for
# parallel processing with nnCores cores.
# Used in the functions complexArrayPlot and phasePortrait.
verticalSplitIndex <- function(nnRows, nnCores) {
# nProcss: Number of processes to be parallelized
nProcss <- min(nnRows, nnCores)
nRowsPerChunk <- nnRows %/% nProcss
nRest <- nnRows %% nProcss
iProcss <- c(1:nProcss)
iPerChunk <- c(1:nRowsPerChunk)
lower <- 1 + nRowsPerChunk * (iProcss - 1)
upper <- nRowsPerChunk * (iProcss)
upper[nProcss] <- upper[nProcss] + nRest
uplow <- asplit(cbind(lower, upper), MARGIN = 1)
return(uplow)
} # function verticalSplitIndex
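# Illustration (not executed): verticalSplitIndex(10, 3) yields the row index
# pairs (1, 3), (4, 6) and (7, 10), i.e. the last chunk absorbs the remainder
# rows that do not divide evenly among the workers.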
# ------------------------------------------------------------------------------
# Function complexArrayPlot
# Displays an array of complex numbers in an existing plot.
# In order to do so, the temporary files that together form the array are
# read from disk one by one, but each one is processed in a parallel loop.
# The resulting array of hsv color values is finally plotted as
# a raster image.
complexArrayPlot <- function(zMetaInfrm, xlim, ylim,
pType = "pma", invertFlip = FALSE,
lambda = 7, gamma = 9/10, pi2Div = 9,
logBase = exp(2*pi/pi2Div),
argOffset = 0,
stdSaturation = 0.8,
darkestShade = 0.1,
hsvNaN = c(0, 0, 0.5),
asp = 1,
xlab = "", ylab = "",
verbose,
...) {
# Set up plot
plot(NULL, xlim = xlim, ylim = ylim, asp = asp, xlab = xlab, ylab = ylab, ...)
# Define call to color transformation function depending user's
# choice of pType
colCmd <- switch(pType,
"p" = "phaseColhsv(pListCompArr[[i]],
pHsvCol,
stdSaturation = stdSaturation,
hsvNaN = hsvNaN)",
"pm" = "phaseModColhsv(pListCompArr[[i]],
pHsvCol,
lambda = lambda,
logBase = logBase,
stdSaturation = stdSaturation,
darkestShade = darkestShade,
hsvNaN = hsvNaN)",
"pa" = "phaseAngColhsv(pListCompArr[[i]],
pHsvCol,
lambda = lambda,
pi2Div = pi2Div,
argOffset = argOffset,
stdSaturation = stdSaturation,
darkestShade = darkestShade,
hsvNaN = hsvNaN)",
"pma" = "phaseModAngColhsv(pListCompArr[[i]],
pHsvCol,
lambda = lambda,
gamma = gamma,
pi2Div = pi2Div,
logBase = logBase,
argOffset = argOffset,
stdSaturation = stdSaturation,
darkestShade = darkestShade,
hsvNaN = hsvNaN)"
) # switch
# Obtain the names of the files to load and process
zMetaInfrm$metaZ$wFileNames <- paste(zMetaInfrm$tempDir,
zMetaInfrm$metaZ$wFileNames, sep = "/")
# Run the color transformation function over each file
pHsvCol <- lapply(c(1:nrow(zMetaInfrm$metaZ)),
function(i, zMetaInfrm, colCmd) {
if(verbose) cat("\n.transforming block", i, "... ")
# load a block (will soon become a list of pointers, hence the name)
pListCompArr <- get(load(zMetaInfrm$metaZ[i,]$wFileNames))
# split it for parallel processing
nCores <- getDoParWorkers()
uplow <- verticalSplitIndex(nrow(pListCompArr), nCores)
# here's the actual splitting, pListCompArr becomes a list of pointers
pListCompArr <- lapply(uplow, FUN = function(uplow, pListCompArr) {
nwPtr <- newPointer(pListCompArr[c(uplow[1]:uplow[2]),])
# if the split result has only one line, it will automatically become a
# vector, which is undesired, because functions coming later require it
# as a two-dimensional array. This is made sure here.
if(uplow[1] == uplow[2]) {
dim(nwPtr$value) <- c(1, length(nwPtr$value))
}
return(nwPtr)
}, pListCompArr = pListCompArr)
# Parallel loop transforming the chunks into a color raster each;
# giving back a list of pointers to the rasters
if(verbose) cat("parallel loop starting ... ")
pHsvCol <- foreach(i = c(1:length(pListCompArr)),
.export = c("phaseColhsv",
"phaseModColhsv",
"phaseAngColhsv",
"phaseModAngColhsv",
"logBase",
"lambda",
"gamma",
"pi2Div",
"stdSaturation",
"darkestShade",
"hsvNaN",
"newPointer",
"argOffset"),
.combine = c) %dopar% {
pHsvCol <- newPointer(NULL)
eval(parse(text = colCmd)) # Does not require a return value,
# changes color array via pointer
pListCompArr[[i]]$value <- NULL # Reduced here, but removed after
# the foreach loop
return(pHsvCol)
} # foreach
if(verbose) cat("done.")
# Remove the original list of array pointers
rm(pListCompArr)
# Combine the color arrays in the value of the first pointer.
# Free the others (rbindArraysbyPointer).
# Enforce (one-element-) list in case there is only one value
# (i.e. if foreach loop was executed sequentially, one core only)
if(length(pHsvCol) == 1) pHsvCol <- list(pHsvCol)
pHsvCol <- rbindArraysbyPointer(pHsvCol)
return(pHsvCol)
}, # function in lapply
zMetaInfrm = zMetaInfrm, colCmd = colCmd
) # lapply
# Now combine all blocks into the big raster ...
if(verbose) cat("\nCombine color rasters ... ")
pHsvCol <- rbindArraysbyPointer(pHsvCol)
if(verbose) cat("done.\n")
# ... and plot it
if(verbose) cat("Plotting raster image ... ")
rasterImage(as.raster(pHsvCol$value), xlim[1], ylim[1], xlim[2], ylim[2])
if(verbose) cat("done.\n")
pHsvCol$value <- NULL
rm(pHsvCol)
return(NULL)
} # complexArrayPlot
# -----------------------------------------------------------------------------
# Function makeFunctionFromInput
# Transform an input FUN and moreArgs (a named list), both inputs to
# phasePortrait, into an executable function.
makeFunctionFromInput <- function(FUN, moreArgs = NULL) {
# If match.fun() detects a function, give it back. If not, return NULL
testFun <- tryCatch(match.fun(FUN), error = function(err) NULL)
# If this does not work, maybe we have a useful character string
if(is.null(testFun) & mode(FUN) == "character") {
if(!is.null(moreArgs)) {
moreArgString <- paste(",", names(moreArgs), collapse = "")
}
else {
moreArgString <- ""
}
exprText <- paste("function(z", moreArgString, ") ", FUN, sep = "")
testFun <- eval(parse(text = exprText))
} # if character
# Test the function if the above resulted in something else
# than NULL
if(!is.null(testFun)) {
# Arbitrary number for testing the function
testNum <- complex(real = runif(1), imaginary = runif(1))
if(!is.null(moreArgs)) testArgs <- c(testNum, moreArgs)
else testArgs <- list(testNum)
# if NULL comes back from this call, the function does not work
testOut <- tryCatch(do.call(testFun, testArgs),
error = function(err) NULL)
if(is.null(testOut)) testFun <- NULL
} # if(!is.null(compFun))
return(testFun)
} # makeFunctionFromInput
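# Illustration (not executed): a character input like
#   makeFunctionFromInput("z^2 + a", moreArgs = list(a = 1i))
# is parsed into the function function(z, a) z^2 + a and test-evaluated once
# with a random complex argument before being returned; if parsing or the test
# evaluation fails, NULL is returned instead.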
# -----------------------------------------------------------------------------
#' Create phase portraits of complex functions
#'
#' \code{phasePortrait} makes phase portraits of functions in the complex number
#' plane. It uses a technique often (but not quite correctly) called
#' \emph{domain coloring} (\url{https://en.wikipedia.org/wiki/Domain_coloring}).
#' While many varieties of this technique exist, this book relates closely to
#' the standards proposed by E. Wegert in his book \emph{Visual Complex
#' Functions} \insertCite{wegert_visualcpx_2012}{viscomplexr}. In a nutshell,
#' the argument (\code{\link{Arg}}) of any complex function value is displayed
#' as a color from the chromatic circle. The fundamental colors red, green, and
#' blue relate to the arguments (angles) of 0, 2/3pi, and 4/3pi (with smooth
#' color transitions in between), respectively. Options for displaying the
#' modulus (\code{\link{Mod}}) of the complex values and additional reference
#' lines for the argument are available. This function is designed for being
#' used inside the framework of R base graphics. It makes use of parallel
#' computing, and depending on the desired resolution it may create extensive
#' sets of large temporary files (see Details and Examples).
#'
#' This function is intended to be used inside the framework of R base graphics.
#' It plots into the active open graphics device where it will display the phase
#' plot of a user defined function as a raster image. If no graphics device is
#' open when called, the function will plot into the default graphics device.
#' This principle allows to utilize the full functionality of R base graphics.
#' All graphics parameters (\code{\link{par}}) can be freely set and the
#' function \code{phasePortrait} accepts all parameters that can be passed to
#' the \code{\link{plot.default}} function. This allows all kinds of plots -
#' from scientific representations with annotated axes and auxiliary lines,
#' notation, etc. to poster-like artistic pictures.\cr
#'
#' \describe{
#' \item{Mode of operation}{After being called, \code{phasePortrait} gets the
#' size in inch of the plot region of the graphics device it is plotting into.
#' With the parameter \code{res} which is the desired plot resolution in dpi,
#' the horizontal and vertical number of pixels is known. As \code{xlim} and
#' \code{ylim} are provided by the user, each pixel can be attributed a
#' complex number z from the complex plane. In that way a two-dimensional
#' array is built, where each cell represents a point of the complex plane,
#' containing the corresponding complex number z. This array is set up in
#' horizontal strips (i.e. split along the imaginary axis), each strip
#' containing approximately \code{blockSizePx} pixels. In a parallel computing
#' loop, the strips are constructed, saved as temporary files and immediately
#' deleted from the RAM in order to avoid memory overflow. After that, the
#' strips are sequentially loaded and subdivided into a number of chunks that
#' corresponds to the number of registered parallel workers (parameter
#' \code{nCores}). By parallely processing each chunk, the function
#' \code{f(z)} defined by the user in the argument \code{FUN} is applied to
#' each cell of the strip. This results in an array of function values that
#' has exactly the same size as the original strip. The new array is saved as
#' a temporary file, the RAM is cleared, and the next strip is loaded. This
#' continues until all strips are processed. In a similar way, all strips
#' containing the function values are loaded sequentially, and in a parallel
#' process the complex values are translated into colors which are stored in a
#' raster object. While the strips are deleted from the RAM after processing,
#' the color values obtained from each new strip are appended to the color
#' raster. After all strips are processed, the raster is plotted into the plot
#' region of the graphics device. If not explicitly defined otherwise by the
#' user, all temporary files are deleted after that.
#' }
#' \item{Temporary file system}{By default, the above-mentioned temporary
#' files are deleted after use. This will not happen, if the parameter
#' \code{deleteTempFiles} is set to \code{FALSE} or if \code{phasePortrait}
#' does not terminate properly. In both cases, you will find the files in the
#' directory specified with the parameter \code{tempDir}. These files are
#' \code{.RData} files, each one contains a two-dimensional array of complex
#' numbers. The file names follow a strict convention, see the following
#' examples:\cr\cr
#' \code{0001zmat2238046385.RData}\cr
#' \code{0001wmat2238046385.RData}\cr\cr
#' Both names begin with '0001', indicating that the array's top line is the
#' first line of the whole clipping of the complex number plane where the
#' phase portrait relates to. The array which follows below can e.g. begin
#' with a number like '0470', indicating that its first line is line number
#' 470 of the whole clipping. The number of digits for these line numbers is
#' not fixed. It is determined by the greatest number required. Numbers with
#' less digits are zero-padded. The second part of the file name is either
#' \code{zmat} or \code{wmat}. The former indicates an array whose cells
#' contain untransformed numbers of the complex number plane. The latter
#' contains the values obtained from applying the function of interest to the
#' first array. Thus, cells at the same position in both arrays exactly relate
#' to each other. The third part of the file names is a ten-digit integer.
#' This is a random number which all temporary files stemming from the same
#' call of \code{phasePortrait} have in common. This guarantees that no
#' temporary files will be confounded by the function, even if undeleted
#' temporary files from previous runs are still present.
#' }
#' \item{HSV color model}{For color-coding the argument of a complex number,
#' \code{phasePortrait} uses the \code{\link{hsv}} (hue, saturation, value)
#' color model. Hereby, the argument is mapped to a position on the chromatic
#' circle, where the fundamental colors red, green, and blue relate to the
#' arguments (angles) of 0, 2/3*pi, and 4/3*pi, respectively. This affects only
#' the hue component of the color model. The value component is used for
#' shading modulus and/or argument zones. The saturation component for all
#' colors can be defined with the parameter \code{stdSaturation}.
#' }
#' \item{Zone definitions and shading}{In addition to displaying colors for
#' the arguments of complex numbers, zones for the modulus and/or the argument
#' are shaded for \code{pType} other than "p". The modulus zones are defined
#' in a way that each zone covers moduli whose logarithms to the base
#' \code{logBase} have the same integer part. Thus, from the lower edge of one
#' modulus zone to its upper edge, the modulus multiplies with the value of
#' \code{logBase}. The shading of a modulus zone depends on the fractional
#' parts \code{x} of the above-mentioned logarithms, which cover the interval
#' \code{[0, 1[}.
#' This translates into the value component \code{v} of the \code{\link{hsv}}
#' color model as follows:\cr\cr
#' \code{v = darkestShade + (1 - darkestShade) * x^(1/lambda)}\cr\cr
#' where \code{darkestShade} and \code{lambda} are parameters that can be
#' defined by the user. Modifying the parameters \code{lambda} and
#' \code{darkestShade} is useful for adjusting contrasts in the phase
#' portraits. The argument zone definition is somewhat simpler: Each zone
#' covers an angle domain of \code{2*pi / pi2Div}, the "zero reference" for
#' all zones being \code{argOffset}. The angle domain of one zone is linearly
#' mapped to a value \code{x} from the range \code{[0, 1[}.
#' The value component of the color to be displayed is calculated as a
#' function of \code{x} with the same equation as shown above. In case the
#' user has chosen \code{pType = "pma"}, x-values \code{xMod} and \code{xArg}
#' are calculated separately for the modulus and the argument, respectively.
#' They are transformed into preliminary v-values as follows:\cr\cr
#' \code{vMod = xMod^(1/lambda)} and \code{vArg = xArg^(1/lambda)}\cr\cr
#' From these, the final v value results as\cr\cr
#' \code{v = darkestShade + (1-darkestShade) * (gamma * vMod * vArg +
#' (1-gamma) * (1 - (1-vMod) * (1-vArg)))}\cr\cr
#' The parameter \code{gamma} (range \code{[0, 1]}) determines the way in
#' which vMod and vArg are combined. The closer \code{gamma} is to one, the
#' more the smaller of the two values dominates the outcome, and vice versa.
#' A worked numerical illustration of the modulus shading follows after this
#' list.
#' }
#' \item{Defining more complicated functions as strings with
#' \code{\link{vapply}}}{You might want to write and use functions which
#' require more code than a single statement like \code{(z-3)^2+1i*z}. As
#' mentioned in the description of the parameter \code{FUN}, we recommend to
#' define such functions as separate objects and hand them over as such. There
#' might, however, be cases where it is more convenient to define a function
#' as a single long string and pass this string to \code{FUN}.
#' In order to make this work, \code{\link{vapply}} should be used for
#' wrapping the actual code of the function. This is probably not the use of
#' \code{\link{vapply}} intended by its developers, but it works nicely and
#' performs well. The character string has to have the following structure
#' "vapply(z, function(z, \emph{other arguments if required}) \{\emph{define
#' function code in here}\}, \emph{define other arguments here}, FUN.VALUE =
#' complex(1))". See examples.
#' }
#' }
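#'
#' As a worked numerical illustration of the modulus shading described above
#' (not part of the package code, numbers chosen freely for illustration):
#' with the defaults \code{pi2Div = 9}, \code{logBase = exp(2*pi/9)},
#' \code{darkestShade = 0.1}, and \code{lambda = 7}, a function value with
#' modulus 3 yields \code{log(3, base = exp(2*pi/9)) = 1.574}. Its fractional
#' part is \code{x = 0.574}, resulting in a value component of
#' \code{v = 0.1 + 0.9 * 0.574^(1/7) = 0.93}, i.e. a rather bright shade.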
#'
#'
#' @param FUN The function to be visualized. There are two possibilities to
#' provide it, a quoted character string, or a function object.
#' \describe{
#' \item{Quoted character string}{ A function can be provided as a quoted
#' character string containing an expression R can interpret as a function of
#' a complex number z. Examples: "sin(z)", "(z^2 - 1i)/(tan(z))", "1/4*z^2 -
#' 10*z/(z^4+4)". Names of functions known in your R session can be used in a
#' standalone way, without mentioning z, e.g. "sin", "tan", "sqrt". Obviously,
#' this also works for functions you defined yourself, e.g.
#' "myIncredibleFunction" would be valid if you coded a function with this
#' name before. This is especially useful for functions which require
#' additional parameters beside the complex number they are supposed to
#' calculate with. Such arguments can be provided via the parameter
#' \code{moreArgs}. One-liner expressions provided as strings are also
#' compatible with \code{moreArgs} (see examples).
#'
#' While it is not the way we recommend for most purposes, you can even define
#' more complicated functions of your own as character strings. In this case,
#' you need to use \code{\link{vapply}} as a wrapper for your actual function
#' (see Details and Examples). Such constructions allow providing additional
#' input variables as a part of the character string by using the
#' \code{\link{vapply}}-mechanism (see Details and Examples). The helper
#' function \code{\link{vector2String}} can be useful for that matter.
#' However, the parameter \code{moreArgs} is not applicable in this context.
#' Probably, the most useful application of the function-as-string concept is
#' when the user defined function, possibly including values for additional
#' arguments, is to be pasted together at runtime.}
#'
#' \item{Function object}{ It is also possible
#' to directly provide function objects to \code{FUN}. This can be any
#' function known to R in the current session. Simply put, for functions like
#' sin, tan, cos, and sqrt you do not even have to quote their names when
#' passing them to \code{phasePortrait}. Same applies to functions you defined
#' yourself. It is also possible to hand over an anonymous function to
#' \code{FUN} when calling \code{phasePortrait}. In all these cases, the
#' parameter \code{moreArgs} can be used for providing additional arguments to
#' \code{FUN}. In general, providing a function as an object, and using
#' \code{moreArgs} in case additional arguments are required, is what we
#' recommend for user-defined functions.}
#' }
#'
#' When executing \code{phasePortrait}, \code{FUN} is first evaluated with
#' \code{\link{match.fun}}. If this is not successful, an attempt to interpret
#' \code{FUN} as an expression will be made. If this fails,
#' \code{phasePortrait} terminates with an error.
#'
#' @param moreArgs A named list of other arguments to FUN. The names must match
#' the names of the arguments in FUN's definition.
#'
#' @param xlim The x limits (x1, x2) of the plot as a two-element numeric
#' vector. Follows exactly the same definition as in
#' \code{\link{plot.default}}. Here, \code{xlim} has to be interpreted as the
#' plot limits on the real axis.
#'
#' @param ylim The y limits of the plot (y1, y2) to be used in the same way as
#' \code{xlim}. Evidently, \code{ylim} indicates the plot limits on the
#' imaginary axis.
#'
#' @param invertFlip If \code{TRUE}, the function is mapped over a z plane,
#' which has been transformed to \code{1/z * exp(1i*pi)}. This is the
#' projection required to plot the north Riemann hemisphere in the way
#' proposed by \insertCite{wegert_visualcpx_2012;textual}{viscomplexr}, p. 41.
#' Defaults to \code{FALSE}. If this option is chosen, the numbers at the
#' axis ticks have another meaning than in the normal case. Along the real
#' axis, they represent the real part of \code{1/z}, and along the imaginary
#' axis, they represent the imaginary part of \code{1/z}. Thus, if you want
#' annotation, you should choose appropriate axis labels like \code{xlab =
#' Re(1/z)}, and \code{ylab = Im(1/z)}.
#'
#' @param res Desired resolution of the plot in dots per inch (dpi). Default is
#' 150 dpi. All other things being equal, \code{res} has a strong influence on
#' computing times (double \code{res} means fourfold number of pixels to
#' compute). A good approach could be to make a plot with low resolution (e.g.
#' the default 150 dpi) first, adjust whatever required, and plot into a
#' graphics file with high resolution after that.
#'
#' @param blockSizePx Number of pixels and corresponding complex values to be
#' processed at the same time (see Details). Default is 2250000. This value
#' gave good performance on older systems as well as on a high-end gaming
#' machine, but some tweaking for your individual system might even improve
#' things.
#'
#' @param tempDir \code{NULL} or a character string, specifying the name of the
#' directory where the temporary files written by \code{phasePortrait} are
#' stored. Default is \code{NULL}, which makes \code{phasePortrait} use the
#' current R session's temporary directory. Note that if you specify another
#' directory, it will be created if it does not exist already. Even though the
#' temporary files are deleted after completing a phase portrait (unless the
#' user specifies \code{deleteTempFiles = FALSE}, see below), the directory
#' will remain alive even if it has been created by \code{phasePortrait}.
#'
#' @param nCores Number of processor cores to be used in the parallel computing
#' tasks. Defaults to the maximum number of cores available minus 1. Any
#' number between 1 (serial computation) and the maximum number of cores
#' available as indicated by \code{parallel::detectCores()} is accepted. If
#' \code{nCores} is set to a value greater than the available number of cores,
#' the function will use one core less than available.
#'
#' @param pType One of the four options for plotting, "p", "pa", "pm", and "pma"
#' as a character string. Defaults to "pma". Option "p" produces a mere phase
#' plot, i.e. contains only colors for the complex numbers' arguments, but no
#' reference lines at all. The option "pa" introduces shading zones that
#' emphasize the arguments. These zones each cover an angle defined by
#' \code{2*pi/pi2Div}, where pi2Div is another parameter of this function (see
#' there). These zones are shaded darkest at the lowest angle (counter
#' clockwise). Option "pm" displays the modulus by indicating zones, where the
#' moduli at the higher edge of each zone are in a constant ratio with the
#' moduli at the lower edge of the zone. Default is a ratio of almost exactly
#' 2 (see parameter \code{logBase} for details). Color shading is darkest at
#' the lower edge of a zone and brightest at the higher edge (see parameters
#' \code{darkestShade}, and \code{stdSaturation}). Option "pma" (default)
#' includes both shading schemes.
#'
#' @param pi2Div Angle distance for the argument reference zones added for
#' \code{pType = "pma"} or \code{pType = "pa"}. The value has to be given as
#' an integer by which 2*pi (i.e. 360 degrees) is divided. The default is
#' 9; thus, reference zones are delineated by default in distances of 2*pi/9,
#' i.e. 40 degrees, starting with 0, i.e. the color red, if not defined
#' otherwise with the parameter \code{argOffset}. In contrast to the borders
#' delimiting the modulus zones, the borders of the reference zones for the
#' argument always follow the same color (by definition).
#'
#' @param logBase Modulus ratio between the edges of the modulus reference zones
#' in \code{pType} \code{"pm"} and \code{"pma"}. As recommended by
#' \insertCite{wegert_visualcpx_2012;textual}{viscomplexr}, the default
#' setting is \code{logBase = exp(2*pi/pi2Div)}. This relation between the
#' parameters \code{logBase} and \code{pi2Div} ensures an analogous scaling of
#' the modulus and argument reference zones (see Details). Conveniently, for
#' the default \code{pi2Div = 9}, we obtain \code{logBase == 2.0099...},
#' which is very close to 2. Thus, the modulus at the higher edge of a given
#' zone is almost exactly two times the value at the lower edge.
#'
#' @param argOffset The (complex number) argument in radians counterclockwise,
#' at which the argument reference zones are fixed. Default is 0, i.e. all
#' argument reference zones align to the center of the red area.
#'
#' @param darkestShade Darkest possible shading of modulus and angle reference
#' zones for \code{pType} \code{"pm"} and \code{"pma"}. It corresponds to the
#' value "v" in the \code{\link{hsv}} color model. \code{darkestShade = 0}
#' means no brightness at all, i.e. black, while \code{darkestShade = 1}
#' indicates maximum brightness. Defaults to 0.1, i.e. very dark, but hue
#' still discernible.
#'
#' @param lambda Parameter steering the shading interpolation between the higher
#' and the lower edges of the modulus and argument reference zones in
#' \code{pType} settings other than \code{"p"}. Should be > 0, default and
#' reference is \code{lambda = 7}. Values < 7 increase the contrast at the
#' zone borders, values > 7 weaken the contrast.
#'
#' @param gamma Parameter for adjusting the combined shading of modulus and
#' argument reference zones in \code{pType} \code{"pma"}. Should be in the
#' interval \code{[0, 1]}. Default is 0.9. The higher the value, the more the
#' smaller of both shading values will dominate the outcome and vice versa.
#'
#' @param stdSaturation Saturation value for unshaded hues which applies to the
#' whole plot in \code{pType} \code{"p"} and to the (almost) unshaded zones in
#' \code{pType} \code{"pm"} and \code{"p"}. This corresponds to the value "s"
#' in the \code{\link{hsv}} color model. Must be between 0 and 1, where 1
#' indicates full saturation and 0 indicates a neutral grey. Defaults to 0.8.
#'
#' @param hsvNaN \code{\link{hsv}} coded color for being used in areas where the
#' function to be plotted is not defined. Must be given as a numeric vector
#' containing the values h, s, and v in this order. Defaults to
#' \code{c(0, 0, 0.5)} which is a neutral grey.
#'
#' @param asp Aspect ratio y/x as defined in \code{\link{plot.window}}. Default
#' is 1, ensuring an accurate representation of distances between points on
#' the screen.
#'
#' @param deleteTempFiles If TRUE (default), all temporary files are deleted
#' after the plot is completed. Set it to FALSE only if you know exactly what
#' you are doing - the temporary files can occupy large amounts of hard disk
#' space (see details).
#'
#' @param noScreenDevice Suppresses any graphical output if TRUE. This is only
#' intended for test purposes and probably only makes sense together with
#' \code{deleteTempFiles == FALSE}. For dimensioning purposes,
#' \code{phasePortrait} will use a 1 x 1 inch pseudo graphics device in this
#' case. The default for this parameter is \code{FALSE}, and you should change
#' it only if you really know what you are doing.
#'
#' @param autoDereg if TRUE, automatically register sequential backend after the
#' phase portrait is completed. Default is FALSE, because registering a
#' parallel backend can be time consuming. Thus, if you want to make several
#' phase portraits in succession, you should set \code{autoDereg = TRUE} only
#' for the last one, or simply call \code{foreach::registerDoSEQ()} after you
#' are done.
#' In any case, you don't want to have an unused parallel backend lying about.
#'
#' @param verbose if TRUE (default), \code{phasePortrait} will continuously
#' write progress messages to the console. This is convenient for normal
#' purposes, as calculating larger phase portraits in higher resolution may
#' take several minutes. The setting \code{verbose = FALSE} will suppress any
#' output to the console.
#'
#' @param ... All parameters accepted by the \code{\link{plot.default}}
#' function.
#'
#'
#' @references
#' \insertAllCited{}
#'
#'
#' @examples
#' # Map the complex plane on itself
#'
#' # x11(width = 8, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' phasePortrait("z", xlim = c(-2, 2), ylim = c(-2, 2),
#' xlab = "real", ylab = "imaginary",
#' verbose = FALSE, # Suppress progress messages
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#'
#'
#' # A rational function
#' \donttest{
#' # x11(width = 10, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' phasePortrait("(2-z)^2*(-1i+z)^3*(4-3i-z)/((2+2i+z)^4)",
#' xlim = c(-8, 8), ylim = c(-6.3, 4.3),
#' xlab = "real", ylab = "imaginary",
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#' # Different pType options by example of the sine function.
#' # Note the different equivalent definitions of the sine
#' # function in the calls to phasePortrait
#' \donttest{
#' # x11(width = 9, height = 9) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' op <- par(mfrow = c(2, 2), mar = c(2.1, 2.1, 2.1, 2.1))
#' phasePortrait("sin(z)", xlim = c(-pi, pi), ylim = c(-pi, pi),
#' pType = "p", main = "pType = 'p'", axes = FALSE,
#' nCores = 2) # Max. two cores on CRAN, not a limit for your use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' phasePortrait("sin(z)", xlim = c(-pi, pi), ylim = c(-pi, pi),
#' pType = "pm", main = "pType = 'pm'", axes = FALSE,
#' nCores = 2)
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' phasePortrait("sin", xlim = c(-pi, pi), ylim = c(-pi, pi),
#' pType = "pa", main = "pType = 'pa'", axes = FALSE,
#' nCores = 2)
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' phasePortrait(sin, xlim = c(-pi, pi), ylim = c(-pi, pi),
#' pType = "pma", main = "pType = 'pma'", axes = FALSE,
#' nCores = 2)
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' par(op)}
#'
#'
#' # I called this one 'nuclear fusion'
#'
#' # x11(width = 16/9*8, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' \donttest{
#' op <- par(mar = c(0, 0, 0, 0), omi = c(0.2, 0.2, 0.2, 0.2), bg = "black")
#' phasePortrait("cos((z + 1/z)/(1i/2 * (z-1)^10))",
#' xlim = 16/9*c(-2, 2), ylim = c(-2, 2),
#' axes = FALSE, xaxs = "i", yaxs = "i",
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' par(op)}
#'
#'
#' # Passing function objects to phasePortrait:
#' # Two mathematical celebrities - Riemann's zeta function
#' # and the gamma function, both from the package pracma.
#' # R's built-in gamma is not useful, as it does not work
#' # with complex input values.
#' \donttest{
#' if(requireNamespace("pracma", quietly = TRUE)) {
#' # x11(width = 16, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' op <- par(mfrow = c(1, 2))
#' phasePortrait(pracma::zeta, xlim = c(-35, 15), ylim = c(-25, 25),
#' xlab = "real", ylab = "imaginary",
#' main = expression(zeta(z)), cex.main = 2,
#' nCores = 2) # Max. two cores on CRAN, not a limit for your use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' phasePortrait(pracma::gammaz, xlim = c(-10, 10), ylim = c(-10, 10),
#' xlab = "real", ylab = "imaginary",
#' main = expression(Gamma(z)), cex.main = 2,
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#' }
#'
#'
#' # Using vapply for defining a whole function as a string.
#' # This is a Blaschke product with a sequence a of twenty numbers.
#' # See the documentation of the function vector2String for a more
#' # convenient space-saving definition of a.
#' # But note that a C++ version of the Blaschke product is available
#' # in this package (function blaschkeProd()).
#' \donttest{
#' # x11(width = 10, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' phasePortrait("vapply(z, function(z, a) {
#' fct <- ifelse(abs(a) != 0,
#' abs(a)/a * (a-z)/(1-Conj(a)*z), z)
#' return(prod(fct))
#' },
#' a = c(0.12152611+0.06171533i, 0.53730315+0.32797530i,
#' 0.35269601-0.53259644i, -0.57862039+0.33328986i,
#' -0.94623221+0.06869166i, -0.02392968-0.21993132i,
#' 0.04060671+0.05644165i, 0.15534449-0.14559097i,
#' 0.32884452-0.19524764i, 0.58631745+0.05218419i,
#' 0.02562213+0.36822933i, -0.80418478+0.58621875i,
#' -0.15296208-0.94175193i, -0.02942663+0.38039250i,
#' -0.35184130-0.24438324i, -0.09048155+0.18131963i,
#' 0.63791697+0.47284679i, 0.25651928-0.46341192i,
#' 0.04353117-0.73472528i, -0.04606189+0.76068461i),
#' FUN.VALUE = complex(1))",
#' pType = "p",
#' xlim = c(-4, 2), ylim = c(-2, 2),
#' xlab = "real", ylab = "imaginary",
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#' # Much more elegant: Define the function outside.
#' # Here comes a Blaschke product with 200 random points.
#' \donttest{
#' # define function for calculating blaschke products, even
#' # possible as a one-liner
#' blaschke <- function(z, a) {
#' return(prod(ifelse(abs(a) != 0, abs(a)/a * (a-z)/(1-Conj(a)*z), z)))
#' }
#' # define 200 random numbers inside the unit circle
#' n <- 200
#' a <- complex(modulus = runif(n), argument = runif(n)*2*pi)
#' # Plot it
#' # x11(width = 10, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' phasePortrait(blaschke,
#' moreArgs = list(a = a),
#' pType = "p",
#' xlim = c(-2.5, 2.5), ylim = c(-1.7, 1.7),
#' xlab = "real", ylab = "imaginary",
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#' # A hybrid solution: A one-liner expression given as a character string
#' # can be provided additional arguments with moreArgs
#' \donttest{
#' n <- 73
#' a <- complex(modulus = runif(n), argument = runif(n)*2*pi)
#' # x11(width = 10, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' phasePortrait("prod(ifelse(abs(a) != 0,
#' abs(a)/a * (a-z)/(1-Conj(a)*z), z))",
#' moreArgs = list(a = a),
#' pType = "p",
#' xlim = c(-2.5, 2.5), ylim = c(-1.7, 1.7),
#' xlab = "real", ylab = "imaginary",
#' nCores = 1) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#' # Note the difference in performance when using the C++ defined
#' # function blaschkeProd() provided in this package
#' \donttest{
#' n <- 73
#' a <- complex(modulus = runif(n), argument = runif(n)*2*pi)
#' # Plot it
#' # x11(width = 10, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' phasePortrait(blaschkeProd,
#' moreArgs = list(a = a),
#' pType = "p",
#' xlim = c(-2.5, 2.5), ylim = c(-1.7, 1.7),
#' xlab = "real", ylab = "imaginary",
#' nCores = 1) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#' # Interesting reunion with Benoit Mandelbrot.
#' # The function mandelbrot() is part of this package (defined
#' # in C++ for performance)
#' \donttest{
#' # x11(width = 11.7, height = 9/16*11.7) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' op <- par(mar = c(0, 0, 0, 0), bg = "black")
#' phasePortrait(mandelbrot,
#' moreArgs = list(itDepth = 100),
#' xlim = c(-0.847, -0.403), ylim = c(0.25, 0.50),
#' axes = TRUE, pType = "pma",
#' hsvNaN = c(0, 0, 0), xaxs = "i", yaxs = "i",
#' nCores = 1) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' par(op)
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#' # Here comes a Julia set.
#' # The function juliaNormal() is part of this package (defined
#' # in C++ for performance)
#' \donttest{
#' # x11(width = 11.7, height = 9/16*11.7) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' op <- par(mar = c(0, 0, 0, 0), bg = "black")
#' phasePortrait(juliaNormal,
#' moreArgs = list(c = -0.09 - 0.649i, R_esc = 2),
#' xlim = c(-2, 2),
#' ylim = c(-1.3, 1.3),
#' hsvNaN = c(0, 0, 0),
#' nCores = 1) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' par(op)
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#' @export
phasePortrait <- function(FUN, moreArgs = NULL, xlim, ylim,
invertFlip = FALSE,
res = 150,
blockSizePx = 2250000,
tempDir = NULL,
nCores = max(1, parallel::detectCores() - 1),
pType = "pma",
pi2Div = 9,
logBase = exp(2*pi/pi2Div),
argOffset = 0,
darkestShade = 0.1,
lambda = 7, gamma = 0.9,
stdSaturation = 0.8,
hsvNaN = c(0, 0, 0.5),
asp = 1,
deleteTempFiles = TRUE,
noScreenDevice = FALSE,
autoDereg = FALSE,
verbose = TRUE,
...) {
# Bring the user's function definition in workable form
compFun <- makeFunctionFromInput(FUN, moreArgs)
if(is.null(compFun)) stop("\nFUN cannot be interpreted.")
# Calculate pixel array size from plot region size in inch and the plot
# range for the function given with xlim and ylim
## par("pin"): plot region size in inch; first is horizontal
## if noScreenDevice, region is set to 1 x 1 inch
if(!noScreenDevice) regionPi <- par("pin")
else regionPi <- c(1, 1)
xRange <- abs(xlim[2] - xlim[1])
yRange <- abs(ylim[2] - ylim[1])
yxRangeRatio <- yRange / xRange
yxPinchRatio <- regionPi[1] / regionPi[2]
if(yxRangeRatio < yxPinchRatio) { # height is limiting
heightPx <- res * regionPi[2]
widthPx <- res * regionPi[2] / yxRangeRatio
} # if
else { # width is limiting
widthPx <- res * regionPi[1]
heightPx <- res * regionPi[1] * yxRangeRatio
} #else
widthPx <- round(widthPx)
heightPx <- round(heightPx)
# In case of invertFlip == TRUE swap xlim
if(invertFlip) {
xlim <- c(xlim[2], xlim[1])
} # if invertFlip
# Register parallel Cluster if required or change number of workers
nWorkers <- getDoParWorkers() # number registered
availCores <- detectCores() # number available
# Leave one core free if user has typed in simply a large number
if(nCores > availCores) nCores <- availCores - 1
nCores <- min(max(nCores, 1), availCores) # register at least 1 :)
# and not more than available
if(nCores != 1) {
if(nWorkers != nCores) {
if(verbose) cat("\nRegistering parallel workers ... ")
registerDoSEQ() # Unregister parallel for the sake of safety before
registerDoParallel(cores = nCores) # register with new number of cores
if(verbose) cat(nCores, "parallel workers registered ...")
}
else {
if(verbose)
cat("\n", nCores, " parallel workers previously registered ...",
sep = "")
}
}
# Only one core desired
else {
registerDoSEQ()
if(verbose)
cat("\nnCores set to 1.",
"Parallel loops will be executed sequentially ...")
}
# Make pixelwise array of z-Values (input values to function)
if(verbose) cat("\nBuilding z plane array ...")
if(is.null(tempDir)) tempDir <- tempdir()
zMetaInfrm <- buildArray(widthPx, heightPx, xlim, ylim, blockSizePx, tempDir,
verbose)
# This is where it really happens
if(verbose) cat("\nEvaluation loop starting ... ")
zMetaInfrm$metaZ$wFileNames <- vapply(c(1:nrow(zMetaInfrm$metaZ)),
function(i, zMetaInfrm, compFun,
moreArgs) {
if(verbose) cat("\n.processing block", i, "... ")
fileName <- paste(zMetaInfrm$tempDir,
zMetaInfrm$metaZ[i,]$fileName, sep = "/")
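    # Load one strip of the z plane from its temporary file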
z <- get(load(fileName))
# Split z vertically (by rows) into nCores chunks to be processed
# in parallel
# - here's some pre-work
uplow <- verticalSplitIndex(dim(z)[1], nCores)
# - here's the actual splitting, z becomes a list
z <- lapply(uplow, FUN = function(uplow, z) {
return(z[c(uplow[1]:uplow[2]),])
}, z = z)
# Construct function call to be evaluated inside the parallel loop
if(is.null(moreArgs)) {
vCall <- "vapply(z[[i]], compFun, FUN.VALUE = complex(1))"
}
else {
vCall <- paste("vapply(z[[i]], compFun, FUN.VALUE = complex(1),",
paste(names(moreArgs), "=", moreArgs, collapse = ","),
")")
}
vCall <- parse(text = vCall)
# Run the evaluation parallel on each core and put it together again
if(verbose) cat("parallel loop starting ... ")
w <- foreach(i = c(1:length(z)), .combine = rbind,
.export = c("invertFlip", "compFun")) %dopar% {
# Make sure dimensions are correct, because
# one-line arrays can become vectors mysteriously ...
if(length(dim(z[[i]])) < 2) dims <- c(1, length(z[[i]]))
else dims <- dim(z[[i]])
if(invertFlip) z[[i]] <- Conj(1 / z[[i]])
array(eval(vCall), dim = dims)
} # foreach i
if(verbose) cat("done.")
rm(z) # discard z array
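    # Name the result file after the zero-padded number of its first line in
    # the whole pixel array plus this call's random code (see the Details
    # section on the temporary file system)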
wFileName <- paste(formatC(
zMetaInfrm$metaZ[i,]$lower,
width =
trunc(log10(zMetaInfrm$metaZ$lower[nrow(zMetaInfrm$metaZ)])) + 1,
flag = "0"
), # formatC
"wmat", zMetaInfrm$rndCode, ".RData", sep = "")
save(w, file = paste(zMetaInfrm$tempDir, wFileName, sep = "/"))
rm(w)
return(wFileName)
}, # function FUN
FUN.VALUE = character(1),
zMetaInfrm = zMetaInfrm, compFun = compFun, moreArgs = moreArgs
) # vapply
# Transform into color values and plot it
if(!noScreenDevice) {
if(verbose) cat("\nTransforming function values into colors ...")
complexArrayPlot(zMetaInfrm, xlim, ylim, pType, invertFlip,
lambda, gamma, pi2Div, logBase,
argOffset, stdSaturation, darkestShade, hsvNaN,
verbose = verbose, ...)
} # if(!noScreenDevice)
else if(verbose) cat("\nNo plot is made (explicit wish of the user) ...")
# Delete all temporary files ... or not
if(deleteTempFiles) {
if(verbose) cat("Deleting temporary files ... ")
filesToDelete <- paste(zMetaInfrm$tempDir,
c(as.character(zMetaInfrm$metaZ$fileNames),
as.character(zMetaInfrm$metaZ$wFileNames)),
sep = "/")
unlink(filesToDelete)
if(verbose) cat("done.\n")
} else {
if(verbose)
cat("\nTemporary files are NOT deleted (explicit wish of the user).\n")
  } # else (temp files are not deleted)
# If a parallel backend has been registered, keep or register a sequential
# backend dependent on user settings
if(nCores > 1) {
if(!autoDereg) {
if(verbose) cat("\nParallel backend with", nCores,
"cores remains registered for convenience.")
if(verbose)
cat("\nCan be de-registered manually with",
"'foreach::registerDoSEQ()'.\n")
}
else {
foreach::registerDoSEQ()
if(verbose) cat("\nSequential backend registered again.\n")
}
} # if nCores > 1
invisible(TRUE) # For test purposes
} # phasePortrait
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# R package viscomplexr - phase portraits of functions in the
# complex number plane
# Copyright (C) 2020 Peter Biber
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
# -----------------------------------------------------------------------------
# function phaseModColBw
# Calculates a hex two-color array based on an array of complex numbers, both
# arrays handed over as pointers.
# The function takes into account only the modulus values. The modulus of a
# complex number is attributed to a zone based on the input parameter logBase,
# and either assigned the first or the second value of the input variable
# bwCols. Only in cases where the modulus cannot be determined (NaNs or Inf),
# the third color in bwCols is used.
# In more detail, for an input number's modulus, the logarithm with base
# logBase is calculated and cut down to the next lower integer value. If this
# is an even number, the first color of bwCols is taken. In case of an odd
# number, the second color is used.
phaseModColBw <- function(pCompArr,
pBwCol,
logBase = exp(2*pi/18),
bwCols = c("black", "gray95", "gray")) {
hexCols <- sapply(bwCols,
function(bwc) rgb(t(col2rgb(bwc)), maxColorValue = 255))
dims <- dim(pCompArr$value)
intMod <- floor(log(Mod(pCompArr$value), logBase))
intIdx <- intMod %% 2 + 1
intIdx <- ifelse(is.nan(intIdx), 3, intIdx)
pBwCol$value <- array(hexCols[intIdx], dims)
return(pBwCol)
} # phaseModColBw
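# Illustration (not run): with the default logBase = exp(2*pi/18), i.e. about
# 1.4177, a modulus of 3 falls into zone floor(log(3, 1.4177)) = 3
# (odd -> second color of bwCols), a modulus of 5 into zone 4
# (even -> first color).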
# -----------------------------------------------------------------------------
# function phaseAngColBw
# Calculates a hex two-color array based on an array of complex numbers, both
# arrays handed over as pointers.
# The function takes into account only the argument values. The argument of a
# complex number is attributed to a zone based on the input parameters pi2Div
# and argOffset. Then, it is either assigned to the first or the second value
# of the input variable bwCols. Only in cases where the argument cannot be
# determined (NaNs), the third color in bwCols is used.
# In more detail, the full angle (2*pi) is divided into pi2Div zones, which are
# numbered from 0 to pi2Div - 1 with increasing angle. Even and odd zone numbers
# are attributed the first and the second color in bwCols, respectively.
# Usually, the input parameter pi2Div should be an even number in order to avoid
# the first and the last zone having the same color.
phaseAngColBw <- function(pCompArr,
pBwCol,
pi2Div = 18,
argOffset = 0,
bwCols = c("black", "gray95", "gray")) {
hexCols <- sapply(bwCols,
function(bwc) rgb(t(col2rgb(bwc)), maxColorValue = 255))
dims <- dim(pCompArr$value)
argmt <- Arg(pCompArr$value)
intArg <- floor(ifelse(argmt - argOffset < 0, argmt + 2*pi, argmt) /
(2 * pi / pi2Div))
intIdx <- intArg %% 2 + 1
intIdx <- ifelse(is.nan(intIdx), 3, intIdx)
pBwCol$value <- array(hexCols[intIdx], dims)
return(pBwCol)
} # phaseAngColBw
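# Illustration (not run): with the defaults pi2Div = 18 and argOffset = 0, the
# zone width is 2*pi/18 (20 degrees). An argument of 0.1 falls into zone 0
# (even -> first color of bwCols), an argument of 0.5 into zone 1
# (odd -> second color).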
# -----------------------------------------------------------------------------
# function phaseModAngColBw
# Calculates a hex two-color array based on an array of complex numbers, both
# arrays handed over as pointers.
# The function takes into account the modulus and the argument values and
# colors the resulting grid in a chessboard-like alternation using the first
# and the second color in the input variable bwCols. Only in cases where the
# modulus or the argument cannot be determined (NaNs or Inf), the third color
# in bwCols is used.
# In more detail, for an input number's modulus, the logarithm with base
# logBase is calculated and cut down to the next lower integer value. For the
# argument, the full angle (2*pi) is divided into p2Div zones, which are
# numbered from 0 to pi2Div - 1 with increasing angle. The sum of both integers
# is taken, and if it is an even or an odd number, the first or the second
# color from bwCols is used, respectively.
phaseModAngColBw <- function(pCompArr,
pBwCol,
pi2Div = 18,
logBase = exp(2*pi/pi2Div),
argOffset = 0,
bwCols = c("black", "gray95", "gray")) {
hexCols <- sapply(bwCols,
function(bwc) rgb(t(col2rgb(bwc)), maxColorValue = 255))
dims <- dim(pCompArr$value)
argmt <- Arg(pCompArr$value)
intArg <- floor(ifelse(argmt - argOffset < 0, argmt + 2*pi, argmt) /
(2 * pi / pi2Div))
intMod <- floor(log(Mod(pCompArr$value), logBase))
intIdx <- (intArg + intMod) %% 2 + 1
intIdx <- ifelse(is.nan(intIdx), 3, intIdx)
pBwCol$value <- array(hexCols[intIdx], dims)
return(pBwCol)
} # phaseModAngColBw
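# Illustration (not run): the chessboard pattern arises because the color index
# depends on the parity of intArg + intMod; crossing a single zone border
# (either in modulus or in argument direction) flips this parity and thus the
# color.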
# -----------------------------------------------------------------------------
# Function complexArrayPlotBw
# Very much like the function complexArrayPlot, but tailored for two-color
# plots to be created by calling phasePortraitBw.
# Displays an array of complex numbers in an existing plot.
# In order to do so,the temporary files that together form the array are
# read from disk one by one, but each one is processed in a parallel loop.
# The resulting array of hex color values is finally plotted as
# a raster image.
complexArrayPlotBw <- function(zMetaInfrm,
xlim,
ylim,
bwType = "ma",
invertFlip = FALSE,
pi2Div = 18,
logBase = exp(2*pi/pi2Div),
argOffset = 0,
bwCols = c("black", "grey95", "grey"),
asp = 1,
xlab = "", ylab = "",
verbose,
...) {
# Set up plot
plot(NULL, xlim = xlim, ylim = ylim, asp = asp, xlab = xlab, ylab = ylab, ...)
  # Define call to color transformation function depending on the user's
# choice of bwType
colCmd <- switch(bwType,
"m" = "phaseModColBw(pListCompArr[[i]],
pBwCol,
logBase = logBase,
bwCols = bwCols)",
"a" = "phaseAngColBw(pListCompArr[[i]],
pBwCol,
pi2Div = pi2Div,
argOffset = argOffset,
bwCols = bwCols)",
"ma" = "phaseModAngColBw(pListCompArr[[i]],
pBwCol,
pi2Div = pi2Div,
logBase = logBase,
argOffset = argOffset,
bwCols = bwCols)"
) # switch
# Obtain the names of the files to load and process
zMetaInfrm$metaZ$wFileNames <- paste(zMetaInfrm$tempDir,
zMetaInfrm$metaZ$wFileNames, sep = "/")
# Run the color transformation function over each file
pBwCol <- lapply(c(1:nrow(zMetaInfrm$metaZ)),
function(i, zMetaInfrm, colCmd) {
if(verbose) cat("\n.transforming block", i, "... ")
# load a block (will soon become a list of pointers, hence the name)
pListCompArr <- get(load(zMetaInfrm$metaZ[i,]$wFileNames))
# split it for parallel processing
nCores <- getDoParWorkers()
uplow <- verticalSplitIndex(nrow(pListCompArr), nCores)
# here's the actual splitting, pListCompArr becomes a list of pointers
pListCompArr <- lapply(uplow, FUN = function(uplow, pListCompArr) {
nwPtr <- newPointer(pListCompArr[c(uplow[1]:uplow[2]),])
# if the split result has only one line, it will automatically become a
# vector, which is undesired, because functions coming later require it
# as a two-dimensional array. This is made sure here.
if(uplow[1] == uplow[2]) {
dim(nwPtr$value) <- c(1, length(nwPtr$value))
}
return(nwPtr)
}, pListCompArr = pListCompArr)
# Parallel loop transforming the chunks into a color raster each;
# giving back a list of pointers to the rasters
if(verbose) cat("parallel loop starting ... ")
pBwCol <- foreach(i = c(1:length(pListCompArr)),
.export = c("phaseModColBw",
"phaseAngColBw",
"phaseModAngColBw",
"bwCols",
"logBase",
"pi2Div",
"newPointer",
"argOffset"),
.combine = c) %dopar% {
pBwCol <- newPointer(NULL)
eval(parse(text = colCmd)) # Does not require a return value,
# changes color array via pointer
pListCompArr[[i]]$value <- NULL # Reduced here, but removed after
# the foreach loop
return(pBwCol)
} # foreach
if(verbose) cat("done.")
# Remove the original list of array pointers
rm(pListCompArr)
# Combine the color arrays in the value of the first pointer.
# Free the others (rbindArraysbyPointer).
# Enforce (one-element-) list in case there is only one value
# (i.e. if foreach loop was executed sequentially, one core only)
if(length(pBwCol) == 1) pBwCol <- list(pBwCol)
pBwCol <- rbindArraysbyPointer(pBwCol)
return(pBwCol)
}, # function in lapply
zMetaInfrm = zMetaInfrm, colCmd = colCmd
) # lapply
# Now combine all blocks into the big raster ...
if(verbose) cat("\nCombine color rasters ... ")
pBwCol <- rbindArraysbyPointer(pBwCol)
if(verbose) cat("done.\n")
# ... and plot it
if(verbose) cat("Plotting raster image ... ")
rasterImage(as.raster(pBwCol$value), xlim[1], ylim[1], xlim[2], ylim[2])
if(verbose) cat("done.\n")
pBwCol$value <- NULL
rm(pBwCol)
return(NULL)
} # complexArrayPlotBw
# -----------------------------------------------------------------------------
#' Create two-color phase portraits of complex functions
#'
#' \code{phasePortraitBw} allows for creating two-color phase portraits of
#' complex functions based on a polar chessboard grid (cf.
#' \insertCite{wegert_visualcpx_2012;textual}{viscomplexr}, p. 35). Compared to
#' the full phase portraits that can be made with \code{\link{phasePortrait}},
#' two-color portraits omit information. Especially in combination with full
#' phase portraits they can be, however, very helpful tools for interpretation.
#' Besides, two-color phase portraits have a special aesthetic appeal which is
#' worth exploring for itself. In its parameters and its mode of operation,
#' \code{phasePortraitBw} is very similar to \code{\link{phasePortrait}}.
#'
#' This function is intended to be used inside the framework of R base graphics.
#' It plots into the active open graphics device where it will display the phase
#' plot of a user defined function as a raster image. If no graphics device is
#' open when called, the function will plot into the default graphics device.
#' This principle allows to utilize the full functionality of R base graphics.
#' All graphics parameters (\code{\link{par}}) can be freely set and the
#' function \code{phasePortraitBw} accepts all parameters that can be passed to
#' the \code{\link{plot.default}} function. This allows all kinds of plots -
#' from scientific representations with annotated axes and auxiliary lines,
#' notation, etc. to poster-like artistic pictures. The general mode of operation,
#' including the usage of parallel processing, is exactly the same as with
#' \code{\link{phasePortrait}}; see the Details section there.
#'
#'
#'
#' @param FUN The function to be visualized. There are two possibilities to
#' provide it, a quoted character string, or a function object. The quoted
#' character string must contain an expression that can be interpreted by R as
#' a function of a complex number \code{z} (like e.g. "sin(z)", "(z^2 -
#' 1i)/(tan(z))", "1/4*z^2 - 10*z/(z^4+4)"). See the documentation of
#' \code{\link{phasePortrait}} for a complete presentation of all options.
#'
#' @param moreArgs A named list of other arguments to FUN. The names must match
#' the names of the arguments in FUN's definition.
#'
#' @param xlim The x limits (x1, x2) of the plot as a two-element numeric
#' vector. Follows exactly the same definition as in
#' \code{\link{plot.default}}. Here, \code{xlim} has to be interpreted as the
#' plot limits on the real axis.
#'
#' @param ylim The y limits of the plot (y1, y2) to be used in the same way as
#' \code{xlim}. Evidently, \code{ylim} indicates the plot limits on the
#' imaginary axis.
#'
#' @param invertFlip If \code{TRUE}, the function is mapped over a z plane,
#' which has been transformed to \code{1/z * exp(1i*pi)}. This is the
#' projection required to plot the north Riemann hemisphere in the way
#' proposed by \insertCite{wegert_visualcpx_2012;textual}{viscomplexr}, p. 41.
#' Defaults to \code{FALSE}. If this option is chosen, the numbers at the axis
#' ticks have another meaning than in the normal case. Along the real axis,
#' they represent the real part of \code{1/z}, and along the imaginary axis,
#' they represent the imaginary part of \code{1/z}. Thus, if you want
#' annotation, you should choose appropriate axis labels like \code{xlab =
#' Re(1/z)}, and \code{ylab = Im(1/z)}.
#'
#' @param res Desired resolution of the plot in dots per inch (dpi). Default is
#' 150 dpi. All other things being equal, \code{res} has a strong influence on
#' computing times (double \code{res} means fourfold number of pixels to
#' compute). A good approach could be to make a plot with low resolution (e.g.
#' the default 150 dpi) first, adjust whatever required, and plot into a
#' graphics file with high resolution after that.
#'
#' @param blockSizePx Number of pixels and corresponding complex values to be
#' processed at the same time (see Details). Default is 2250000. This value
#' gave good performance on older systems as well as on a high-end gaming
#' machine, but some tweaking for your individual system might even improve
#' things.
#'
#' @param tempDir \code{NULL} or a character string, specifying the name of the
#' directory where the temporary files written by \code{phasePortraitBw} are
#' stored. Default is \code{NULL}, which makes \code{phasePortraitBw} use the
#' current R session's temporary directory. Note that if you specify another
#' directory, it will be created if it does not exist already. Even though the
#' temporary files are deleted after completing a phase portrait (unless the
#' user specifies \code{deleteTempFiles = FALSE}, see below), the directory
#' will remain alive even if it has been created by \code{phasePortraitBw}.
#'
#' @param nCores Number of processor cores to be used in the parallel computing
#' tasks. Defaults to the maximum number of cores available minus 1. Any
#' number between 1 (serial computation) and the maximum number of cores
#' available as indicated by \code{parallel::detectCores()} is accepted. If
#' \code{nCores} is set to a value greater than the available number of cores,
#' the function will use one core less than available.
#'
#' @param bwType One of the three options for plotting, "m", "a", and "ma", to
#' be provided as a character string. Defaults to "ma". This parameter has a
#' comparable role to the parameter \code{pType} in
#' \code{\link{phasePortrait}}. Option "m" produces a plot that colors modulus
#' zones only. In more detail, for each input number's modulus, the logarithm
#' with base \code{logBase} (see below) is calculated and cut down to the next
#' lower integer value. If this is an even number, the first color given in
#' \code{bwCols} (see below) is taken. In case of an odd number, the second
#' color is used. Option "a" produces a plot that exclusively colors argument
#' (phase angle) zones. To that end, the full angle (2*pi) is divided into
#' \code{pi2Div} (see below) zones, which are numbered from 0 to pi2Div - 1
#' with increasing angle. Such an integer number is attributed to the complex
#' number of interest according to the zone it falls into. Even and odd zone
#' numbers are mapped to the first and the second color in \code{bwCols},
#' respectively. For normal purposes, the input parameter \code{pi2Div} should
#' be an even number in order to avoid the first and the last zone having the
#' same color. With option "ma", a chessboard-like alternation of colors is
#' displayed over the tiles formed by the intersecting modulus and argument
#' zones (both determined separately as with the options "m" and "a").
#'
#' @param pi2Div Angle distance for the argument reference zones added for
#' \code{pType = "pma"} or \code{pType = "pa"}. The value has to be given as
#' an integer (reasonably) fraction of 2*pi (i.e. 360 degrees). Unlike with
#' \code{\link{phasePortrait}}, the default is 18; thus, reference zones are
#' delineated by default in distances of 2*pi/18, i.e. 20 degrees, starting
#' with 0 if not defined otherwise with the parameter \code{argOffset}. While
#' the default of \code{pi2Div} is 9 with \code{\link{phasePortrait}} for good
#' reasons (see there), setting \code{pi2Div} to an odd number is usually not
#' a good choice with two-color phase portraits, because the first and the
#' last phase angle zone would get the same color. However, as \code{pi2Div}
#' here defaults to double the value as with \code{\link{phasePortrait}}, both
#' plot types can be nicely compared even when using their specific defaults
#' of \code{pi2Div}.
#'
#' @param logBase Modulus ratio between the edges of the modulus zones in
#' \code{bwType} \code{"m"} and \code{"ma"}. As recommended by
#' \insertCite{wegert_visualcpx_2012;textual}{viscomplexr}, the default
#' setting is \code{logBase = exp(2*pi/pi2Div)}. This relation between the
#' parameters \code{logBase} and \code{pi2Div} ensures an analogous scaling of
#' the modulus and argument reference zones (see Details section in the
#' documentation of \code{\link{phasePortrait}}). Conveniently, for the
#' default \code{pi2Div = 18}, we obtain \code{logBase == 1.4177...}, which is
#' very close to the square root of 2. Thus, when crossing two modulus zones,
#' the modulus at the higher edge of the second zone is almost exactly two
#' times the value at the lower edge of the first zone.
#'
#' @param argOffset The (complex number) argument in radians counterclockwise,
#' at which the argument (phase angle) reference zones are fixed, i.e. the
#' lower angle of the first zone. Default is 0.
#'
#' @param bwCols Color definition for the plot provided as a character vector of
#' length 3. Each element of the vector must be either a color name R
#' recognizes, or a hexadecimal color string like e.g. "#00FF11". The first
#' and the second color make the appearance of two-color phase portraits (see
#' \code{bwType} above for details), while the third color is reserved for
#' special cases where the input value cannot be sufficiently evaluated (NaNs,
#' partly Inf). Defaults to c("black", "gray95", "gray"), which leads to an
#' alternation of black and very light gray zones or tiles, and uses a neutral
#' gray in special cases.
#'
#' @param asp Aspect ratio y/x as defined in \code{\link{plot.window}}. Default
#' is 1, ensuring an accurate representation of distances between points on
#' the screen.
#'
#' @param deleteTempFiles If TRUE (default), all temporary files are deleted
#' after the plot is completed. Set it to FALSE only if you know exactly what
#' you are doing - the temporary files can occupy large amounts of hard disk
#' space (see details).
#'
#' @param noScreenDevice Suppresses any graphical output if TRUE. This is only
#' intended for test purposes and probably only makes sense together with
#' \code{deleteTempFiles == FALSE}. For dimensioning purposes,
#' \code{phasePortraitBw} will use a 1 x 1 inch pseudo graphics device in this
#' case. The default for this parameter is \code{FALSE}, and you should change
#' it only if you really know what you are doing.
#'
#' @param autoDereg if TRUE, automatically register sequential backend after the
#' plot is completed. Default is FALSE, because registering a parallel backend
#' can be time consuming. Thus, if you want to make several phase portraits in
#' succession, you should set \code{autoDereg = TRUE} only for the last one,
#' or simply call \code{foreach::registerDoSEQ()} after you are done. In any
#' case,
#' you don't want to have an unused parallel backend lying about.
#'
#' @param verbose if TRUE (default), \code{phasePortraitBw} will continuously
#' write progress messages to the console. This is convenient for normal
#' purposes, as calculating larger phase portraits in higher resolution may
#' take several minutes. The setting \code{verbose = FALSE} will suppress any
#' output to the console.
#'
#' @param ... All parameters accepted by the \code{\link{plot.default}}
#' function.
#'
#'
#' @references
#' \insertAllCited{}
#'
#'
#' @export
#'
#'
#' @examples
#' # Map the complex plane on itself
#'
#' # x11(width = 8, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' phasePortraitBw("z", xlim = c(-2, 2), ylim = c(-2, 2),
#' xlab = "real", ylab = "imaginary",
#' verbose = FALSE, # Suppress progress messages
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#'
#'
#'
#' # Sinus with default colors and default bwType ("ma")
#' \donttest{
#' # x11(width = 8, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' phasePortraitBw("sin(z)",
#' xlim = c(-pi, pi),
#' ylim = c(-pi, pi),
#' verbose = FALSE,
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#'
#' # Sinus with custom colors and bwType "a"
#' \donttest{
#' # x11(width = 8, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' phasePortraitBw("sin(z)",
#' xlim = c(-pi, pi),
#' ylim = c(-pi, pi),
#' bwType = "a",
#' bwCols = c("darkgreen", "green", "gray"),
#' verbose = FALSE,
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#'
#' # Sinus with custom colors and bwType "m"
#' \donttest{
#' # x11(width = 8, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' phasePortraitBw("sin(z)",
#' xlim = c(-pi, pi),
#' ylim = c(-pi, pi),
#' bwType = "m",
#' bwCols = c("darkblue", "skyblue", "gray"),
#' verbose = FALSE,
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#'
#' # Map the complex plane on itself, show all bwType options
#' \donttest{
#' # x11(width = 8, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' op <- par(mfrow = c(2, 2), mar = c(4.1, 4.1, 1.1, 1.1))
#' for(bwType in c("ma", "a", "m")) {
#' phasePortraitBw("z", xlim = c(-2, 2), ylim = c(-2, 2),
#' bwType = bwType,
#' xlab = "real", ylab = "imaginary",
#' verbose = FALSE, # Suppress progress messages
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' }
#' # Add normal phase portrait for comparison
#' phasePortrait("z", xlim = c(-2, 2), ylim = c(-2, 2),
#' xlab = "real", ylab = "imaginary",
#' verbose = FALSE,
#' pi2Div = 18, # Use same angular division as default
#' # in phasePortraitBw
#' nCores = 2)
#' par(op)
#'
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
#' # A rational function, show all bwType options
#' \donttest{
#' # x11(width = 8, height = 8) # Screen device commented out
#' # due to CRAN test requirements.
#' # Use it when trying this example
#' funString <- "(z + 1.4i - 1.4)^2/(z^3 + 2)"
#' op <- par(mfrow = c(2, 2), mar = c(4.1, 4.1, 1.1, 1.1))
#' for(bwType in c("ma", "a", "m")) {
#' phasePortraitBw(funString, xlim = c(-2, 2), ylim = c(-2, 2),
#' bwType = bwType,
#' xlab = "real", ylab = "imaginary",
#' verbose = FALSE, # Suppress progress messages
#' nCores = 2) # Max. two cores allowed on CRAN
#' # not a limit for your own use
#' }
#' # Add normal phase portrait for comparison
#' phasePortrait(funString, xlim = c(-2, 2), ylim = c(-2, 2),
#' xlab = "real", ylab = "imaginary",
#' verbose = FALSE,
#' pi2Div = 18, # Use same angular division as default
#' # in phasePortraitBw
#' nCores = 2)
#' par(op)
#'
#' \dontshow{
#' # R CMD check: make sure any open connections are closed afterward
#' foreach::registerDoSEQ()
#' doParallel::stopImplicitCluster()
#' }
#' }
#'
#'
phasePortraitBw <- function(FUN, moreArgs = NULL, xlim, ylim,
invertFlip = FALSE,
res = 150,
blockSizePx = 2250000,
tempDir = NULL,
nCores = max(1, parallel::detectCores() - 1),
bwType = "ma",
pi2Div = 18,
logBase = exp(2*pi/pi2Div),
argOffset = 0,
bwCols = c("black", "gray95", "gray"),
asp = 1,
deleteTempFiles = TRUE,
noScreenDevice = FALSE,
autoDereg = FALSE,
verbose = TRUE,
...) {
# Bring the user's function definition in workable form
compFun <- makeFunctionFromInput(FUN, moreArgs)
if(is.null(compFun)) stop("\nFUN cannot be interpreted.")
# Calculate pixel array size from plot region size in inch and the plot
# range for the function given with xlim and ylim
## par("pin"): plot region size in inch; first is horizontal
## if noScreenDevice, region is set to 1 x 1 inch
if(!noScreenDevice) regionPi <- par("pin")
else regionPi <- c(1, 1)
xRange <- abs(xlim[2] - xlim[1])
yRange <- abs(ylim[2] - ylim[1])
yxRangeRatio <- yRange / xRange
yxPinchRatio <- regionPi[1] / regionPi[2]
if(yxRangeRatio < yxPinchRatio) { # height is limiting
heightPx <- res * regionPi[2]
widthPx <- res * regionPi[2] / yxRangeRatio
} # if
else { # width is limiting
widthPx <- res * regionPi[1]
heightPx <- res * regionPi[1] * yxRangeRatio
} #else
widthPx <- round(widthPx)
heightPx <- round(heightPx)
# In case of invertFlip == TRUE swap xlim
if(invertFlip) {
xlim <- c(xlim[2], xlim[1])
} # if invertFlip
# Register parallel Cluster if required or change number of workers
nWorkers <- getDoParWorkers() # number registered
availCores <- detectCores() # number available
# Leave one core free if user has typed in simply a large number
if(nCores > availCores) nCores <- availCores - 1
nCores <- min(max(nCores, 1), availCores) # register at least 1 :)
# and not more than available
if(nCores != 1) {
if(nWorkers != nCores) {
if(verbose) cat("\nRegistering parallel workers ... ")
registerDoSEQ() # Unregister parallel for the sake of safety before
registerDoParallel(cores = nCores) # register with new number of cores
if(verbose) cat(nCores, "parallel workers registered ...")
}
else {
if(verbose)
cat("\n", nCores, " parallel workers previously registered ...",
sep = "")
}
}
# Only one core desired
else {
registerDoSEQ()
if(verbose)
cat("\nnCores set to 1.",
"Parallel loops will be executed sequentially ...")
}
# Make pixelwise array of z-Values (input values to function)
if(verbose) cat("\nBuilding z plane array ...")
if(is.null(tempDir)) tempDir <- tempdir()
zMetaInfrm <- buildArray(widthPx, heightPx, xlim, ylim, blockSizePx, tempDir,
verbose)
# This is where it really happens
if(verbose) cat("\nEvaluation loop starting ... ")
zMetaInfrm$metaZ$wFileNames <- vapply(c(1:nrow(zMetaInfrm$metaZ)),
function(i, zMetaInfrm, compFun, moreArgs) {
if(verbose) cat("\n.processing block", i, "... ")
fileName <- paste(zMetaInfrm$tempDir,
zMetaInfrm$metaZ[i,]$fileName, sep = "/")
z <- get(load(fileName))
# Split z vertically (by rows) into nCores chunks to be processed
# in parallel
# - here's some pre-work
uplow <- verticalSplitIndex(dim(z)[1], nCores)
# - here's the actual splitting, z becomes a list
z <- lapply(uplow, FUN = function(uplow, z) {
return(z[c(uplow[1]:uplow[2]),])
}, z = z)
# Construct function call to be evaluated inside the parallel loop
if(is.null(moreArgs)) {
vCall <- "vapply(z[[i]], compFun, FUN.VALUE = complex(1))"
}
else {
vCall <- paste("vapply(z[[i]], compFun, FUN.VALUE = complex(1),",
paste(names(moreArgs), "=", moreArgs, collapse = ","),
")")
}
vCall <- parse(text = vCall)
# Run the evaluation parallel on each core and put it together again
if(verbose) cat("parallel loop starting ... ")
w <- foreach(i = c(1:length(z)), .combine = rbind,
.export = c("invertFlip", "compFun")) %dopar% {
# Make sure dimensions are correct, because
# one-line arrays can become vectors mysteriously ...
if(length(dim(z[[i]])) < 2) dims <- c(1, length(z[[i]]))
else dims <- dim(z[[i]])
if(invertFlip) z[[i]] <- Conj(1 / z[[i]])
array(eval(vCall), dim = dims)
} # foreach i
if(verbose) cat("done.")
rm(z) # discard z array
wFileName <- paste(formatC(
zMetaInfrm$metaZ[i,]$lower,
width =
trunc(log10(zMetaInfrm$metaZ$lower[nrow(zMetaInfrm$metaZ)])) + 1,
flag = "0"
), # formatC
"wmat", zMetaInfrm$rndCode, ".RData", sep = "")
save(w, file = paste(zMetaInfrm$tempDir, wFileName, sep = "/"))
rm(w)
return(wFileName)
}, # function FUN
FUN.VALUE = character(1),
zMetaInfrm = zMetaInfrm, compFun = compFun, moreArgs = moreArgs
) # vapply
# Transform into color values and plot it
if(!noScreenDevice) {
if(verbose) cat("\nTransforming function values into colors ...")
complexArrayPlotBw(zMetaInfrm, xlim, ylim, bwType, invertFlip,
pi2Div, logBase, argOffset, bwCols,
verbose = verbose, ...)
} # if(!noScreenDevice)
else if(verbose) cat("\nNo plot is made (explicit wish of the user) ...")
# Delete all temporary files ... or not
if(deleteTempFiles) {
if(verbose) cat("Deleting temporary files ... ")
filesToDelete <- paste(zMetaInfrm$tempDir,
c(as.character(zMetaInfrm$metaZ$fileNames),
as.character(zMetaInfrm$metaZ$wFileNames)),
sep = "/")
unlink(filesToDelete)
if(verbose) cat("done.\n")
} else {
if(verbose)
cat("\nTemporary files are NOT deleted (explicit wish of the user).\n")
  } # else (temp files are not deleted)
# If a parallel backend has been registered, keep or register a sequential
# backend dependent on user settings
if(nCores > 1) {
if(!autoDereg) {
if(verbose) cat("\nParallel backend with", nCores,
"cores remains registered for convenience.")
if(verbose)
cat("\nCan be de-registered manually with",
"'foreach::registerDoSEQ()'.\n")
}
else {
foreach::registerDoSEQ()
if(verbose) cat("\nSequential backend registered again.\n")
}
} # if nCores > 1
invisible(TRUE) # For test purposes
} # phasePortraitBw
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
|
/scratch/gouwar.j/cran-all/cranData/viscomplexr/R/phasePortraitBw.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
# This option anti-aliases the plots made below under Windows
if(Sys.info()[["sysname"]] == "Windows") {
knitr::opts_chunk$set(dev = "CairoPNG")
}
options(rmarkdown.html_vignette.check_title = FALSE)
## ----setup, echo = FALSE------------------------------------------------------
library(viscomplexr)
## ---- figure_1, fig.width = 5, fig.height = 5, results = 'hide', fig.align='center', cache = FALSE, fig.show = 'hold', fig.cap = 'Phase portrait of the function $f(z)=z$ in the window $\\left|\\Re(z)\\right| < 8.5$ and $\\left|\\Im(z)\\right| < 8.5$.'----
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
xlab = "real", ylab = "imaginary", main = "f(z) = z",
nCores = 2) # Probably not required on your machine (see below)
# Note the argument 'nCores' which determines the number of parallel processes to
# be used. Setting nCores = 2 has been done here and in all subsequent
# examples as CRAN checks do not allow more parallel processes.
# For normal work, we recommend not to define nCores at all which will make
# phasePortrait use all available cores on your machine.
# The progress messages phasePortrait is writing to the console can be
# suppressed by including 'verbose = FALSE' in the call (see documentation).
## ----figure_2, fig.width=5, fig.height=5, results="hide", fig.align='center', fig.show='hold', cache=TRUE, fig.cap= "Different options for including reference lines with the argument `pType`."----
# divide graphics device into four regions and adjust plot margins
op <- par(mfrow = c(2, 2),
mar = c(0.25, 0.55, 1.10, 0.25))
# plot four phase portraits with different choices of pType
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "p",
main = "pType = 'p'", axes = FALSE, nCores = 2)
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "pa",
main = "pType = 'pa'", axes = FALSE, nCores = 2)
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "pm",
main = "pType = 'pm'", axes = FALSE, nCores = 2)
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "pma",
main = "pType = 'pma'", axes = FALSE, nCores = 2)
par(op) # reset the graphics parameters to their previous values
## ----eval=FALSE, figure_3, fig.width=5, fig.height=5, results='hide', fig.align='center', cache=TRUE, fig.show='hold', fig.cap='Phase portrait of the function $f(z)=\\frac{(3+2\\mathrm{i}+z)(-5+5\\mathrm{i}+z)}{(-2-2\\mathrm{i}+z)^2}$ in the window $\\left|\\Re(z)\\right| < 8.5$ and $\\left|\\Im(z)\\right| < 8.5$.'----
# op <- par(mar = c(5.1, 4.1, 2.1, 2.1), cex = 0.8) # adjust plot margins
# # and general text size
# phasePortrait("(3+2i+z)*(-5+5i+z)/(-2-2i+z)^2",
# xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
# xlab = "real", ylab = "imaginary",
# nCores = 2) # Increase or leave out for higher performance
# par(op) # reset the graphics parameters to their previous values
## ----eval = FALSE, figure_4, fig.width=7, fig.height=2.8, results='hide', fig.align='center', , fig.show='hold', cache=TRUE, fig.cap='The function $f(z)=\\frac{(3+2\\mathrm{i}+z)(-5+5\\mathrm{i}+z)}{(-2-2\\mathrm{i}+z)^2}$ portrayed with three different settings of `pi2Div` and `pType = "pa"`.'----
# # divide graphics device into three regions and adjust plot margins
# op <- par(mfrow = c(1, 3), mar = c(0.2, 0.2, 0.4, 0.2))
# for(n in c(6, 9, 18)) {
# phasePortrait("(3+2i+z)*(-5+5i+z)/(-2-2i+z)^2", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
# pi2Div = n, pType = "pa", axes = FALSE, nCores = 2)
# # separate title call (R base graphics) for nicer line adjustment, just cosmetics
# title(paste("pi2Div =", n), line = -1.2)
# }
# par(op) # reset graphics parameters to previous values
## ----figure_5, fig.width=7, fig.height=2.8, results='hide', fig.align='center', , fig.show='hold', cache=TRUE, fig.cap='The function $f(z)=\\frac{(3+2\\mathrm{i}+z)(-5+5\\mathrm{i}+z)}{(-2-2\\mathrm{i}+z)^2}$ portrayed with three different settings of `pi2Div` and `pType = "pma"`.'----
# divide graphics device into three regions and adjust plot margins
op <- par(mfrow = c(1, 3), mar = c(0.2, 0.2, 0.4, 0.2))
for(n in c(6, 9, 18)) {
phasePortrait("(3+2i+z)*(-5+5i+z)/(-2-2i+z)^2", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
pi2Div = n, pType = "pma", axes = FALSE, nCores = 2)
# separate title call (R base graphics) for nicer line adjustment, just cosmetics
title(paste("pi2Div =", n), line = -1.2)
}
par(op) # reset graphics parameters to previous values
## ----eval=FALSE, figure_6, fig.width=7, fig.height=2.8, results='hide', fig.align='center', , fig.show='hold', cache=TRUE, fig.cap='The function $f(z)=\\frac{(3+2\\mathrm{i}+z)(-5+5\\mathrm{i}+z)}{(-2-2\\mathrm{i}+z)^2}$ portrayed with decoupled settings of `pi2Div` and `logBase`.'----
# # divide graphics device into three regions and adjust plot margins
# op <- par(mfrow = c(1, 3), mar = c(0.2, 0.2, 0.4, 0.2))
# for(n in c(6, 9, 18)) {
# phasePortrait("(3+2i+z)*(-5+5i+z)/(-2-2i+z)^2", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
#                 pi2Div = n, logBase = 3^(1/3), pType = "pma", axes = FALSE, nCores = 2)
# # separate title call (R base graphics) for nicer line adjustment, just cosmetics
# title(paste("pi2Div = ", n, ", logBase = 3^(1/3)", sep = ""), line = -1.2)
# }
# par(op) # reset graphics parameters to previous values
## ----eval=FALSE, figure_7, fig.width = 5, fig.height = 5, results = 'hide', fig.align='center', fig.show='hold', cache = TRUE, fig.cap = 'Phase portrait of the function $f(z)=\\mathrm{e}^z$ in the window $\\left|\\Re(z)\\right| < 8.5$ and $\\left|\\Im(z)\\right| < 8.5$ with iso-modulus lines.'----
# op <- par(mar = c(5.1, 4.1, 2.1, 2.1), cex = 0.8) # adjust plot margins
# # and general text size
# phasePortrait(exp, xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "pm",
# xlab = "real", ylab = "imaginary", nCores = 2)
# par(op) # reset graphics parameters to previous values
## ----eval=FALSE, figure_8, fig.width=7, fig.height=2.8, results='hide', fig.align='center', fig.show='hold', cache=TRUE, fig.cap='The function $f(z)=\\mathrm{e}^z$ portrayed with the default coupling of `pi2Div` and `logBase` as implemented in *phasePortrait*.'----
# # divide graphics device into three regions and adjust plot margins
# op <- par(mfrow = c(1, 3), mar = c(0.2, 0.2, 0.4, 0.2))
# for(n in c(6, 9, 18)) {
# phasePortrait("exp(z)", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
# pi2Div = n, pType = "pma", axes = FALSE, nCores = 2)
# # separate title call (R base graphics) for nicer line adjustment, just cosmetics
# title(paste("pi2Div = ", n, ", logBase = exp(2*pi/pi2Div)", sep = ""),
# line = -1.2, cex.main = 0.9)
# }
# par(op) # reset graphics parameters to previous values
## ----eval=FALSE, figure_9, fig.width=5, fig.height=5, fig.show='hold', results='hide', cache=TRUE, fig.cap='Tuning reference zone contrast with the parameters `darkestShade` (column-wise, 0, 0.2, 0.4), and `lambda` (row-wise, 0.1, 1, 10).'----
# op <- par(mfrow = c(3, 3), mar = c(0.2, 0.2, 0.2, 0.2))
# for(lb in c(0.1, 1, 10)) {
# for(dS in c(0, 0.2, 0.4)) {
# phasePortrait("tan(z^2)", xlim = c(-1.7, 1.7), ylim = c(-1.7, 1.7),
# pType = "pm", darkestShade = dS, lambda = lb,
# axes = FALSE, xaxs = "i", yaxs = "i", nCores = 2)
# }
# }
# par(op)
## ----eval=FALSE, figure_10, fig.width=7, fig.height=2.7, results='hide', fig.align='center', fig.show='hold', cache=TRUE, fig.cap='Three phase portraits with branch cuts (dashed line), illustrating the three values of $f(z)=z^{1/3}$, $z \\in \\mathbb{C} \\setminus \\lbrace 0 \\rbrace$. The transitions between the phase portraits are indicated by same-coloured arrows pointing at the branch cuts.'----
# op <- par(mfrow = c(1, 3), mar = c(0.4, 0.2, 0.2, 0.2))
# for(k in 0:2) {
# FUNstring <- paste0("z^(1/3) * exp(1i * 2*pi/3 * ", k, ")")
# phasePortrait(FUN = FUNstring,
# xlim = c(-1.5, 1.5), ylim = c(-1.5, 1.5), pi2Div = 12,
# axes = FALSE, nCores = 2)
# title(sub = paste0("k = ", k), line = -1)
# # emphasize branch cut with a dashed line segment
# segments(-1.5, 0, 0, 0, lwd = 2, lty = "dashed")
# # draw colored arrows
# upperCol <- switch(as.character(k),
# "0" = "black", "1" = "red", "2" = "green")
# lowerCol <- switch(as.character(k),
# "0" = "green", "1" = "black", "2" = "red")
# arrows(x0 = c(-1.2), y0 = c(1, -1), y1 = c(0.2, -0.2),
# lwd = 2, length = 0.1, col = c(upperCol, lowerCol))
# }
# par(op)
## ----eval=FALSE, figure_11, fig.width=7, fig.height=2.7, fig.align='center', results='hide', fig.show='hold', cache=TRUE, fig.cap='Three branches of $\\log z=\\log r+\\mathrm{i}\\cdot(\\varphi + k\\cdot2\\pi), r>0, \\varphi\\in\\left[0,2\\pi\\right[$, with $k=-1,0,1$. The branch cuts are marked with dashed white lines.'----
# op <- par(mfrow = c(1, 3), mar = c(0.4, 0.2, 0.2, 0.2))
# for(k in -1:1) {
# FUNstring <- paste0("log(Mod(z)) + 1i * (Arg(z) + 2 * pi * ", k, ")")
# phasePortrait(FUN = FUNstring, pi2Div = 36,
# xlim = c(-2, 2), ylim = c(-2, 2), axes = FALSE, nCores = 2)
# segments(-2, 0, 0, 0, col = "white", lwd = 1, lty = "dashed")
# title(sub = paste0("k = ", k), line = -1)
# }
# par(op)
## ----figure_12, fig.width=7, fig.height=3.5, fig.align='center', results='hide', fig.show='hold', cache=TRUE, fig.cap='Mapping the complex number plane on the Riemann sphere. Left: lower (southern) hemisphere; right upper (northern hemisphere). Folding both figures face to face along a vertical line in the middle between them can be imagined as closing the Riemann sphere.'----
op <- par(mfrow = c(1, 2), mar = rep(0.1, 4))
# Southern hemisphere
phasePortrait("z", xlim = c(-1.4, 1.4), ylim = c(-1.4, 1.4),
pi2Div = 12, axes = FALSE, nCores = 2)
riemannMask(annotSouth = TRUE)
# Northern hemisphere
phasePortrait("z", xlim = c(-1.4, 1.4), ylim = c(-1.4, 1.4),
pi2Div = 12, axes = FALSE, invertFlip = TRUE, nCores = 2)
riemannMask(annotNorth = TRUE)
par(op)
## ----figure_13, fig.width=7, fig.height=3.7, fig.align='center', results='hide', fig.show='hold', cache=TRUE, fig.cap='Riemann sphere plot of the function $f(z)=\\frac{(z^{2}+\\frac{1}{\\sqrt{2}}+\\frac{\\mathrm{i}}{\\sqrt{2}})\\cdot(z+\\frac{1}{2}+\\frac{\\mathrm{i}}{2})}{z-1}$. Annotated are the zeroes $z_1$, $z_2$, $z_3$, and the poles $z_4$, $z_5$.'----
op <- par(mfrow = c(1, 2), mar = c(0.1, 0.1, 1.4, 0.1))
# Define function
FUNstring <- "(z^2 + 1/sqrt(2) * (1 + 1i)) * (z + 1/2*(1 + 1i)) / (z - 1)"
# Southern hemisphere
phasePortrait(FUNstring, xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2),
pi2Div = 12, axes = FALSE, nCores = 2)
riemannMask()
title("Southern Hemisphere", line = 0)
# - annotate zeroes and poles
text(c(cos(5/8*pi), cos(13/8*pi), cos(5/4*pi)/sqrt(2), 1),
c(sin(5/8*pi), sin(13/8*pi), sin(5/4*pi)/sqrt(2), 0),
c(expression(z[1]), expression(z[2]), expression(z[3]), expression(z[4])),
pos = c(1, 2, 4, 2), offset = 1, col = "white")
# Northern hemisphere
phasePortrait(FUNstring, xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2),
pi2Div = 12, axes = FALSE, invertFlip = TRUE, nCores = 2)
riemannMask()
title("Northern Hemisphere", line = 0)
# - annotate zeroes and poles
text(c(cos(5/8*pi), cos(13/8*pi), cos(5/4*pi)*sqrt(2), 1, 0),
c(sin(5/8*pi), sin(13/8*pi), sin(5/4*pi)*sqrt(2), 0, 0),
c(expression(z[1]), expression(z[2]), expression(z[3]),
expression(z[4]), expression(z[5])),
pos = c(1, 4, 3, 4, 4), offset = 1,
col = c("white", "white", "black", "white", "white"))
par(op)
## ---- eval=FALSE--------------------------------------------------------------
# x11(width = 8, height = 2/3 * 8) # Open graphics window on screen
# op <- par(mar = c(0, 0, 0, 0)) # Do not leave plot margins
# phasePortrait(mandelbrot, moreArgs = list(itDepth = 30),
#                 nCores = 1, # Increase or leave out for higher performance
# xlim = c(-2, 1), ylim = c(-1, 1),
# hsvNaN = c(0, 0, 0), # black color for points outside the set
# axes = FALSE, # No coordinate axes
# xaxs = "i", yaxs = "i") # No space between plot region and plot
# par(op) # Set graphics parameters to original
## ---- eval=FALSE--------------------------------------------------------------
# res <- 600 # set resolution to 600 dpi
# # open png graphics device with in DIN A4 format
# # DIN A format has an edge length ratio of sqrt(2)
# png("Mandelbrot Example.png",
# width = 29.7, height = 29.7/sqrt(2), # DIN A4 landscape
# units = "cm",
# res = res) # resolution is required
# op <- par(mar = c(0, 0, 0, 0)) # set graphics parameters - no plot margins
# xlim <- c(-1.254, -1.248) # horizontal (real) plot limits
# # the function below adjusts the imaginary plot limits to the
# # desired ratio (sqrt(2)) centered around the desired imaginary value
# ylim <- ylimFromXlim(xlim, centerY = 0.02, x_to_y = sqrt(2))
# phasePortrait(mandelbrot,
# nCores = 1, # Increase or leave out for higher performance
# xlim = xlim, ylim = ylim,
# hsvNaN = c(0, 0, 0), # Black color for NaN results
# xaxs = "i", yaxs = "i", # suppress R's default axis margins
# axes = FALSE, # do not plot axes
# res = res) # resolution is required
# par(op) # reset graphics parameters
# dev.off() # close graphics device and complete the png file
## ---- eval=FALSE--------------------------------------------------------------
# res <- 600
# png("Julia Example 1.png", width = 29.7, height = 29.7/sqrt(2),
# units = "cm", res = res)
# op <- par(mar = c(0, 0, 0, 0))
# xlim <- c(-1.8, 1.8)
# ylim <- ylimFromXlim(xlim, centerY = 0, x_to_y = sqrt(2))
# phasePortrait(juliaNormal,
# # see documentation of juliaNormal about the arguments
# # c and R_esc
# moreArgs = list(c = -0.09 - 0.649i, R_esc = 2),
# nCores = 1, # Increase or leave out for higher performance
# xlim = xlim, ylim = ylim,
# hsvNaN = c(0, 0, 0),
# xaxs = "i", yaxs = "i",
# axes = FALSE,
# res = res)
# par(op)
# dev.off()
## ---- eval=FALSE--------------------------------------------------------------
# res <- 600
# png("Julia Example 2.png", width = 29.7, height = 29.7/sqrt(2),
# units = "cm", res = res)
# op <- par(mar = c(0, 0, 0, 0))
# xlim <- c(-0.32, 0.02)
# ylim <- ylimFromXlim(xlim, centerY = -0.78, x_to_y = sqrt(2))
# phasePortrait(juliaNormal,
# # see documentation of juliaNormal about the arguments
# # c and R_esc
# moreArgs = list(c = -0.119 - 0.882i, R_esc = 2),
# nCores = 1, # Increase or leave out for higher performance
# xlim = xlim, ylim = ylim,
# hsvNaN = c(0, 0, 0),
# xaxs = "i", yaxs = "i",
# axes = FALSE,
# res = res)
# par(op)
# dev.off()
## ---- eval=FALSE--------------------------------------------------------------
# # Map the complex plane on itself, show all bwType options
#
# x11(width = 8, height = 8)
# op <- par(mfrow = c(2, 2), mar = c(4.1, 4.1, 1.1, 1.1))
# for(bwType in c("ma", "a", "m")) {
# phasePortraitBw("z", xlim = c(-2, 2), ylim = c(-2, 2),
# bwType = bwType,
# xlab = "real", ylab = "imaginary",
# nCores = 2) # Increase or leave out for higher performance
# }
# # Add normal phase portrait for comparison
# phasePortrait("z", xlim = c(-2, 2), ylim = c(-2, 2),
# xlab = "real", ylab = "imaginary",
# pi2Div = 18, # Use same angular division as default
# # in phasePortraitBw
# nCores = 2) # Increase or leave out for higher performance
# par(op)
#
## ----eval=FALSE---------------------------------------------------------------
# # A rational function, show all bwType options
#
# x11(width = 8, height = 8)
# funString <- "(z + 1.4i - 1.4)^2/(z^3 + 2)"
# op <- par(mfrow = c(2, 2), mar = c(4.1, 4.1, 1.1, 1.1))
# for(bwType in c("ma", "a", "m")) {
# phasePortraitBw(funString, xlim = c(-2, 2), ylim = c(-2, 2),
# bwType = bwType,
# xlab = "real", ylab = "imaginary",
# nCores = 2) # Increase or leave out for higher performance
# }
# # Add normal phase portrait for comparison
# phasePortrait(funString, xlim = c(-2, 2), ylim = c(-2, 2),
# xlab = "real", ylab = "imaginary",
# pi2Div = 18, # Use same angular division as default
# # in phasePortraitBw
# nCores = 2) # Increase or leave out for higher performance
# par(op)
#
## ---- eval = FALSE------------------------------------------------------------
# # Set the plot margins at all four sides to 1/5 inch with mai,
# # set the background color to black with bg, and the default foreground
# # color with fg (e.g. for axes and boxes around plots, or the color of
# # the circle outline from the function riemannMask).
# # We catch the previous parameter values in a variable, I called
# # "op" ("old parameters")
# op <- par(mai = c(1/5, 1/5, 1/5, 1/5), bg = "black", fg = "white")
#
# # Make any phase portraits and/or other graphics of your interest
# # ...
#
# # Set the graphical parameters back to the values previously stored in op
# par(op)
## ---- eval = FALSE------------------------------------------------------------
# phasePortrait("tan(z^3 + 1/2 - 2i)/(1 - 1i - z)",
# xlim = c(-6, 6), ylim = c(-3, 3),
# axes = FALSE,
# nCores = 2) # Increase or leave out for higher performance
## ---- eval=FALSE--------------------------------------------------------------
# phasePortrait("tan(z^3 + 1/2 - 2i)/(1 - 1i - z)",
# xlim = c(-6, 6), ylim = c(-3, 3),
# axes = FALSE,
# nCores = 2) # Increase or leave out for higher performance
# box()
## ---- eval=FALSE--------------------------------------------------------------
# # set background and foreground colors
# op <- par(bg = "black", fg = "white")
# # Setting the parameter fg has an effect on the box, the axes, and the axes'
# # ticks, but not on the axis annotations and axis labels.
# # Also the color of the title (main) is not affected.
# # The colors of these elements have to be set manually and separately. While we
# # could simply set them to "white", we set them, more flexibly, to the
# # current foreground color (par("fg")).
# phasePortrait("tan(z^3 + 1/2 - 2i)/(2 - 1i - z)",
# xlim = c(-6, 6), ylim = c(-3, 3), col.axis = par("fg"),
# xlab = "real", ylab = "imaginary", col.lab = par("fg"),
# main = "All annotation in foreground color", col.main = par("fg"),
# # Adjust text size
# cex.axis = 0.9, cex.lab = 0.9,
# nCores = 2) # Increase or leave out for higher performance
# par(op)
## ---- eval=FALSE--------------------------------------------------------------
# # Open graphics device with 16/9 aspect ratio and 7 inch width
# x11(width = 7, height = 9/16 * 7)
# op <- par(mar = c(0, 0, 0, 0)) # Set plot margins to zero
# xlim <- c(-3, 3)
# # Calculate ylim with desired center fitting the desired aspect ratio
# ylim <- ylimFromXlim(xlim, centerY = 0, x_to_y = 16/9)
# phasePortrait(jacobiTheta, moreArgs = list(tau = 1i/5 + 1/5), pType = "p",
# xlim = xlim, ylim = ylim,
# xaxs = "i", yaxs = "i",
# axes = FALSE,
# nCores = 2) # Increase or leave out for higher performance
# par(op)
## ---- eval=FALSE--------------------------------------------------------------
# # Open graphics device with 16/9 aspect ratio and a width of 7 inches
# x11(width = 7, height = 9/16 * 7)
# # Set plot margins to zero, outer margins to 1/7 inch,
# # and background color to black
# outerMar <- 1/7 # outer margin width in inches
# op <- par(mar = c(0, 0, 0, 0), omi = rep(outerMar, 4), bg = "black")
# xlim <- c(-1.5, 0.5)
# # Calculate ylim with desired center fitting the desired aspect ratio;
# # however, the omi settings slightly change the required
# # ratio of xlim and ylim
# ratio <- (7 - 2*outerMar) / (7 * 9/16 - 2*outerMar)
# ylim <- ylimFromXlim(xlim, centerY = 0, x_to_y = ratio)
# phasePortrait("sin(jacobiTheta(z, tau))/z", moreArgs = list(tau = 1i/5 + 1/5),
# pType = "p",
# xlim = xlim, ylim = ylim,
# xaxs = "i", yaxs = "i",
# axes = FALSE,
# nCores = 1) # Increase or leave out for higher performance
# par(op)
## ---- eval = FALSE------------------------------------------------------------
# # Note that 'FUN =' is not required if the argument to FUN is handed to
# # phasePortrait in the first position
# phasePortrait(FUN = "1/(1 - z^2)", xlim = c(-5, 5), ylim = c(-5, 5), nCores = 2)
# phasePortrait("sin((z - 2)/(z + 2))", xlim = c(-5, 5), ylim = c(-5, 5), nCores = 2)
# phasePortrait("tan(z)", xlim = c(-5, 5), ylim = c(-5, 5), nCores = 2)
## ---- eval = FALSE------------------------------------------------------------
# phasePortrait("-1 * sum(z^c(-k:k))", moreArgs = list(k = 11),
# xlim = c(-2, 2), ylim = c(-1.5, 1.5),
# pType = "p",
# nCores = 2) # Increase or leave out for higher performance
## ---- eval = FALSE------------------------------------------------------------
# funString <- "vapply(z, FUN = function(z) {
# n <- 9
# k <- z^(c(1:n))
# rslt <- sum(sin(k))
# return(rslt)
# },
# FUN.VALUE = complex(1))"
# phasePortrait(funString, xlim = c(-2, 2), ylim = c(-2, 2),
# nCores = 2) # Increase or leave out for higher performance
## ---- eval = FALSE------------------------------------------------------------
# funString <- "vapply(z, FUN = function(z, fct) {
# n <- 9
# k <- z^(fct * c(1:n))
# rslt <- sum(sin(k))
# return(rslt)
# },
# fct = -1,
# FUN.VALUE = complex(1))"
# phasePortrait(funString, xlim = c(-2, 2), ylim = c(-2, 2),
# nCores = 2) # Increase or leave out for higher performance
## ---- eval = FALSE------------------------------------------------------------
# # Define function
# tryThisOne <- function(z, fct, n) {
# k <- z^(fct * c(1:n))
# rslt <- prod(cos(k))
# return(rslt)
# }
#
# # Call function by its name only, provide additional arguments via "moreArgs"
# phasePortrait("tryThisOne", moreArgs = list(fct = 1, n = 5),
# xlim = c(-2.5, 2.5), ylim = c(-2, 2),
# nCores = 2) # Increase or leave out for higher performance
## ---- eval = FALSE------------------------------------------------------------
# # Use argument "hsvNaN = c(0, 0, 0)" if you want the grey area black
# phasePortrait(function(z) {
# for(j in 1:20) {
# z <- z * sin(z) - 1 + 1/2i
# }
# return(z)
# },
# xlim = c(-3, 3), ylim = c(-2, 2),
# nCores = 2) # Increase or leave out for higher performance
## ---- eval = FALSE------------------------------------------------------------
# # Use argument "hsvNaN = c(0, 0, 0)" if you want the grey area black
# phasePortrait(function(z, n) {
# for(j in 1:n) {
# z <- z * cos(z)
# }
# return(z)
# },
# moreArgs = list(n = 27),
# xlim = c(-3, 3), ylim = c(-2, 2),
# nCores = 2) # Increase or leave out for higher performance
## ---- eval = FALSE------------------------------------------------------------
# # atan from package base
# phasePortrait(atan, xlim = c(-pi, pi), ylim = c(-pi, pi),
# nCores = 2)
#
# # gammaz from package pracma (the package must be installed on your machine
# # if you want this example to be working)
# phasePortrait(pracma::gammaz, xlim = c(-9, 9), ylim = c(-5, 5),
# nCores = 2)
#
# # blaschkeProd from this package (moreArgs example)
# # make random vector of zeroes
# n <- 12
# a <- complex(modulus = runif(n), argument = 2 * pi * runif(n))
# # plot the actual phase portrait
# phasePortrait(blaschkeProd, moreArgs = list(a = a),
# xlim = c(-1.3, 1.3), ylim = c(-1.3, 1.3),
# nCores = 2)
#
# # User function example
# tryThisOneToo <- function(z, n, r) {
# for(j in 1:n) {
# z <- r * (z + z^2)
# }
# return(z)
# }
# # Use argument "hsvNaN = c(0, 0, 0)" if you want the gray areas black
# phasePortrait(tryThisOneToo, moreArgs = list(n = 50, r = 1/2 - 1/2i),
# xlim = c(-3, 2), ylim = c(-2.5, 2.5),
# nCores = 2)
#
## ---- eval = FALSE------------------------------------------------------------
# res <- 300 # Define desired resolution in dpi
# png("Logistic_Function.png", width = 40, height = 40 * 3/4,
# units = "cm", res = res)
# phasePortrait("1/(1+exp(-z))", xlim = c(-25, 25), ylim = c(-15, 15), res = res,
# xlab = "real", ylab = "imaginary",
# nCores = 2) # Increase or leave out for higher performance
# dev.off()
## ---- eval=FALSE--------------------------------------------------------------
# switch(1 + trunc(runif(1, 0, 6)),
# "... at all?",
# "... in a quick-and-dirty way?",
# "... in Hadley-Wickham-style?",
# "... without a loop?",
# "... without nested loops?",
# "... in a way somebody can understand?")
## ---- include = FALSE---------------------------------------------------------
foreach::registerDoSEQ()
|
/scratch/gouwar.j/cran-all/cranData/viscomplexr/inst/doc/viscomplexr-vignette.R
|
---
title: "Phase Portraits of Complex Functions with the R Package *viscomplexr*"
author: "Peter Biber"
output: rmarkdown::html_vignette
header-includes: \usepackage{amsmath}
bibliography: REFERENCES.bib
vignette: |
%\VignetteIndexEntry{viscomplexr-vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
# This option anti-aliases the plots made below under Windows
if(Sys.info()[["sysname"]] == "Windows") {
knitr::opts_chunk$set(dev = "CairoPNG")
}
options(rmarkdown.html_vignette.check_title = FALSE)
```
```{r setup, echo = FALSE}
library(viscomplexr)
```
## Introduction
The **R** package *viscomplexr* has been written as a visualization tool for complex functions. More precisely, it provides functionality for making *phase portraits* of such functions. The method, sometimes called *domain coloring*, exists in [many sub-varieties](https://en.wikipedia.org/wiki/Domain_coloring). However, from the author's point of view, the style proposed by E. Wegert in his book *Visual Complex Functions* [@wegert_visualcpx_2012] comes with a particular clarity and a special aesthetic appeal at the same time. Therefore, this package closely follows Wegert's conventions. Conceptually, the package is intended for being used inside the framework of **R**'s base graphics, i.e. users of this package can freely utilize all features of base graphics for obtaining an optimum result, be it for scientific or artistic purposes. This vignette is not at all an introduction to function theory or an exhaustive treatment of what can be done with phase portraits - I recommend Wegert's book for an ideal combination of both; the purpose of this vignette is in fact to make the reader acquainted with the technical features the package provides in a step-by-step process.
Due to the size restriction of CRAN packages, the number of illustrations in this vignette is kept to a minimum. Readers are encouraged to run all code examples shown below (and hopefully enjoy what they see), but especially those where we explicitly invite them to do so. Alternatively, visit the [package's website](https://peterbiber.github.io/viscomplexr/) for a [richly illustrated version of this vignette](https://peterbiber.github.io/viscomplexr/articles/viscomplexr-vignette_for_website.html).
## Using the function *phasePortrait*
### Visualizing the complex plane
The package does not contain many functions, but provides a very versatile workhorse called *phasePortrait*. We will explore some of its key features now. Let us first consider a function that maps a complex number $z \in \mathbb{C}$ on itself, i.e. $f(z)=z$. After attaching the package with `library(viscomplexr)`, a phase portrait of this function is obtained very easily with:
```{r, figure_1, fig.width = 5, fig.height = 5, results = 'hide', fig.align='center', cache = FALSE, fig.show = 'hold', fig.cap = 'Phase portrait of the function $f(z)=z$ in the window $\\left|\\Re(z)\\right| < 8.5$ and $\\left|\\Im(z)\\right| < 8.5$.'}
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
xlab = "real", ylab = "imaginary", main = "f(z) = z",
nCores = 2) # Probably not required on your machine (see below)
# Note the argument 'nCores' which determines the number of parallel processes to
# be used. Setting nCores = 2 has been done here and in all subsequent
# examples as CRAN checks do not allow more parallel processes.
# For normal work, we recommend not to define nCores at all which will make
# phasePortrait use all available cores on your machine.
# The progress messages phasePortrait is writing to the console can be
# suppressed by including 'verbose = FALSE' in the call (see documentation).
```
Such a phase portrait is based on the polar representation of complex numbers. Any complex number $z$ can be written as $z=r\cdot\mathrm{e}^{\mathrm{i}\varphi}$ or equivalently $z=r\cdot(\cos\varphi+\mathrm{i}\cdot\sin\varphi)$, where $r$ is the *modulus* and the angle $\varphi$ is the *argument*. The argument, also called the *phase angle*, is the angle at the origin of the complex number plane between the real axis and the position vector of the number in counter-clockwise orientation. The main feature of a phase portrait is to translate the argument into a color. In addition, there are options for visualizing the modulus or, more precisely, its relative change.
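For readers who would like to check the polar decomposition numerically, base **R** already provides `Mod()` and `Arg()`; the following lines are purely illustrative and independent of the package:
```{r, eval = FALSE}
# Polar form z = r * exp(1i * phi), checked with base R (illustration only)
z   <- 3 + 4i
r   <- Mod(z)      # modulus, here 5
phi <- Arg(z)      # argument (phase angle) in (-pi, pi]
r * exp(1i * phi)  # reconstructs 3 + 4i (up to rounding)
```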
The translation of the phase angle $\varphi$ into a color follows the [hsv color model](https://en.wikipedia.org/wiki/HSL_and_HSV), where radian values of $\varphi=0+k\cdot2\pi$, $\varphi=\frac{2\pi}{3}+k\cdot2\pi$, and $\varphi=\frac{4\pi}{3}+k\cdot2\pi$ with $k\in\mathbb{Z}$ translate into the colors red, green, and blue, respectively, with a continuous transition of colors for values in between. As all numbers with the same argument $\varphi$ obtain the same color, the numbers of the complex plane as visualized in the Figure above are colored along the chromatic cycle. In order to add visual structure, argument values of $\varphi=\frac{2\pi}{9}$, i.e. $40°$, and their integer multiples are emphasized by black lines. Note that each of these lines follows exactly one color. Moreover, the zones between two neighboring arguments $\varphi_1=k\cdot\frac{2\pi}{9}$ and $\varphi_2=(k+1)\cdot\frac{2\pi}{9}$ with $k\in\mathbb{Z}$ are shaded in a way that the brightness of the colors inside one such zone increases with increasing $\varphi$, i.e. in the counterclockwise sense of rotation.
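As a minimal sketch of this color mapping (the package's internal implementation additionally applies the shading described here and may differ in detail), the argument can be translated into an hsv hue with base **R** alone:
```{r, eval = FALSE}
# Sketch: translate the argument of z into an hsv hue, assuming the simple
# linear mapping described above (illustration only, not package code)
phaseColor <- function(z) {
  phi <- Arg(z) %% (2 * pi)         # map the argument to [0, 2*pi)
  hsv(h = phi / (2 * pi), s = 1, v = 1)
}
phaseColor(1 + 0i)                  # red
phaseColor(exp(1i * 2 * pi / 3))    # green
phaseColor(exp(1i * 4 * pi / 3))    # blue
```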
The other lines visible in the figure above relate to the modulus $r$. Each such line follows a constant value of $r$; it is thus obvious that each of these iso-modulus lines must form a concentric circle on the complex number plane (see the figure above). The distance between neighboring iso-modulus lines is chosen so that it always indicates the same relative change. For reasons we will discuss later [see also @wegert_visualcpx_2012], the default setting of the function *phasePortrait* is a relative change of $b=\mathrm{e}^{2\pi/9}$ which is very close to $2$. Thus, roughly speaking, the modulus of the complex numbers doubles or halves when moving from one iso-modulus line to the next. In the phase portrait, the zones between two adjacent iso-modulus lines are shaded in a way that the colors inside such a zone become brighter in the direction of increasing modulus. The lines themselves are located at the moduli $r=b^k$, with $k\in\mathbb{Z}$. This is nicely visible in the phase portrait above, where the outermost circular iso-modulus line indicates (approximately, as $b$ is not exactly $2$) $r=2^3=8$. Moving inwards, the following iso-modulus lines are at (approximately) $r=2^2=4$, $r=2^1=2$, $r=2^0=1$, $r=2^{-1}=\frac{1}{2}$, $r=2^{-2}=\frac{1}{4}$, etc. Obviously, as the modulus of the numbers on the complex plane is their distance from the origin, the width of the concentric rings formed by adjacent iso-modulus lines approximately doubles from ring to ring when moving outwards.
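The radii of these iso-modulus circles are easily reproduced numerically (illustration only):
```{r, eval = FALSE}
# Radii r = b^k of the iso-modulus lines with the default b = exp(2*pi/9)
b <- exp(2 * pi / 9)
b          # approx. 2.01, i.e. very close to 2
b^(-2:3)   # approx. 0.25, 0.5, 1, 2, 4, 8
```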
### Visual structuring - the argument *pType*
When working with the function *phasePortrait*, it might not always be desirable to display all of these reference lines and zonings. The argument `pType` allows for four different options as illustrated in the next example:
```{r figure_2, fig.width=5, fig.height=5, results="hide", fig.align='center', fig.show='hold', cache=TRUE, fig.cap= "Different options for including reference lines with the argument `pType`."}
# divide graphics device into four regions and adjust plot margins
op <- par(mfrow = c(2, 2),
mar = c(0.25, 0.55, 1.10, 0.25))
# plot four phase portraits with different choices of pType
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "p",
main = "pType = 'p'", axes = FALSE, nCores = 2)
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "pa",
main = "pType = 'pa'", axes = FALSE, nCores = 2)
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "pm",
main = "pType = 'pm'", axes = FALSE, nCores = 2)
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "pma",
main = "pType = 'pma'", axes = FALSE, nCores = 2)
par(op) # reset the graphics parameters to their previous values
```
As evident from the figure above, setting `pType` to 'p' displays a phase portrait in the literal sense, i.e. only the phase of the complex numbers is displayed and nothing else. The option 'pa' adds reference lines for the argument, the option 'pm' adds iso-modulus lines, and the (default) option 'pma' adds both. In addition to these options, the example shows *phasePortrait* in combination with **R**'s base graphics. The first and the last line of the code chunk set and reset global graphics parameters, and inside the calls to *phasePortrait*, we use the arguments `main` (diagram title) and `axes`, which are generic plot arguments.
### Visual structuring - the arguments *pi2Div* and *logBase*
For demonstrating options to adjust the density of the argument and modulus reference lines, consider the rational function
$$
f(z)=\frac{(3+2\mathrm{i}+z)(-5+5\mathrm{i}+z)}{(-2-2\mathrm{i}+z)^2}
$$
Evidently, this function has two zeroes, $z_1=-3-2\mathrm{i}$, and $z_2=5-5\mathrm{i}$. It also has a second order pole at $z_3=2+2\mathrm{i}$. We make a phase portrait of this function over the same cutout of the complex plane as we did in the figures above. When calling *phasePortrait* with such simple functions, it is most convenient to define them as a quoted character string in **R** syntax containing the variable $z$. Run the code below for displaying the phase portrait (active 7" x 7" screen graphics device suggested, e.g. `x11()`).
```{r eval=FALSE, figure_3, fig.width=5, fig.height=5, results='hide', fig.align='center', cache=TRUE, fig.show='hold', fig.cap='Phase portrait of the function $f(z)=\\frac{(3+2\\mathrm{i}+z)(-5+5\\mathrm{i}+z)}{(-2-2\\mathrm{i}+z)^2}$ in the window $\\left|\\Re(z)\\right| < 8.5$ and $\\left|\\Im(z)\\right| < 8.5$.'}
op <- par(mar = c(5.1, 4.1, 2.1, 2.1), cex = 0.8) # adjust plot margins
# and general text size
phasePortrait("(3+2i+z)*(-5+5i+z)/(-2-2i+z)^2",
xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
xlab = "real", ylab = "imaginary",
nCores = 2) # Increase or leave out for higher performance
par(op) # reset the graphics parameters to their previous values
```
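Before looking at the portrait in more detail, the zeroes and the pole can be cross-checked numerically (plain base **R**, independent of the package):
```{r, eval = FALSE}
# The function from above as an ordinary R function (illustration only)
f <- function(z) (3 + 2i + z) * (-5 + 5i + z) / (-2 - 2i + z)^2
f(-3 - 2i)            # first zero: 0
f(5 - 5i)             # second zero: 0
is.finite(f(2 + 2i))  # FALSE: second order pole at 2 + 2i
```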
The resulting figure nicely displays the function's two zeroes and the pole. Note that all colors meet in zeroes and poles. Around zeroes, the colors cycle counterclockwise in the order red, green, blue, while this order is reversed around poles. For $n$<sup>th</sup> order ($n\in\mathbb{N}$) zeroes and poles, the cycle is passed through $n$ times. I recommend checking this out with examples of your own.
Now, suppose we want to change the density of the reference lines for the phase angle $\varphi$. This can be done by way of the argument `pi2Div`. For usual applications, `pi2Div` should be a natural number $n\:(n\in\mathbb{N})$. It defines the angle $\Delta\varphi$ between two adjacent reference lines as a fraction of the round angle, i.e. $\Delta\varphi=\frac{2\pi}{n}$. The default value of `pi2Div` is 9, i.e. $\Delta\varphi=\frac{2\pi}{9}=40°$. Let's plot our function in three flavors of `pi2Div`, namely, 6, 9 (the default), and 18, resulting in $\Delta\varphi$ values of $\frac{\pi}{3}=60°$, $\frac{2\pi}{9}=40°$, and $\frac{\pi}{9}=20°$. In order to suppress the iso-modulus lines and display the argument reference lines only, we are using `pType = "pa"`. Visualize this by running the code below (active 7" x 2.8" screen graphics device suggested, e.g. `x11(width = 7, height = 2.8)`).
```{r eval = FALSE, figure_4, fig.width=7, fig.height=2.8, results='hide', fig.align='center', , fig.show='hold', cache=TRUE, fig.cap='The function $f(z)=\\frac{(3+2\\mathrm{i}+z)(-5+5\\mathrm{i}+z)}{(-2-2\\mathrm{i}+z)^2}$ portrayed with three different settings of `pi2Div` and `pType = "pa"`.'}
# divide graphics device into three regions and adjust plot margins
op <- par(mfrow = c(1, 3), mar = c(0.2, 0.2, 0.4, 0.2))
for(n in c(6, 9, 18)) {
phasePortrait("(3+2i+z)*(-5+5i+z)/(-2-2i+z)^2", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
pi2Div = n, pType = "pa", axes = FALSE, nCores = 2)
# separate title call (R base graphics) for nicer line adjustment, just cosmetics
title(paste("pi2Div =", n), line = -1.2)
}
par(op) # reset graphics parameters to previous values
```
So far, this is exactly what was to be expected. But see what happens when we choose the default `pType`, `"pma"`, which also adds modulus reference lines:
```{r figure_5, fig.width=7, fig.height=2.8, results='hide', fig.align='center', , fig.show='hold', cache=TRUE, fig.cap='The function $f(z)=\\frac{(3+2\\mathrm{i}+z)(-5+5\\mathrm{i}+z)}{(-2-2\\mathrm{i}+z)^2}$ portrayed with three different settings of `pi2Div` and `pType = "pma"`.'}
# divide graphics device into three regions and adjust plot margins
op <- par(mfrow = c(1, 3), mar = c(0.2, 0.2, 0.4, 0.2))
for(n in c(6, 9, 18)) {
phasePortrait("(3+2i+z)*(-5+5i+z)/(-2-2i+z)^2", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
pi2Div = n, pType = "pma", axes = FALSE, nCores = 2)
# separate title call (R base graphics) for nicer line adjustment, just cosmetics
title(paste("pi2Div =", n), line = -1.2)
}
par(op) # reset graphics parameters to previous values
```
Evidently, the choice of `pi2Div` has influenced the density of the iso-modulus lines. This is because, by default, the parameter `logBase`, which controls how dense the iso-modulus lines are arranged, is linked to `pi2Div`. As stated above, `pi2Div` is usually a natural number $n\:(n \in\mathbb{N})$, and `logBase` is the real number $b\:(b\in\mathbb{R})$ which defines the moduli $r=b^k\:(k\in\mathbb{Z})$ where the reference lines are drawn. When $n$ is given, the default definition of $b$ is $b=\mathrm{e}^{2\pi/n}$. In the default case, $n=9$, this results in $b\approx2.009994$. Thus, by default, moving from one iso-modulus line to the adjacent one means almost exactly doubling or halving the modulus, depending on the direction. For the other two cases $n=6$ and $n=18$, the resulting values for $b$ are $b\approx2.85$ and $b\approx1.42$, the latter obviously being the square root of $\mathrm{e}^{2\pi/9}$. Accordingly, for $n=18$ it takes two iso-modulus steps to (approximately) double or halve the modulus, while for $n=9$ a single step suffices.
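The corresponding numbers are easy to reproduce (illustration only):
```{r, eval = FALSE}
# Angular spacing and default logBase for pi2Div = 6, 9, 18
n <- c(6, 9, 18)
data.frame(pi2Div = n,
           deltaPhiDegrees = 360 / n,     # 60, 40, 20
           logBase = exp(2 * pi / n))     # approx. 2.85, 2.01, 1.42
```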
Before we demonstrate the special property of this linkage between $n$ and $b$, i.e. between `pi2Div` and `logBase`, we briefly show that they can be decoupled in *phasePortrait* without any complication. In the following example, we want to define the density of the iso-modulus lines in a way that the modulus triples when traversing three zones in the direction of ascending moduli. Clearly, this requires defining `logBase` as $b=\sqrt[3]{3}\approx1.44$. Thus, when moving from one iso-modulus line to the next higher one, the modulus has increased by a factor of about $1.4$. One line further, it has about doubled (${\sqrt[3]{3}}^{2}\approx2.08$), and another line further it has exactly tripled. While varying `pi2Div` exactly as in the previous example, we now keep `logBase` constant at $\sqrt[3]{3}$. Run the code below for visualizing this (active 7" x 2.8" screen graphics device suggested, e.g. `x11(width = 7, height = 2.8)`).
```{r eval=FALSE, figure_6, fig.width=7, fig.height=2.8, results='hide', fig.align='center', , fig.show='hold', cache=TRUE, fig.cap='The function $f(z)=\\frac{(3+2\\mathrm{i}+z)(-5+5\\mathrm{i}+z)}{(-2-2\\mathrm{i}+z)^2}$ portrayed with decoupled settings of `pi2Div` and `logBase`.'}
# divide graphics device into three regions and adjust plot margins
op <- par(mfrow = c(1, 3), mar = c(0.2, 0.2, 0.4, 0.2))
for(n in c(6, 9, 18)) {
phasePortrait("(3+2i+z)*(-5+5i+z)/(-2-2i+z)^2", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
                pi2Div = n, logBase = 3^(1/3), pType = "pma", axes = FALSE, nCores = 2)
# separate title call (R base graphics) for nicer line adjustment, just cosmetics
title(paste("pi2Div = ", n, ", logBase = 3^(1/3)", sep = ""), line = -1.2)
}
par(op) # reset graphics parameters to previous values
```
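That crossing one, two, and three zones scales the modulus by about 1.44, about 2.08, and exactly 3 can be verified directly:
```{r, eval = FALSE}
# Modulus factors after crossing 1, 2, 3 zones with logBase = 3^(1/3)
b <- 3^(1/3)
b^(1:3)   # approx. 1.44, 2.08, 3
```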
In order to understand why by default the parameters `pi2Div` and `logBase` are linked as described above, we consider the exponential function $f(z)=\mathrm{e}^z$. We can write $z=r\cdot(\cos\varphi+\mathrm{i}\cdot\sin\varphi)$ and thus $f(z)=\mathrm{e}^{r\cdot(\cos\varphi+\mathrm{i}\cdot\sin\varphi)}$ or $w=f(z)=\mathrm{e}^{r\cdot\cos\varphi}\cdot\mathrm{e}^{\mathrm{i}\cdot r\cdot\sin\varphi}$. The modulus of $w$ is $\mathrm{e}^{r\cdot\cos\varphi}$ and its argument is $r\cdot\sin\varphi$ with $\Re(z)=r\cdot\cos\varphi$ and $\Im(z)=r\cdot \sin\varphi$. So, the modulus and the argument of $w=\mathrm{e}^z$ depend solely on the real and the imaginary part of $z$, respectively. This can be easily verified with a phase portrait of $f(z)=\mathrm{e}^z$. Run the code below for displaying the phase portrait (active 7" x 7" screen graphics device suggested, e.g. `x11()`). Note that in the call to *phasePortrait* we hand over the `exp` function directly as an object. Alternatively, the quoted strings `"exp(z)"` or `"exp"` can be used as well (see section [ways to provide functions to *phasePortrait*](#ways_functions) below).
```{r eval=FALSE, figure_7, fig.width = 5, fig.height = 5, results = 'hide', fig.align='center', fig.show='hold', cache = TRUE, fig.cap = 'Phase portrait of the function $f(z)=\\mathrm{e}^z$ in the window $\\left|\\Re(z)\\right| < 8.5$ and $\\left|\\Im(z)\\right| < 8.5$ with iso-modulus lines.'}
op <- par(mar = c(5.1, 4.1, 2.1, 2.1), cex = 0.8) # adjust plot margins
# and general text size
phasePortrait(exp, xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "pm",
xlab = "real", ylab = "imaginary", nCores = 2)
par(op) # reset graphics parameters to previous values
```
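The relationship between $\Re(z)$, $\Im(z)$ and the modulus and argument of $\mathrm{e}^z$ can also be checked numerically (illustration only):
```{r, eval = FALSE}
# For w = exp(z): Mod(w) = exp(Re(z)) and Arg(w) = Im(z) (modulo 2*pi)
z <- 1.2 + 0.7i
c(Mod(exp(z)), exp(Re(z)))   # both approx. 3.32
c(Arg(exp(z)), Im(z))        # both 0.7
```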
If we now define the argument `pi2Div` as a number $n\:(n\in\mathbb{N})$ and use it for determining the angular difference $\Delta\varphi=\frac{2\pi}{n}$ between two subsequent phase angle reference lines, our default link between `pi2Div` and `logBase` (which is the ratio $b$ of the moduli at two subsequent iso-modulus lines) establishes $b=\mathrm{e}^{\Delta\varphi}$. This means that adding $\Delta\varphi$ to the argument of any $w=\mathrm{e}^z\:(z\in\mathbb{C})$ and increasing its modulus by the factor $\mathrm{e}^{\Delta\varphi}$ correspond to reference line steps of equal width in a plot of $f(z)=\mathrm{e}^z$. You can visualize this with the following **R** code (active 7" x 2.8" screen graphics device suggested, e.g. `x11(width = 7, height = 2.8)`):
```{r eval=FALSE, figure_8, fig.width=7, fig.height=2.8, results='hide', fig.align='center', fig.show='hold', cache=TRUE, fig.cap='The function $f(z)=\\mathrm{e}^z$ portrayed with the default coupling of `pi2Div` and `logBase` as implemented in *phasePortrait*.'}
# divide graphics device into three regions and adjust plot margins
op <- par(mfrow = c(1, 3), mar = c(0.2, 0.2, 0.4, 0.2))
for(n in c(6, 9, 18)) {
phasePortrait("exp(z)", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
pi2Div = n, pType = "pma", axes = FALSE, nCores = 2)
# separate title call (R base graphics) for nicer line adjustment, just cosmetics
title(paste("pi2Div = ", n, ", logBase = exp(2*pi/pi2Div)", sep = ""),
line = -1.2, cex.main = 0.9)
}
par(op) # reset graphics parameters to previous values
```
As expected, the default coupling of both arguments produces square patterns when applied to a phase portrait of the exponential function, which can, in this respect, serve as a visual reference. Recall that equidistant modulus reference lines (in ascending order) indicate an exponentially growing modulus. In the middle phase portrait, one such step means (approximately) doubling the modulus. From left to right, the plot covers 24 of these steps, indicating a total increase of the modulus by a factor of $2^{24}$, which amounts to almost 17 million.
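The numbers behind this statement (illustration only):
```{r, eval = FALSE}
# Default logBase for pi2Div = 9, and the modulus increase over 24
# (approximate) doublings
exp(2 * pi / 9)   # approx. 2.01
2^24              # 16777216, i.e. almost 17 million
```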
### Fine tuning shading and contrast
For optimizing visualization in a technical sense, as well as for aesthetic purposes, it may be useful to adjust shading and contrast of the argument and modulus reference zones mentioned above. This is done by modifying the parameters `darkestShade` ($s$) and `lambda` ($\lambda$) when calling `phasePortrait`. These two parameters can be used to steer the transition from the lower to the upper edge of a reference zone. They address the v-value of the [hsv color model](https://en.wikipedia.org/wiki/HSL_and_HSV), which can take values between 0 and 1, indicating maximum darkness (black), and no shading at all, respectively. Here, $s$ gives the v-value at the lower edge of a reference zone, and $\lambda$ determines the interpolation from there to the upper edge, where no shading is applied. The intended use is $\lambda > 0$, where small values sharpen the contrast between shaded and non-shaded zones and large values soften it. More precisely, the shading value $v$ is calculated as:
$$
v = s + (1-s)\cdot x^{1/\lambda}
$$
For modulus zone shading at a point $z$ in the complex plane when portraying a function $f(z)$, $x$ is the fractional part of $\log_b{\left|f(z)\right|}$, with the base $b$ being the parameter `logBase` that defines the modulus reference zoning (see above). For shading argument reference zones, $x$ is the position of the argument of $f(z)$ between the lower and the upper angle of its reference zone, linearly mapped to the range $[0, 1[$. The following code generates a $3\times3$ matrix of phase portraits of $f(z)=\tan{z^2}$ with $\lambda$ and $s$ changing along the rows and columns, respectively. Run the code for visualizing these concepts (active 7" x 7" screen graphics device suggested, e.g. `x11()`).
```{r eval=FALSE, figure_9, fig.width=5, fig.height=5, fig.show='hold', results='hide', cache=TRUE, fig.cap='Tuning reference zone contrast with the parameters `darkestShade` (column-wise, 0, 0.2, 0.4), and `lambda` (row-wise, 0.1, 1, 10).'}
op <- par(mfrow = c(3, 3), mar = c(0.2, 0.2, 0.2, 0.2))
for(lb in c(0.1, 1, 10)) {
for(dS in c(0, 0.2, 0.4)) {
phasePortrait("tan(z^2)", xlim = c(-1.7, 1.7), ylim = c(-1.7, 1.7),
pType = "pm", darkestShade = dS, lambda = lb,
axes = FALSE, xaxs = "i", yaxs = "i", nCores = 2)
}
}
par(op)
```
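To see how $s$ and $\lambda$ shape this interpolation numerically, here is a standalone sketch of the shading rule from above (for illustration only, not the package's internal code):
```{r, eval = FALSE}
# v = s + (1 - s) * x^(1/lambda), evaluated for a few values of x
shadeV <- function(x, s, lambda) s + (1 - s) * x^(1 / lambda)
x <- seq(0, 1, 0.25)
rbind(lambda_0.1 = shadeV(x, s = 0.2, lambda = 0.1),   # sharp contrast
      lambda_1   = shadeV(x, s = 0.2, lambda = 1),     # linear transition
      lambda_10  = shadeV(x, s = 0.2, lambda = 10))    # soft contrast
```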
Additional possibilities exist for tuning the interplay of modulus and argument reference zones when they are used in combination; this can be controlled with the parameter `gamma` when calling `phasePortrait`. The maximum brightness of the colors in a phase portrait is adjustable with the parameter `stdSaturation` (see documentation of `phasePortrait`; we will also get back to these points in the chapter [aesthetic hints](#hints_artistic) below).
### Be aware of branch cuts
When exploring functions with *phasePortrait*, discontinuities of certain functions can become visible as abrupt color changes. Typical examples are integer root functions which, for a given point $z, z\in\mathbb{C}\setminus\lbrace0\rbrace$ in the complex plane, can attain $n$ values with $n$ being the root's degree. It takes, so to speak, $n$ full cycles around the origin of the complex plane in order to cover all values obtained from a function $f(z)=z^{1/n}, n\in\mathbb{N}$. The code below creates an illustration comprising three phase portraits with branch cuts (dashed lines), illustrating the three values of $f(z)=z^{1/3}$, $z\in\mathbb{C}\setminus\lbrace0\rbrace$. The transitions between the phase portraits are indicated by same-coloured arrows pointing at the branch cuts. For running the code, an open 7" x 2.7" graphics device is suggested, e.g. `x11(width = 7, height = 2.8)`.
```{r eval=FALSE, figure_10, fig.width=7, fig.height=2.7, results='hide', fig.align='center', fig.show='hold', cache=TRUE, fig.cap='Three phase portraits with branch cuts (dashed line), illustrating the three values of $f(z)=z^{1/3}$, $z \\in \\mathbb{C} \\setminus \\lbrace 0 \\rbrace$. The transitions between the phase portraits are indicated by same-coloured arrows pointing at the branch cuts.'}
op <- par(mfrow = c(1, 3), mar = c(0.4, 0.2, 0.2, 0.2))
for(k in 0:2) {
FUNstring <- paste0("z^(1/3) * exp(1i * 2*pi/3 * ", k, ")")
phasePortrait(FUN = FUNstring,
xlim = c(-1.5, 1.5), ylim = c(-1.5, 1.5), pi2Div = 12,
axes = FALSE, nCores = 2)
title(sub = paste0("k = ", k), line = -1)
# emphasize branch cut with a dashed line segment
segments(-1.5, 0, 0, 0, lwd = 2, lty = "dashed")
# draw colored arrows
upperCol <- switch(as.character(k),
"0" = "black", "1" = "red", "2" = "green")
lowerCol <- switch(as.character(k),
"0" = "green", "1" = "black", "2" = "red")
arrows(x0 = c(-1.2), y0 = c(1, -1), y1 = c(0.2, -0.2),
lwd = 2, length = 0.1, col = c(upperCol, lowerCol))
}
par(op)
```
After you have run the code, have a look at the leftmost diagram first. Note that the argument reference lines have been adjusted to represent angle distances of $30°$, i.e. `pi2Div` = 12. Most noticeable is the abrupt color change from yellow to magenta along the negative real axis (emphasized with a dashed line). This is what is called a *branch cut*, and it suggests that our picture of the function $f(z)=z^{1/3}$ is not complete. As the three third roots of any complex number $z=r\cdot\mathrm{e}^{\mathrm{i}\varphi}, z\in\mathbb{C}\setminus\lbrace0\rbrace$ are $r^{1/3}\cdot\mathrm{e}^{\mathrm{i}\cdot(\varphi+k\cdot2\pi)/3}; k=0,1,2; \varphi\in\left[0,2\pi\right[$, we require three different phase portraits, one for each $k$, as shown in the figure above. With the argument reference line distance being $30°$, it is easy to see that each phase portrait covers a total argument range of $120°$, i.e. $2\pi/3$.
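These three roots can be computed directly for an example number (illustration only):
```{r, eval = FALSE}
# The three third roots of z = -2 + 1i, following the formula above
z     <- -2 + 1i
r     <- Mod(z)
phi   <- Arg(z) %% (2 * pi)   # argument mapped to [0, 2*pi)
roots <- r^(1/3) * exp(1i * (phi + 2 * pi * (0:2)) / 3)
roots
roots^3                       # each root cubed gives back -2 + 1i (up to rounding)
```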
Obviously, each of the three portraits has a branch cut along the negative real axis, and the colors at the branch cuts show where the transitions between the phase portraits have to happen. In the figure, we have illustrated this by arrows pointing to the branch cuts. Same-colored arrows in different phase portraits indicate the transitions. Thus, the first phase portrait ($k = 0$) links to the second ($k = 1$) in their yellow zones (black arrows); the second links to the third ($k = 2$) in their blue zones (red arrows), and the third links back to the first in their magenta zones (green arrows). Actually, one could imagine stacking the three phase portraits in ascending order, cutting them at the dashed line, and gluing the branch cuts together according to the correct transitions. The resulting object is a Riemann surface with each phase portrait being a 'sheet'. See more about this fascinating concept in @wegert_visualcpx_2012, Chapter 7.
While the function $f(z)=z^{1/3}$ could be fully covered with three phase portraits, $f(z)=\log z$ has an infinite number of branches. As the (natural) logarithm of any complex number $z=r\cdot\mathrm{e}^{i\cdot\varphi}, r>0$ is $\log z=\log r+\mathrm{i}\cdot\varphi$, it is evident that the imaginary part of $\log z$ increases linearly with the argument of $z$, $\varphi$. In terms of phase portraits, this means an infinite number of stacked 'sheets' in either direction, clockwise and counterclockwise. Neighboring sheets connect at a branch cut. Run the code below to illustrate this with a phase portrait of $\log z=\log r+\mathrm{i}\cdot(\varphi+k\cdot2\pi), r > 0, \varphi\in\left[0,2\pi\right[$ for $k=-1, 0, 1$ (active 7" x 2.7" screen graphics device suggested, e.g. `x11(width = 7, height = 2.7)`). In the resulting illustration, the branch cuts are marked with dashed white lines.
```{r eval=FALSE, figure_11, fig.width=7, fig.height=2.7, fig.align='center', results='hide', fig.show='hold', cache=TRUE, fig.cap='Three branches of $\\log z=\\log r+\\mathrm{i}\\cdot(\\varphi + k\\cdot2\\pi), r>0, \\varphi\\in\\left[0,2\\pi\\right[$, with $k=-1,0,1$. The branch cuts are marked with dashed white lines.'}
op <- par(mfrow = c(1, 3), mar = c(0.4, 0.2, 0.2, 0.2))
for(k in -1:1) {
FUNstring <- paste0("log(Mod(z)) + 1i * (Arg(z) + 2 * pi * ", k, ")")
phasePortrait(FUN = FUNstring, pi2Div = 36,
xlim = c(-2, 2), ylim = c(-2, 2), axes = FALSE, nCores = 2)
segments(-2, 0, 0, 0, col = "white", lwd = 1, lty = "dashed")
title(sub = paste0("k = ", k), line = -1)
}
par(op)
```
### Riemann sphere plots
A convenient way to visualize the whole complex number plane is based on a stereographic projection suggested by Bernhard Riemann (see @wegert_visualcpx_2012, p. 20 ff. and p. 39 ff.). The *Riemann Sphere* is a sphere with radius 1, centered around the origin of the complex plane. It is cut into an upper (northern) and lower (southern) half by the complex plane. By connecting any point on the complex plane to the north pole with a straight line, the line's intersection with the sphere's surface marks the location on the sphere onto which the point is projected. Thus, all points inside the unit disk on the complex plane are projected onto the southern hemisphere, the origin being represented by the south pole. In contrast, all points outside the unit disk are projected onto the northern hemisphere, the north pole representing the *point at infinity*. For visualizing both hemispheres as 2D phase portraits, they have to be projected onto a flat surface in turn.
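For reference, the projection can also be written down explicitly. Under the conventions just described (unit sphere centered at the origin, projection towards the north pole), a point $z=x+\mathrm{i}y$ of the complex plane corresponds to the following point on the sphere's surface. This is a standard textbook formula, given here only for orientation; it is not used anywhere in the package:

$$
x+\mathrm{i}y \;\longmapsto\; \left(\frac{2x}{x^2+y^2+1},\;\frac{2y}{x^2+y^2+1},\;\frac{x^2+y^2-1}{x^2+y^2+1}\right)
$$

For $|z|<1$ the third coordinate is negative, i.e. the point lands on the southern hemisphere; for $|z|>1$ it is positive, in line with the description above.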
If we perform a stereographic projection of the southern hemisphere from the north pole to the complex plane (and look at the plane's upper - the northern - side), this obviously results in a phase portrait on the untransformed complex plane, as was the case for all examples shown so far in this text. We can perform an analogous procedure for the northern hemisphere, projecting it from the south pole to the complex plane. We now want to think of the northern hemisphere projection as layered on top of the southern hemisphere projection, because the northern hemisphere, which it depicts, naturally lies on top of the southern hemisphere. If, in a 'normal' visualization of the complex plane (orthogonal real and imaginary axes), a point at any location represents a complex number $z$, a point at the same location in the northern hemisphere projection is mapped to $1/z$. The origin is mapped to the point at infinity. Technically, this mapping can be easily achieved when calling the function `phasePortrait` by setting the flag `invertFlip = TRUE` (default is `FALSE`). The resulting map is, in addition, rotated counter-clockwise around the point at infinity by an angle of $\pi$. As @wegert_visualcpx_2012 argues, this way of mapping has a convenient visual effect: Consider two phase portraits of the same function, one made with `invertFlip = FALSE` and the other one with `invertFlip = TRUE`. Both are shown side by side (see the pairs of phase portraits in the next two figures below). This can be imagined as a view into a Riemann sphere that has been cut open along the equator and swung open along a hinge in the line $\Re(z)=1$ (if the southern hemisphere is at the left side) or $\Re(z)=-1$ (if the northern hemisphere is at the left side). In order to highlight the Riemann sphere in phase portraits if desired, we provide the function `riemannMask`. Let's first demonstrate this for the function $f(z)=z$.
```{r figure_12, fig.width=7, fig.height=3.5, fig.align='center', results='hide', fig.show='hold', cache=TRUE, fig.cap='Mapping the complex number plane on the Riemann sphere. Left: lower (southern) hemisphere; right: upper (northern) hemisphere. Folding both figures face to face along a vertical line in the middle between them can be imagined as closing the Riemann sphere.'}
op <- par(mfrow = c(1, 2), mar = rep(0.1, 4))
# Southern hemisphere
phasePortrait("z", xlim = c(-1.4, 1.4), ylim = c(-1.4, 1.4),
pi2Div = 12, axes = FALSE, nCores = 2)
riemannMask(annotSouth = TRUE)
# Northern hemisphere
phasePortrait("z", xlim = c(-1.4, 1.4), ylim = c(-1.4, 1.4),
pi2Div = 12, axes = FALSE, invertFlip = TRUE, nCores = 2)
riemannMask(annotNorth = TRUE)
par(op)
```
The function `riemannMask` provides several options, among others adjusting the mask's transparency or adding annotations to landmark points (see the function's documentation). In the next example, we will use it without any such features. Consider the following function:
$$
f(z)=\frac{(z^{2}+\frac{1}{\sqrt{2}}+\frac{\mathrm{i}}{\sqrt{2}})\cdot(z+\frac{1}{2}+\frac{\mathrm{i}}{2})}{z-1}
$$
This function has two zeroes exactly located on the unit circle, $z_1=\mathrm{e}^{\mathrm{i}\frac{5\pi}{8}}$, and $z_2=\mathrm{e}^{\mathrm{i}\frac{13\pi}{8}}$. Moreover, it has another zero inside the unit circle, $z_3=\frac{1}{\sqrt{2}}\cdot\mathrm{e}^{\mathrm{i}\frac{5\pi}{4}}$. Equally obvious, it has a pole exactly on the unit circle, $z_4=1$. Less obvious, it has a double pole, $z_5$, at the point at infinity. The code required for producing the following figure looks somewhat bulky, but most lines are required for annotating the zeroes and poles. Note that the real axis coordinates of the northern hemisphere's annotation do not have to be multiplied with $-1$ in order to take into account the rotation of the inverted complex plane. By calling `phasePortrait` with `invertFlip = TRUE` the coordinate system of the plot is already set up correctly and will remain so for subsequent operations.
```{r figure_13, fig.width=7, fig.height=3.7, fig.align='center', results='hide', fig.show='hold', cache=TRUE, fig.cap='Riemann sphere plot of the function $f(z)=\\frac{(z^{2}+\\frac{1}{\\sqrt{2}}+\\frac{\\mathrm{i}}{\\sqrt{2}})\\cdot(z+\\frac{1}{2}+\\frac{\\mathrm{i}}{2})}{z-1}$. Annotated are the zeroes $z_1$, $z_2$, $z_3$, and the poles $z_4$, $z_5$.'}
op <- par(mfrow = c(1, 2), mar = c(0.1, 0.1, 1.4, 0.1))
# Define function
FUNstring <- "(z^2 + 1/sqrt(2) * (1 + 1i)) * (z + 1/2*(1 + 1i)) / (z - 1)"
# Southern hemisphere
phasePortrait(FUNstring, xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2),
pi2Div = 12, axes = FALSE, nCores = 2)
riemannMask()
title("Southern Hemisphere", line = 0)
# - annotate zeroes and poles
text(c(cos(5/8*pi), cos(13/8*pi), cos(5/4*pi)/sqrt(2), 1),
c(sin(5/8*pi), sin(13/8*pi), sin(5/4*pi)/sqrt(2), 0),
c(expression(z[1]), expression(z[2]), expression(z[3]), expression(z[4])),
pos = c(1, 2, 4, 2), offset = 1, col = "white")
# Northern hemisphere
phasePortrait(FUNstring, xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2),
pi2Div = 12, axes = FALSE, invertFlip = TRUE, nCores = 2)
riemannMask()
title("Northern Hemisphere", line = 0)
# - annotate zeroes and poles
text(c(cos(5/8*pi), cos(13/8*pi), cos(5/4*pi)*sqrt(2), 1, 0),
c(sin(5/8*pi), sin(13/8*pi), sin(5/4*pi)*sqrt(2), 0, 0),
c(expression(z[1]), expression(z[2]), expression(z[3]),
expression(z[4]), expression(z[5])),
pos = c(1, 4, 3, 4, 4), offset = 1,
col = c("white", "white", "black", "white", "white"))
par(op)
```
With some consideration it becomes quite easy to see that both phase portraits are, in a sense, everted versions of each other. What is inside the unit disk in the left phase portrait is outside in the right one, and vice versa. If you mentally visualize both unit disks touching at, e.g., point $z_4$, and one disk rolling along the edge of the other, you will see immediately how one disk continues the picture shown in the other. Having the 'Riemann Mask' somewhat transparent is helpful for orientation (see the function's documentation). Note how the zeroes $z_{1, 2}$ and the pole $z_4$, which are located exactly on the unit circle, continue outside the unit disk on 'their own' plane and in the other unit disk. Note also that on both hemispheres zeroes and poles can be identified by the same sequence of colors: When circling counter-clockwise around the point of interest, a zero will always exhibit the color sequence red, yellow, green, blue, magenta, red ..., while this order will always be reversed for a pole. As the pole at the point at infinity ($z_5$) is a double pole, the color sequence is run through twice during one turn around the pole. As the zero $z_3$ is inside the unit disk representing the southern hemisphere, it lies outside the northern hemisphere disk, but it is still visible on the continuation of the inverted (and rotated) complex plane (belonging to the northern hemisphere) outside the unit disk. Observe how the grid lines in the vicinity of $z_3$ merge when passing from one unit disk to the other.
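If you would like to check the color-order rule in isolation, a minimal sketch like the following may help (the function is an arbitrary example, unrelated to the figure above): it has a double zero at $z=-1$ and a double pole at $z=1$, so circling around either point runs through the color cycle twice, in opposite orientations.

```{r, eval=FALSE}
# Arbitrary example: double zero at z = -1, double pole at z = 1.
# Counterclockwise around the zero, the colors cycle through
# red, yellow, green, blue, magenta (twice per turn); around the pole
# the order is reversed.
phasePortrait("(z + 1)^2 / (z - 1)^2", xlim = c(-3, 3), ylim = c(-2, 2),
              pType = "pa",
              nCores = 2) # Increase or leave out for higher performance
```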
### Fractals
While visualizing fractals is not among the original purposes of this package, *phasePortrait* allows for an unusual way of displaying those fractals that can be expressed as functions of complex numbers. Classic examples are the [Mandelbrot set](https://en.wikipedia.org/wiki/Mandelbrot_set) and the [Julia set](https://en.wikipedia.org/wiki/Julia_set), and this package provides the functions `mandelbrot` and `juliaNormal` (implemented in C++) for supporting visualization. The Mandelbrot set comprises all complex numbers $z$ for which the sequence $a_{n+1}=a_n^{2}+z$ with $a_0=0$ remains bounded for all $n\in\mathbb{N_0}$. Normal Julia sets are closely related to the Mandelbrot set. They comprise all complex numbers $z$ for which the sequence $a_{n+1}=a_n^2+c$ with $a_0=z$ remains bounded for all $n\in\mathbb{N_0}$. The parameter $c$ is a complex number, and interesting visualizations with this package (i.e. other than a blank screen) are only obtained when $c$ is an element of the Mandelbrot set. For the author's taste, the Julia set visualizations look best and are most interesting when $c$ is located near the border of the Mandelbrot set.
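To make the boundedness criterion more tangible, here is a deliberately naive **R** sketch of the membership test for a single number $z$; the function `inMandelbrot` and its arguments are made up for illustration only, while the package's `mandelbrot` function is a much faster C++ implementation.

```{r, eval=FALSE}
# Naive escape-time test for a single z (illustration only, not the
# package's implementation)
inMandelbrot <- function(z, itDepth = 500, escapeRadius = 2) {
  a <- 0 + 0i
  for(i in seq_len(itDepth)) {
    a <- a^2 + z
    if(Mod(a) > escapeRadius) return(FALSE) # the sequence escapes
  }
  return(TRUE) # still bounded after itDepth iterations
}
inMandelbrot(-1 + 0i) # TRUE: -1 belongs to the Mandelbrot set
inMandelbrot( 1 + 1i) # FALSE: the sequence escapes quickly
```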
The classic visualizations of the Mandelbrot and the Julia set use a uniform color (usually black) for all points which belong to the set and color the points outside the set dependent on how quickly they diverge. Visualizations with *phasePortrait*, in contrast, color the points inside the sets by the argument and modulus of the number to which the series described above converges (or, more precisely, at which the iteration terminates). While the results are visually appealing (please try the code examples we provide below), they are not unambiguous, as the sequences that define the sets do not always converge to one single value, but to limit cycles.
The following code plots an overview picture of the Mandelbrot set. Note that the function `mandelbrot` is called with a fairly low value (30) for the parameter `itDepth`, which defines the number of iterations to be calculated (default is 500). This is because we are using *phasePortrait* for plotting into a graphics window with a comparatively low resolution (see section [defining image quality](#img_qual) for how to obtain high-quality phase portraits). While a high number of iterations produces a more accurate representation of the set, the resulting filigree structures might become hardly visible or even invisible when the plot resolution is low.
```{r, eval=FALSE}
x11(width = 8, height = 2/3 * 8) # Open graphics window on screen
op <- par(mar = c(0, 0, 0, 0)) # Do not leave plot margins
phasePortrait(mandelbrot, moreArgs = list(itDepth = 30),
              nCores = 1, # Increase or leave out for higher performance
xlim = c(-2, 1), ylim = c(-1, 1),
hsvNaN = c(0, 0, 0), # black color for points outside the set
axes = FALSE, # No coordinate axes
xaxs = "i", yaxs = "i") # No space between plot region and plot
par(op) # Set graphics parameters to original
```
With the code example below we plot a cutout of the Mandelbrot set into a png file with a resolution of 600 dpi using the default number of iterations (500). We are using a few features that are just commented here, but will be explained below in the section [aesthetic hints](#hints_artistic). Other graphics file formats can be used in almost the same way. Type `?png` in order to see all formats and how to call them. See also section [defining image quality](#img_qual).
```{r, eval=FALSE}
res <- 600 # set resolution to 600 dpi
# open png graphics device in DIN A4 format
# DIN A format has an edge length ratio of sqrt(2)
png("Mandelbrot Example.png",
width = 29.7, height = 29.7/sqrt(2), # DIN A4 landscape
units = "cm",
res = res) # resolution is required
op <- par(mar = c(0, 0, 0, 0)) # set graphics parameters - no plot margins
xlim <- c(-1.254, -1.248) # horizontal (real) plot limits
# the function below adjusts the imaginary plot limits to the
# desired ratio (sqrt(2)) centered around the desired imaginary value
ylim <- ylimFromXlim(xlim, centerY = 0.02, x_to_y = sqrt(2))
phasePortrait(mandelbrot,
nCores = 1, # Increase or leave out for higher performance
xlim = xlim, ylim = ylim,
hsvNaN = c(0, 0, 0), # Black color for NaN results
xaxs = "i", yaxs = "i", # suppress R's default axis margins
axes = FALSE, # do not plot axes
res = res) # resolution is required
par(op) # reset graphics parameters
dev.off() # close graphics device and complete the png file
```
Within the same technical setting, the following two examples plot Julia sets into png files.
```{r, eval=FALSE}
res <- 600
png("Julia Example 1.png", width = 29.7, height = 29.7/sqrt(2),
units = "cm", res = res)
op <- par(mar = c(0, 0, 0, 0))
xlim <- c(-1.8, 1.8)
ylim <- ylimFromXlim(xlim, centerY = 0, x_to_y = sqrt(2))
phasePortrait(juliaNormal,
# see documentation of juliaNormal about the arguments
# c and R_esc
moreArgs = list(c = -0.09 - 0.649i, R_esc = 2),
nCores = 1, # Increase or leave out for higher performance
xlim = xlim, ylim = ylim,
hsvNaN = c(0, 0, 0),
xaxs = "i", yaxs = "i",
axes = FALSE,
res = res)
par(op)
dev.off()
```
```{r, eval=FALSE}
res <- 600
png("Julia Example 2.png", width = 29.7, height = 29.7/sqrt(2),
units = "cm", res = res)
op <- par(mar = c(0, 0, 0, 0))
xlim <- c(-0.32, 0.02)
ylim <- ylimFromXlim(xlim, centerY = -0.78, x_to_y = sqrt(2))
phasePortrait(juliaNormal,
# see documentation of juliaNormal about the arguments
# c and R_esc
moreArgs = list(c = -0.119 - 0.882i, R_esc = 2),
nCores = 1, # Increase or leave out for higher performance
xlim = xlim, ylim = ylim,
hsvNaN = c(0, 0, 0),
xaxs = "i", yaxs = "i",
axes = FALSE,
res = res)
par(op)
dev.off()
```
### Phase portraits based on a polar chessboard
Since version 1.1.0, *viscomplexr* provides the function `phasePortraitBw` which allows for creating two-color phase portraits of complex functions based on a polar chessboard grid (cf. @wegert_visualcpx_2012, p. 35). Compared to the full phase portraits that can be made with `phasePortrait`, two-color portraits omit information. They can, however, be very helpful tools for interpretation, especially in combination with full phase portraits. Besides, two-color phase portraits have a special aesthetic appeal which is worth exploring in its own right. In its parameters and its mode of operation, `phasePortraitBw` is very similar to `phasePortrait` (see the documentations of both functions for details). The parameters `pi2Div` and `logBase` have exactly the same effect as with `phasePortrait`. Instead of the parameter `pType`, `phasePortraitBw` has the parameter `bwType` which allows for the three settings "m", "a", and "ma". These produce two-color phase portraits which take into account the modulus only, the argument (phase angle) only, and the combination of both, respectively. Plots made with the latter option show a chessboard-like color alternation over the tiles resulting from the intersection of modulus and argument zones. The following code maps the complex plane to itself, comparing all three options of `bwType`. It also adds a standard phase portrait for comparison.
```{r, eval=FALSE}
# Map the complex plane on itself, show all bwType options
x11(width = 8, height = 8)
op <- par(mfrow = c(2, 2), mar = c(4.1, 4.1, 1.1, 1.1))
for(bwType in c("ma", "a", "m")) {
phasePortraitBw("z", xlim = c(-2, 2), ylim = c(-2, 2),
bwType = bwType,
xlab = "real", ylab = "imaginary",
nCores = 2) # Increase or leave out for higher performance
}
# Add normal phase portrait for comparison
phasePortrait("z", xlim = c(-2, 2), ylim = c(-2, 2),
xlab = "real", ylab = "imaginary",
pi2Div = 18, # Use same angular division as default
# in phasePortraitBw
nCores = 2) # Increase or leave out for higher performance
par(op)
```
Note that the parameter `pi2Div` should not be chosen as an odd number when working with `phasePortraitBw`. In that case, the first and the last phase angle zone would obtain the same color, which is probably an undesired effect for most applications (a short illustration follows after the next code example). While `pi2Div = 9` is the default setting in `phasePortrait` for good reasons (see above), its default in `phasePortraitBw` is 18. Also by default, the parameter `logBase` is linked to `pi2Div` in the same way as in `phasePortrait` (`logBase = exp(2*pi/pi2Div)`). So, if the defaults of both `phasePortrait` and `phasePortraitBw` are used, each zone of the former covers two zones of the latter. In the code above, however, `pi2Div` was set to 18 also in the call to `phasePortrait` for direct comparability. This is also the case in the code below, which displays a rational function.
```{r eval=FALSE}
# A rational function, show all bwType options
x11(width = 8, height = 8)
funString <- "(z + 1.4i - 1.4)^2/(z^3 + 2)"
op <- par(mfrow = c(2, 2), mar = c(4.1, 4.1, 1.1, 1.1))
for(bwType in c("ma", "a", "m")) {
phasePortraitBw(funString, xlim = c(-2, 2), ylim = c(-2, 2),
bwType = bwType,
xlab = "real", ylab = "imaginary",
nCores = 2) # Increase or leave out for higher performance
}
# Add normal phase portrait for comparison
phasePortrait(funString, xlim = c(-2, 2), ylim = c(-2, 2),
xlab = "real", ylab = "imaginary",
pi2Div = 18, # Use same angular division as default
# in phasePortraitBw
nCores = 2) # Increase or leave out for higher performance
par(op)
```
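As noted above, odd values of `pi2Div` are better avoided with `phasePortraitBw`. If you want to see why, the following minimal sketch portrays the identity function with `pi2Div = 9` and `bwType = "a"`: the first and the last argument zone obtain the same color, so two adjacent zones merge visually into one double-width zone.

```{r, eval=FALSE}
# Illustration of the problem with odd pi2Div values in phasePortraitBw:
# with pi2Div = 9, the first and the last argument zone obtain the same
# color and are no longer distinguishable from each other
phasePortraitBw("z", xlim = c(-2, 2), ylim = c(-2, 2),
                pi2Div = 9, bwType = "a",
                xlab = "real", ylab = "imaginary",
                nCores = 2) # Increase or leave out for higher performance
```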
While the letters 'Bw' in `phasePortraitBw` stand for 'black/white', the natural colors for such chessboard plots, the user is not limited to these. The choice of colors is defined by the parameter `bwCols` which, by default, is set as `bwCols = c("black", "gray95", "gray")`. The first and the second color are used for coloring the alternating zones, while the last color is used in cases where the function of interest produces results which cannot be sufficiently evaluated for modulus or argument (`NaN`, partly `Inf`). Note that the second color, `"gray95"` is almost, but not exactly white, which contrasts 'white' tiles against a white background in a visually unobtrusive way. The parameter `bwCols` can be freely changed; values must be either color names that **R** can interpret (call `colors()` for a list) or hexadecimal color strings like e.g. `"#00FF32"` (the format is `"#RRGGBB"` with 'RR', 'GG', and 'BB' representing red, green, and blue with an allowed value range of `00` to `FF` for each).
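As a minimal sketch with a non-default color choice (the colors below are an arbitrary pick):

```{r, eval=FALSE}
# Chessboard-style plot of sin(z)/z with custom colors; "darkslateblue" and
# "wheat" replace the default black and near-white tiles, "gray" is kept as
# the color for undefined values
phasePortraitBw("sin(z)/z", xlim = c(-6, 6), ylim = c(-6, 6),
                bwCols = c("darkslateblue", "wheat", "gray"),
                xlab = "real", ylab = "imaginary",
                nCores = 2) # Increase or leave out for higher performance
```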
## Aesthetic hints {#hints_artistic}
While phase portraits were originally invented for scientific and technical purposes, their aesthetic quality is a feature in itself. In this section, we give a few technical hints that might be helpful for obtaining appealing graphics. We will not only be talking about features implemented in this package, but also mention some useful options provided by R base graphics. A general recommendation when plotting for maximum aesthetic results is to first check out the function to be plotted at a lower resolution (e.g. the default 150 dpi) and in a smaller format (but with the desired device aspect ratio), adjust the domain (`xlim`, `ylim`), and all other parameters of *phasePortrait* you might want to change (including the parameters `gamma` and `stdSaturation` we have not mentioned in this vignette so far). Only when you are satisfied with the result at that stage should you start the run with the desired final resolution and format, as plots at high resolution and in large formats may be time-consuming depending on your hardware (see also the section [defining image quality](#img_qual)). When using the function `riemannMask`, you could try changing the mask's transparency (`alphaMask`) and its color (`colMask`); often, black is a good alternative to the default white. Besides such simple issues there are, however, a few points we will talk about in more detail below.
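For example, a darker variant of the Riemann mask from the earlier example could be sketched as follows; the value given for `alphaMask` is only a starting point for your own experiments (see the documentation of `riemannMask` for how the transparency is interpreted):

```{r, eval=FALSE}
# Riemann sphere plot of f(z) = z with a black, somewhat transparent mask
# on a black background
op <- par(bg = "black", fg = "white", mar = rep(0.1, 4))
phasePortrait("z", xlim = c(-1.4, 1.4), ylim = c(-1.4, 1.4),
              pi2Div = 12, axes = FALSE,
              nCores = 2) # Increase or leave out for higher performance
riemannMask(colMask = "black", alphaMask = 0.7)
par(op)
```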
### The `par(op)` mechanism
As mentioned above, *phasePortrait* uses **R**'s base graphics system. This is a powerful tool; its functionality is, however, not always easy to understand and use. Many fundamental settings of base **R** graphics are stored in a set of parameters, which can be set or queried using the function `par()`. Among the important graphical parameters in our context are those which control the outer margins and the plot margins (`oma`, `omi`, `mar`, `mai`) and those which define the default background and foreground colors (`bg`, `fg`). Type `?par` to see a documentation of all parameters. Changing these parameters here and there during an **R** session can easily lead to graphical results that may be nice but hard to reproduce. To help avoid this, `par()`, when called to change one or more graphical parameters, invisibly returns all parameters with their values *before* the change. These can be stored in a variable and used to restore the original parameter values after the plotting has been done. This concept seems to be unknown to surprisingly many users of **R**:
```{r, eval = FALSE}
# Set the plot margins at all four sides to 1/5 inch with mai,
# set the background color to black with bg, and the default foreground
# color with fg (e.g. for axes and boxes around plots, or the color of
# the circle outline from the function riemannMask).
# We catch the previous parameter values in a variable called
# "op" ("old parameters")
op <- par(mai = c(1/5, 1/5, 1/5, 1/5), bg = "black", fg = "white")
# Make any phase portraits and/or other graphics of your interest
# ...
# Set the graphical parameters back to the values previously stored in op
par(op)
```
### Dealing with axes
Usually, when aiming for mainly aesthetic effects, you will want to suppress plot axes. As *phasePortrait* accepts, via its `...` argument, all arguments also accepted by **R**'s `plot.default`,
this can be easily achieved by providing the argument `axes = FALSE`:
```{r, eval = FALSE}
phasePortrait("tan(z^3 + 1/2 - 2i)/(1 - 1i - z)",
xlim = c(-6, 6), ylim = c(-3, 3),
axes = FALSE,
nCores = 2) # Increase or leave out for higher performance
```
Note that this does not only suppress both axes, but also the box usually drawn around a plot. If such a box is desired, it can be simply added afterwards by calling `box()`:
```{r, eval=FALSE}
phasePortrait("tan(z^3 + 1/2 - 2i)/(1 - 1i - z)",
xlim = c(-6, 6), ylim = c(-3, 3),
axes = FALSE,
nCores = 2) # Increase or leave out for higher performance
box()
```
If axes are desired together with a special aesthetic appeal (e.g. for presentations), it is worth trying out a black background and white axes. However, there are a few unexpected hurdles to overcome before the result looks as it should:
```{r, eval=FALSE}
# set background and foreground colors
op <- par(bg = "black", fg = "white")
# Setting the parameter fg has an effect on the box, the axes, and the axes'
# ticks, but not on the axis annotations and axis labels.
# Also the color of the title (main) is not affected.
# The colors of these elements have to be set manually and separately. While we
# could simply set them to "white", we set them, more flexibly, to the
# current foreground color (par("fg")).
phasePortrait("tan(z^3 + 1/2 - 2i)/(2 - 1i - z)",
xlim = c(-6, 6), ylim = c(-3, 3), col.axis = par("fg"),
xlab = "real", ylab = "imaginary", col.lab = par("fg"),
main = "All annotation in foreground color", col.main = par("fg"),
# Adjust text size
cex.axis = 0.9, cex.lab = 0.9,
nCores = 2) # Increase or leave out for higher performance
par(op)
```
Note that by default the axes are constructed with an overhang of 4% beyond the ranges given with `xlim` and `ylim` at each end. More often than not this looks nice, but sometimes it is undesired, e.g. when a phase portrait is intended to cover the full display without any frame and margin. This behavior is due to the graphical parameters `xaxs` and `yaxs` (axis style) being set to 'r' ('regular') by default. If these parameters are set to `xaxs = "i"` and `yaxs = "i"` ('internal'), no overhang is added. Both `xaxs` and `yaxs` can either be set in a call to `par()` or handed as arguments to *phasePortrait*. We will come back to these parameters in the following section.
### Device ratio and margins
You might want to plot a phase portrait that fully covers the graphics device. The following code example shows how to achieve this. First, it is necessary to set the plot margins to zero (note that the outer margins are zero by default, so, usually, there is no need to care about them). Second, as *phasePortrait* uses an aspect ratio of 1 by default, `xlim` and `ylim` have to exactly match the aspect ratio of the graphics device to be plotted in. In order to facilitate this, we provide the functions `ylimFromXlim` and `xlimFromYlim`. In the example, we use the former in order to match `xlim` and `ylim` to a device aspect ratio of 16/9. Third, in order to omit the 4% axis overhang (which would look like a margin), the parameters `xaxs` and `yaxs` are set to "i". Setting `axes` to FALSE is not absolutely necessary in this case, but is good style.
```{r, eval=FALSE}
# Open graphics device with 16/9 aspect ratio and 7 inch width
x11(width = 7, height = 9/16 * 7)
op <- par(mar = c(0, 0, 0, 0)) # Set plot margins to zero
xlim <- c(-3, 3)
# Calculate ylim with desired center fitting the desired aspect ratio
ylim <- ylimFromXlim(xlim, centerY = 0, x_to_y = 16/9)
phasePortrait(jacobiTheta, moreArgs = list(tau = 1i/5 + 1/5), pType = "p",
xlim = xlim, ylim = ylim,
xaxs = "i", yaxs = "i",
axes = FALSE,
nCores = 2) # Increase or leave out for higher performance
par(op)
```
Not many changes are necessary for obtaining a phase portrait like above but with a frame. A convenient way to do this is to set the outer margins of the graphics device in inches with the graphical parameter `omi` and the background to the desired color. Adding this to the code above, however, leads to differing horizontal and vertical frame widths. This occurs because, due to the margin setting, the required ratio of the `xlim` and `ylim` ranges is no longer exactly 16/9. The precise ratio has to be calculated and provided to `ylimFromXlim` as shown in the code example below.
```{r, eval=FALSE}
# Open graphics device with 16/9 aspect ratio and a width of 7 inches
x11(width = 7, height = 9/16 * 7)
# Set plot margins to zero, outer margins to 1/7 inch,
# and background color to black
outerMar <- 1/7 # outer margin width in inches
op <- par(mar = c(0, 0, 0, 0), omi = rep(outerMar, 4), bg = "black")
xlim <- c(-1.5, 0.5)
# Calculate ylim with desired center fitting the desired aspect ratio;
# however, the omi settings slightly change the required
# ratio of xlim and ylim
ratio <- (7 - 2*outerMar) / (7 * 9/16 - 2*outerMar)
ylim <- ylimFromXlim(xlim, centerY = 0, x_to_y = ratio)
phasePortrait("sin(jacobiTheta(z, tau))/z", moreArgs = list(tau = 1i/5 + 1/5),
pType = "p",
xlim = xlim, ylim = ylim,
xaxs = "i", yaxs = "i",
axes = FALSE,
nCores = 1) # Increase or leave out for higher performance
par(op)
```
## Technical moreabouts {#tech_moreabouts}
This chapter details a few technical points which might be of interest for optimizing the results obtained by using the **R** package at hand. We talk about different ways to [provide functions](#ways_functions) to *phasePortrait*, and about how to control [image quality](#img_qual). And there is more: The function *phasePortrait* has to perform several memory- and time-critical operations. In order to keep memory utilization on a reasonable level and to optimize computing times, the function works with [temporary files](#tempfiles) and [parallel processing](#par_proc). We explain both below, because the user can influence their behavior. For avoiding unnecessary copying of big arrays, *phasePortrait* also makes use of pointers, but as there is no related control option for the user, we reserve this for a later version of this vignette.
### Ways to provide functions to *phasePortrait* {#ways_functions}
#### Quoted character strings
Any function to be visualized with *phasePortrait* must be provided as the argument `FUN`. In some cases (see below), the argument `moreArgs` can turn out useful in combination with `FUN`. Probably the easiest way of defining `FUN` is a character string which is an expression **R** can evaluate as a function of a complex number $z$. See some examples:
```{r, eval = FALSE}
# Note that 'FUN =' is not required if the argument to FUN is handed to
# phasePortrait in the first position
phasePortrait(FUN = "1/(1 - z^2)", xlim = c(-5, 5), ylim = c(-5, 5), nCores = 2)
phasePortrait("sin((z - 2)/(z + 2))", xlim = c(-5, 5), ylim = c(-5, 5), nCores = 2)
phasePortrait("tan(z)", xlim = c(-5, 5), ylim = c(-5, 5), nCores = 2)
```
If your expression requires arguments besides $z$ you can provide them to *phasePortrait* by means of `moreArgs`, which expects a named list containing the additional arguments:
```{r, eval = FALSE}
phasePortrait("-1 * sum(z^c(-k:k))", moreArgs = list(k = 11),
xlim = c(-2, 2), ylim = c(-1.5, 1.5),
pType = "p",
nCores = 2) # Increase or leave out for higher performance
```
While we recommend other solutions (see below), it is also possible to hand over more extensive user-defined functions as character strings. To make this work, however, the function must be wrapped in a `vapply` construct which guarantees that the function's output is a complex number by setting `vapply`'s argument `FUN.VALUE` to `complex(1)`. Moreover, the first argument to `vapply` must be $z$. In such cases, it is often convenient to define the character string outside the call to *phasePortrait* and hand it over after that, as we do in the following example:
```{r, eval = FALSE}
funString <- "vapply(z, FUN = function(z) {
n <- 9
k <- z^(c(1:n))
rslt <- sum(sin(k))
return(rslt)
},
FUN.VALUE = complex(1))"
phasePortrait(funString, xlim = c(-2, 2), ylim = c(-2, 2),
nCores = 2) # Increase or leave out for higher performance
```
If such a function has arguments in addition to $z$, they can be included in the call to `vapply` and thus become part of the string (to support this, we provide the function `vector2string`; see the example in its documentation), but we do not recommend that. Anyway, if you must know, here it is:
```{r, eval = FALSE}
funString <- "vapply(z, FUN = function(z, fct) {
n <- 9
k <- z^(fct * c(1:n))
rslt <- sum(sin(k))
return(rslt)
},
fct = -1,
FUN.VALUE = complex(1))"
phasePortrait(funString, xlim = c(-2, 2), ylim = c(-2, 2),
nCores = 2) # Increase or leave out for higher performance
```
Probably the most useful application of this concept is when the `vapply` construct is pasted together at runtime with values for the additional arguments depending on what happened earlier. However, defining the function as a function object first and then simply passing its name to *phasePortrait* leads to more readable code:
```{r, eval = FALSE}
# Define function
tryThisOne <- function(z, fct, n) {
k <- z^(fct * c(1:n))
rslt <- prod(cos(k))
return(rslt)
}
# Call function by its name only, provide additional arguments via "moreArgs"
phasePortrait("tryThisOne", moreArgs = list(fct = 1, n = 5),
xlim = c(-2.5, 2.5), ylim = c(-2, 2),
nCores = 2) # Increase or leave out for higher performance
```
As the function in the example above requires two additional arguments besides $z$, we hand them over to *phasePortrait* via the argument `moreArgs`. This must be a named list, even in the case of only one additional argument; the names must match the names of the required arguments, and the list elements carry the argument values.
#### Function objects
Besides character strings as shown above, the argument `FUN` can also directly take function objects. The simplest case is an anonymous function definition:
```{r, eval = FALSE}
# Use argument "hsvNaN = c(0, 0, 0)" if you want the grey area black
phasePortrait(function(z) {
for(j in 1:20) {
z <- z * sin(z) - 1 + 1/2i
}
return(z)
},
xlim = c(-3, 3), ylim = c(-2, 2),
nCores = 2) # Increase or leave out for higher performance
```
Evidently, this can be used with `moreArgs` as well:
```{r, eval = FALSE}
# Use argument "hsvNaN = c(0, 0, 0)" if you want the grey area black
phasePortrait(function(z, n) {
for(j in 1:n) {
z <- z * cos(z)
}
return(z)
},
moreArgs = list(n = 27),
xlim = c(-3, 3), ylim = c(-2, 2),
nCores = 2) # Increase or leave out for higher performance
```
Any function object that is known to **R** by name, be it user-defined or contained in a package, will work in the same way. Just hand over the function object itself:
```{r, eval = FALSE}
# atan from package base
phasePortrait(atan, xlim = c(-pi, pi), ylim = c(-pi, pi),
nCores = 2)
# gammaz from package pracma (the package must be installed on your machine
# if you want this example to be working)
phasePortrait(pracma::gammaz, xlim = c(-9, 9), ylim = c(-5, 5),
nCores = 2)
# blaschkeProd from this package (moreArgs example)
# make random vector of zeroes
n <- 12
a <- complex(modulus = runif(n), argument = 2 * pi * runif(n))
# plot the actual phase portrait
phasePortrait(blaschkeProd, moreArgs = list(a = a),
xlim = c(-1.3, 1.3), ylim = c(-1.3, 1.3),
nCores = 2)
# User function example
tryThisOneToo <- function(z, n, r) {
for(j in 1:n) {
z <- r * (z + z^2)
}
return(z)
}
# Use argument "hsvNaN = c(0, 0, 0)" if you want the gray areas black
phasePortrait(tryThisOneToo, moreArgs = list(n = 50, r = 1/2 - 1/2i),
xlim = c(-3, 2), ylim = c(-2.5, 2.5),
nCores = 2)
```
Defining your own functions in C++ and using them with *phasePortrait* usually gives a substantial performance gain. This is especially true if they require operations that cannot be vectorized in **R** (i.e. if there is no way to avoid for-loops or similar). The ideal tool for integrating C++ code in **R** programs is *Rcpp* [@edelbuettel_rcpp_2017], but be aware that C++ functions compiled with `Rcpp::sourceCpp` will not work with *phasePortrait*, as this is not compatible with parallel processing. What it takes is to provide the C++ functions as, or as part of, an **R** package, which is not difficult at all. The functions of the `maths` family included in this package have all been coded in C++ and integrated with *Rcpp*.
### Defining image quality {#img_qual}
Clearly, there is a trade-off between the quality of an image plotted with *phasePortrait* and the computing time. The image quality is defined by the argument `res` and has a default value of 150 dpi. For pictures in standard sizes of about 30 x 20 cm, plotting with 150 dpi does not take much time, and for many purposes, this resolution is sufficient. When resolutions of 300, 600 or more dpi are desired for high-quality printouts, we recommend trying out everything with 150 dpi (and, maybe, in a small format) before starting the final high-quality run. Technically, early after being called, *phasePortrait* gets the plot region size of the active graphics device and calculates the number of required pixels from this size and the value of `res`. Note that **R** graphics devices for files, like `png`, `bmp`, `jpeg` and `tiff`, also expect a parameter `res` (if the units given for device height and width are cm or inches). For plotting to graphics files, we suggest storing the desired resolution in a variable first and passing it to both the graphics device and *phasePortrait*:
```{r, eval = FALSE}
res <- 300 # Define desired resolution in dpi
png("Logistic_Function.png", width = 40, height = 40 * 3/4,
units = "cm", res = res)
phasePortrait("1/(1+exp(-z))", xlim = c(-25, 25), ylim = c(-15, 15), res = res,
xlab = "real", ylab = "imaginary",
nCores = 2) # Increase or leave out for higher performance
dev.off()
```
### Temporary files {#tempfiles}
In order to keep the machine's RAM workload manageable, *phasePortrait* will always save data in temporary files. These files are stored in the directory specified with the argument `tempDir` (default is the current **R** session's temporary directory). After normal execution, these files will be automatically deleted, so, usually, there is no need to worry about them. Automatic deletion, however, will not happen if the user calls *phasePortrait* with the parameter `deleteTempFiles` set to FALSE or if *phasePortrait* does not terminate properly. Thus, if *phasePortrait* crashed, you should check the directory specified as `tempDir` and delete these files, because they are usually of considerable size. However, such orphans will never interfere with further runs of *phasePortrait* (see below).
The size of these temporary files depends on *phasePortrait*'s parameter `blockSizePx` (default: 2250000; it may be worth varying this value in order to obtain optimum performance on your machine). If the two-dimensional array of pixels to be plotted comprises more pixels than specified by this parameter, the array will be vertically split into blocks of that size. These sub-arrays are what is stored in the temporary files. More precisely, there are two temporary files per sub-array. One represents the cutout of the untransformed complex plane over which the function of interest is applied; the other contains the values obtained by applying the function to the first one. Thus, each array cell contains a double-precision complex number; within a pair of temporary files, array cells at the same position refer to the same pixel in the plot.
These files are `.RData` files, and their names adhere to a strict convention, see the following examples:
`0001zmat2238046385.RData`
`0001wmat2238046385.RData`
These are examples of names of a pair of temporary files belonging to the same block (sub-array). The names are equal except for one substring which can either be 'zmat' or 'wmat'. The former file contains an untransformed cutout of the complex plane, and the latter the corresponding values obtained from the function of interest as explained above.
Both names begin with '0001', indicating that the array's top line is the first line of the *whole* pixel array to be covered by the phase portrait. The name of the file that contains the subsequent array can e.g. begin with a number like '0470', indicating that its first line is line number 470 of the whole array. The number of digits for these line numbers is not fixed. It is determined by the greatest number required. Numbers with fewer digits are zero-padded. The second part of the file name is either zmat or wmat (see above). The third part of the file names is a ten-digit integer. This is a random number which all temporary files stemming from the same call of *phasePortrait* have in common. This guarantees that no temporary files will be confounded in subsequent calls of *phasePortrait*, even if undeleted temporary files from previous runs are still present.
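If a crashed run has left such orphaned files behind, they can simply be removed by hand. The following sketch assumes the default `tempDir`, i.e. the **R** session's temporary directory, and relies only on the naming convention described above:

```{r, eval=FALSE}
# Remove orphaned temporary files left behind by a crashed run of
# phasePortrait (assuming the default tempDir, i.e. the R session's
# temporary directory)
orphans <- list.files(tempdir(),
                      pattern = "^[0-9]+[zw]mat[0-9]{10}\\.RData$",
                      full.names = TRUE)
file.remove(orphans)
```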
### Parallel processing {#par_proc}
For enhanced performance, *phasePortrait* (and in the same way *phasePortraitBw*) uses parallel processing as provided via the **R** packages `doParallel` [@micwest_doparallel_2019] and `foreach` [@micwest_foreach_2020]. The number of processor cores to be used can be set with the parameter `nCores` when calling *phasePortrait*. By default, one less than all available cores will be utilized. Clearly, setting `nCores = 1` will result in sequential processing. When *phasePortrait* is called with `nCores > 1`, and no parallel backend is registered, one will be registered first. The same applies when a parallel backend is already registered, but the user desires a different number of cores. This registering may take some time. Therefore, when it terminates, *phasePortrait* does not automatically de-register the parallel backend (and register a sequential backend again). This saves registering time in subsequent runs of *phasePortrait* with the same number of cores to be used. This default behavior - keeping parallel backends registered after termination - can be changed by setting the parameter `autoDereg` to TRUE (default is FALSE). Otherwise, *phasePortrait*, after completing the plot, prints a message that the current parallel backend can be manually de-registered with the command `foreach::registerDoSEQ()`. We recommend doing this after the last call to *phasePortrait* in an **R** session.
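A typical session could thus be sketched as follows (the functions portrayed here are arbitrary examples):

```{r, eval=FALSE}
# Several plots in a row re-use the same parallel backend ...
phasePortrait("tan(z)", xlim = c(-2, 2), ylim = c(-2, 2), nCores = 2)
phasePortrait("tan(z^2)", xlim = c(-2, 2), ylim = c(-2, 2), nCores = 2)
# ... and for the last plot of the session, autoDereg switches back to
# sequential processing
phasePortrait("tan(1/z)", xlim = c(-2, 2), ylim = c(-2, 2), nCores = 2,
              autoDereg = TRUE)
# Alternatively, de-register the parallel backend manually:
# foreach::registerDoSEQ()
```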
There are three occasions when *phasePortrait* utilizes parallel processing: First, after determining the size and number range (in the complex plane) of the whole two-dimensional array of pixels to be plotted, the sub-arrays (blocks) corresponding to the parameter value `blockSizePx` are constructed and saved as temporary files in a parallel loop. These are the temporary files with the string `zmat` in their names (see section [Temporary files](#tempfiles)).
Second, while the single blocks are loaded and processed sequentially, each block itself is evaluated in parallel. In order to do so, the block is split into a few approximately equally sized parts; the number of these parts corresponds to the number of processor cores to be used. In each parallel process the function to be plotted is applied to each single cell of the corresponding block part (internally vectorized with `vapply`). The outcomes of all parallel processes are combined into one array, which is saved as a temporary file with the string `wmat` in its name.
Third, for transforming the function values stored in the `wmat` files into [hsv colors](https://en.wikipedia.org/wiki/HSL_and_HSV), a similar concept as in the second step is utilized: The single `wmat` files are processed sequentially, but the array stored in each file is split into chunks which are dealt with in parallel. Eventually, transforming all `wmat` arrays results in one large array of color values which can then be plotted. Handling this large array is the most memory-intensive task when running *phasePortrait*, and it can take considerable time for large plots in high quality. So far, however, no alternative solution has provided fully satisfying results.
As mentioned in the section about [temporary files](#tempfiles), users can possibly optimize performance by trying different values for the parameter `blockSizePx`. We mention this here as well, as `blockSizePx` influences not only the size and number of the temporary files, but also the size of the array chunks that are processed in parallel.
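If you want to experiment with `blockSizePx` on your machine, a rough timing comparison as sketched below may be a useful starting point (the test function and the block sizes are arbitrary choices):

```{r, eval=FALSE}
# Rough timing comparison for different block sizes; the best choice
# depends on your hardware
for(blockSize in c(562500, 2250000, 9000000)) {
  print(system.time(
    phasePortrait("(z^2 - 1i)/(sin(z) - 2)", xlim = c(-5, 5), ylim = c(-5, 5),
                  blockSizePx = blockSize, verbose = FALSE,
                  nCores = 2) # Increase or leave out for higher performance
  ))
}
```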
Obviously, applying the function of interest to millions of values is time-critical. Therefore, when defining a function for a phase portrait in **R**, use all options at hand for vectorizing calculations. Moreover, you can count on a significant performance gain when you write time-critical functions in C++. Thanks to the package *Rcpp* [@edelbuettel_rcpp_2017] this is not really a hurdle anymore. As mentioned above, however, C++ functions compiled with `Rcpp::sourceCpp` will not work with *phasePortrait*, as this is not compatible with parallel processing; you have to provide such functions in a package. The package at hand provides a few functions (function family `maths`); all of them have been implemented in C++.
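As a small, hypothetical example of what such vectorization can mean, both functions below return the same truncated power series for a single $z$, but the second one replaces the explicit **R** loop over the exponents with vectorized operations and will usually be noticeably faster when applied to millions of pixels:

```{r, eval=FALSE}
# Two equivalent formulations of a truncated power series (hypothetical
# example); the second one avoids the explicit R loop over the exponents
powerSeriesLoop <- function(z, n = 30) {
  s <- 0 + 0i
  for(k in 0:n) s <- s + z^k / factorial(k)
  return(s)
}
powerSeriesVec <- function(z, n = 30) {
  sum(z^(0:n) / factorial(0:n))
}
# Both can be passed to phasePortrait by name
phasePortrait(powerSeriesVec, xlim = c(-8, 8), ylim = c(-8, 8),
              nCores = 2) # Increase or leave out for higher performance
```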
## Acknowledgments
While this package is a leisure project, it would have been a mission impossible without the background of my daily work with R as a Forest Scientist at the Technical University of Munich (TUM). Fortunately, I have a job that allows me to learn about Nature by asking her questions (or trying to simulate what she is doing) with ever-improving methods and tools. I would like to thank everyone at the Chair of Forest Growth and Yield Science at TUM who keep me involved in discussions like: *How can this be solved in R ...*
```{r, eval=FALSE}
switch(1 + trunc(runif(1, 0, 6)),
"... at all?",
"... in a quick-and-dirty way?",
"... in Hadley-Wickham-style?",
"... without a loop?",
"... without nested loops?",
"... in a way somebody can understand?")
```
**Veronika Biber** provided expert advice for improving the vignette. **Johannes Biber** turned out the most patient pre-release tester one can imagine, boosting things with his high-end gaming machine. Thanks, guys! Also thanks to **Gregor Seyer** for his helpful review of the CRAN submission.
Clearly, programming in R would not be what it is, weren't there some R titans who generously share their knowledge online. While I keep learning from all of them, I would like to thank especially **Hadley Wickham** and **Dirk Eddelbüttel**.
## References
```{r, include = FALSE}
foreach::registerDoSEQ()
```
---
title: "Phase Portraits of Complex Functions with the R Package *viscomplexr*"
author: "Peter Biber"
output: rmarkdown::html_vignette
header-includes: \usepackage{amsmath}
bibliography: REFERENCES.bib
vignette: |
%\VignetteIndexEntry{viscomplexr-vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
# This option anti-aliases the plots made below under Windows
if(Sys.info()[["sysname"]] == "Windows") {
knitr::opts_chunk$set(dev = "CairoPNG")
}
options(rmarkdown.html_vignette.check_title = FALSE)
```
```{r setup, echo = FALSE}
library(viscomplexr)
```
## Introduction
The **R** package *viscomplexr* has been written as a visualization tool for complex functions. More precisely, it provides functionality for making *phase portraits* of such functions. The method, sometimes called *domain coloring*, exists in [many sub-varieties](https://en.wikipedia.org/wiki/Domain_coloring). However, from the author's point of view, the style proposed by E. Wegert in his book *Visual Complex Functions* [@wegert_visualcpx_2012] comes with a particular clarity and a special aesthetic appeal at the same time. Therefore, this package closely follows Wegert's conventions. Conceptually, the package is intended for being used inside the framework of **R**'s base graphics, i.e. users of this package can freely utilize all features of base graphics for obtaining an optimum result, be it for scientific or artistic purposes. This vignette is not at all an introduction to function theory or an exhaustive treatment of what can be done with phase portraits - I recommend Wegert's book for an ideal combination of both; the purpose of this vignette is in fact to make the reader acquainted with the technical features the package provides in a step-by-step process.
Due to the size restriction of CRAN packages, the number of illustrations in this vignette is kept to a minimum. Readers are encouraged to run all code examples shown below (and hopefully enjoy what they see), but especially those where we explicitly invite them to do so. Alternatively, visit the [package's website](https://peterbiber.github.io/viscomplexr/) for a [richly illustrated version of this vignette](https://peterbiber.github.io/viscomplexr/articles/viscomplexr-vignette_for_website.html).
## Using the function *phasePortrait*
### Visualizing the complex plane
The package does not contain many functions, but provides a very versatile workhorse called *phasePortrait*. We will explore some of its key features now. Let us first consider a function that maps a complex number $z \in \mathbb{C}$ on itself, i.e. $f(z)=z$. After attaching the package with `library(viscomplexr)`, a phase portrait of this function is obtained very easily with:
```{r, figure_1, fig.width = 5, fig.height = 5, results = 'hide', fig.align='center', cache = FALSE, fig.show = 'hold', fig.cap = 'Phase portrait of the function $f(z)=z$ in the window $\\left|\\Re(z)\\right| < 8.5$ and $\\left|\\Im(z)\\right| < 8.5$.'}
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
xlab = "real", ylab = "imaginary", main = "f(z) = z",
nCores = 2) # Probably not required on your machine (see below)
# Note the argument 'nCores' which determines the number of parallel processes to
# be used. Setting nCores = 2 has been done here and in all subsequent
# examples as CRAN checks do not allow more parallel processes.
# For normal work, we recommend not to define nCores at all which will make
# phasePortrait use all available cores on your machine.
# The progress messages phasePortrait is writing to the console can be
# suppressed by including 'verbose = FALSE' in the call (see documentation).
```
Such a phase portrait is based on the polar representation of complex numbers. Any complex number $z$ can be written as $z=r\cdot\mathrm{e}^{\mathrm{i}\varphi}$ or equivalently $z=r\cdot(\cos\varphi+\mathrm{i}\cdot\sin\varphi)$, whereby $r$ is the *modulus* and the angle $\varphi$ is the *argument*. The argument, also called the *phase angle*, is the angle in the origin of the complex number plane between the real axis and the position vector of the number in counter-clockwise orientation. The main feature of a phase portrait is to translate the argument into a color. In addition, there are options for visualizing the modulus or, more precisely, its relative change.
The translation of the phase angle $\varphi$ into a color follows the [hsv color model](https://en.wikipedia.org/wiki/HSL_and_HSV), where radian values of $\varphi=0+k\cdot2\pi$, $\varphi=\frac{2\pi}{3}+k\cdot2\pi$, and $\varphi=\frac{4\pi}{3}+k\cdot2\pi$ with $k\in\mathbb{Z}$ translate into the colors red, green, and blue, respectively, with a continuous transition of colors with values between. As all numbers with the same argument $\varphi$ obtain the same color, the numbers of the complex plane as visualized in the Figure above are colored along the chromatic cycle. In order to add visual structure, argument values of $\varphi=\frac{2\pi}{9}$, i.e. $40°$ and their integer multiples are emphasized by black lines. Note that each of these lines follows exactly one color. Moreover, the zones between two neighboring arguments $\varphi_1=k\cdot\frac{2\pi}{9}$ and $\varphi_2=(k+1)\cdot\frac{2\pi}{9}$ with $k\in\mathbb{Z}$ are shaded in a way that the brightness of the colors inside one such zone increases with increasing $\varphi$, i.e. in counterclockwise sense of rotation.
The other lines visible in the figure above relate to the modulus $r$. One such line follows the same value of $r$; it is thus obvious that each of these iso-modulus lines must form a concentric circle on the complex number plane (see the figure above). The distance between neighboring iso-modulus lines is chosen so that it always indicates the same relative change. For reasons to talk about later [see also @wegert_visualcpx_2012], the default setting of the function *phasePortrait* is a relative change of $b=\mathrm{e}^{2\pi/9}$ which is very close to $2$. Thus, with a grain of salt, the modulus of the complex numbers doubles or halves when moving from one iso-modulus line to the other. In the phase portrait, the zones between two adjacent iso-modulus lines are shaded in a way that the colors inside such a zone become brighter in the direction of increasing modulus. The lines themselves are located at the moduli $r=b^k$, with $k\in\mathbb{Z}$. This is nicely visible in the phase portrait above, where the outmost circular iso-modulus line indicates (approximately, as $b$ is not exactly $2$) $r=2^3=8$. Moving inwards, the following iso-modulus lines are at (approximately) $r=2^2=4$, $r=2^1=2$, $r=2^0=1$, $r=2^{-1}=\frac{1}{2}$, $r=2^{-2}=\frac{1}{4}$, etc. Obviously, as the modulus of the numbers on the complex plane is their distance from the origin, the width of the concentric rings formed by adjacent iso-modulus lines approximately doubles from ring to ring when moving outwards.
### Visual structuring - the argument *pType*
When working with the function *phasePortrait*, it might not always be desirable to display all of these reference lines and zonings. The argument `pType` allows for four different options as illustrated in the next example:
```{r figure_2, fig.width=5, fig.height=5, results="hide", fig.align='center', fig.show='hold', cache=TRUE, fig.cap= "Different options for including reference lines with the argument `pType`."}
# divide graphics device into four regions and adjust plot margins
op <- par(mfrow = c(2, 2),
mar = c(0.25, 0.55, 1.10, 0.25))
# plot four phase portraits with different choices of pType
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "p",
main = "pType = 'p'", axes = FALSE, nCores = 2)
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "pa",
main = "pType = 'pa'", axes = FALSE, nCores = 2)
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "pm",
main = "pType = 'pm'", axes = FALSE, nCores = 2)
phasePortrait("z", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "pma",
main = "pType = 'pma'", axes = FALSE, nCores = 2)
par(op) # reset the graphics parameters to their previous values
```
As evident from the figure above, setting `ptype` to 'p' displays a phase portrait in the literal sense, i.e. only the phase of the complex numbers is displayed and nothing else. The option 'pa' adds reference lines for the argument, the option 'pm' adds iso-modulus lines, and the (default) option 'pma' adds both. In addition to these options, the example shows *phasePortrait* in combination with **R**'s base graphics. The first and the last line of the code chunk set and reset global graphics parameters, and inside the calls to *phasePortrait*, we use the arguments `main` (diagram title) and `axes` which are generic plot arguments.
### Visual structuring - the arguments *pi2Div* and *logBase*
For demonstrating options to adjust the density of the argument and modulus reference lines, consider the rational function
$$
f(z)=\frac{(3+2\mathrm{i}+z)(-5+5\mathrm{i}+z)}{(-2-2\mathrm{i}+z)^2}
$$
Evidently, this function has two zeroes, $z_1=-3-2\mathrm{i}$, and $z_2=5-5\mathrm{i}$. It also has a second order pole at $z_3=2+2\mathrm{i}$. We make a phase portrait of this function over the same cutout of the complex plane as we did in the figures above. When calling *phasePortrait* with such simple functions, it is most convenient to define them as as a quoted character string in **R** syntax containing the variable $z$. Run the code below for displaying the phase portrait (active 7" x 7" screen graphics device suggested, e.g. `x11()`).
```{r eval=FALSE, figure_3, fig.width=5, fig.height=5, results='hide', fig.align='center', cache=TRUE, fig.show='hold', fig.cap='Phase portrait of the function $f(z)=\\frac{(3+2\\mathrm{i}+z)(-5+5\\mathrm{i}+z)}{(-2-2\\mathrm{i}+z)^2}$ in the window $\\left|\\Re(z)\\right| < 8.5$ and $\\left|\\Im(z)\\right| < 8.5$.'}
op <- par(mar = c(5.1, 4.1, 2.1, 2.1), cex = 0.8) # adjust plot margins
# and general text size
phasePortrait("(3+2i+z)*(-5+5i+z)/(-2-2i+z)^2",
xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
xlab = "real", ylab = "imaginary",
nCores = 2) # Increase or leave out for higher performance
par(op) # reset the graphics parameters to their previous values
```
The resulting figure nicely displays the function's two zeroes and the pole. Note that all colors meet in zeroes and poles. Around zeroes, the colors cycle counterclockwise in the order red, green, blue, while this order is reversed around poles. For $n$<sup>th</sup> order ($n\in\mathbb{N}$) zeroes and poles, the cycle is passed through $n$ times. I recommend to check this out with examples of your own.
Now, suppose we want to change the density of the reference lines for the phase angle $\varphi$. This can be done by way of the argument `pi2Div`. For usual applications, `pi2Div` should be a natural number $n\:(n\in\mathbb{N})$. It defines the angle $\Delta\varphi$ between two adjacent reference lines as a fraction of the round angle, i.e. $\Delta\varphi=\frac{2\pi}{n}$. The default value of `pi2Div` is 9, i.e. $\Delta\varphi=\frac{2\pi}{9}=40°$. Let's plot our function in three flavors of `pi2Div`, namely, 6, 9 (the default), and 18, resulting in $\Delta\varphi$ values of $\frac{\pi}{3}=60°$, $\frac{2\pi}{9}=40°$, and $\frac{\pi}{9}=20°$. In order to suppress the iso-modulus lines and display the argument reference lines only, we are using `pType = "pa"`. Visualize this by running the code below (active 7" x 2.8" screen graphics device suggested, e.g. `x11(width = 7, height = 2.8)`).
```{r eval = FALSE, figure_4, fig.width=7, fig.height=2.8, results='hide', fig.align='center', , fig.show='hold', cache=TRUE, fig.cap='The function $f(z)=\\frac{(3+2\\mathrm{i}+z)(-5+5\\mathrm{i}+z)}{(-2-2\\mathrm{i}+z)^2}$ portrayed with three different settings of `pi2Div` and `pType = "pa"`.'}
# divide graphics device into three regions and adjust plot margins
op <- par(mfrow = c(1, 3), mar = c(0.2, 0.2, 0.4, 0.2))
for(n in c(6, 9, 18)) {
phasePortrait("(3+2i+z)*(-5+5i+z)/(-2-2i+z)^2", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
pi2Div = n, pType = "pa", axes = FALSE, nCores = 2)
# separate title call (R base graphics) for nicer line adjustment, just cosmetics
title(paste("pi2Div =", n), line = -1.2)
}
par(op) # reset graphics parameters to previous values
```
So far, this is exactly what was to be expected. But see what happens when we choose the default `pType`, `"pma"`, which also adds modulus reference lines:
```{r figure_5, fig.width=7, fig.height=2.8, results='hide', fig.align='center', fig.show='hold', cache=TRUE, fig.cap='The function $f(z)=\\frac{(3+2\\mathrm{i}+z)(-5+5\\mathrm{i}+z)}{(-2-2\\mathrm{i}+z)^2}$ portrayed with three different settings of `pi2Div` and `pType = "pma"`.'}
# divide graphics device into three regions and adjust plot margins
op <- par(mfrow = c(1, 3), mar = c(0.2, 0.2, 0.4, 0.2))
for(n in c(6, 9, 18)) {
phasePortrait("(3+2i+z)*(-5+5i+z)/(-2-2i+z)^2", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
pi2Div = n, pType = "pma", axes = FALSE, nCores = 2)
# separate title call (R base graphics) for nicer line adjustment, just cosmetics
title(paste("pi2Div =", n), line = -1.2)
}
par(op) # reset graphics parameters to previous values
```
Evidently, the choice of `pi2Div` has influenced the density of the iso-modulus lines. This is because, by default, the parameter `logBase`, which controls how dense the iso-modulus lines are arranged, is linked to `pi2Div`. As stated above, `pi2Div` is usually a natural number $n\:(n \in\mathbb{N})$, and `logBase` is the real number $b\:(b\in\mathbb{R})$ which defines the moduli $r=b^k\:(k\in\mathbb{Z})$ where the reference lines are drawn. When $n$ is given, the default definition of $b$ is $b=\mathrm{e}^{2\pi/n}$. In the default case, $n=9$, this results in $b\approx2.009994$. Thus, by default, moving from one iso-modulus line to the adjacent one means almost exactly doubling or halving the modulus, depending on the direction. For the other two cases $n=6$ and $n=18$, the resulting values for $b$ are $b\approx2.85$ and $b\approx1.42$, the latter obviously being the square root of $\mathrm{e}^{2\pi/9}$. For $n=18$, the modulus therefore (approximately) doubles or halves only when traversing two adjacent iso-modulus zones.
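The default relation between both parameters can be checked directly at the console; the short snippet below (plain base **R**, independent of the package) reproduces the three values of $b$ just mentioned.

```{r, eval=FALSE}
# Default logBase values b = exp(2*pi/n) for the pi2Div settings used above
n <- c(6, 9, 18)
exp(2 * pi / n) # approximately 2.85, 2.01, and 1.42
```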
Before we demonstrate the special property of this linkage between $n$ and $b$, i.e. between `pi2Div` and `logBase`, we briefly show that they can be decoupled in *phasePortrait* without any complication. In the following example, we want to define the density of the iso-modulus lines in a way that the modulus triples when traversing three zones in the direction of ascending moduli. Clearly, this requires defining `logBase` as $b=\sqrt[3]{3}\approx1.44$. Thus, when moving from one iso-modulus line to the next higher one, the modulus has increased by a factor of about $1.4$. One line further, it has about doubled (${\sqrt[3]{3}}^{2}\approx2.08$), and another line further it has exactly tripled. While varying `pi2Div` exactly as in the previous example, we now keep `logBase` constant at $\sqrt[3]{3}$. Run the code below for visualizing this (active 7" x 2.8" screen graphics device suggested, e.g. `x11(width = 7, height = 2.8)`).
```{r eval=FALSE, figure_6, fig.width=7, fig.height=2.8, results='hide', fig.align='center', fig.show='hold', cache=TRUE, fig.cap='The function $f(z)=\\frac{(3+2\\mathrm{i}+z)(-5+5\\mathrm{i}+z)}{(-2-2\\mathrm{i}+z)^2}$ portrayed with decoupled settings of `pi2Div` and `logBase`.'}
# divide graphics device into three regions and adjust plot margins
op <- par(mfrow = c(1, 3), mar = c(0.2, 0.2, 0.4, 0.2))
for(n in c(6, 9, 18)) {
phasePortrait("(3+2i+z)*(-5+5i+z)/(-2-2i+z)^2", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
                pi2Div = n, logBase = 3^(1/3), pType = "pma", axes = FALSE, nCores = 2)
# separate title call (R base graphics) for nicer line adjustment, just cosmetics
title(paste("pi2Div = ", n, ", logBase = 3^(1/3)", sep = ""), line = -1.2)
}
par(op) # reset graphics parameters to previous values
```
In order to understand why by default the parameters `pi2Div` and `logBase` are linked as described above, we consider the exponential function $f(z)=\mathrm{e}^z$. We can write $z=r\cdot(\cos\varphi+\mathrm{i}\cdot\sin\varphi)$ and thus $f(z)=\mathrm{e}^{r\cdot(\cos\varphi+\mathrm{i}\cdot\sin\varphi)}$ or $w=f(z)=\mathrm{e}^{r\cdot\cos\varphi}\cdot\mathrm{e}^{\mathrm{i}\cdot r\cdot\sin\varphi}$. The modulus of $w$ is $\mathrm{e}^{r\cdot\cos\varphi}$ and its argument is $r\cdot\sin\varphi$ with $\Re(z)=r\cdot\cos\varphi$ and $\Im(z)=r\cdot \sin\varphi$. So, the modulus and the argument of $w=\mathrm{e}^z$ depend solely on the real and the imaginary part of $z$, respectively. This can be easily verified with a phase portrait of $f(z)=\mathrm{e}^z$. Run the code below for displaying the phase portrait (active 7" x 7" screen graphics device suggested, e.g. `x11()`). Note that in the call to *phasePortrait* we hand over the `exp` function directly as an object. Alternatively, the quoted strings `"exp(z)"` or `"exp"` can be used as well (see section [ways to provide functions to *phasePortrait*](#ways_functions) below).
```{r eval=FALSE, figure_7, fig.width = 5, fig.height = 5, results = 'hide', fig.align='center', fig.show='hold', cache = TRUE, fig.cap = 'Phase portrait of the function $f(z)=\\mathrm{e}^z$ in the window $\\left|\\Re(z)\\right| < 8.5$ and $\\left|\\Im(z)\\right| < 8.5$ with iso-modulus lines.'}
op <- par(mar = c(5.1, 4.1, 2.1, 2.1), cex = 0.8) # adjust plot margins
# and general text size
phasePortrait(exp, xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5), pType = "pm",
xlab = "real", ylab = "imaginary", nCores = 2)
par(op) # reset graphics parameters to previous values
```
If we now define the argument `pi2Div` as a number $n\:(n\in\mathbb{N})$ and use it for determining the angular difference $\Delta\varphi=\frac{2\pi}{n}$ between two subsequent phase angle reference lines, our default link between `pi2Div` and `logBase` (which is the ratio $b$ of the moduli at two subsequent iso-modulus lines) establishes $b=\mathrm{e}^{\Delta\varphi}$. This means that adding $\Delta\varphi$ to the argument of any $w=\mathrm{e}^z\:(z\in\mathbb{C})$ and increasing its modulus by the factor $\mathrm{e}^{\Delta\varphi}$ correspond to reference line steps of exactly the same width in a plot of $f(z)=\mathrm{e}^z$. You can visualize this with the following **R** code (active 7" x 2.8" screen graphics device suggested, e.g. `x11(width = 7, height = 2.8)`):
```{r eval=FALSE, figure_8, fig.width=7, fig.height=2.8, results='hide', fig.align='center', fig.show='hold', cache=TRUE, fig.cap='The function $f(z)=\\mathrm{e}^z$ portrayed with the default coupling of `pi2Div` and `logBase` as implemented in *phasePortrait*.'}
# divide graphics device into three regions and adjust plot margins
op <- par(mfrow = c(1, 3), mar = c(0.2, 0.2, 0.4, 0.2))
for(n in c(6, 9, 18)) {
phasePortrait("exp(z)", xlim = c(-8.5, 8.5), ylim = c(-8.5, 8.5),
pi2Div = n, pType = "pma", axes = FALSE, nCores = 2)
# separate title call (R base graphics) for nicer line adjustment, just cosmetics
title(paste("pi2Div = ", n, ", logBase = exp(2*pi/pi2Div)", sep = ""),
line = -1.2, cex.main = 0.9)
}
par(op) # reset graphics parameters to previous values
```
As expected, the default coupling of both arguments produces square patterns when applied to a phase portrait of the exponential function, which can, insofar, serve as a visual reference. Recall that equidistant modulus reference lines (in ascending order) indicate an exponentially growing modulus. In the middle phase portrait, one such step means (approximately) doubling the modulus. From left to right, the plot covers 24 of these steps, indicating a total increase of the modulus by a factor of $2^{24}$, which amounts to almost 17 million.
### Fine tuning shading and contrast
For optimizing visualization in a technical sense, as well as for aesthetic purposes, it may be useful to adjust shading and contrast of the argument and modulus reference zones mentioned above. This is done by modifying the parameters `darkestShade` ($s$) and `lambda` ($\lambda$) when calling `phasePortrait`. These two parameters can be used to steer the transition from the lower to the upper edge of a reference zone. They address the v-value of the [hsv color model](https://en.wikipedia.org/wiki/HSL_and_HSV), which can take values between 0 and 1, indicating maximum darkness (black), and no shading at all, respectively. Hereby, $s$ gives the v-value at the lower edge of a reference zone, and $\lambda$ determines the interpolation from there to the upper edge, where no shading is applied. The intended use is $\lambda > 0$, where small values sharpen the contrast between shaded and non-shaded zones, and large values soften it. More precisely, the shading value $v$ is calculated as:
$$
v = s + (1-s)\cdot x^{1/\lambda}
$$
For modulus zone shading at a point $z$ in the complex plane when portraying a function $f(z)$, $x$ is the fractional part of $\log_b{\left|f(z)\right|}$, with the base $b$ being the parameter `logBase` that defines the modulus reference zoning (see above). For shading argument reference zones, $x$ is the difference between the argument of $f(z)$ and the lower edge angle of the argument reference zone it falls into, linearly mapped to the range $[0, 1[$. The following code generates a $3\times3$ matrix of phase portraits of $f(z)=\tan{z^2}$ with $\lambda$ and $s$ changing along the rows and columns, respectively. Run the code for visualizing these concepts (active 7" x 7" screen graphics device suggested, e.g. `x11()`).
```{r eval=FALSE, figure_9, fig.width=5, fig.height=5, fig.show='hold', results='hide', cache=TRUE, fig.cap='Tuning reference zone contrast with the parameters `darkestShade` (column-wise, 0, 0.2, 0.4), and `lambda` (row-wise, 0.1, 1, 10).'}
op <- par(mfrow = c(3, 3), mar = c(0.2, 0.2, 0.2, 0.2))
for(lb in c(0.1, 1, 10)) {
for(dS in c(0, 0.2, 0.4)) {
phasePortrait("tan(z^2)", xlim = c(-1.7, 1.7), ylim = c(-1.7, 1.7),
pType = "pm", darkestShade = dS, lambda = lb,
axes = FALSE, xaxs = "i", yaxs = "i", nCores = 2)
}
}
par(op)
```
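For a more abstract view of how `lambda` and `darkestShade` shape this transition, the shading formula above can also be plotted directly as a function of $x$. The short base-**R** sketch below (not part of the package, just an illustration of the formula) does this for the parameter values used in the figure.

```{r, eval=FALSE}
# Shading value v as a function of the relative position x within a
# reference zone, for the darkestShade and lambda values used above
v <- function(x, s, lambda) s + (1 - s) * x^(1/lambda)
x <- seq(0, 1, length.out = 200)
plot(x, v(x, s = 0.2, lambda = 1), type = "l", ylim = c(0, 1),
     xlab = "x", ylab = "shading value v")
lines(x, v(x, s = 0.2, lambda = 0.1), lty = "dashed") # sharp contrast
lines(x, v(x, s = 0.2, lambda = 10), lty = "dotted")  # soft contrast
```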
Additional possibilities exist for tuning the interplay of modulus and argument reference zones when they are used in combination; this can be controlled with the parameter `gamma` when calling `phasePortrait`. The maximum brightness of the colors in a phase portrait is adjustable with the parameter `stdSaturation` (see the documentation of `phasePortrait`; we will also get back to these points in the chapter [aesthetic hints](#hints_artistic) below).
### Be aware of branch cuts
When exploring functions with *phasePortrait*, discontinuities of certain functions can become visible as abrupt color changes. Typical examples are integer root functions which, for a given point $z, z\in\mathbb{C}\setminus\lbrace0\rbrace$ in the complex plane, can attain $n$ values with $n$ being the root's degree. It takes, so to speak, $n$ full cycles around the origin of the complex plane in order to cover all values obtained from a function $f(z)=z^{1/n}, n\in\mathbb{N}$. The code below creates an illustration comprising three phase portraits with branch cuts (dashed lines), illustrating the three values of $f(z)=z^{1/3}$, $z\in\mathbb{C}\setminus\lbrace0\rbrace$. The transitions between the phase portraits are indicated by same-coloured arrows pointing at the branch cuts. For running the code, an open 7" x 2.7" graphics device is suggested, e.g. `x11(width = 7, height = 2.7)`.
```{r eval=FALSE, figure_10, fig.width=7, fig.height=2.7, results='hide', fig.align='center', fig.show='hold', cache=TRUE, fig.cap='Three phase portraits with branch cuts (dashed line), illustrating the three values of $f(z)=z^{1/3}$, $z \\in \\mathbb{C} \\setminus \\lbrace 0 \\rbrace$. The transitions between the phase portraits are indicated by same-coloured arrows pointing at the branch cuts.'}
op <- par(mfrow = c(1, 3), mar = c(0.4, 0.2, 0.2, 0.2))
for(k in 0:2) {
FUNstring <- paste0("z^(1/3) * exp(1i * 2*pi/3 * ", k, ")")
phasePortrait(FUN = FUNstring,
xlim = c(-1.5, 1.5), ylim = c(-1.5, 1.5), pi2Div = 12,
axes = FALSE, nCores = 2)
title(sub = paste0("k = ", k), line = -1)
# emphasize branch cut with a dashed line segment
segments(-1.5, 0, 0, 0, lwd = 2, lty = "dashed")
# draw colored arrows
upperCol <- switch(as.character(k),
"0" = "black", "1" = "red", "2" = "green")
lowerCol <- switch(as.character(k),
"0" = "green", "1" = "black", "2" = "red")
arrows(x0 = c(-1.2), y0 = c(1, -1), y1 = c(0.2, -0.2),
lwd = 2, length = 0.1, col = c(upperCol, lowerCol))
}
par(op)
```
After you have run the code, have a look at the leftmost diagram first. Note that the argument reference lines have been adjusted to represent angle distances of $30°$, i.e. `pi2Div` = 12. Most noticeable is the abrupt color change from yellow to magenta along the negative real axis (emphasized with a dashed line). This is what is called a *branch cut*, and it suggests that our picture of the function $f(z)=z^{1/3}$ is not complete. As the three third roots of any complex number $z=r\cdot\mathrm{e}^{\mathrm{i}\varphi}, z\in\mathbb{C}\setminus\lbrace0\rbrace$ are $r^{1/3}\cdot\mathrm{e}^{\mathrm{i}\cdot(\varphi+k\cdot2\pi)/3}; k=0,1,2; \varphi\in\left[0,2\pi\right[$, we require three different phase portraits, one for each $k$, as shown in the figure above. With the argument reference line distance being $30°$, it is easy to see that each phase portrait covers a total argument range of $120°$, i.e. $2\pi/3$.
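That each of the three branches really yields a cube root can be verified numerically. The following snippet (plain base **R**, with an arbitrarily chosen test point) cubes all three branch values and recovers $z$ each time.

```{r, eval=FALSE}
# Verify the three cube roots of an arbitrary complex number z
z <- 2 * exp(1i * 0.8) # arbitrary test point
k <- 0:2
roots <- Mod(z)^(1/3) * exp(1i * (Arg(z) + 2 * pi * k) / 3)
roots^3 # all three values equal z (up to rounding)
```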
Obviously, each of the three portraits has a branch cut along the negative real axis, and the colors at the branch cuts show where the transitions between the phase portraits have to happen. In the figure, we have illustrated this by arrows pointing to the branch cuts. Same-colored arrows in different phase portraits indicate the transitions. Thus, the first phase portrait ($k = 0$) links to the second ($k = 1$) in their yellow zones (black arrows); the second links to the third ($k = 2$) in their blue zones (red arrows), and the third links back to the first in their magenta zones (green arrows). Actually, one could imagine stacking the three phase portraits in ascending order, cutting them at the dashed line, and gluing the branch cuts together according to the correct transitions. The resulting object is a Riemann surface with each phase portrait being a 'sheet'. See more about this fascinating concept in @wegert_visualcpx_2012, Chapter 7.
While the function $f(z)=z^{1/3}$ could be fully covered with three phase portraits, $f(z)=\log z$ has an infinite number of branches. As the (natural) logarithm of any complex number $z=r\cdot\mathrm{e}^{i\cdot\varphi}, r>0$ is $\log z=\log r+\mathrm{i}\cdot\varphi$, it is evident that the imaginary part of $\log z$ increases linearly with the argument of $z$, $\varphi$. In terms of phase portraits, this means an infinite number of stacked 'sheets' in either direction, clockwise and counterclockwise. Neighboring sheets connect at a branch cut. Run the code below to illustrate this with a phase portrait of $\log z=\log r+\mathrm{i}\cdot(\varphi+k\cdot2\pi), r > 0, \varphi\in\left[0,2\pi\right[$ for $k=-1, 0, 1$ (active 7" x 2.7" screen graphics device suggested, e.g. `x11(width = 7, height = 2.7)`). In the resulting illustration, the branch cuts are marked with dashed white lines.
```{r eval=FALSE, figure_11, fig.width=7, fig.height=2.7, fig.align='center', results='hide', fig.show='hold', cache=TRUE, fig.cap='Three branches of $\\log z=\\log r+\\mathrm{i}\\cdot(\\varphi + k\\cdot2\\pi), r>0, \\varphi\\in\\left[0,2\\pi\\right[$, with $k=-1,0,1$. The branch cuts are marked with dashed white lines.'}
op <- par(mfrow = c(1, 3), mar = c(0.4, 0.2, 0.2, 0.2))
for(k in -1:1) {
FUNstring <- paste0("log(Mod(z)) + 1i * (Arg(z) + 2 * pi * ", k, ")")
phasePortrait(FUN = FUNstring, pi2Div = 36,
xlim = c(-2, 2), ylim = c(-2, 2), axes = FALSE, nCores = 2)
segments(-2, 0, 0, 0, col = "white", lwd = 1, lty = "dashed")
title(sub = paste0("k = ", k), line = -1)
}
par(op)
```
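Again, a quick numerical check (base **R** only) confirms that each branch value is a valid logarithm: exponentiating it returns the original number $z$, whatever the value of $k$.

```{r, eval=FALSE}
# Exponentiating any branch of log z recovers z
z <- -1 + 2i
for(k in -1:1) print(exp(log(Mod(z)) + 1i * (Arg(z) + 2 * pi * k)))
```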
### Riemann sphere plots
A convenient way to visualize the whole complex number plane is based on a stereographic projection suggested by Bernhard Riemann (see @wegert_visualcpx_2012, p. 20 ff. and p. 39 ff.). The *Riemann Sphere* is a sphere with radius 1, centered around the origin of the complex plane. It is cut into an upper (northern) and lower (southern) half by the complex plane. By connecting any point on the complex plane to the north pole with a straight line, the line's intersection with the sphere's surface marks the location on the sphere where the point is projected onto. Thus, all points inside the unit disk on the complex plane are projected onto the southern hemisphere, the origin being represented by the south pole. In contrast, all points outside the unit disk are projected onto the northern hemisphere, the north pole representing the *point at infinity*. For visualizing both hemispheres as 2D phase portraits, they have to be projected onto a flat surface in turn.
If we perform a stereographic projection of the southern hemisphere from the north pole to the complex plane (and look at the plane's upper - the northern - side), this obviously results in a phase portrait on the untransformed complex plane as were all examples shown so far in this text. We can perform an analogue procedure for the northern hemisphere, projecting it from the south pole to the complex plane. We now want to think of the northern hemisphere projection as layered on top of the southern hemisphere projection, for the northern hemisphere, which it depicts, is naturally also on top of the southern hemisphere. If, in a 'normal' visualization of the complex plane (orthogonal real and imaginary axes), a point at any location represents a complex number $z$, a point at the same location in the northern hemisphere projection is mapped into $1/z$. The origin is mapped into the point at infinity. Technically, this mapping can be easily achieved when calling the function `phasePortrait` by setting the flag `invertFlip = TRUE` (default is `FALSE`). The resulting map is, in addition, rotated counter-clockwise around the point at infinity by an angle of $\pi$. As @wegert_visualcpx_2012 argues, this way of mapping has a convenient visual effect: Consider two phase portraits of the same function, one made with `invertFlip = FALSE` and the other one with `invertFlip = TRUE`. Both are shown side by side (see the pairs of phase portraits in the next two figures below). This can be imagined as a view into a Riemann sphere that has been cut open along the equator and swung open along a hinge in the line $\Re(z)=1$ (if the southern hemisphere is at the left side) or $\Re(z)=-1$ (if the northern hemisphere is at the left side). In order to highlight the Riemann sphere in Phase Portraits if desired, we provide the function `riemannMask`. Let's first demonstrate this for the function $f(z)=z$.
```{r figure_12, fig.width=7, fig.height=3.5, fig.align='center', results='hide', fig.show='hold', cache=TRUE, fig.cap='Mapping the complex number plane on the Riemann sphere. Left: lower (southern) hemisphere; right: upper (northern) hemisphere. Folding both figures face to face along a vertical line in the middle between them can be imagined as closing the Riemann sphere.'}
op <- par(mfrow = c(1, 2), mar = rep(0.1, 4))
# Southern hemisphere
phasePortrait("z", xlim = c(-1.4, 1.4), ylim = c(-1.4, 1.4),
pi2Div = 12, axes = FALSE, nCores = 2)
riemannMask(annotSouth = TRUE)
# Northern hemisphere
phasePortrait("z", xlim = c(-1.4, 1.4), ylim = c(-1.4, 1.4),
pi2Div = 12, axes = FALSE, invertFlip = TRUE, nCores = 2)
riemannMask(annotNorth = TRUE)
par(op)
```
The function `riemannMask` provides several options, among others adjusting the mask's transparency or adding annotations to landmark points (see the function's documentation). In the next example, we will use it without any such features. Consider the following function:
$$
f(z)=\frac{(z^{2}+\frac{1}{\sqrt{2}}+\frac{\mathrm{i}}{\sqrt{2}})\cdot(z+\frac{1}{2}+\frac{\mathrm{i}}{2})}{z-1}
$$
This function has two zeroes exactly located on the unit circle, $z_1=\mathrm{e}^{\mathrm{i}\frac{5\pi}{8}}$, and $z_2=\mathrm{e}^{\mathrm{i}\frac{13\pi}{8}}$. Moreover, it has another zero inside the unit circle, $z_3=\frac{1}{\sqrt{2}}\cdot\mathrm{e}^{\mathrm{i}\frac{5\pi}{4}}$. Equally obvious, it has a pole exactly on the unit circle, $z_4=1$. Less obvious, it has a double pole, $z_5$, at the point at infinity. The code required for producing the following figure looks somewhat bulky, but most lines are required for annotating the zeroes and poles. Note that the real axis coordinates of the northern hemisphere's annotation do not have to be multiplied with $-1$ in order to take into account the rotation of the inverted complex plane. By calling `phasePortrait` with `invertFlip = TRUE` the coordinate system of the plot is already set up correctly and will remain so for subsequent operations.
```{r figure_13, fig.width=7, fig.height=3.7, fig.align='center', results='hide', fig.show='hold', cache=TRUE, fig.cap='Riemann sphere plot of the function $f(z)=\\frac{(z^{2}+\\frac{1}{\\sqrt{2}}+\\frac{\\mathrm{i}}{\\sqrt{2}})\\cdot(z+\\frac{1}{2}+\\frac{\\mathrm{i}}{2})}{z-1}$. Annotated are the zeroes $z_1$, $z_2$, $z_3$, and the poles $z_4$, $z_5$.'}
op <- par(mfrow = c(1, 2), mar = c(0.1, 0.1, 1.4, 0.1))
# Define function
FUNstring <- "(z^2 + 1/sqrt(2) * (1 + 1i)) * (z + 1/2*(1 + 1i)) / (z - 1)"
# Southern hemisphere
phasePortrait(FUNstring, xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2),
pi2Div = 12, axes = FALSE, nCores = 2)
riemannMask()
title("Southern Hemisphere", line = 0)
# - annotate zeroes and poles
text(c(cos(5/8*pi), cos(13/8*pi), cos(5/4*pi)/sqrt(2), 1),
c(sin(5/8*pi), sin(13/8*pi), sin(5/4*pi)/sqrt(2), 0),
c(expression(z[1]), expression(z[2]), expression(z[3]), expression(z[4])),
pos = c(1, 2, 4, 2), offset = 1, col = "white")
# Northern hemisphere
phasePortrait(FUNstring, xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2),
pi2Div = 12, axes = FALSE, invertFlip = TRUE, nCores = 2)
riemannMask()
title("Northern Hemisphere", line = 0)
# - annotate zeroes and poles
text(c(cos(5/8*pi), cos(13/8*pi), cos(5/4*pi)*sqrt(2), 1, 0),
c(sin(5/8*pi), sin(13/8*pi), sin(5/4*pi)*sqrt(2), 0, 0),
c(expression(z[1]), expression(z[2]), expression(z[3]),
expression(z[4]), expression(z[5])),
pos = c(1, 4, 3, 4, 4), offset = 1,
col = c("white", "white", "black", "white", "white"))
par(op)
```
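Before turning to the interpretation of the two hemisphere plots, the zeroes and the finite pole claimed above can be confirmed with a short numerical check (plain base **R**, using the same function definition as in the plotting code):

```{r, eval=FALSE}
# Numerical check of the zeroes z1, z2, z3 and the pole z4
f <- function(z) (z^2 + 1/sqrt(2) * (1 + 1i)) * (z + 1/2 * (1 + 1i)) / (z - 1)
f(exp(1i * 5 * pi / 8))           # z1: practically zero
f(exp(1i * 13 * pi / 8))          # z2: practically zero
f(exp(1i * 5 * pi / 4) / sqrt(2)) # z3: practically zero
f(1 + 1e-9)                       # close to z4: modulus explodes
```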
With some consideration it becomes quite easy to see that both phase portraits are kind of everted versions of each other. What is inside the unit disk in the left phase portrait is outside in the right one, and vice versa. If you mentally visualize both unit disks touching in, e.g., point $z_4$ and one disk rolling along the edge of the other, you will see immediately how one disk continues the picture shown in the other. Having the 'Riemann Mask' somewhat transparent is helpful for orientation (see the function's documentation). Note, how the zeroes $z_{1, 2}$ and the pole $z_4$ which are located exactly on the unit circle continue outside the unit disk on 'their own' plane and in the other unit disk. Note also, that on both hemispheres zeroes and poles can be identified by the same sequence of colors: When circling counter-clockwise around the point of interest, a zero will always exhibit the color sequence red, yellow, green, blue, magenta, red ..., while this order will always be reverted for a pole. As the pole at the point at infinity ($z_5$) is a double pole, the color sequence is run through twice during one turn around the pole. As the zero $z_3$ is inside the unit disk representing the southern hemisphere, it lies outside the northern hemisphere disk, but it is still visible on the continuation of the inverted (and rotated) complex plane (belonging to the northern hemisphere) outside the unit disk. Observe, how the grid lines in the vicinity of $z_3$ merge when passing from one unit disk to the other.
### Fractals
While visualizing fractals is not among the original purposes of this package, *phasePortrait* allows for an unusual way of displaying those fractals that are functions of complex numbers. Classic examples are the [Mandelbrot set](https://en.wikipedia.org/wiki/Mandelbrot_set) and the [Julia set](https://en.wikipedia.org/wiki/Julia_set), and this package provides the functions `mandelbrot` and `juliaNormal` (implemented in C++) for supporting visualization. The Mandelbrot set comprises all complex numbers $z$ for which the sequence $a_{n+1}=a_n^{2}+z$ with $a_0=0$ remains bounded for all $n\in\mathbb{N_0}$. Normal Julia sets are closely related to the Mandelbrot set. They comprise all complex numbers $z$ for which the sequence $a_{n+1}=a_n^2+c$ with $a_0=z$ remains bounded for all $n\in\mathbb{N_0}$. The parameter $c$ is a complex number, and interesting visualizations with this package (i.e. other than a blank screen) are only obtained for $c$ being an element of the Mandelbrot set. For the author's taste, the Julia set visualizations look best and are most interesting when $c$ is located near the border of the Mandelbrot set.
The classic visualizations of the Mandelbrot and the Julia set use a uniform color (usually black) for all points which belong to the set and color the points outside the set dependent on how quickly they diverge. Visualizations with *phasePortrait*, in contrast, color the points inside the sets by the argument and modulus of the number to which the series described above converges (or, more precisely, at which the iteration terminates). While the results are visually appealing (please try the code examples we provide below), they are not unambiguous, as the sequences that define the sets do not always converge to one single value, but to limit cycles.
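To make the defining iteration concrete, here is a minimal, deliberately unoptimized sketch for a single point. The helper `mandelbrotPoint`, the escape radius of 2, and the choice to return the last iterate are assumptions made only for this illustration; for actual plotting, the package's C++ based `mandelbrot` function should be used.

```{r, eval=FALSE}
# Sketch of the Mandelbrot iteration a_{n+1} = a_n^2 + z for a single point z:
# returns the last iterate if the sequence stays bounded (|a_n| <= 2, a common
# escape criterion), otherwise NA ("outside the set")
mandelbrotPoint <- function(z, itDepth = 30) {
  a <- 0 + 0i
  for(n in seq_len(itDepth)) {
    a <- a^2 + z
    if(Mod(a) > 2) return(NA_complex_)
  }
  return(a)
}
mandelbrotPoint(-1 + 0i) # inside the set
mandelbrotPoint( 1 + 1i) # outside the set, returns NA
```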
The following code plots an overview picture of the Mandelbrot set. Note that the function `mandelbrot` is called with a rather low value (30) for the parameter `itDepth`, which defines the number of iterations to be calculated (default is 500). This is because we are using *phasePortrait* for plotting into a graphics window with a comparatively low resolution (see section [defining image quality](#img_qual) for how to obtain high-quality phase portraits). While a high number of iterations produces a more accurate representation of the set, the resulting filigree structures might become hardly visible or even invisible when the resolution to be plotted on is low.
```{r, eval=FALSE}
x11(width = 8, height = 2/3 * 8) # Open graphics window on screen
op <- par(mar = c(0, 0, 0, 0)) # Do not leave plot margins
phasePortrait(mandelbrot, moreArgs = list(itDepth = 30),
              nCores = 1, # Increase or leave out for higher performance
xlim = c(-2, 1), ylim = c(-1, 1),
hsvNaN = c(0, 0, 0), # black color for points outside the set
axes = FALSE, # No coordinate axes
xaxs = "i", yaxs = "i") # No space between plot region and plot
par(op) # Set graphics parameters to original
```
With the code example below we plot a cutout of the Mandelbrot set into a png file with a resolution of 600 dpi using the default number of iterations (500). We are using a few features that are just commented here, but will be explained below in the section [aesthetic hints](#hints_artistic). Other graphics file formats can be used in almost the same way. Type `?png` in order to see all formats and how to call them. See also section [defining image quality](#img_qual).
```{r, eval=FALSE}
res <- 600 # set resolution to 600 dpi
# open png graphics device with in DIN A4 format
# DIN A format has an edge length ratio of sqrt(2)
png("Mandelbrot Example.png",
width = 29.7, height = 29.7/sqrt(2), # DIN A4 landscape
units = "cm",
res = res) # resolution is required
op <- par(mar = c(0, 0, 0, 0)) # set graphics parameters - no plot margins
xlim <- c(-1.254, -1.248) # horizontal (real) plot limits
# the function below adjusts the imaginary plot limits to the
# desired ratio (sqrt(2)) centered around the desired imaginary value
ylim <- ylimFromXlim(xlim, centerY = 0.02, x_to_y = sqrt(2))
phasePortrait(mandelbrot,
nCores = 1, # Increase or leave out for higher performance
xlim = xlim, ylim = ylim,
hsvNaN = c(0, 0, 0), # Black color for NaN results
xaxs = "i", yaxs = "i", # suppress R's default axis margins
axes = FALSE, # do not plot axes
res = res) # resolution is required
par(op) # reset graphics parameters
dev.off() # close graphics device and complete the png file
```
Inside the same technical setting, the following two examples plot Julia sets into a png file.
```{r, eval=FALSE}
res <- 600
png("Julia Example 1.png", width = 29.7, height = 29.7/sqrt(2),
units = "cm", res = res)
op <- par(mar = c(0, 0, 0, 0))
xlim <- c(-1.8, 1.8)
ylim <- ylimFromXlim(xlim, centerY = 0, x_to_y = sqrt(2))
phasePortrait(juliaNormal,
# see documentation of juliaNormal about the arguments
# c and R_esc
moreArgs = list(c = -0.09 - 0.649i, R_esc = 2),
nCores = 1, # Increase or leave out for higher performance
xlim = xlim, ylim = ylim,
hsvNaN = c(0, 0, 0),
xaxs = "i", yaxs = "i",
axes = FALSE,
res = res)
par(op)
dev.off()
```
```{r, eval=FALSE}
res <- 600
png("Julia Example 2.png", width = 29.7, height = 29.7/sqrt(2),
units = "cm", res = res)
op <- par(mar = c(0, 0, 0, 0))
xlim <- c(-0.32, 0.02)
ylim <- ylimFromXlim(xlim, centerY = -0.78, x_to_y = sqrt(2))
phasePortrait(juliaNormal,
# see documentation of juliaNormal about the arguments
# c and R_esc
moreArgs = list(c = -0.119 - 0.882i, R_esc = 2),
nCores = 1, # Increase or leave out for higher performance
xlim = xlim, ylim = ylim,
hsvNaN = c(0, 0, 0),
xaxs = "i", yaxs = "i",
axes = FALSE,
res = res)
par(op)
dev.off()
```
### Phase portraits based on a polar chessboard
Since version 1.1.0, *viscomplexr* provides the function `phasePortraitBw`, which allows for creating two-color phase portraits of complex functions based on a polar chessboard grid (cf. @wegert_visualcpx_2012, p. 35). Compared to the full phase portraits that can be made with `phasePortrait`, two-color portraits omit information. Especially in combination with full phase portraits they can be, however, very helpful tools for interpretation. Besides, two-color phase portraits have a special aesthetic appeal which is worth exploring for itself. In its parameters and its mode of operation, `phasePortraitBw` is very similar to `phasePortrait` (see the documentations of both functions for details). The parameters `pi2Div` and `logBase` have exactly the same effect as with `phasePortrait`. Instead of the parameter `pType`, `phasePortraitBw` has the parameter `bwType` which allows for the three settings "m", "a", and "ma". These produce two-color phase portraits which take into account the modulus only, the argument (phase angle) only, and the combination of both, respectively. Plots made with the latter option show a chessboard-like color alteration over the tiles resulting from the intersection of modulus and argument zones. The following code maps the complex plane to itself, comparing all three options of `bwType`. It also adds a standard phase portrait for comparison.
```{r, eval=FALSE}
# Map the complex plane on itself, show all bwType options
x11(width = 8, height = 8)
op <- par(mfrow = c(2, 2), mar = c(4.1, 4.1, 1.1, 1.1))
for(bwType in c("ma", "a", "m")) {
phasePortraitBw("z", xlim = c(-2, 2), ylim = c(-2, 2),
bwType = bwType,
xlab = "real", ylab = "imaginary",
nCores = 2) # Increase or leave out for higher performance
}
# Add normal phase portrait for comparison
phasePortrait("z", xlim = c(-2, 2), ylim = c(-2, 2),
xlab = "real", ylab = "imaginary",
pi2Div = 18, # Use same angular division as default
# in phasePortraitBw
nCores = 2) # Increase or leave out for higher performance
par(op)
```
Note that the parameter `pi2Div` should not be chosen as an odd number when working with `phasePortraitBw`. In this case, the first and the last phase angle zone would obtain the same color, which is probably an undesired effect for most applications. While `pi2Div = 9` is the default setting in `phasePortrait` for good reasons (see above), its default in `phasePortraitBw` is 18. Also by default, the parameter `logBase` is linked to `pi2Div` in the same way as by default in `phasePortrait` (`logBase = exp(2*pi/pi2Div)`). So, if defaults for both, `phasePortrait` and `phasePortraitBw` are used, each zone of the former covers two zones of the latter. In the code above, however, `pi2Div` was set to 18 also in the call to `phasePortrait` for direct comparability. This is also the case in the code below, which displays a rational function.
```{r eval=FALSE}
# A rational function, show all bwType options
x11(width = 8, height = 8)
funString <- "(z + 1.4i - 1.4)^2/(z^3 + 2)"
op <- par(mfrow = c(2, 2), mar = c(4.1, 4.1, 1.1, 1.1))
for(bwType in c("ma", "a", "m")) {
phasePortraitBw(funString, xlim = c(-2, 2), ylim = c(-2, 2),
bwType = bwType,
xlab = "real", ylab = "imaginary",
nCores = 2) # Increase or leave out for higher performance
}
# Add normal phase portrait for comparison
phasePortrait(funString, xlim = c(-2, 2), ylim = c(-2, 2),
xlab = "real", ylab = "imaginary",
pi2Div = 18, # Use same angular division as default
# in phasePortraitBw
nCores = 2) # Increase or leave out for higher performance
par(op)
```
While the letters 'Bw' in `phasePortraitBw` stand for 'black/white', the natural colors for such chessboard plots, the user is not limited to these. The choice of colors is defined by the parameter `bwCols` which, by default, is set as `bwCols = c("black", "gray95", "gray")`. The first and the second color are used for coloring the alternating zones, while the last color is used in cases where the function of interest produces results which cannot be sufficiently evaluated for modulus or argument (`NaN`, partly `Inf`). Note that the second color, `"gray95"` is almost, but not exactly white, which contrasts 'white' tiles against a white background in a visually unobtrusive way. The parameter `bwCols` can be freely changed; values must be either color names that **R** can interpret (call `colors()` for a list) or hexadecimal color strings like e.g. `"#00FF32"` (the format is `"#RRGGBB"` with 'RR', 'GG', and 'BB' representing red, green, and blue with an allowed value range of `00` to `FF` for each).
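As a small, purely illustrative example of a custom color setting (any valid **R** colors would do), the chessboard portrait of the identity function from above could, e.g., also be rendered in dark blue and light gray:

```{r, eval=FALSE}
# Chessboard phase portrait with custom colors instead of black/white
phasePortraitBw("z", xlim = c(-2, 2), ylim = c(-2, 2),
                bwType = "ma",
                bwCols = c("darkblue", "gray95", "gray"),
                xlab = "real", ylab = "imaginary",
                nCores = 2) # Increase or leave out for higher performance
```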
## Aesthetic hints {#hints_artistic}
While phase portraits were originally invented for scientific and technical purposes, their aesthetic quality is a feature in itself. In this section, we give a few technical hints that might be helpful for obtaining appealing graphics. We will not only talk about features implemented in this package, but also mention some useful options provided by R base graphics. A general recommendation when plotting for maximum aesthetic results is to first check out the function to be plotted at a lower resolution (e.g. the default 150 dpi) and in a smaller format (but with the desired device aspect ratio), and to adjust the domain (`xlim`, `ylim`) and all other parameters of *phasePortrait* you might want to change (including the parameters `gamma` and `stdSaturation`, which we have not mentioned in this vignette so far). Only if you are satisfied with the result at that stage should you start the run with the desired final resolution and format, as plots at high resolutions and in large formats may be time-consuming, depending on your hardware (see also the section [defining image quality](#img_qual)). When using the function `riemannMask` you could try changing the mask's transparency (`alphaMask`) and its color (`colMask`); often, black is a good alternative to the default white. Besides such simple issues there are, however, a few points we will talk about in more detail below.
### The `par(op)` mechanism
As mentioned above, *phasePortrait* uses **R**'s base graphic system. This is a powerful tool, its functionality is, however, not always easy to understand and use. Many fundamental settings of base **R** graphics are stored in a set of parameters, which can be set or queried using the function `par()`. Among the important graphical parameters in our context are those which steer the outer margins and the plot margins (`oma`, `omi`, `mar`, `mai`) and those which define the default background and foreground colors (`bg`, `fg`). Type `?par` to see a documentation of all parameters. Changing these parameters here and there during an **R** session can easily lead to graphical results that may be nice but hard to reproduce. For avoiding this, when `par()` is called to change one or more graphical parameters, it invisibly returns all parameters and their values *before* the change. These can be stored in a variable, and used to restore the original parameter values after the plotting has been done. This concept seems to be unknown to surprisingly many users of **R**:
```{r, eval = FALSE}
# Set the plot margins at all four sides to 1/5 inch with mai,
# set the background color to black with bg, and the default foreground
# color with fg (e.g. for axes and boxes around plots, or the color of
# the circle outline from the function riemannMask).
# We catch the previous parameter values in a variable, I called
# "op" ("old parameters")
op <- par(mai = c(1/5, 1/5, 1/5, 1/5), bg = "black", fg = "white")
# Make any phase portraits and/or other graphics of your interest
# ...
# Set the graphical parameters back to the values previously stored in op
par(op)
```
### Dealing with axes
Usually, when aiming for mainly aesthetic effects, you want to suppress plot axes from being drawn. As *phasePortrait* accepts, via its `...` argument, all arguments also accepted by **R**'s `plot.default`,
this can be easily achieved by providing the argument `axes = FALSE`:
```{r, eval = FALSE}
phasePortrait("tan(z^3 + 1/2 - 2i)/(1 - 1i - z)",
xlim = c(-6, 6), ylim = c(-3, 3),
axes = FALSE,
nCores = 2) # Increase or leave out for higher performance
```
Note that this does not only suppress both axes, but also the box usually drawn around a plot. If such a box is desired, it can be simply added afterwards by calling `box()`:
```{r, eval=FALSE}
phasePortrait("tan(z^3 + 1/2 - 2i)/(1 - 1i - z)",
xlim = c(-6, 6), ylim = c(-3, 3),
axes = FALSE,
nCores = 2) # Increase or leave out for higher performance
box()
```
If axes are desired together with a special aesthetic appeal (e.g. for presentations), it is worth trying out a black background and white axes. However, there are a few unexpected hurdles to take before the result looks as it should:
```{r, eval=FALSE}
# set background and foreground colors
op <- par(bg = "black", fg = "white")
# Setting the parameter fg has an effect on the box, the axes, and the axes'
# ticks, but not on the axis annotations and axis labels.
# Also the color of the title (main) is not affected.
# The colors of these elements have to be set manually and separately. While we
# could simply set them to "white", we set them, more flexibly, to the
# current foreground color (par("fg")).
phasePortrait("tan(z^3 + 1/2 - 2i)/(2 - 1i - z)",
xlim = c(-6, 6), ylim = c(-3, 3), col.axis = par("fg"),
xlab = "real", ylab = "imaginary", col.lab = par("fg"),
main = "All annotation in foreground color", col.main = par("fg"),
# Adjust text size
cex.axis = 0.9, cex.lab = 0.9,
nCores = 2) # Increase or leave out for higher performance
par(op)
```
Note that by default the axes are constructed with an overhang of 4% beyond the ranges given with `xlim` and `ylim` at each end. More often than not this looks nice, but sometimes it is undesired, e.g. when a phase portrait is intended to cover the full display without any frame and margin. This behavior is due to the graphical parameters `xaxs` and `yaxs` (axis style) being set to 'r' ('regular') by default. Setting these parameters as `xaxs = "i"` and `yaxs = "i"` ('internal'), no overhang is added. Both, `xaxs` and `yaxs`, can be either set in a call to `par()` or handed as arguments to *phasePortrait*. We will come back to these parameters in the following section.
### Device ratio and margins
You might want to plot a phase portrait that fully covers the graphics device. The following code example shows how to achieve this. First, it is necessary to set the plot margins to zero (note that the outer margins are zero by default, so, usually, there is no need to care for them). Second, as *phasePortrait* uses an aspect ratio of 1 by default, `xlim` and `ylim` have to exactly match the aspect ratio of the graphics device to be plotted in. In order to facilitate this, we provide the functions `ylimFromXlim` and `xlimFromYlim`. In the example, we use the former in order to match `xlim` and `ylim` to a device aspect ratio of 16/9. Third, in order to omit the 4% axis overhang (which would look like a margin), the parameters `xaxs` and `yaxs` are set to "i". Setting `axes` to FALSE is not absolutely necessary in this case, but is good style.
```{r, eval=FALSE}
# Open graphics device with 16/9 aspect ratio and 7 inch width
x11(width = 7, height = 9/16 * 7)
op <- par(mar = c(0, 0, 0, 0)) # Set plot margins to zero
xlim <- c(-3, 3)
# Calculate ylim with desired center fitting the desired aspect ratio
ylim <- ylimFromXlim(xlim, centerY = 0, x_to_y = 16/9)
phasePortrait(jacobiTheta, moreArgs = list(tau = 1i/5 + 1/5), pType = "p",
xlim = xlim, ylim = ylim,
xaxs = "i", yaxs = "i",
axes = FALSE,
nCores = 2) # Increase or leave out for higher performance
par(op)
```
Not many changes are necessary for obtaining a phase portrait like above but with a frame. A convenient way to do this is to set the outer margins of the graphics device in inches with the graphical parameter `omi` and the background to the desired color. Adding this to the code above, however, leads to differing horizontal and vertical frame widths. This occurs because, due to the margin setting, the required ratio of the `xlim` and `ylim` ranges is no longer exactly 16/9. The precise ratio has to be calculated and provided to `ylimFromXlim` as shown in the code example below.
```{r, eval=FALSE}
# Open graphics device with 16/9 aspect ratio and a width of 7 inches
x11(width = 7, height = 9/16 * 7)
# Set plot margins to zero, outer margins to 1/7 inch,
# and background color to black
outerMar <- 1/7 # outer margin width in inches
op <- par(mar = c(0, 0, 0, 0), omi = rep(outerMar, 4), bg = "black")
xlim <- c(-1.5, 0.5)
# Calculate ylim with desired center fitting the desired aspect ratio;
# however, the omi settings slightly change the required
# ratio of xlim and ylim
ratio <- (7 - 2*outerMar) / (7 * 9/16 - 2*outerMar)
ylim <- ylimFromXlim(xlim, centerY = 0, x_to_y = ratio)
phasePortrait("sin(jacobiTheta(z, tau))/z", moreArgs = list(tau = 1i/5 + 1/5),
pType = "p",
xlim = xlim, ylim = ylim,
xaxs = "i", yaxs = "i",
axes = FALSE,
nCores = 1) # Increase or leave out for higher performance
par(op)
```
## Technical moreabouts {#tech_moreabouts}
This chapter details a few technical points which might be of interest for optimizing the results obtained with the **R** package at hand. We talk about different ways to [provide functions](#ways_functions) to *phasePortrait*, and about how to control [image quality](#img_qual). And there is more: The function *phasePortrait* has to perform several memory- and time-critical operations. In order to keep memory utilization on a reasonable level and to optimize computing times, the function works with [temporary files](#tempfiles) and [parallel processing](#par_proc). We explain both below, because the user can influence their behavior. For avoiding unnecessary copying of big arrays, *phasePortrait* also makes use of pointers, but as there is no related control option for the user, we reserve this for a later version of this vignette.
### Ways to provide functions to *phasePortrait* {#ways_functions}
#### Quoted character strings
Any function to be visualized with *phasePortrait* must be provided as the argument `FUN`. In some cases (see below), the argument `moreArgs` can turn out useful in combination with `FUN`. Probably the easiest way of defining `FUN` is a character string which is an expression **R** can evaluate as a function of a complex number $z$. See some examples:
```{r, eval = FALSE}
# Note that 'FUN =' is not required if the argument to FUN is handed to
# phasePortrait in the first position
phasePortrait(FUN = "1/(1 - z^2)", xlim = c(-5, 5), ylim = c(-5, 5), nCores = 2)
phasePortrait("sin((z - 2)/(z + 2))", xlim = c(-5, 5), ylim = c(-5, 5), nCores = 2)
phasePortrait("tan(z)", xlim = c(-5, 5), ylim = c(-5, 5), nCores = 2)
```
If your expression requires arguments besides $z$ you can provide them to *phasePortrait* by means of `moreArgs`, which expects a named list containing the additional arguments:
```{r, eval = FALSE}
phasePortrait("-1 * sum(z^c(-k:k))", moreArgs = list(k = 11),
xlim = c(-2, 2), ylim = c(-1.5, 1.5),
pType = "p",
nCores = 2) # Increase or leave out for higher performance
```
While we recommend other solutions (see below), it is also possible to hand over more extensive user-defined functions as character strings. To make this work, however, the function must be wrapped in a `vapply` construct that guarantees the function's output is a complex number by setting `vapply`'s argument `FUN.VALUE` to `complex(1)`. Moreover, the first argument to `vapply` must be $z$. In such cases, it is often convenient to define the character string outside the call to *phasePortrait* and hand it over after that, as we do in the following example:
```{r, eval = FALSE}
funString <- "vapply(z, FUN = function(z) {
n <- 9
k <- z^(c(1:n))
rslt <- sum(sin(k))
return(rslt)
},
FUN.VALUE = complex(1))"
phasePortrait(funString, xlim = c(-2, 2), ylim = c(-2, 2),
nCores = 2) # Increase or leave out for higher performance
```
If such a function has arguments in addition to $z$, they can be included into the call to 'vapply' and thus included into the string (for supporting this, we provide the function `vector2string`, see the example in its documentation), but we do not recommend that. Anyway, if you must know, here it is:
```{r, eval = FALSE}
funString <- "vapply(z, FUN = function(z, fct) {
n <- 9
k <- z^(fct * c(1:n))
rslt <- sum(sin(k))
return(rslt)
},
fct = -1,
FUN.VALUE = complex(1))"
phasePortrait(funString, xlim = c(-2, 2), ylim = c(-2, 2),
nCores = 2) # Increase or leave out for higher performance
```
Probably, the most useful application of this concept is when the `vapply` construct is pasted together at runtime with values for the additional arguments depending on what happened earlier. However, defining the function directly as a function first, and then simply passing its name to *phasePortrait* leads to a more readable code:
```{r, eval = FALSE}
# Define function
tryThisOne <- function(z, fct, n) {
k <- z^(fct * c(1:n))
rslt <- prod(cos(k))
return(rslt)
}
# Call function by its name only, provide additional arguments via "moreArgs"
phasePortrait("tryThisOne", moreArgs = list(fct = 1, n = 5),
xlim = c(-2.5, 2.5), ylim = c(-2, 2),
nCores = 2) # Increase or leave out for higher performance
```
As the function in the example above requires two additional arguments beside $z$, we hand them over to *phasePortrait* via the argument `moreArgs`, which must be (even in case of only one additional argument) a named list (names must match the names of the required arguments), where the argument values are assigned.
#### Function objects
Besides character strings as shown above, the argument `FUN` can also directly take function objects. The simplest case is an anonymous function definition:
```{r, eval = FALSE}
# Use argument "hsvNaN = c(0, 0, 0)" if you want the grey area black
phasePortrait(function(z) {
for(j in 1:20) {
z <- z * sin(z) - 1 + 1/2i
}
return(z)
},
xlim = c(-3, 3), ylim = c(-2, 2),
nCores = 2) # Increase or leave out for higher performance
```
Evidently, this can be used with `moreArgs` as well:
```{r, eval = FALSE}
# Use argument "hsvNaN = c(0, 0, 0)" if you want the grey area black
phasePortrait(function(z, n) {
for(j in 1:n) {
z <- z * cos(z)
}
return(z)
},
moreArgs = list(n = 27),
xlim = c(-3, 3), ylim = c(-2, 2),
nCores = 2) # Increase or leave out for higher performance
```
Any function object that is known to **R** by name, be it user-defined or contained in a package, will work in the same way. Just hand over the function object itself:
```{r, eval = FALSE}
# atan from package base
phasePortrait(atan, xlim = c(-pi, pi), ylim = c(-pi, pi),
nCores = 2)
# gammaz from package pracma (the package must be installed on your machine
# if you want this example to be working)
phasePortrait(pracma::gammaz, xlim = c(-9, 9), ylim = c(-5, 5),
nCores = 2)
# blaschkeProd from this package (moreArgs example)
# make random vector of zeroes
n <- 12
a <- complex(modulus = runif(n), argument = 2 * pi * runif(n))
# plot the actual phase portrait
phasePortrait(blaschkeProd, moreArgs = list(a = a),
xlim = c(-1.3, 1.3), ylim = c(-1.3, 1.3),
nCores = 2)
# User function example
tryThisOneToo <- function(z, n, r) {
for(j in 1:n) {
z <- r * (z + z^2)
}
return(z)
}
# Use argument "hsvNaN = c(0, 0, 0)" if you want the gray areas black
phasePortrait(tryThisOneToo, moreArgs = list(n = 50, r = 1/2 - 1/2i),
xlim = c(-3, 2), ylim = c(-2.5, 2.5),
nCores = 2)
```
Defining your own functions in C++ and using them with *phasePortrait* usually gives a substantial performance gain. This is especially true if they require operations that cannot be vectorized in **R** (i.e. if there is no way to avoid for-loops or similar). The ideal tool for integrating C++ code in **R** programs is *Rcpp* [@edelbuettel_rcpp_2017], but be aware that C++ functions compiled with `Rcpp::sourceCpp` will not work with *phasePortrait*, as this is not compatible with parallel processing. What it takes is to provide the C++ functions as, or as part of, an R package, which is not difficult at all. The functions of the `math` family included in this package have all been coded in C++ and integrated with *Rcpp*.
### Defining image quality {#img_qual}
Clearly, there is a trade-off between the quality of an image plotted with *phasePortrait* and the computing time. The image quality is defined by the argument `res` and has a default value of 150 dpi. For pictures in standard sizes of about 30 x 20 cm plotting with 150 dpi does not take much time, and for many purposes, this resolution is sufficient. When resolutions of 300, 600 or more dpi are desired for high-quality printouts, we recommend to try out everything with 150 dpi (and, maybe, on a small format) before starting the final high-quality run. Technically, early after being called, *phasePortrait* gets the plot region size of the active graphics device and calculates the number of required pixels from this size and the value of `res`. Note, that **R** graphics devices for files, like `png`, `bmp`, `jpeg` and `tiff`, also expect a parameter `res` (if the units given for device height and width are cm or inches). For plotting to graphics files, we suggest to store the desired resolution in a variable first, and pass it to both, the graphics device and *phasePortrait*:
```{r, eval = FALSE}
res <- 300 # Define desired resolution in dpi
png("Logistic_Function.png", width = 40, height = 40 * 3/4,
units = "cm", res = res)
phasePortrait("1/(1+exp(-z))", xlim = c(-25, 25), ylim = c(-15, 15), res = res,
xlab = "real", ylab = "imaginary",
nCores = 2) # Increase or leave out for higher performance
dev.off()
```
### Temporary files {#tempfiles}
In order to keep the machine's RAM workload manageable, *phasePortrait* will always save data in temporary files. These files are stored in the directory specified with the argument `tempDir` (default is the current **R** session's temporary directory). After normal execution, these files will be automatically deleted, so, usually, there is no need to care about. Automatic deletion, however, will not happen, if the user calls *phasePortrait* with the parameter `deleteTempFiles` set to FALSE or if phasePortrait does not terminate properly. Thus, if *phasePortrait* crashed you should check the directory specified as `tempDir` and delete these files, because they usually are of considerable size. However, such orphans will never interfere with further runs of *phasePortrait* (see below).
The size of these temporary files depends on *phasePortrait*'s parameter `blockSizePx` (default: 2250000; it may be worth varying this value in order to obtain optimum performance on your machine). If the two-dimensional array of pixels to be plotted comprises more pixels than specified by this parameter, the array will be vertically split into blocks of that size. These sub-arrays are what is stored in the temporary files. More precisely, there are two temporary files per sub-array. One represents the cutout of the untransformed complex plane over which the function of interest is applied; the other contains the values obtained by applying the function to the first one. Thus, each array cell contains a double precision complex number; in a temporary file pair an array cell at the same position refers to the same pixel in the plot.
These files are `.RData` files, and their names adhere to a strict convention, see the following examples:
`0001zmat2238046385.RData`
`0001wmat2238046385.RData`
These are examples of names of a pair of temporary files belonging to the same block (sub-array). The names are equal except for one substring which can either be 'zmat' or 'wmat'. The former file contains an untransformed cutout of the complex plane, and the latter the corresponding values obtained from the function of interest as explained above.
Both names begin with '0001', indicating that the array's top line is the first line of the *whole* pixel array to be covered by the phasePortrait. The name of the file that contains the subsequent array can e.g. begin with a number like '0470', indicating that its first line is line number 470 of the whole array. The number of digits for these line numbers is not fixed. It is determined by the greatest number required. Numbers with less digits are zero-padded. The second part of the file name is either zmat or wmat (see above). The third part of the file names is a ten-digit integer. This is a random number which all temporary files stemming from the same call of *phasePortrait* have in common. This guarantees that no temporary files will be confounded in subsequent calls of *phasePortrait*, even if undeleted temporary files from previous runs are still present.
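If orphaned temporary files from a crashed run need to be tracked down, the naming convention above translates into a simple search pattern. The sketch below assumes the default `tempDir`, i.e. the current session's temporary directory; adapt the path if a different `tempDir` was used.

```{r, eval=FALSE}
# List potential leftover temporary files from phasePortrait
# (names like '0001zmat2238046385.RData', see the convention described above)
leftOvers <- list.files(tempdir(),
                        pattern = "^[0-9]+[zw]mat[0-9]{10}\\.RData$",
                        full.names = TRUE)
leftOvers
# file.remove(leftOvers) # uncomment to delete them
```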
### Parallel processing {#par_proc}
For enhanced performance, *phasePortrait* (and in the same way *phasePortraitBw*) uses parallel processing as provided via the **R** packages `doParallel` [@micwest_doparallel_2019] and `foreach` [@micwest_foreach_2020]. The number of processor cores to be used can be set with the parameter `nCores` when calling *phasePortrait*. By default, one less than all available cores will be utilized. Clearly, setting `nCores = 1` will result in sequential processing. When *phasePortrait* is called with `nCores > 1`, and no parallel backend is registered, one will be registered first. The same applies when a parallel backend is already registered, but the user desires a different number of cores. This registering may take some time. Therefore, when it terminates, *phasePortrait* does not automatically de-register the parallel backend (and register a sequential backend again). This saves registering time in subsequent runs of *phasePortrait* with the same number of cores to be used. This default behavior - keeping parallel backends registered after termination - can be changed by setting the parameter `autoDereg` to TRUE (default is FALSE). Otherwise, *phasePortrait*, after completing the plot, prints a message that the current parallel backend can be manually de-registered with the command `foreach::registerDoSEQ()`. We recommend doing this after the last call to *phasePortrait* in an **R** session.
There are three occasions, when *phasePortrait* utilizes parallel processing: First, after determining the size and number range (in the complex plane) of the whole two dimensional array of pixels to be plotted, the sub-arrays (blocks) corresponding to the parameter value `blockSizePx` are constructed and saved as temporary files in a parallel loop. These are the temporary files with the string `zmat` in their names (see section [Temporary files](#tempfiles)).
Second, while the single blocks are loaded and processed sequentially, each block itself is evaluated in parallel. In order to do so, the block is split into a few approximately equally sized parts; the number of these parts corresponds to the number of processor cores to be used. In each parallel process the function to be plotted is applied to each single cell of the corresponding block part (internally vectorized with `vapply`). The outcomes of all parallel processes are combined into one array, which is saved as a temporary file with the string `wmat` in its name.
Third, for transforming the function values stored in the `wmat` files into [hsv colors](https://en.wikipedia.org/wiki/HSL_and_HSV), a similar concept as in the second step is utilized: The single `wmat` files are processed sequentially, but the array stored in each file is split into chunks which are processed in parallel. Eventually, transforming all `wmat` arrays results in one large color value array which is then plotted. Handling this large array is the most memory-intensive task when running *phasePortrait*, and it can take considerable time for large plots in high quality. So far, however, no alternative solution has provided fully satisfying results.
As mentioned in the section about [temporary files](#tempfiles), users can possibly optimize performance by trying different values for the parameter `blockSizePx`. We mention this here as well, as `blockSizePx` does not only influence size and number of the temporary files, but also the size of the array chunks that are processed in parallel.
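As a rough sketch of such an experiment (the plotted function and the values tried below are arbitrary; whether a smaller or a larger block size pays off depends on your machine):

```{r, eval=FALSE}
# same plot, different block sizes - compare the run times on your machine
system.time(phasePortrait("sin(1/z)", xlim = c(-1, 1), ylim = c(-1, 1),
                          blockSizePx = 1000000))
system.time(phasePortrait("sin(1/z)", xlim = c(-1, 1), ylim = c(-1, 1),
                          blockSizePx = 4000000))
```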
Obviously, applying the function of interest to millions of values is time-critical. Therefore, when defining a function for a phase portrait in **R**, use all options at hand for vectorizing calculations. Moreover, you can count on a significant performance gain when you write time-critical functions in C++. Thanks to the package *Rcpp* [@edelbuettel_rcpp_2017] this is not really a hurdle anymore. As mentioned above, however, C++ functions compiled with `Rcpp::sourceCpp` will not work with *phasePortrait*, as this is not compatible with parallel processing; you have to provide such functions in a package. The package at hand provides a few functions (function family `maths`); all of them have been implemented in C++.
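For the pure **R** route, a minimal sketch of a vectorized function could look as follows; the expression itself is an arbitrary example, the point is that it only uses operations that are already vectorized over `z`:

```{r, eval=FALSE}
# vectorized by construction: works on a whole vector of complex numbers at once
f <- function(z) (z^2 - 1i) / (2 * z + exp(z))
f(c(1 + 1i, 2 - 0.5i))
```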
## Acknowledgments
While this package is a leisure project, it would have been a mission impossible without the background of my daily work with R as a Forest Scientist at the Technical University of Munich (TUM). Fortunately, I have a job that allows me to learn about Nature by asking her questions (or trying to simulate what she is doing) with ever-improving methods and tools. I would like to thank everyone at the Chair of Forest Growth and Yield Science at TUM who keep me involved in discussions like: *How can this be solved in R ...*
```{r, eval=FALSE}
switch(1 + trunc(runif(1, 0, 6)),
"... at all?",
"... in a quick-and-dirty way?",
"... in Hadley-Wickham-style?",
"... without a loop?",
"... without nested loops?",
"... in a way somebody can understand?")
```
**Veronika Biber** provided expert advice for improving the vignette. **Johannes Biber** turned out the most patient pre-release tester one can imagine, boosting things with his high-end gaming machine. Thanks, guys! Also thanks to **Gregor Seyer** for his helpful review of the CRAN submission.
Clearly, programming in R would not be what it is, weren't there some R titans who generously share their knowledge online. While I keep learning from all of them, I would like to thank especially **Hadley Wickham** and **Dirk Eddelbüttel**.
## References
```{r, include = FALSE}
foreach::registerDoSEQ()
```
|
/scratch/gouwar.j/cran-all/cranData/viscomplexr/vignettes/viscomplexr-vignette.Rmd
|
#' Abbreviate all variables in a data frame
#'
#' It can be useful to abbreviate variable names in a data set to make them
#' easier to plot. This function takes in a data set and some minimum length
#' to abbreviate the data to.
#'
#' @param data data.frame
#' @param min_length minimum number of characters to abbreviate down to
#'
#' @return data frame with abbreviated variable names
#'
#' @examples
#' long_data <- data.frame(
#' really_really_long_name = c(NA, NA, 1:8),
#' very_quite_long_name = c(-1:-8, NA, NA),
#' this_long_name_is_something_else = c(NA, NA,
#' seq(from = 0, to = 1, length.out = 8))
#' )
#'
#' vis_miss(long_data)
#' long_data %>% abbreviate_vars() %>% vis_miss()
#' @export
abbreviate_vars <- function(data, min_length = 10){
test_if_dataframe(data)
dplyr::rename_with(
data,
.fn = ~abbreviate(.x,
minlength = min_length,
method = "both"),
# this didn't work with ncar_over for some reason?
.cols = dplyr::everything()
)
}
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/abbreviate.R
|
#' A small toy dataset of binary data with missings.
#'
#' A dataset containing binary values and missing values. It is created to
#' illustrate the usage of [vis_binary()].
#'
#' @format A data frame with 100 rows and 3 variables:
#' \describe{
#' \item{x}{a binary variable with missing values.}
#' \item{y}{a binary variable with missing values.}
#' \item{z}{a binary variable with **no** missing values.}
#' }
#'
"dat_bin"
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/data-binary-data.R
|
#' A small toy dataset of imaginary people
#'
#' A wider dataset than `typical_data` containing information about some
#' randomly generated people, created using the excellent `wakefield`
#' package. It is created as deliberately odd / eclectic dataset.
#'
#' @format A data frame with 300 rows and 49 variables:
#' \describe{
#' \item{Age}{Age of each individual, see ?wakefield::age for more info}
#' \item{Animal}{A vector of animals, see ?wakefield::animal}
#' \item{Answer}{A vector of "Yes" or "No"}
#' \item{Area}{A vector of living areas "Suburban", "Urban", "Rural"}
#' \item{Car}{names of cars - see ?mtcars}
#' \item{Children}{vector of number of children - see ?wakefield::children}
#' \item{Coin}{character vector of "heads" and "tails"}
#' \item{Color}{vector of vectors from "colors()"}
#' \item{Date}{vector of "important" dates for an individual}
#' \item{Death}{TRUE / FALSE for whether this person died}
#' \item{Dice}{6 sided dice result}
#' \item{DNA}{vector of GATC nucleobases}
#' \item{DOB}{birth dates}
#' \item{Dummy}{a 0/1 dummy var}
#' \item{Education}{education attainment level}
#' \item{Employment}{employee status}
#' \item{Eye}{eye colour}
#' \item{Grade}{percent grades}
#' \item{Grade_Level}{favorite school grade}
#' \item{Group}{control or treatment}
#' \item{hair}{hair colours - "brown", "black", "blonde", or "red"}
#' \item{Height}{height in cm}
#' \item{Income}{yearly income}
#' \item{Browser}{choice of internet browser}
#' \item{IQ}{intelligence quotient}
#' \item{Language}{random language of the world}
#' \item{Level}{levels between 1 and 4}
#' \item{Likert}{likert response - "strongly agree", "agree", and so on}
#' \item{Lorem_Ipsum}{lorem ipsum text}
#' \item{Marital}{marital status- "married", "divorced", "widowed", "separated", etc}
#'   \item{Military}{military branch they are in}
#' \item{Month}{their favorite month}
#' \item{Name}{their name}
#' \item{Normal}{a random normal number}
#' \item{Political}{their favorite political party}
#' \item{Race}{their race}
#' \item{Religion}{their religion}
#' \item{SAT}{their SAT score}
#' \item{Sentence}{an uttered sentence}
#' \item{Sex_1}{sex of their first child}
#' \item{Sex_2}{sex of their second child}
#' \item{Smokes}{do they smoke}
#' \item{Speed}{their median speed travelled in a car}
#' \item{State}{the last state they visited in the USA}
#' \item{String}{a random string they smashed out on the keyboard}
#' \item{Upper}{the last key they hit in upper case}
#' \item{Valid}{TRUE FALSE answer to a question}
#' \item{Year}{significant year to that individuals}
#' \item{Zip}{a zip code they have visited}
#' }
"typical_data_large"
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/data-typical-data-large.R
|
#' A small toy dataset of imaginary people
#'
#' A dataset containing information about some randomly generated people,
#' created using the excellent `wakefield` package. It is created as
#' deliberately messy dataset.
#'
#' @format A data frame with 5000 rows and 11 variables:
#' \describe{
#' \item{ID}{Unique identifier for each individual, a sequential character
#' vector of zero-padded identification numbers (IDs). see ?wakefield::id}
#' \item{Race}{Race for each individual, "Black", "White", "Hispanic",
#'   "Asian", "Other", "Bi-Racial", "Native", and "Hawaiian", see
#' ?wakefield::race}
#' \item{Age}{Age of each individual, see ?wakefield::age}
#' \item{Sex}{Male or female, see ?wakefield::sex }
#' \item{Height(cm)}{Height in centimeters, see ?wakefield::height}
#' \item{IQ}{vector of intelligence quotients (IQ), see ?wakefield::iq}
#' \item{Smokes}{whether or not this person smokes, see ?wakefield::smokes}
#' \item{Income}{Yearly income in dollars, see ?wakefield::income}
#'   \item{Died}{Whether or not this person has died yet, see ?wakefield::died}
#' }
#'
"typical_data"
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/data-typical-data.r
|
#' Return data used to create vis_cor plot
#'
#' @param x data.frame
#' @param ... extra arguments (currently unused)
#'
#' @return data frame
#' @name data-vis-cor
#' @export
#'
#' @examples
#' data_vis_cor(airquality)
#'
#' \dontrun{
#' #return vis_dat data for each group
#' library(dplyr)
#' airquality %>%
#' group_by(Month) %>%
#' data_vis_cor()
#' }
data_vis_cor <- function(x, ...){
UseMethod("data_vis_cor")
}
#' @rdname data-vis-cor
#' @export
data_vis_cor.default <- function(x, ...){
data_vis_class_not_implemented("vis_cor")
}
#' Create a tidy dataframe of correlations suitable for plotting
#'
#' @param x data.frame
#' @param cor_method correlation method to use, from `cor`: "a character
#' string indicating which correlation coefficient (or covariance) is to be
#' computed. One of "pearson" (default), "kendall", or "spearman": can be
#' abbreviated."
#' @param na_action The method for computing covariances when there are missing
#' values present. This can be "everything", "all.obs", "complete.obs",
#' "na.or.complete", or "pairwise.complete.obs" (default). This option is
#' taken from the `cor` function argument `use`.
#'
#' @return tidy dataframe of correlations
#'
#' @examples
#' data_vis_cor(airquality)
#'
#' @rdname data-vis-cor
#' @export
data_vis_cor.data.frame <- function(x,
cor_method = "pearson",
na_action = "pairwise.complete.obs",
...){
stats::cor(x,
method = cor_method,
use = na_action) %>%
as.data.frame() %>%
tibble::rownames_to_column() %>%
tidyr::pivot_longer(
cols = -rowname,
names_to = "key",
values_to = "value"
) %>%
purrr::set_names(c("row_1", "row_2", "value"))
}
#' @rdname data-vis-cor
#' @export
data_vis_cor.grouped_df <- function(x, ...){
group_by_fun(x, data_vis_cor)
}
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/data-vis-cor.R
|
#' Return data used to create vis_dat plot
#'
#' @param x data.frame
#' @param ... extra arguments (currently unused)
#'
#' @return data frame
#' @name data-vis-dat
#' @export
#'
#' @examples
#' data_vis_dat(airquality)
#'
#' \dontrun{
#' #return vis_dat data for each group
#' library(dplyr)
#' airquality %>%
#' group_by(Month) %>%
#' data_vis_dat()
#' }
data_vis_dat <- function(x, ...){
UseMethod("data_vis_dat")
}
#' @rdname data-vis-dat
#' @export
data_vis_dat.default <- function(x, ...){
data_vis_class_not_implemented("vis_dat")
}
#' @rdname data-vis-dat
#' @export
data_vis_dat.data.frame <- function(x, ...){
x %>%
fingerprint_df() %>%
vis_gather_() %>%
# get the values here so plotly can make them visible
dplyr::mutate(value = vis_extract_value_(x))
}
#' @rdname data-vis-dat
#' @export
data_vis_dat.grouped_df <- function(x, ...){
group_by_fun(x, data_vis_dat)
}
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/data-vis-dat.R
|
#' Return data used to create vis_miss plot
#'
#' @param x data.frame
#' @param ... extra arguments (currently unused)
#'
#' @return data frame
#' @name data-vis-miss
#' @export
#'
#' @examples
#' data_vis_miss(airquality)
#'
#' \dontrun{
#' #return vis_dat data for each group
#' library(dplyr)
#' airquality %>%
#' group_by(Month) %>%
#' data_vis_miss()
#' }
data_vis_miss <- function(x, ...){
UseMethod("data_vis_miss")
}
#' @rdname data-vis-miss
#' @export
data_vis_miss.default <- function(x, ...){
data_vis_class_not_implemented("vis_miss")
}
#' Create a tidy dataframe of missing data suitable for plotting
#'
#' @param x data.frame
#' @param cluster logical - whether to cluster missingness. Default is FALSE.
#'
#' @return tidy dataframe of missing data
#'
#' @examples
#' data_vis_miss(airquality)
#'
#' @rdname data-vis-miss
#' @export
data_vis_miss.data.frame <- function(x, cluster = FALSE, ...){
x.na <- x %>%
purrr::map_df(~fingerprint(.x) %>% is.na)
# switch for creating the missing clustering
if (cluster){
# this retrieves a row order of the clustered missingness
row_order_index <-
stats::dist(x.na*1) %>%
stats::hclust(method = "mcquitty") %>%
stats::as.dendrogram() %>%
stats::order.dendrogram()
} else {
row_order_index <- seq_len(nrow(x))
} # end else
# Arranged data by dendrogram order index
# gather the variables together for plotting
# here we now have a column of the row number (row),
# then the variable(variables),
# then the contents of that variable (value)
vis_miss_data <- as.data.frame(x.na[row_order_index , ])
vis_miss_data %>%
vis_gather_() %>%
# add info for plotly mousover
dplyr::mutate(value = vis_extract_value_(vis_miss_data))
}
#' @rdname data-vis-miss
#' @export
data_vis_miss.grouped_df <- function(x, ...){
group_by_fun(x, data_vis_miss)
}
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/data-vis-miss.R
|
#' Take the fingerprint of a data.frame - find the class or return NA
#'
#' `fingerprint` is an internal function that takes the "fingerprint" of a
#' dataframe, and currently replaces the contents (x) with the class of a
#' given object, unless it is missing (coded as `NA`), in which case it leaves
#' it as `NA`. The name "fingerprint" is taken from csv-fingerprint, which
#' the `visdat` package is based upon
#'
#' @param x a vector
#' @keywords internal
#' @noRd
#'
fingerprint <- function(x){
# is the data missing?
if (!is.list(x)) {
ifelse(is.na(x),
# yes? Leave as is NA
yes = NA,
# no? make that value no equal to the class of this cell.
no = glue::glue_collapse(class(x),
sep = "\n")
)
} else {
ifelse(purrr::map_lgl(x,~length(.x)==0),
# yes? Leave as is NA
yes = NA,
# no? make that value no equal to the class of this cell.
no = glue::glue_collapse(class(x),
sep = "\n")
)
}
} # end function
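# For reference (not run): fingerprint() keeps NAs and replaces every other
# value with the class of its vector, e.g.
# fingerprint(c(1.5, NA, 2))   #> "numeric"   NA "numeric"
# fingerprint(c("a", NA, "b")) #> "character" NA "character"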
#' Run fingerprint on a dataframe
#'
#' @param x data frame
#'
#' @return data frame with column types and NA values
#' @noRd
#' @note internal
fingerprint_df <- function(x){
purrr::map_df(x, fingerprint)
}
#' (Internal) Gather rows into a format appropriate for grid visualisation
#'
#' @param x a dataframe
#'
#' @return data.frame gathered to have columns "variables", "valueType", and a
#' row id called "rows".
#' @keywords internal
#' @noRd
#'
vis_gather_ <- function(x){
x %>%
dplyr::mutate(rows = dplyr::row_number()) %>%
tidyr::pivot_longer(
cols = -rows,
names_to = "variable",
values_to = "valueType",
values_transform = list(valueType = as.character)
) %>%
dplyr::arrange(rows, variable, valueType)
}
#' (Internal) Add values of each row as a column
#'
#' This adds information about each row, so that when called by plotly, the
#' values are made visible on hover. Warnings are suppressed because `tidyr`
#' gives a warning about type coercion, which is fine.
#'
#' @param x dataframe created from `vis_gather_`
#'
#' @return the x dataframe with the added column `value`.
#' @noRd
#' @keywords internal
#'
vis_extract_value_ <- function(x){
data_longer <- tidyr::pivot_longer(
data = x,
cols = dplyr::everything(),
names_to = "variable",
values_to = "value",
values_transform = list(value = as.character)
)
data_longer$value
}
#' (Internal) Create a boilerplate for visualisations of the `vis_` family
#'
#' @param x a dataframe in longformat as transformed by `vis_gather_` and
#' `vis_extract_value`.
#'
#' @return a ggplot object
#' @keywords internal
#' @noRd
#'
vis_create_ <- function(x){
ggplot2::ggplot(data = x,
ggplot2::aes(x = variable,
y = rows,
# text assists with plotly mouseover
text = value)) +
ggplot2::geom_raster(ggplot2::aes(fill = valueType)) +
ggplot2::theme_minimal() +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45,
vjust = 1,
hjust = 1)) +
ggplot2::labs(x = "",
y = "Observations") +
# flip the axes
ggplot2::scale_y_reverse() +
ggplot2::theme(axis.text.x = ggplot2::element_text(hjust = 0.5)) +
ggplot2::guides(colour = "none")
}
#' (Internal) Add a specific palette to a visdat plot
#'
#' @param vis_plot visdat plot created using `vis_gather_`, `vis_extract_value`
#' and `vis_create_`
#' @param palette character "default", "qual" or "cb_safe". "default" (the
#' default) provides the stock ggplot scale for separating the colours. "qual"
#' uses an experimental qualitative colour scheme for providing distinct
#' colours for each Type. "cb_safe" is a set of colours that are appropriate
#' for those with colourblindness. "qual" and "cb_safe" are drawn from
#' http://colorbrewer2.org/.
#'
#' @return a visdat plot with a particular palette
#' @keywords internal
#' @noRd
#' @examples
#' \dontrun{
#' # see internal use inside vis_guess and vis_dat
#' }
#'
add_vis_dat_pal <- function(vis_plot, palette){
# palette options: http://docs.ggplot2.org/current/discrete_scale.html
# qualitative, 6 colours --------------------------------------------------
vis_pal_qual <- c("#e41a1c", # red
"#ffff33", # yellow
"#ff7f00", # Orange
"#377eb8", # blue
"#4daf4a", # Green
"#984ea3") # Purple
# diverging, 6 colours, colour-blind safe -------------------------------
vis_pal_cb_safe <- c('#d73027', # red
'#fc8d59', # orange
'#fee090', # yellow
'#e0f3f8', # light blue
'#91bfdb', # mid blue
'#4575b4') # dark blue
if (palette == "default"){
vis_plot
} else if (palette == "qual") {
vis_plot +
ggplot2::scale_fill_manual(limits = c("character",
"date",
"factor",
"integer",
"logical",
"numeric"),
breaks = c("character", # red
"date", # orange
"factor", # yellow
"integer", # light blue
"logical", # mid blue
"numeric"), # dark blue
values = vis_pal_qual,
na.value = "grey")
} else if (palette == "cb_safe") {
vis_plot +
ggplot2::scale_fill_manual(limits = c("character",
"date",
"factor",
"integer",
"logical",
"numeric"),
breaks = c("character", # red
"date", # orange
"factor", # yellow
"integer", # light blue
"logical", # mid blue
"numeric"), # dark blue
values = vis_pal_cb_safe,
na.value = "grey")
} else {
    cli::cli_abort(
      c(
        "Palette arguments need to be one of: 'qual', 'cb_safe', or 'default'",
        "Your palette argument was: {.arg {palette}}"
      )
    )
} # close else brace
} # close the function
#' Create labels for the columns containing the percent missing data
#'
#' @param x data.frame
#' @param col_order_index the order of the columns
#' @note internal
#' @keywords internal
#' @noRd
#' @return data.frame containing the missingness percent down to 0.1 percent
#'
label_col_missing_pct <- function(x,
col_order_index){
# present everything in the right order
labelled_pcts <- colMeans(is.na(x))[col_order_index] %>%
purrr::map_chr(function(x){
dplyr::case_when(
x == 0 ~ "0%",
x >= 0.001 ~ scales::percent(x, accuracy = 1),
x < 0.001 ~ "<0.1%"
)
})
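  # combine the column name and its percent missing into labels like "Ozone (24%)"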
glue::glue("{col_order_index} ({labelled_pcts})")
}
#' Label the legend with the percent of missing data
#'
#' `miss_guide_label` is an internal function for vis_miss to label the legend.
#'
#' @param x is a dataframe passed from vis_miss(x).
#'
#' @return a tibble with two columns `p_miss_lab` and `p_pres_lab`,
#' containing the labels to use for present and missing. A dataframe is
#' returned because I think it is a good style habit compared to a list.
#' @noRd
#' @keywords internal
#'
miss_guide_label <- function(x) {
p_miss <- (mean(is.na(x)) * 100)
if (p_miss == 0) {
p_miss_lab <- "No Missing Values"
p_pres_lab <- "Present (100%)"
} else if (p_miss < 0.1) {
p_miss_lab <- "Missing (< 0.1%)"
p_pres_lab <- "Present (> 99.9%)"
} else {
# calculate rounded percentages
p_miss <- round(p_miss, 1)
p_pres <- 100 - p_miss
# create the labels
p_miss_lab <- glue::glue("Missing \n({p_miss}%)")
p_pres_lab <- glue::glue("Present \n({p_pres}%)")
}
label_frame <- tibble::tibble(p_miss_lab,
p_pres_lab)
return(label_frame)
}
#' (Internal) Are they all numeric columns?
#'
#' @param x data.frame
#' @param ... optional extra inputs
#'
#' @return logical - TRUE means that all columns are numeric, FALSE
#'   means that at least one column is not numeric
#' @noRd
#' @keywords internal
#'
#' @examples
#'
#'\dontrun{
#' all_numeric(airquality) # TRUE
#' all_numeric(iris) # FALSE
#'}
#'
all_numeric <- function(x, ...){
all(as.logical(lapply(x, is.numeric)))
}
# Can I capture moving from a value to NA, or, from NA to another value?
is_binary <- function(x) all(x %in% c(0L, 1L, NA))
all_binary <- function(x, ...){
all(as.logical(lapply(x, is_binary)))
}
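# For reference (not run):
# all_binary(data.frame(x = c(0, 1, NA), y = c(1, 1, 0))) #> TRUE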
#' Test if input is a data.frame
#'
#' @param x object
#'
#' @return an error if input (x) is not a data.frame
#'
#' @examples
#' \dontrun{
#' # success
#' test_if_dataframe(airquality)
#' #fail
#' test_if_dataframe(AirPassengers)
#' }
#' @keywords internal
#' @noRd
test_if_dataframe <- function(x){
if (!inherits(x, "data.frame")) {
cli::cli_abort(
c(
"{.code vis_dat()} requires a {.cls data.frame}",
"the object I see has class(es): ",
"{.cls {glue::glue_collapse(class(x), sep = ', ', last = ', and ')}}"
)
)
}
}
test_if_all_numeric <- function(data){
if (!all_numeric(data)) {
cli::cli_abort(
c(
"Data input can only contain numeric values",
"Please subset the data to the numeric values you would like.",
"{.code dplyr::select(<data>, where(is.numeric))}",
"Can be helpful here!"
)
)
}
}
test_if_all_binary <- function(data){
if (!all_binary(data)) {
cli::cli_abort(
c(
"data input can only contain binary values",
"This means values are either 0 or 1, or NA.",
"Please subset the data to be binary values, or see {.code ?vis_value.}"
)
)
}
}
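# despite its name, this checks whether all values of x are identical
# (all successive differences are zero); used by scale_01() below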
monotonic <- function(x) all(diff(x) == 0)
#' Scale a vector between 0 and one.
#'
#' @param x numeric vector
#'
#' @return numeric vector between 0 and 1
#' @noRd
#' @keywords internal
scale_01 <- function(x) {
if (monotonic(x)){
return((x/x))
}
(x - min(x, na.rm = TRUE)) / diff(range(x, na.rm = TRUE))
}
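# For reference (not run):
# scale_01(c(2, 4, 6)) #> 0.0 0.5 1.0
# scale_01(c(5, 5, 5)) #> 1 1 1  (constant input maps to 1 via x/x)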
group_by_fun <- function(data,.fun, ...){
tidyr::nest(data) %>%
dplyr::mutate(data = purrr::map(data, .fun, ...)) %>%
tidyr::unnest(cols = c(data))
}
data_vis_class_not_implemented <- function(fun){
  # fetch the object from the calling method (assumed to have an argument `x`)
  # so that its class(es) can be reported in the error message
  x <- get("x", envir = parent.frame())
  cli::cli_abort(
    c(
      "We have not (yet) implemented the method for {.code {fun}} for \\
      object with class(es): ",
      "{.cls {glue::glue_collapse(class(x), sep = ', ', last = ', and ')}}"
    )
  )
}
update_col_order_index <- function(col_order_index, facet, env = environment()){
group_string <- deparse(substitute(facet, env = env))
facet_position <- which(col_order_index == group_string)
col_order_index <- col_order_index[-facet_position]
}
test_if_large_data <- function(x, large_data_size, warn_large_data){
if (ncol(x) * nrow(x) > large_data_size && warn_large_data){
cli::cli_abort(
c(
"Data exceeds recommended size for visualisation",
"Consider downsampling your data with {.fn dplyr::slice_sample}",
"Or set argument, {.arg warn_large_data} = {.arg FALSE}"
)
)
}
}
fast_n_miss_col <- function(x) colSums(is.na(x))
n_miss_col <- function(data, sort = FALSE){
# if no list columns
any_list <- any(purrr::map_lgl(data, is.list))
if (!any_list){
n_missing_cols <- fast_n_miss_col(data)
} else if (any_list){
n_missing_cols <- fast_n_miss_col(fingerprint_df(data))
}
if (sort){
n_missing_cols <- sort(n_missing_cols, decreasing = TRUE)
}
n_missing_cols
}
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/internals.R
|
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
#' @param lhs A value or the magrittr placeholder.
#' @param rhs A function call using the magrittr semantics.
#' @return The result of calling `rhs(lhs)`.
NULL
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/utils-pipe.R
|
#' Visualise binary values
#'
#'
#' @param data a data.frame
#' @param col_zero colour for zeroes, default is "salmon"
#' @param col_one colour for ones, default is "steelblue2"
#' @param col_na colour for NA, default is "grey90"
#' @param order optional character vector of the order of variables
#'
#' @return a ggplot plot of the binary values
#'
#' @examples
#' vis_binary(dat_bin)
#'
#' # changing order of variables
#' # create numeric names
#' df <- setNames(dat_bin, c("1.1", "8.9", "10.4"))
#' df
#'
#' # not ideal
#' vis_binary(df)
#' # good - specify the original order
#' vis_binary(df, order = names(df))
#' @export
vis_binary <- function(data,
col_zero = "salmon",
col_one = "steelblue2",
col_na = "grey90",
order = NULL) {
test_if_all_binary(data)
data %>%
vis_gather_() %>%
dplyr::mutate(value = vis_extract_value_(data)) %>%
dplyr::mutate(valueType = forcats::as_factor(valueType),
value = forcats::as_factor(value),
variable = forcats::fct_relevel(variable, order)) %>%
vis_create_() +
# change the limits etc.
ggplot2::guides(fill = ggplot2::guide_legend(title = "Value")) +
# add info about the axes
ggplot2::scale_x_discrete(position = "top") +
ggplot2::theme(axis.text.x = ggplot2::element_text(hjust = 0)) +
ggplot2::scale_fill_manual(values = c(col_zero, # zero
col_one), # one
na.value = col_na)
}
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/vis-binary.R
|
#' Visually compare two dataframes and see where they are different.
#'
#' `vis_compare`, like the other `vis_*` families, gives an at-a-glance ggplot
#' of a dataset, but in this case, hones in on visualising **two** different
#' dataframes of the same dimension, so it takes two dataframes as arguments.
#'
#' @param df1 The first dataframe to compare
#'
#' @param df2 The second dataframe to compare to the first.
#'
#' @return `ggplot2` object displaying which values in each data frame are
#' present in each other, and which are not.
#'
#' @seealso [vis_miss()] [vis_dat()] [vis_guess()] [vis_expect()] [vis_cor()]
#'
#' @examples
#'
#' # make a new dataset of airquality that contains some NA values
#' aq_diff <- airquality
#' aq_diff[1:10, 1:2] <- NA
#' vis_compare(airquality, aq_diff)
#' @export
vis_compare <- function(df1,
df2){
# could add a parameter, sort_match, to help with
# sort_match logical TRUE/FALSE.
# TRUE arranges the columns in order of most matches.
test_if_dataframe(df1)
test_if_dataframe(df2)
if (!identical(dim(df1), dim(df2))) {
cli::cli_abort(
c(
"{.fun vis_compare} requires identical dimensions of {.arg df1} and \\
{.arg df2}",
"The dimensions of {.arg df1} are: {dim(df1)}",
"The dimensions of {.arg df2} are: {dim(df2)}"
)
)
}
v_identical <- Vectorize(identical)
df_diff <- purrr::map2_df(df1, df2, v_identical)
d <- df_diff %>%
as.data.frame() %>%
purrr::map_df(compare_print) %>%
vis_gather_() %>%
dplyr::mutate(value_df1 = vis_extract_value_(df1),
value_df2 = vis_extract_value_(df2))
# then we plot it
ggplot2::ggplot(data = d,
ggplot2::aes(
x = variable,
y = rows)) +
# text assists with plotly mouseover
# text = c("value_df1", "value_df2"))) +
# this test code has been removed as ggplot2 version 3.0.0
# breaks.
# Logged in issue https://github.com/ropensci/visdat/issues/89
ggplot2::geom_raster(ggplot2::aes(fill = valueType)) +
ggplot2::theme_minimal() +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45,
vjust = 1,
hjust = 1)) +
ggplot2::labs(x = "",
y = "Observations",
# this prevents it from being used in the boilerplate
fill = "Cell Type") +
ggplot2::scale_fill_manual(limits = c("same",
"different"),
breaks = c("same", # red
"different"), # dark blue
values = c("#fc8d59", # Orange
"#91bfdb"), # blue
na.value = "grey") +
# flip the axes
ggplot2::scale_y_reverse() +
ggplot2::theme(axis.text.x = ggplot2::element_text(hjust = 0.25)) +
ggplot2::scale_x_discrete(position = "top",
limits = names(df_diff))
}
#' (Internal) A utility function for `vis_compare`
#'
#' `compare_print` is an internal function that creates a dataframe with
#' information about where there are differences in the dataframe. This
#' function is used in `vis_compare`. It evaluates the element-wise comparison
#' of `df1` and `df2` and (currently) replaces TRUE (the same) with "same",
#' FALSE with "different", and missing comparisons (coded as NA) with
#' "missing".
#'
#' @param x a vector
#' @keywords internal
#' @noRd
#'
compare_print <- function(x){
dplyr::if_else(x == "TRUE",
true = "same",
false = "different",
missing = "missing")
} # end function
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/vis-compare.R
|
#' Visualise correlations amongst variables in your data as a heatmap
#'
#' Visualise correlations amongst variables in your data as a heatmap
#'
#' @param data data.frame
#' @param cor_method correlation method to use, from `cor`: "a character
#' string indicating which correlation coefficient (or covariance) is to be
#' computed. One of "pearson" (default), "kendall", or "spearman": can be
#' abbreviated."
#' @param na_action The method for computing covariances when there are missing
#' values present. This can be "everything", "all.obs", "complete.obs",
#' "na.or.complete", or "pairwise.complete.obs" (default). This option is
#'   taken from the `cor` function argument `use`.
#' @param facet bare unquoted variable to use for facetting
#' @param ... extra arguments you may want to pass to `cor`
#'
#' @return ggplot2 object
#'
#' @export
#'
#' @examples
#' vis_cor(airquality)
#' vis_cor(airquality, facet = Month)
#' vis_cor(mtcars)
#' \dontrun{
#' # this will error
#' vis_cor(iris)
#' }
vis_cor <- function(data,
cor_method = "pearson",
na_action = "pairwise.complete.obs",
facet,
...){
test_if_dataframe(data)
test_if_all_numeric(data)
if (!missing(facet)){
data <- dplyr::group_by(data, {{ facet }})
}
cor_data <- data_vis_cor(data,
cor_method,
na_action)
vis_cor_plot <- vis_cor_create(cor_data)
if (!missing(facet)){
vis_cor_plot <- vis_cor_plot +
ggplot2::facet_wrap(facets = dplyr::vars({{ facet }}))
}
vis_cor_plot
}
vis_cor_create <- function(data){
ggplot2::ggplot(data = data,
ggplot2::aes(x = row_1,
y = row_2,
fill = value)) +
ggplot2::geom_raster() +
# colours from scico::scico(3, palette = "vik")
ggplot2::scale_fill_gradient2(low = "#001260",# blue
mid = "#EAEDE9", # white
high = "#601200", # red
breaks = c(-1, -0.5, 0, 0.5, 1),
limits = c(-1, 1)) +
ggplot2::theme_minimal() +
ggplot2::scale_x_discrete(position = "top") +
ggplot2::labs(x = "",
y = "") +
ggplot2::guides(fill = ggplot2::guide_legend(title = "Correlation")) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45,
hjust = 0))
}
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/vis-cor.R
|
#' Visualises a data.frame to tell you what it contains.
#'
#' `vis_dat` gives you an at-a-glance ggplot object of what is inside a
#' dataframe. Cells are coloured according to what class they are and whether
#' the values are missing. As `vis_dat` returns a ggplot object, it is very
#' easy to customize and change labels, and customize the plot
#'
#' @param x a data.frame object
#'
#' @param sort_type logical TRUE/FALSE. When TRUE (default), it sorts by the
#' type in the column to make it easier to see what is in the data
#'
#' @param palette character "default", "qual" or "cb_safe". "default" (the
#' default) provides the stock ggplot scale for separating the colours.
#' "qual" uses an experimental qualitative colour scheme for providing
#' distinct colours for each Type. "cb_safe" is a set of colours that are
#' appropriate for those with colourblindness. "qual" and "cb_safe" are drawn
#' from http://colorbrewer2.org/.
#'
#' @param warn_large_data logical - warn if there is large data? Default is TRUE
#' see note for more details
#'
#' @param large_data_size integer default is 900000 (given by
#'   `nrow(data.frame) * ncol(data.frame)`). This can be changed. See
#' note for more details.
#'
#' @param facet bare variable name for a variable you would like to facet
#' by. By default there is no facetting. Only one variable can be facetted.
#' You can get the data structure using `data_vis_dat` and the facetted
#' structure by using `group_by` and then `data_vis_dat`.
#'
#' @return `ggplot2` object displaying the type of values in the data frame and
#' the position of any missing values.
#'
#' @seealso [vis_miss()] [vis_guess()] [vis_expect()] [vis_cor()]
#' [vis_compare()]
#'
#' @note Some datasets might be too large to plot, sometimes creating a blank
#' plot - if this happens, I would recommend downsampling the data, either
#' looking at the first 1,000 rows or by taking a random sample. This means
#' that you won't get the same "look" at the data, but it is better than
#' a blank plot! See example code for suggestions on doing this.
#'
#' @examples
#'
#' vis_dat(airquality)
#'
#' # experimental colourblind safe palette
#' vis_dat(airquality, palette = "cb_safe")
#' vis_dat(airquality, palette = "qual")
#'
#' # if you have a large dataset, you might want to try downsampling:
#' \dontrun{
#' library(nycflights13)
#' library(dplyr)
#' flights %>%
#' sample_n(1000) %>%
#' vis_dat()
#'
#' flights %>%
#' slice(1:1000) %>%
#' vis_dat()
#'}
#'
#' @export
vis_dat <- function(x,
sort_type = TRUE,
palette = "default",
warn_large_data = TRUE,
large_data_size = 900000,
facet) {
test_if_dataframe(x)
test_if_large_data(x, large_data_size, warn_large_data)
if (sort_type) {
type_sort <- order(
# get the class, if there are multiple classes, combine them together
purrr::map_chr(.x = x,
.f = function(x) glue::glue_collapse(class(x),
sep = "\n"))
)
# get the names of those columns
col_order_index <- names(x)[type_sort]
} else {
# this means that the order remains the same as the dataframe.
col_order_index <- names(x)
}
# reshape the dataframe ready for geom_raster
if (!missing(facet)){
vis_dat_data <- x %>%
dplyr::group_by({{ facet }}) %>%
data_vis_dat()
col_order_index <- update_col_order_index(
col_order_index,
facet,
environment()
)
} else {
vis_dat_data <- data_vis_dat(x)
}
# do the plotting
vis_dat_plot <-
# add the boilerplate
vis_create_(vis_dat_data) +
# change the limits etc.
ggplot2::guides(fill = ggplot2::guide_legend(title = "Type")) +
# add info about the axes
ggplot2::scale_x_discrete(limits = col_order_index,
position = "top") +
ggplot2::theme(axis.text.x = ggplot2::element_text(hjust = 0))
if (!missing(facet)) {
vis_dat_plot <- vis_dat_plot +
ggplot2::facet_wrap(facets = dplyr::vars( {{ facet }} ))
}
# specify a palette ----------------------------------------------------------
add_vis_dat_pal(vis_dat_plot, palette)
} # close function
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/vis-dat.R
|
#' Visualise whether a value is in a data frame
#'
#' `vis_expect` visualises certain conditions or values in your data. For
#' example, if you are not sure whether to expect -1 in your data, you could
#' write: `vis_expect(data, ~.x == -1)`, and you can see if there are times
#' where the values in your data are equal to -1. You could also, for example,
#' explore a set of bad strings, or possible NA values and visualise where
#' they are using \code{vis_expect(data, ~.x \%in\% bad_strings)} where
#' `bad_strings` is a character vector containing bad strings like `N A`
#' `N/A` etc.
#'
#' @param data a data.frame
#' @param expectation a formula following the syntax: `~.x {condition}`.
#'   For example, writing `~.x < 20` would mean "highlight cells where a
#'   variable value is less than 20", and \code{~.x \%in\% {vector}} would mean
#'   "highlight cells where a variable has values that are in that vector".
#' @param show_perc logical. TRUE adds the \% of expectations that are
#'   TRUE or FALSE in the whole dataset to the legend. Default value is TRUE.
#' @return a ggplot2 object
#'
#' @seealso [vis_miss()] [vis_dat()] [vis_guess()] [vis_cor()] [vis_compare()]
#'
#' @export
#'
#' @examples
#'
#' dat_test <- tibble::tribble(
#' ~x, ~y,
#' -1, "A",
#' 0, "B",
#' 1, "C",
#' NA, NA
#' )
#'
#' vis_expect(dat_test, ~.x == -1)
#'
#' vis_expect(airquality, ~.x == 5.1)
#'
#' # explore some common NA strings
#'
#' common_nas <- c(
#' "NA",
#' "N A",
#' "N/A",
#' "na",
#' "n a",
#' "n/a"
#' )
#'
#' dat_ms <- tibble::tribble(~x, ~y, ~z,
#' "1", "A", -100,
#' "3", "N/A", -99,
#' "NA", NA, -98,
#' "N A", "E", -101,
#' "na", "F", -1)
#'
#' vis_expect(dat_ms, ~.x %in% common_nas)
#'
#'
vis_expect <- function(data, expectation, show_perc = TRUE){
test_if_dataframe(data)
data_expect <- expect_frame(data, expectation)
  # calculate the overall % of expectations to display in legend --------------
if (show_perc) {
temp <- expect_guide_label(data_expect)
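    # expect_guide_label() stores the label describing TRUE cells under
    # p_expect_false_lab (and vice versa), so swap them back here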
p_expect_true_lab <- temp$p_expect_false_lab
p_expect_false_lab <- temp$p_expect_true_lab
# else if show_perc FALSE (do nothing)
} else {
p_expect_true_lab <- "TRUE"
p_expect_false_lab <- "FALSE"
}
colnames_data <- colnames(data_expect)
data_expect <- data_expect %>%
# expect_frame(expectation) %>%
dplyr::mutate(rows = dplyr::row_number()) %>%
tidyr::pivot_longer(
cols = dplyr::all_of(colnames_data),
names_to = "variable",
values_to = "valueType",
values_transform = list(valueType = as.character)
)
data_expect <- data_expect %>%
dplyr::mutate(variable = factor(variable, levels = colnames_data))
vis_expect_plot <- data_expect %>%
ggplot2::ggplot(ggplot2::aes(x = variable,
y = rows)) +
ggplot2::geom_raster(ggplot2::aes(fill = valueType)) +
ggplot2::theme_minimal() +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45,
vjust = 1,
hjust = 1)) +
ggplot2::labs(x = "",
y = "Observations") +
# flip the axes
ggplot2::scale_y_reverse() +
ggplot2::scale_x_discrete(position = "top") +
ggplot2::scale_fill_manual(name = "",
values = c("#998ec3", # purple
"#f1a340", # orange
"grey"),
labels = c(p_expect_false_lab,
p_expect_true_lab),
na.value = "#E5E5E5") + # light gray
ggplot2::guides(fill = ggplot2::guide_legend(reverse = TRUE)) +
# change the limits etc.
ggplot2::guides(fill = ggplot2::guide_legend(title = "Expectation")) +
# add info about the axes
ggplot2::theme(legend.position = "bottom") +
# ggplot2::theme(axis.text.x = ggplot2::element_text(hjust = 0.5)) +
ggplot2::theme(axis.text.x = ggplot2::element_text(hjust = 0))
vis_expect_plot
}
#' Create a dataframe to help visualise 'expected' values
#'
#' @param data data.frame
#' @param expectation unquoted conditions or "expectations" to test
#'
#' @return data.frames where expectation are true
#' @author Stuart Lee and Earo Wang
#' @keywords internal
#' @noRd
#'
#' @examples
#' \dontrun{
#' dat_test <- tibble::tribble(
#' ~x, ~y,
#' -1, "A",
#' 0, "B",
#' 1, "C"
#' )
#'
#' expect_frame(dat_test,
#' ~ .x == -1)
#' }
expect_frame <- function(data, expectation){
my_fun <- purrr::as_mapper(expectation)
purrr::map_dfc(data, my_fun)
}
#' (Internal) Label the legend with the percent of missing data
#'
#' `miss_guide_label` is an internal function to label the legend of `vis_miss`.
#'
#' @param x is a dataframe passed from `vis_miss(x)`.
#'
#' @return a `tibble` with two columns `p_miss_lab` and `p_pres_lab`,
#' containing the labels to use for present and missing. A dataframe is
#' returned because I think it is a good style habit compared to a list.
#' @keywords internal
#' @noRd
#'
expect_guide_label <- function(x) {
p_expect <- (mean(as.matrix(x), na.rm = TRUE) * 100)
if (p_expect == 0) {
p_expect_false_lab <- "No Expectations True"
p_expect_true_lab <- "Present (100%)"
} else if (p_expect < 0.1) {
p_expect_false_lab <- "TRUE (< 0.1%)"
p_expect_true_lab <- "FALSE (> 99.9%)"
} else {
# calculate rounded percentages
p_expect_false <- round(p_expect, 1)
p_expect_true <- round(100 - p_expect,1)
# create the labels
p_expect_false_lab <- glue::glue("TRUE\n({p_expect_false}%)")
p_expect_true_lab <- glue::glue("FALSE\n({p_expect_true}%)")
}
label_frame <- tibble::tibble(p_expect_false_lab,
p_expect_true_lab)
return(label_frame)
}
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/vis-expect.R
|
#' Visualise type guess in a data.frame
#'
#' `vis_guess` visualises the class of every single individual cell in a
#' dataframe and displays it as ggplot object, similar to `vis_dat`. Cells
#' are coloured according to what class they are and whether the values are
#' missing. `vis_guess` estimates the class of individual elements using
#' `readr::guess_parser`. It may be currently slow on larger datasets.
#'
#' @param x a data.frame
#' @param palette character "default", "qual" or "cb_safe". "default" (the
#' default) provides the stock ggplot scale for separating the colours.
#' "qual" uses an experimental qualitative colour scheme for providing
#' distinct colours for each Type. "cb_safe" is a set of colours that are
#' appropriate for those with colourblindness. "qual" and "cb_safe" are drawn
#' from http://colorbrewer2.org/.
#'
#' @return `ggplot2` object displaying the guess of the type of values in the
#' data frame and the position of any missing values.
#'
#' @seealso [vis_miss()] [vis_dat()] [vis_expect()] [vis_cor()] [vis_compare()]
#'
#' @examples
#'
#' messy_vector <- c(TRUE,
#' "TRUE",
#' "T",
#' "01/01/01",
#' "01/01/2001",
#' NA,
#' NaN,
#' "NA",
#' "Na",
#' "na",
#' "10",
#' 10,
#' "10.1",
#' 10.1,
#' "abc",
#' "$%TG")
#' set.seed(1114)
#' messy_df <- data.frame(var1 = messy_vector,
#' var2 = sample(messy_vector),
#' var3 = sample(messy_vector))
#' vis_guess(messy_df)
#' @export
vis_guess <- function(x, palette = "default"){
test_if_dataframe(x)
# x = messy_df
# suppress warnings here as this is just a note about combining classes
d <- suppressWarnings(vis_gather_(x)) %>%
dplyr::mutate(valueType = guess_type(valueType)) %>%
# value for plotly mouseover
dplyr::mutate(value = vis_extract_value_(x))
# add the boilerplate information
vis_plot <- vis_create_(d) +
ggplot2::guides(fill = ggplot2::guide_legend(title = "Type")) +
# flip the axes, add info for axes
ggplot2::scale_x_discrete(position = "top",
limits = names(x)) +
ggplot2::theme(axis.text.x = ggplot2::element_text(hjust = 0))
# specify a palette ----------------------------------------------------------
add_vis_dat_pal(vis_plot, palette)
} # close function
#' (Internal) Guess the type of each individual cell in a dataframe
#'
#' `vis_guess` uses `guess_type` to guess cell elements, like `fingerprint`.
#'
#' @param x is a vector of values you want to guess
#'
#' @return a character vector that describes the suspected class. e.g., "10" is
#' an integer, "20.11" is a double, "text" is character, etc.
#'
#' @keywords internal
#' @noRd
#'
#' @examples
#' \dontrun{
#' guess_type(1)
#'
#' guess_type("x")
#'
#' guess_type(c("1", "0L"))
#'
#' purrr::map_df(iris, guess_type)
#' }
guess_type <- function(x){
# since
# readr::collector_guess(NA,
# locale_ = readr::locale())
#
# returns "character", use an ifelse to identify NAs
#
# This is a fast way to check individual elements of a vector.
# `purrr::map` writes more function calls, slowing down things by a factor
# of about 3. This is faster, for the moment.
output <- character(length(x))
nas <- (x %>% fingerprint() %>% is.na() | is.na(x))
output[!nas] <- vapply(FUN = readr::guess_parser,
X = x[!nas],
FUN.VALUE = character(1),
guess_integer = TRUE)
output[nas] <- NA
output
}
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/vis-guess.R
|
#' Visualise a data.frame to display missingness.
#'
#' `vis_miss` provides an at-a-glance ggplot of the missingness inside a
#' dataframe, colouring cells according to missingness, where black indicates
#' a missing cell and grey indicates a present cell. As it returns a ggplot
#' object, it is very easy to customize and change labels.
#'
#' The missingness summaries in the columns are rounded to the nearest integer.
#' For more detailed summaries, please see the summaries in the `naniar` R
#' package, specifically, `naniar::miss_var_summary()`.
#'
#' @param x a data.frame
#'
#' @param cluster logical. TRUE specifies that you want to use hierarchical
#' clustering (mcquitty method) to arrange rows according to missingness.
#' FALSE specifies that you want to leave it as is. Default value is FALSE.
#'
#' @param sort_miss logical. TRUE arranges the columns in order of missingness.
#' Default value is FALSE.
#'
#' @param show_perc logical. TRUE now adds in the \% of missing/complete data
#' in the whole dataset into the legend. Default value is TRUE.
#'
#' @param show_perc_col logical. TRUE adds in the \% missing data in a given
#' column into the x axis. Can be disabled with FALSE. Default value is TRUE.
#' No missingness percentage column information will be presented when `facet`
#' argument is used. Please see the `naniar` package to provide missingness
#' summaries over groups.
#'
#' @param warn_large_data logical - warn if there is large data? Default is TRUE
#' see note for more details
#'
#' @param large_data_size integer default is 900000 (given by
#'   `nrow(data.frame) * ncol(data.frame)`). This can be changed. See
#' note for more details.
#'
#' @param facet (optional) bare variable name, if you want to create a faceted
#' plot, with one plot per level of the variable. No missingness percentage
#' column information will be presented when `facet` argument is used. Please
#' see the `naniar` package to provide missingness summaries over groups.
#'
#' @return `ggplot2` object displaying the position of missing values in the
#' dataframe, and the percentage of values missing and present.
#'
#' @seealso [vis_dat()] [vis_guess()] [vis_expect()] [vis_cor()] [vis_compare()]
#'
#' @note Some datasets might be too large to plot, sometimes creating a blank
#' plot - if this happens, I would recommend downsampling the data, either
#' looking at the first 1,000 rows or by taking a random sample. This means
#' that you won't get the same "look" at the data, but it is better than
#' a blank plot! See example code for suggestions on doing this.
#'
#' @examples
#'
#' vis_miss(airquality)
#'
#' vis_miss(airquality, cluster = TRUE)
#'
#' vis_miss(airquality, sort_miss = TRUE)
#'
#' vis_miss(airquality, facet = Month)
#'
#' \dontrun{
#' # if you have a large dataset, you might want to try downsampling:
#' library(nycflights13)
#' library(dplyr)
#' flights %>%
#' sample_n(1000) %>%
#' vis_miss()
#'
#' flights %>%
#' slice(1:1000) %>%
#' vis_miss()
#' }
#'
#' @export
vis_miss <- function(
x,
cluster = FALSE,
sort_miss = FALSE,
show_perc = TRUE,
show_perc_col = TRUE,
large_data_size = 900000,
warn_large_data = TRUE,
facet
) {
test_if_dataframe(x)
test_if_large_data(x, large_data_size, warn_large_data)
if (sort_miss) {
col_order_index <- names(n_miss_col(x, sort = TRUE))
} else if (!sort_miss) {
col_order_index <- names(x)
}
if (!missing(facet)) {
vis_miss_data <- x %>%
dplyr::group_by({{ facet }}) %>%
data_vis_miss(cluster)
col_order_index <- update_col_order_index(
col_order_index,
facet,
environment()
)
} else {
vis_miss_data <- data_vis_miss(x, cluster)
}
# calculate the overall % missingness to display in legend -------------------
# make a TRUE/FALSE matrix of the data.
# This tells us whether it is missing (true) or not (false)
x_fingerprinted <- fingerprint_df(x)
if (show_perc) {
temp <- miss_guide_label(x_fingerprinted)
p_miss_lab <- temp$p_miss_lab
p_pres_lab <- temp$p_pres_lab
# else if show_perc FALSE
} else {
p_miss_lab <- "Missing"
p_pres_lab <- "Present"
}
# then we plot it
vis_miss_plot <- vis_create_(vis_miss_data) +
ggplot2::scale_fill_manual(
name = "",
values = c(
"grey80",
"grey20"
),
labels = c(
p_pres_lab,
p_miss_lab
)
) +
ggplot2::guides(fill = ggplot2::guide_legend(reverse = TRUE)) +
ggplot2::theme(legend.position = "bottom") +
# fix up the location of the text
ggplot2::theme(axis.text.x = ggplot2::element_text(hjust = 0))
# add the missingness column labels
  # if there is only one column you don't need to sort the columns
# this is perhaps a bit of a hacky way around, but I can't see another
# way around it. Related issue: https://github.com/ropensci/visdat/issues/72
if (ncol(x) == 1) {
if (show_perc_col) {
return(
vis_miss_plot <- vis_miss_plot +
ggplot2::scale_x_discrete(
position = "top",
labels = label_col_missing_pct(
x_fingerprinted,
col_order_index
)
)
)
} else if (!show_perc_col) {
return(
vis_miss_plot <- vis_miss_plot +
ggplot2::scale_x_discrete(
position = "top",
labels = col_order_index
)
)
}
}
if (!missing(facet)) {
vis_miss_plot <- vis_miss_plot +
ggplot2::facet_wrap(facets = dplyr::vars({{ facet }}))
}
if (show_perc_col && missing(facet)) {
# flip the axes, add the info about limits
vis_miss_plot <- vis_miss_plot +
ggplot2::scale_x_discrete(
position = "top",
limits = col_order_index,
labels = label_col_missing_pct(
x_fingerprinted,
col_order_index
)
)
} else {
vis_miss_plot <- vis_miss_plot +
ggplot2::scale_x_discrete(
position = "top",
limits = col_order_index
)
}
return(vis_miss_plot)
# guides(fill = guide_legend(title = "Type"))
# Thanks to
# http://www.markhneedham.com/blog/2015/02/27/rggplot-controlling-x-axis-order/
# For the tip on using scale_x_discrete
} # end of function
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/vis-miss.R
|
#' Visualise the value of data values
#'
#' Visualise all of the values in the data on a 0 to 1 scale. Only works on
#' numeric data - see examples for how to subset to only numeric data.
#'
#' @param data a data.frame
#' @param na_colour a character vector of length one describing what colour
#' you want the NA values to be. Default is "grey90"
#' @param viridis_option A character string indicating the colormap option to
#'   use. Five options are available: "magma" (or "A"), "inferno" (or "B"),
#' "plasma" (or "C"), "viridis" (or "D", the default option) and "cividis"
#' (or "E").
#'
#' @return a ggplot plot of the values
#' @export
#'
#' @examples
#'
#' vis_value(airquality)
#' vis_value(airquality, viridis_option = "A")
#' vis_value(airquality, viridis_option = "B")
#' vis_value(airquality, viridis_option = "C")
#' vis_value(airquality, viridis_option = "E")
#' \dontrun{
#' library(dplyr)
#' diamonds %>%
#' select_if(is.numeric) %>%
#' vis_value()
#'}
vis_value <- function(data,
na_colour = "grey90",
viridis_option = "D") {
test_if_all_numeric(data)
purrr::map_dfr(data, scale_01) %>%
vis_gather_() %>%
dplyr::mutate(
value = vis_extract_value_(data),
value = as.numeric(value),
valueType = as.numeric(valueType)
) %>%
vis_create_() +
# change the limits etc.
ggplot2::guides(fill = ggplot2::guide_legend(title = "Value")) +
# add info about the axes
ggplot2::scale_x_discrete(position = "top") +
ggplot2::theme(axis.text.x = ggplot2::element_text(hjust = 0)) +
ggplot2::scale_fill_viridis_c(option = viridis_option,
na.value = na_colour)
}
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/vis-value.R
|
#' visdat
#'
#' visdat is a package that helps with the preliminary visualisation of data.
#' visdat makes it easy to visualise your whole dataset so that you can
#' visually identify problems.
#'
#' @seealso
#'
#' Its main functions are:
#' \itemize{
#' \item [vis_dat()]
#' \item [vis_miss()]
#' \item [vis_guess()]
#' \item [vis_compare()]
#' \item [vis_expect()]
#' }
#'
#' Learn more about visdat at \url{https://docs.ropensci.org/visdat/}
#' @name visdat
#' @docType package
#' @importFrom magrittr %>%
#' @importFrom stats cor
#' @importFrom stats setNames
#' @keywords internal
"_PACKAGE"
if(getRversion() >= "2.15.1") utils::globalVariables(c("."))
globalVariables(c("valueGuess",
"valueType",
"variable",
"rows",
"row_1",
"row_2",
"value",
"cor",
"setNames",
"rowname",
"n"))
|
/scratch/gouwar.j/cran-all/cranData/visdat/R/visdat-package.r
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(visdat)
## ----standard-----------------------------------------------------------------
vis_dat(typical_data)
## ----custom-------------------------------------------------------------------
library(ggplot2)
vis_dat(typical_data) +
scale_fill_manual(
values = c(
"character" = "red",
"factor" = "blue",
"logical" = "green",
"numeric" = "purple",
"NA" = "gray"
))
## ----show-pal-----------------------------------------------------------------
palette()
## ----pal-hex-visdat-----------------------------------------------------------
vis_dat(typical_data) +
scale_fill_manual(
values = c(
"character" = "#61D04F",
"factor" = "#2297E6",
"logical" = "#28E2E5",
"numeric" = "#CD0BBC",
"NA" = "#F5C710"
))
## ----scale-fill-brewer--------------------------------------------------------
vis_dat(typical_data) +
scale_fill_brewer()
## ----scale-fill-viridis-------------------------------------------------------
vis_dat(typical_data) +
scale_fill_viridis_d()
|
/scratch/gouwar.j/cran-all/cranData/visdat/inst/doc/customising-colour-palettes.R
|
---
title: "Customising colour palettes in visdat"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Customising-colour-palettes-in-visdat}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(visdat)
```
# How to provide your own colour palette?
This vignette shows you how to provide your own colour palette with `visdat`.
A `visdat` plot is a `ggplot` object - so we can use the tools from ggplot to
tinker with colours. In this case, that is the `scale_fill_manual` function.
A "standard" visdat plot might be like so:
```{r standard}
vis_dat(typical_data)
```
You can name the colours yourself like so (after first loading the `ggplot2` package).
```{r custom}
library(ggplot2)
vis_dat(typical_data) +
scale_fill_manual(
values = c(
"character" = "red",
"factor" = "blue",
"logical" = "green",
"numeric" = "purple",
"NA" = "gray"
))
```
This is a pretty, uh, "popping" set of colours? You can also use some hex colours instead.
Say, taken from `palette()`:
```{r show-pal}
palette()
```
```{r pal-hex-visdat}
vis_dat(typical_data) +
scale_fill_manual(
values = c(
"character" = "#61D04F",
"factor" = "#2297E6",
"logical" = "#28E2E5",
"numeric" = "#CD0BBC",
"NA" = "#F5C710"
))
```
How can we get nicer ones?
Well, you can use any of the `scale_fill_*` functions from `ggplot2`.
For example:
```{r scale-fill-brewer}
vis_dat(typical_data) +
scale_fill_brewer()
```
```{r scale-fill-viridis}
vis_dat(typical_data) +
scale_fill_viridis_d()
```
Happy colour palette exploring! You might want to take a look at some of the following colour palettes from other packages:
- [scico](https://github.com/thomasp85/scico#ggplot2-support)
- [colorspace](https://cran.r-project.org/package=colorspace/vignettes/colorspace.html#Usage_with_ggplot2)
- [wesanderson](https://github.com/karthik/wesanderson#palettes)
|
/scratch/gouwar.j/cran-all/cranData/visdat/inst/doc/customising-colour-palettes.Rmd
|
## ----setup, echo = FALSE, include = FALSE-------------------------------------
knitr::opts_chunk$set(fig.width = 5,
fig.height = 4)
## ----head-iris----------------------------------------------------------------
head(iris)
## ----glimpse------------------------------------------------------------------
library(dplyr)
glimpse(iris)
## ----visdat-glimpse-----------------------------------------------------------
library(visdat)
glimpse(typical_data)
## ----load-data----------------------------------------------------------------
vis_dat(typical_data)
## ----example-vis-miss---------------------------------------------------------
vis_miss(typical_data)
## ----vis_dat------------------------------------------------------------------
vis_dat(airquality)
## ----visdat-typical-----------------------------------------------------------
vis_dat(typical_data)
vis_dat(typical_data,
sort_type = FALSE)
## ----vis_miss-----------------------------------------------------------------
vis_miss(airquality)
## ----vismiss-new-data---------------------------------------------------------
df_test <- data.frame(x1 = 1:10000,
x2 = rep("A", 10000),
x3 = c(rep(1L, 9999), NA))
vis_miss(df_test)
## ----vismiss-mtcars-----------------------------------------------------------
df_test <- data.frame(x1 = 1:10000,
x2 = rep("tidy", 10000),
x3 = rep("data", 10000))
vis_miss(df_test)
## ----vismiss------------------------------------------------------------------
vis_miss(airquality,
sort_miss = TRUE)
## ----vis_miss-cluster---------------------------------------------------------
vis_miss(airquality,
cluster = TRUE)
## ----vis-compare-iris---------------------------------------------------------
set.seed(2019-04-03-1107)
chickwts_diff <- chickwts
chickwts_diff[sample(1:nrow(chickwts), 30),sample(1:ncol(chickwts), 2)] <- NA
vis_compare(chickwts_diff, chickwts)
## ----vis-compare-error, eval = FALSE------------------------------------------
#
# chickwts_diff_2 <- chickwts
# chickwts_diff_2$new_col <- chickwts_diff_2$weight*2
#
# vis_compare(chickwts, chickwts_diff_2)
# # Error in vis_compare(chickwts, chickwts_diff_2) :
# # Dimensions of df1 and df2 are not the same. vis_compare requires dataframes of identical dimensions.
## ----vis-expect---------------------------------------------------------------
vis_expect(airquality, ~.x >= 25)
## ----vis-expect-bad-strings---------------------------------------------------
bad_data <- data.frame(x = c(rnorm(100), rep("N/A", 10)),
y = c(rep("N A ", 30), rnorm(80)))
vis_expect(bad_data, ~.x %in% c("N/A", "N A "))
## ----vis-cor------------------------------------------------------------------
vis_cor(airquality)
## ----vis-cor-spearman---------------------------------------------------------
vis_cor(airquality, cor_method = "spearman")
## ----vis-cor-na-action--------------------------------------------------------
vis_cor(airquality,
na_action = "complete.obs")
## ----vis-value----------------------------------------------------------------
vis_value(airquality)
## ----diamonds-error, eval = FALSE---------------------------------------------
# vis_value(iris)
## ----diamonds-error-subset----------------------------------------------------
iris %>%
select_if(is.numeric) %>%
vis_value()
## ----airquality-arrange-------------------------------------------------------
airquality %>%
arrange(Wind) %>%
vis_value()
## ----vis-binary---------------------------------------------------------------
vis_binary(dat_bin)
## ----create-messy-vec---------------------------------------------------------
messy_vector <- c(TRUE,
T,
"TRUE",
"T",
"01/01/01",
"01/01/2001",
NA,
NaN,
"NA",
"Na",
"na",
"10",
10,
"10.1",
10.1,
"abc",
"$%TG")
set.seed(1114)
messy_df <- data.frame(var1 = messy_vector,
var2 = sample(messy_vector),
var3 = sample(messy_vector))
## ----vis-guess-messy-df, fig.show='hold', out.width='50%'---------------------
vis_guess(messy_df)
vis_dat(messy_df)
## ----intx, eval = FALSE-------------------------------------------------------
#
# library(plotly)
# ggplotly(vis_dat(airquality))
# ggplotly(vis_miss(airquality))
# ggplotly(vis_guess(airquality))
#
|
/scratch/gouwar.j/cran-all/cranData/visdat/inst/doc/using_visdat.R
|
---
title: "Using visdat"
author: "Nicholas Tierney"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using visdat}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, echo = FALSE, include = FALSE}
knitr::opts_chunk$set(fig.width = 5,
fig.height = 4)
```
When you get a new data set, you need to look at the data to get a sense of what it contains and potential problems with it. That's a key phrase here "looking at the data" - what does that mean?
On the one hand, you can look at the head of the data:
```{r head-iris}
head(iris)
```
Or you can have a `glimpse` at it through `dplyr::glimpse`
```{r glimpse}
library(dplyr)
glimpse(iris)
```
Here we see that we have doubles, and a factor. We get some insight into the data.
But we don't always have data like the canonical iris dataset. Let's take a look at some data that might be a bit more typical of "messy" data using the `typical_data` dataset
from the `visdat` package.
```{r visdat-glimpse}
library(visdat)
glimpse(typical_data)
```
Looking at this, you might then ask:
> Isn't it odd that Income is a factor? And Age is a character?
And you might start to wonder what else is different, what else changed?
And it might be a bit unclear where to go from there. Do you plot the data? Why does my plot look weird? What are these other strange features in the data? The `visdat` package provides visualisations of an entire dataframe at once. Initially inspired by [`csv-fingerprint`](https://github.com/setosa/csv-fingerprint), `visdat` provides tools to create heatmap-like visualisations of an entire dataframe. `visdat` provides 2 main functions: `vis_dat` and `vis_miss`.
`vis_dat()` helps explore the data class structure and missingness:
```{r load-data}
vis_dat(typical_data)
```
And the `vis_miss` function provides a custom plot for missing data.
```{r example-vis-miss}
vis_miss(typical_data)
```
The name `visdat` was chosen as it borrows from the idea of [`testdat`](https://github.com/karthik/testdat), which provides unit testing for your data. In a similar way, `visdat` provides visual tests, the idea being that first you visualise your data (`visdat`), then you run tests from `testdat`, or a package like `assertr`, to fix these errors.
## `vis_dat`
Let's see what's inside the dataset `airquality`, which contains information about daily air quality measurements in New York from May to September 1973. More information about the dataset can be found with `?airquality`.
```{r vis_dat}
vis_dat(airquality)
```
The plot above tells us that R reads this dataset as having numeric and integer values, with some missing data in `Ozone` and `Solar.R`. The classes are represented on the legend, and missing data represented by grey. The column/variable names are listed on the x axis.
By default, `vis_dat` sorts the columns according to the type of the data in the vectors. You can turn this off by setting `sort_type = FALSE`. This feature is better illustrated using the `typical_data` dataset, created using [wakefield](https://github.com/trinker/wakefield) and contained within `visdat`.
```{r visdat-typical}
vis_dat(typical_data)
vis_dat(typical_data,
sort_type = FALSE)
```
## `vis_miss`
We can explore the missing data further using `vis_miss`.
```{r vis_miss}
vis_miss(airquality)
```
Notice that the percentages of missingness are provided in the data. These are accurate to 1 decimal place. `vis_miss` indicates when there is a very small amount of missing data at <0.1% missingness.
```{r vismiss-new-data}
df_test <- data.frame(x1 = 1:10000,
x2 = rep("A", 10000),
x3 = c(rep(1L, 9999), NA))
vis_miss(df_test)
```
`vis_miss` will also indicate when there is no missing data at all.
```{r vismiss-mtcars}
df_test <- data.frame(x1 = 1:10000,
x2 = rep("tidy", 10000),
x3 = rep("data", 10000))
vis_miss(df_test)
```
Columns can be arranged by columns with most missingness, by setting `sort_miss = TRUE`.
```{r vismiss}
vis_miss(airquality,
sort_miss = TRUE)
```
And missingness can be clustered by setting `cluster = TRUE`
```{r vis_miss-cluster}
vis_miss(airquality,
cluster = TRUE)
```
To further explore the missingness structure in a dataset, I recommend the [`naniar`](https://github.com/njtierney/naniar) package, which provides more general tools for graphical and numerical exploration of missing values.
## `vis_compare`
Sometimes you want to see what has changed in your data. `vis_compare()` displays the differences in two dataframes of the same size. Let's look at an example.
Let's make some changes to the `chickwts`, and compare this new dataset.
```{r vis-compare-iris}
set.seed(2019-04-03-1107)
chickwts_diff <- chickwts
chickwts_diff[sample(1:nrow(chickwts), 30),sample(1:ncol(chickwts), 2)] <- NA
vis_compare(chickwts_diff, chickwts)
```
Here the differences are marked in blue.
If you try and compare differences when the dimensions are different, you get an ugly error.
```{r vis-compare-error, eval = FALSE}
chickwts_diff_2 <- chickwts
chickwts_diff_2$new_col <- chickwts_diff_2$weight*2
vis_compare(chickwts, chickwts_diff_2)
# Error in vis_compare(chickwts, chickwts_diff_2) :
# Dimensions of df1 and df2 are not the same. vis_compare requires dataframes of identical dimensions.
```
## `vis_expect`
`vis_expect` visualises certain conditions or values in your data. For example,
if you are not sure whether to expect values greater than 25 in your data
(airquality), you could write: `vis_expect(airquality, ~.x >= 25)`, and you can
see if there are times where the values in your data are greater than or equal
to 25.
```{r vis-expect}
vis_expect(airquality, ~.x >= 25)
```
This shows the proportion of times that there are values greater than or equal to 25, as well as the missings.
You could also, for example, explore a
set of bad strings, or possible NA values and visualise where they are
using `vis_expect(data, ~.x %in% bad_strings)` where `bad_strings` is a
character vector containing bad strings like `N A`, `N/A` etc.
```{r vis-expect-bad-strings}
bad_data <- data.frame(x = c(rnorm(100), rep("N/A", 10)),
y = c(rep("N A ", 30), rnorm(80)))
vis_expect(bad_data, ~.x %in% c("N/A", "N A "))
```
## `vis_cor`
To make it easy to plot correlations of your data, use `vis_cor`:
```{r vis-cor}
vis_cor(airquality)
```
Under the hood, `vis_cor` is powered by the `cor` function in base R, and takes
a character string indicating which correlation coefficient (or covariance) is
to be computed: one of "pearson" (default), "kendall", or "spearman".
```{r vis-cor-spearman}
vis_cor(airquality, cor_method = "spearman")
```
You can also specify what to do for the missing data using the `na_action`
argument, which again borrows from the `cor` methods. This can be "everything",
"all.obs", "complete.obs", "na.or.complete", or "pairwise.complete.obs"
(default), e.g.:
```{r vis-cor-na-action}
vis_cor(airquality,
na_action = "complete.obs")
```
## `vis_value`
`vis_value()` visualises the values of your data on a 0 to 1 scale.
```{r vis-value}
vis_value(airquality)
```
It only works on numeric data:
```{r diamonds-error, eval = FALSE}
vis_value(iris)
```
```
data input can only contain numeric values, please subset the data to the numeric values you would like. dplyr::select_if(data, is.numeric) can be helpful here!
```
So you might need to subset the data beforehand like so:
```{r diamonds-error-subset}
iris %>%
select_if(is.numeric) %>%
vis_value()
```
It can be useful to arrange your data before using `vis_value` to explore possible relationships in the data:
```{r airquality-arrange}
airquality %>%
arrange(Wind) %>%
vis_value()
```
## `vis_binary`
`vis_binary()` visualises the occurrence of binary values in your data. It is
similar to `vis_value()` except it just focusses on values that are NA, 0, and 1.
```{r vis-binary}
vis_binary(dat_bin)
```
## `vis_guess`
`vis_guess()` takes a guess at what each cell is. It's best illustrated using some messy data, which we'll make here.
```{r create-messy-vec}
messy_vector <- c(TRUE,
T,
"TRUE",
"T",
"01/01/01",
"01/01/2001",
NA,
NaN,
"NA",
"Na",
"na",
"10",
10,
"10.1",
10.1,
"abc",
"$%TG")
set.seed(1114)
messy_df <- data.frame(var1 = messy_vector,
var2 = sample(messy_vector),
var3 = sample(messy_vector))
```
```{r vis-guess-messy-df, fig.show='hold', out.width='50%'}
vis_guess(messy_df)
vis_dat(messy_df)
```
So here we see that there are many different kinds of data in your dataframe. As an analyst this might be a depressing finding. We can see this comparison above.
Here, you might just assume your data is weird because it's all factors - or worse, not notice that this is a problem.
At the moment `vis_guess` is very slow. Please take this into consideration when you are using it on data with more than 1000 rows. We're looking into ways of making it faster, potentially using methods from the `parallel` package, or extending the C++ code from `readr:::collectorGuess`.
# Interactivity
You can make the plots in visdat interactive by wrapping them in `plotly::ggplotly`:
```{r intx, eval = FALSE}
library(plotly)
ggplotly(vis_dat(airquality))
ggplotly(vis_miss(airquality))
ggplotly(vis_guess(airquality))
```
In the future these will have their own functions, written in plotly with nice
standardised on-hover behaviour. If you would like to see how these work,
please see the [development version on GitHub](https://github.com/ropensci/visdat).
# Future work
Future work from here is focussed on making `visdat` more stable, improving the
speed of plotting, and adding interactive versions for each function.
|
/scratch/gouwar.j/cran-all/cranData/visdat/inst/doc/using_visdat.Rmd
|
---
title: "Customising colour palettes in visdat"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Customising-colour-palettes-in-visdat}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(visdat)
```
# How to provide your own colour palette?
This vignette shows you how to provide your own colour palette with `visdat`.
A `visdat` plot is a `ggplot` object - so we can use the tools from ggplot to
tinker with colours. In this case, that is the `scale_fill_manual` function.
A "standard" visdat plot might be like so:
```{r standard}
vis_dat(typical_data)
```
You can name the colours yourself like so (after first loading the `ggplot2` package).
```{r custom}
library(ggplot2)
vis_dat(typical_data) +
scale_fill_manual(
values = c(
"character" = "red",
"factor" = "blue",
"logical" = "green",
"numeric" = "purple",
"NA" = "gray"
))
```
This is a pretty, uh, "popping" set of colours? You can also use some hex colours instead.
Say, taken from `palette()`:
```{r show-pal}
palette()
```
```{r pal-hex-visdat}
vis_dat(typical_data) +
scale_fill_manual(
values = c(
"character" = "#61D04F",
"factor" = "#2297E6",
"logical" = "#28E2E5",
"numeric" = "#CD0BBC",
"NA" = "#F5C710"
))
```
How can we get nicer ones?
Well, you can use any of `ggplot2`'s `scale_fill_*` functions.
For example:
```{r scale-fill-brewer}
vis_dat(typical_data) +
scale_fill_brewer()
```
```{r scale-fill-viridis}
vis_dat(typical_data) +
scale_fill_viridis_d()
```
Happy colour palette exploring! You might want to take a look at some of the following colour palettes from other packages:
- [scico](https://github.com/thomasp85/scico#ggplot2-support)
- [colorspace](https://cran.r-project.org/package=colorspace/vignettes/colorspace.html#Usage_with_ggplot2)
- [wesanderson](https://github.com/karthik/wesanderson#palettes)
|
/scratch/gouwar.j/cran-all/cranData/visdat/vignettes/customising-colour-palettes.Rmd
|
---
title: "Using visdat"
author: "Nicholas Tierney"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using visdat}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, echo = FALSE, include = FALSE}
knitr::opts_chunk$set(fig.width = 5,
fig.height = 4)
```
When you get a new data set, you need to look at the data to get a sense of what it contains and potential problems with it. That's a key phrase here "looking at the data" - what does that mean?
On the one hand, you can look at the head of the data:
```{r head-iris}
head(iris)
```
Or you can have a `glimpse` at it through `dplyr::glimpse`
```{r glimpse}
library(dplyr)
glimpse(iris)
```
Here we see that we have doubles, and a factor. We get some insight into the data.
But we don't always have data like the canonical iris dataset. Let's take a look at some data that might be a bit more typical of "messy" data using the `typical_data` dataset
from the `visdat` package.
```{r visdat-glimpse}
library(visdat)
glimpse(typical_data)
```
Looking at this, you might then ask:
> Isn't it odd that Income is a factor? And Age is a character?
And you might start to wonder what else is different, what else changed?
And it might be a bit unclear where to go from there. Do you plot the data? Why does my plot look weird? What are these other strange features in the data? The `visdat` package provides visualisations of an entire dataframe at once. Initially inspired by [`csv-fingerprint`](https://github.com/setosa/csv-fingerprint), `visdat` provides tools to create heatmap-like visualisations of an entire dataframe. `visdat` provides 2 main functions: `vis_dat` and `vis_miss`.
`vis_dat()` helps explore the data class structure and missingness:
```{r load-data}
vis_dat(typical_data)
```
And the `vis_miss` function provides a custom plot for missing data.
```{r example-vis-miss}
vis_miss(typical_data)
```
The name `visdat` was chosen as it borrows from the idea of [`testdat`](https://github.com/karthik/testdat), which provides unit testing for your data. In a similar way, `visdat` provides visual tests, the idea being that first you visualise your data (`visdat`), then you run tests from `testdat`, or a package like `assertr`, to fix these errors.
## `vis_dat`
Let's see what's inside the dataset `airquality`, which contains information about daily air quality measurements in New York from May to September 1973. More information about the dataset can be found with `?airquality`.
```{r vis_dat}
vis_dat(airquality)
```
The plot above tells us that R reads this dataset as having numeric and integer values, with some missing data in `Ozone` and `Solar.R`. The classes are represented on the legend, and missing data represented by grey. The column/variable names are listed on the x axis.
By default, `vis_dat` sorts the columns according to the type of the data in the vectors. You can turn this off by setting `sort_type = FALSE`. This feature is better illustrated using the `typical_data` dataset, created using [wakefield](https://github.com/trinker/wakefield) and contained within `visdat`.
```{r visdat-typical}
vis_dat(typical_data)
vis_dat(typical_data,
sort_type = FALSE)
```
## `vis_miss`
We can explore the missing data further using `vis_miss`.
```{r vis_miss}
vis_miss(airquality)
```
Notice that the percentages of missingness are provided in the data. These are accurate to 1 decimal place. `vis_miss` indicates when there is a very small amount of missing data at <0.1% missingness.
```{r vismiss-new-data}
df_test <- data.frame(x1 = 1:10000,
x2 = rep("A", 10000),
x3 = c(rep(1L, 9999), NA))
vis_miss(df_test)
```
`vis_miss` will also indicate when there is no missing data at all.
```{r vismiss-mtcars}
df_test <- data.frame(x1 = 1:10000,
x2 = rep("tidy", 10000),
x3 = rep("data", 10000))
vis_miss(df_test)
```
Columns can be arranged by columns with most missingness, by setting `sort_miss = TRUE`.
```{r vismiss}
vis_miss(airquality,
sort_miss = TRUE)
```
And missingness can be clustered by setting `cluster = TRUE`
```{r vis_miss-cluster}
vis_miss(airquality,
cluster = TRUE)
```
To further explore the missingness structure in a dataset, I recommend the [`naniar`](https://github.com/njtierney/naniar) package, which provides more general tools for graphical and numerical exploration of missing values.
## `vis_compare`
Sometimes you want to see what has changed in your data. `vis_compare()` displays the differences in two dataframes of the same size. Let's look at an example.
Let's make some changes to the `chickwts`, and compare this new dataset.
```{r vis-compare-iris}
set.seed(2019-04-03-1107)
chickwts_diff <- chickwts
chickwts_diff[sample(1:nrow(chickwts), 30),sample(1:ncol(chickwts), 2)] <- NA
vis_compare(chickwts_diff, chickwts)
```
Here the differences are marked in blue.
If you try and compare differences when the dimensions are different, you get an ugly error.
```{r vis-compare-error, eval = FALSE}
chickwts_diff_2 <- chickwts
chickwts_diff_2$new_col <- chickwts_diff_2$weight*2
vis_compare(chickwts, chickwts_diff_2)
# Error in vis_compare(chickwts, chickwts_diff_2) :
# Dimensions of df1 and df2 are not the same. vis_compare requires dataframes of identical dimensions.
```
## `vis_expect`
`vis_expect` visualises certain conditions or values in your data. For example,
if you are not sure whether to expect values greater than 25 in your data
(airquality), you could write: `vis_expect(airquality, ~.x >= 25)`, and you can
see if there are times where the values in your data are greater than or equal
to 25.
```{r vis-expect}
vis_expect(airquality, ~.x >= 25)
```
This shows the proportion of times that there are values greater than or equal to 25, as well as the missings.
You could also, for example, explore a
set of bad strings, or possible NA values and visualise where they are
using `vis_expect(data, ~.x %in% bad_strings)` where `bad_strings` is a
character vector containing bad strings like `N A`, `N/A` etc.
```{r vis-expect-bad-strings}
bad_data <- data.frame(x = c(rnorm(100), rep("N/A", 10)),
y = c(rep("N A ", 30), rnorm(80)))
vis_expect(bad_data, ~.x %in% c("N/A", "N A "))
```
## `vis_cor`
To make it easy to plot correlations of your data, use `vis_cor`:
```{r vis-cor}
vis_cor(airquality)
```
Under the hood, `vis_cor` is powered by the `cor` function in base R, and takes
a character string indicating which correlation coefficient (or covariance) is
to be computed: one of "pearson" (default), "kendall", or "spearman".
```{r vis-cor-spearman}
vis_cor(airquality, cor_method = "spearman")
```
You can also specify what to do for the missing data using the `na_action`
argument, which again borrows from the `cor` methods. This can be "everything",
"all.obs", "complete.obs", "na.or.complete", or "pairwise.complete.obs"
(default), e.g.:
```{r vis-cor-na-action}
vis_cor(airquality,
na_action = "complete.obs")
```
## `vis_value`
`vis_value()` visualises the values of your data on a 0 to 1 scale.
```{r vis-value}
vis_value(airquality)
```
It only works on numeric data:
```{r diamonds-error, eval = FALSE}
vis_value(iris)
```
```
data input can only contain numeric values, please subset the data to the numeric values you would like. dplyr::select_if(data, is.numeric) can be helpful here!
```
So you might need to subset the data beforehand like so:
```{r diamonds-error-subset}
iris %>%
select_if(is.numeric) %>%
vis_value()
```
It can be useful to arrange your data before using `vis_value` to explore possible relationships in the data:
```{r airquality-arrange}
airquality %>%
arrange(Wind) %>%
vis_value()
```
## `vis_binary`
`vis_binary()` visualises the occurrence of binary values in your data. It is
similar to `vis_value()` except it just focusses on values that are NA, 0, and 1.
```{r vis-binary}
vis_binary(dat_bin)
```
## `vis_guess`
`vis_guess()` takes a guess at what each cell is. It's best illustrated using some messy data, which we'll make here.
```{r create-messy-vec}
messy_vector <- c(TRUE,
T,
"TRUE",
"T",
"01/01/01",
"01/01/2001",
NA,
NaN,
"NA",
"Na",
"na",
"10",
10,
"10.1",
10.1,
"abc",
"$%TG")
set.seed(1114)
messy_df <- data.frame(var1 = messy_vector,
var2 = sample(messy_vector),
var3 = sample(messy_vector))
```
```{r vis-guess-messy-df, fig.show='hold', out.width='50%'}
vis_guess(messy_df)
vis_dat(messy_df)
```
So here we see that there are many different kinds of data in your dataframe. As an analyst this might be a depressing finding. We can see this comparison above.
Here, you might just assume your data is weird because it's all factors - or worse, not notice that this is a problem.
At the moment `vis_guess` is very slow. Please take this into consideration when you are using it on data with more than 1000 rows. We're looking into ways of making it faster, potentially using methods from the `parallel` package, or extending the C++ code from `readr:::collectorGuess`.
# Interactivity
You can make the plots in visdat interactive by wrapping them in `plotly::ggplotly`:
```{r intx, eval = FALSE}
library(plotly)
ggplotly(vis_dat(airquality))
ggplotly(vis_miss(airquality))
ggplotly(vis_guess(airquality))
```
In the future these will have their own functions, written in plotly with nice
standardised on-hover behaviour. If you would like to see how these work,
please see the [development version on GitHub](https://github.com/ropensci/visdat).
# Future work
Future work from here is focussed on making `visdat` more stable, improving the
speed of plotting, and adding interactive versions for each function.
|
/scratch/gouwar.j/cran-all/cranData/visdat/vignettes/using_visdat.Rmd
|
# Generated by rstantools. Do not edit by hand.
# names of stan models
stanmodels <- c("visit")
# load each stan module
Rcpp::loadModule("stan_fit4visit_mod", what = TRUE)
# instantiate each stanmodel object
stanmodels <- sapply(stanmodels, function(model_name) {
# create C++ code for stan model
stan_file <- if(dir.exists("stan")) "stan" else file.path("inst", "stan")
stan_file <- file.path(stan_file, paste0(model_name, ".stan"))
stanfit <- rstan::stanc_builder(stan_file,
allow_undefined = TRUE,
obfuscate_model_name = FALSE)
stanfit$model_cpp <- list(model_cppname = stanfit$model_name,
model_cppcode = stanfit$cppcode)
# create stanmodel object
methods::new(Class = "stanmodel",
model_name = stanfit$model_name,
model_code = stanfit$model_code,
model_cpp = stanfit$model_cpp,
mk_cppmodule = function(x) get(paste0("rstantools_model_", model_name)))
})
|
/scratch/gouwar.j/cran-all/cranData/visit/R/stanmodels.R
|
#' Obtain decision map information
#'
#' Summarize the posterior distribution of \eqn{\theta^{(l)}_{00},
#' \theta^{(l)}_{01}, \theta^{(l)}_{10}, \theta^{(l)}_{11}} and get
#' information for making dose escalation decisions
#'
#' @rdname vtDecMap
#'
#' @inheritParams parameters
#'
#' @param thetas Posterior samples of \eqn{\theta}, a class \code{VTPOST} matrix
#' generated by \code{\link{vtPost}}
#'
#' @details
#'
#' This function summarizes the posterior distribution of the
#' \eqn{\theta^{(l)}_{00}, \theta^{(l)}_{01}, \theta^{(l)}_{10},
#' \theta^{(l)}_{11}} and sequentially gets the conditional probabilities of
#' each decision map region. See \code{\link{visit}} for details of the
#' decision map regions.
#'
#' @return
#'
#' A class \code{VTDEC} list. See the return value from \code{\link{vtInterim}}
#' for details.
#'
#' @examples
#' etas <- c(0.1, 0.3)
#' dec.cut <- c(0.6,0.6,0.6)
#' obs.y <- rbind(c(5, 2, 0, 0))
#' rst.post <- vtPost(obs.y, prob.mdl = "NONPARA", nsmp = 2000)
#' dec.map <- vtDecMap(rst.post, etas = etas, dec.cut = dec.cut)
#'
#' @export
#'
vtDecMap <- function(thetas, etas, prev.res=0, dec.cut=0.6) {
stopifnot(get.const()$CLSPOST %in% class(thetas));
## allow 3 cuts
dec.cut <- rep(dec.cut, 3)[1:3];
## posterior region probabilities
cur.smp <- thetas;
cur.dlt <- cur.smp[,3] + cur.smp[,4];
cur.res <- cur.smp[,2] + cur.smp[,4];
rst <- rep(0,4);
##d.reg.1: over toxic
rst[1] <- mean(cur.dlt >= etas[2]);
##d.reg.2: less effective
rst[2] <- mean(cur.dlt < etas[2] & cur.res < prev.res);
##d.reg.3: safe and effective
rst[3] <- mean(cur.dlt < etas[1] & cur.res >= prev.res);
##d.reg.4: not over toxic, not safe, effective
rst[4] <- 1 - sum(rst[1:3]);
##conditional approach
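    ## cond.prob[1]: P(less effective | not over toxic)
    ## cond.prob[2]: P(safe and effective | not over toxic, not less effective)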
cond.prob <- rep(0,2);
cond.prob[1] <- rst[2]/(1 - rst[1] + 1e-10); ## avoid Nan
cond.prob[2] <- rst[3]/(1 - sum(rst[1:2]) + 1e-10);
if (rst[1] > dec.cut[1]) {
region <- 1;
cond.prob[1:2] <- 0;
} else if (cond.prob[1] > dec.cut[2]) {
region <- 2;
cond.prob[2] <- 0;
} else if (cond.prob[2] > dec.cut[3]) {
region <- 3;
} else {
region <- 4;
}
rst.dec <- list(prob = rst,
region = region,
ptox = mean(cur.dlt),
pres = mean(cur.res),
cond.prob = cond.prob,
prev.res = mean(prev.res),
etas = etas,
dec.cut = dec.cut);
class(rst.dec) <- get.const()$CLSDEC;
rst.dec
}
#' Conduct interim analysis
#'
#' Conduct an interim analysis for determining dose escalation actions
#'
#' @rdname vtInterim
#'
#' @inheritParams parameters
#'
#' @param cur.obs.y Observed data from the current level, which is a vector of
#' length 4. The numbers correspond to \code{obs.y} in \code{\link{vtPost}}.
#' @param prev.obs.y Observed data from previous levels, which has the same
#' structure as \code{obs.y} in \code{\link{vtPost}}.
#' @param ... Additional arguments for \code{\link{vtPost}}
#'
#' @details
#'
#' Using data from previous levels and the current level to conduct Bayesian
#' analysis, get the decision map information and make decisions about dose
#' escalation actions. The actions include stop the trial, escalate to the next
#' higher dose level, or enroll more patients in the current level. See \code{\link{visit}}
#' for details.
#'
#' @return
#' A class \code{VTDEC} list containing
#' \itemize{
#' \item{prob: }{Probabilities of each decision map region}
#' \item{region: }{The region selected based on the sequential procedure described in \code{\link{visit}} }
#' \item{ptox: }{Mean risk of DLT, \eqn{E(p^{(l)})}}
#' \item{pres: }{Mean immune response rate, \eqn{E(q^{(l)})}}
#' \item{cond.prob: }{Conditional probabilities of each decision map region}
#' \item{prev.res: }{Function parameter}
#' \item{etas: }{Function parameter}
#' \item{dec.cut: }{Function parameter}
#' }
#'
#' @examples
#'
#' etas <- c(0.1, 0.3)
#' dec.cut <- c(0.6,0.6,0.6)
#' cur.obs.y <- c(3, 2, 1, 1)
#' prev.obs.y <- c(5, 2, 0, 0)
#' rst.inter <- vtInterim(cur.obs.y, prev.obs.y = prev.obs.y,
#' prob.mdl = "NONPARA", etas = etas,
#' dec.cut = dec.cut,
#' nsmp = 2000);
#'
#' @export
#'
vtInterim <- function(cur.obs.y, prev.obs.y = NULL, prev.res = NULL,
etas = c(0.1,0.3),
dec.cut = 0.65,
priors = NULL,
prob.mdl = c("NONPARA", "NONPARA+", "PARA", "PARA+"),
seed = NULL,
...) {
    if (!is.null(seed)) {
        ## set.seed() returns NULL invisibly, so capture the current RNG state instead
        old_seed <- if (exists(".Random.seed", envir = .GlobalEnv))
                        get(".Random.seed", envir = .GlobalEnv)
                    else NULL;
        set.seed(seed);
    }
prob.mdl <- match.arg(prob.mdl);
##bayesian
post.smp <- vtPost(rbind(cur.obs.y), prob.mdl, priors, ...);
if (!is.null(prev.obs.y)) {
post.prev <- vtPost(rbind(prev.obs.y), prob.mdl, priors, ...);
prev.res <- apply(post.prev[,c(2,4)],1,sum);
} else if (is.null(prev.res)){
stop("Please provide either prev.obs.y or prev.res");
}
## decision
rst.dec <- vtDecMap(post.smp, etas, prev.res=prev.res, dec.cut=dec.cut);
    ## restore the previous random seed state
    if (!is.null(seed) && !is.null(old_seed)) {
        assign(".Random.seed", old_seed, envir = .GlobalEnv);
    }
rst.dec
}
#' Plot decision map
#'
#' Plot a decision map based on a class \code{VTDEC} object that contains the
#' current posterior analysis results
#'
#' @rdname plot.VTDEC
#'
#' @param x A class \code{VTDEC} list generated by \code{\link{vtDecMap}}
#' @param margin Margin between regions in the decision map
#' @param nms Labels of the regions on a decision map. Defaults are:
#' \itemize{
#' \item{\code{TT}:}{Too Toxic}
#' \item{\code{NME}:}{No More Effective}
#' \item{\code{SE}:}{Safe and Effective}
#' \item{\code{UN}:}{Uncertain}
#' }
#' @param col.reg Background color of the selected region
#' @param col.prob Text color of the selected region.
#' @param cex.prob Text size of the probabilities
#' @param cex.nms Text size of the region labels
#' @param ... Optional arguments for \code{plot}.
#'
#' @examples
#'
#' etas <- c(0.1, 0.3)
#' dec.cut <- c(0.6,0.6,0.6)
#' cur.obs.y <- c(3, 2, 1, 1)
#' prev.obs.y <- c(5, 2, 0, 0)
#' rst.inter <- vtInterim(cur.obs.y, prev.obs.y = prev.obs.y,
#' prob.mdl = "NONPARA", etas = etas, dec.cut = dec.cut,
#' nsmp = 2000);
#' plot(rst.inter)
#'
#' @method plot VTDEC
#'
#' @export
#'
plot.VTDEC <- function(x,
margin = 0.003, nms = c("TT", "NME", "SE", "UN"),
col.reg = "pink", col.prob = "blue", cex.prob = 0.9, cex.nms = 1,
...) {
f.reg <- function(x1, x2, y1, y2, labels, cols, tt = margin) {
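        ## draw one decision-map region as a rectangle (shrunk by the margin) and
        ## label it with its probability, conditional probability and region name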
x1 <- x1 + tt;
x2 <- x2 - tt;
y1 <- y1 + tt;
y2 <- y2 - tt;
rect(x1, y1, x2, y2, col = cols[1], lwd = 1);
text(x1+(x2-x1)/2, y1+(y2-y1)/2, labels = labels[1], col=cols[2], cex=cex.prob);
text(x1+(x2-x1)/2, y1+(y2-y1)/2, labels = labels[2], col=cols[2], cex=cex.prob, pos = 1);
text(x1+(x2-x1)/2, y2, labels = labels[3], cex=cex.nms, pos = 1);
}
##
etas <- x$etas;
prev.res <- round(x$prev.res,2);
probs <- x$prob * 100;
cond.prob <- x$cond.prob * 100;
col.regs <- rep("white", 4);
col.ps <- rep("black", 4);
col.regs[x$region] <- col.reg;
col.ps[x$region] <- col.prob;
##plot
par(xaxs="i", yaxs="i");
plot(NULL, xlim=c(0,1), ylim=c(0,1),
xlab="DLT Risk", ylab="Immune Response Rate",
axes=FALSE, ...);
##box(lwd = 3);
##axis(1, at = round(etas,2));
##region I
lbls <- c(sprintf("%.2f", probs[1]), "", nms[1]);
f.reg(etas[2], 1, 0, 1, labels = lbls, cols=c(col.regs[1], col.ps[1]));
##region II
if (prev.res > 0) {
axis(2, at = prev.res);
l2 <- "";
if (0 < cond.prob[1]) {
l2 <- sprintf("(%.2f)", cond.prob[1]);
}
lbls <- c(sprintf("%.2f", probs[2]), l2, nms[2]);
f.reg(0, etas[2], 0, prev.res,
labels = lbls, cols=c(col.regs[2], col.ps[2]));
}
##region III
l2 <- "";
if (0 < cond.prob[2]) {
l2 <- sprintf("(%.2f)", cond.prob[2]);
}
lbls <- c(sprintf("%.2f", probs[3]), l2, nms[3]);
f.reg(0, etas[1], prev.res, 1,
labels = lbls, cols=c(col.regs[3], col.ps[3]));
##region IV
lbls <- c(sprintf("%.2f", probs[4]), "", nms[4]);
f.reg(etas[1], etas[2], prev.res, 1,
labels = lbls, cols=c(col.regs[4], col.ps[4]));
}
#' Plot the track plot of dose escalation
#'
#' Generate a plot representing the observed data and dose escalation decisions.
#'
#' @rdname vtTrack
#'
#' @param obs.all All observations collected in a matrix with 5 columns. Column
#' 1 is the index of interim analysis starting from 1. Columns 2-5
#' correspond to columns 1-4 in \code{obs.y} for \code{\link{vtPost}}.
#' @param cex.txt Text size of numbers in the plot
#' @param decision Dose escalation decision. The options are
#' \itemize{
#' \item{\code{1}: }{Escalate}
#' \item{\code{2}: }{Continue at the same level}
#' \item{\code{3}: }{Stop the trial}
#' }
#'
#' @param max.level Maximum number of dose levels shown in the plot
#' @param letters Labels for dose escalation actions 1-3. Default values are
#' "E", "C", "S"
#' @param colors Possible colors in the last action box
#' @param height Height of each individual box
#' @param end.width Width of the last action box
#' @param end.height Height of the last action box
#' @param cex.roman Text size of the roman numerals
#' @param cex.end Text size of the letter in the last action box
#' @param ... Optional arguments for \code{plot}.
#'
#'
#' @examples
#'
#' obs.all <- rbind(c(1, 5, 2, 0, 0),
#' c(2, 3, 4, 0, 0),
#' c(3, 1, 6, 0, 0));
#' vtTrack(obs.all, end.width = 0.8, max.level = 3, decision = 3);
#'
#' @export
#'
vtTrack <- function(obs.all,
cex.txt = 0.9, decision = 1, max.level = NULL,
letters = c("E", "C", "S"),
colors = c("green", "yellow", "red"), height = 0.5,
end.width = 2, end.height = height, cex.roman = 0.9, cex.end = 0.9,
...) {
f.rec <- function(x, y, width, height, ys, cex.txt, margin = 0.1) {
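        ## draw the four outcome cells for one interim look, one per (DLT, response)
        ## combination, with T/NoT and R/NoR flags on top and the observed count below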
width <- width/4;
cys <- rbind(c(0,0), c(0,1), c(1,0), c(1,1));
for (i in 1:4) {
cur.x <- x + width *(i-1);
xleft <- cur.x;
xright <- xleft + 0.5 * width;
ytop <- y;
ybottom <- ytop - 0.5 * height;
if (1 == cys[i,1]) {
r.txt <- "T";
r.col <- "gray80";
} else {
r.txt <- "NoT";
r.col <- "white";
}
rect(xleft, ybottom, xright, ytop, col=r.col, lwd=0.1);
text(xleft + 0.25 * width, ytop - 0.25 * height, labels = r.txt, cex = 0.5 * cex.txt);
xleft <- cur.x + 0.5 * width;
xright <- xleft + 0.5 * width;
ytop <- y;
ybottom <- ytop - 0.5 * height;
if (1 == cys[i,2]) {
r.txt <- "R";
r.col <- "pink";
} else {
r.txt <- "NoR";
r.col <- "white";
}
rect(xleft, ybottom, xright, ytop, col=r.col, lwd=0.1);
text(xleft + 0.25 * width, ytop - 0.25 * height, labels = r.txt, cex = 0.5 * cex.txt);
xleft <- cur.x;
xright <- xleft + width;
ytop <- y - 0.5 * height;
ybottom <- ytop - 0.5 * height;
rect(xleft, ybottom, xright, ytop, lwd=0.1);
text(xleft + 0.5 * width, ytop - 0.25 * height, labels = ys[i], cex = cex.txt);
rect(cur.x, y - height, cur.x + width, y, lwd=1);
}
}
f.arrow <- function(x, y, width, height, direction, labels = NULL) {
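        ## draw a flat/up/down arrow between boxes, labelled with the interim
        ## analysis index as a roman numeral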
end.xy <- switch(direction,
"flat" = c(width, 0, 3),
"up" = c(0, height, 2),
"down" = c(0, -height, 4));
arrows(x, y, x + end.xy[1], y + end.xy[2], length = 0.1);
text(x + end.xy[1]/2, y + end.xy[2]/2,
labels = as.roman(labels), pos = end.xy[3], col = "brown", cex = cex.roman);
}
f.end <- function(x, y, width, height, letter, color) {
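        ## draw the final action box with the decision letter on a coloured background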
rect(x - 0.5*width, y - 0.5*height, x + 0.5*width, y + 0.5*height, lwd = 1);
rect(x - 0.45*width, y - 0.45*height, x + 0.45*width, y + 0.45*height,
lwd = 1, col = color);
text(x, y, labels = letter, cex = cex.end);
}
if (is.null(max.level))
max.level <- max(obs.all[,1]);
tbl.level <- table(obs.all[,1]);
x.tot <- 0;
for (i in 1:length(tbl.level)) {
x.tot <- x.tot + 4*tbl.level[i] + tbl.level[i] - 1;
}
x.tot <- x.tot - length(tbl.level) + 1;
x.max <- (x.tot + 3) * 1;
y.max <- max.level * 1;
plot(NULL, NULL, xlim=c(0, x.max), ylim=c(height/2, y.max),
axes = FALSE, xlab = "", ylab = "", ...);
box();
last.level <- 0;
cur.x <- 1;
cur.y <- 1;
inx <- 0;
for (i in 1:nrow(obs.all)) {
cur.level <- obs.all[i,1];
if (cur.level != last.level) {
last.level <- cur.level;
if (cur.level > 1) {
inx <- inx + 1;
f.arrow(cur.x + 3.5, cur.y, 0, 1-height, "up", labels = inx);
cur.x <- cur.x + 3;
cur.y <- cur.y + 1;
}
} else {
inx <- inx + 1;
f.arrow(cur.x + 4, cur.y - 0.5*height, 1, 0, "flat", labels = inx);
cur.x <- cur.x + 5;
}
f.rec(cur.x, cur.y, 4, height = height, ys = obs.all[i, 2:5], cex.txt = cex.txt);
}
inx <- inx + 1;
f.arrow(cur.x + 4, cur.y - 0.5*height, 1, 0, "flat", labels = inx);
f.end(cur.x + 5 + end.width/2, cur.y - 0.5*height, width = end.width, height = end.height,
letter = letters[decision], color = colors[decision]);
}
|
/scratch/gouwar.j/cran-all/cranData/visit/R/visit_analysis.R
|
#' cancer Vaccine phase I design with Simultaneous evaluation of Immunogenicity and Toxicity
#'
#' @docType package
#' @name visit-package
#' @aliases visit
#' @useDynLib visit, .registration = TRUE
#'
#' @importFrom rstan sampling extract stanc rstan_options traceplot stan_rhat
#' @importFrom rstantools rstan_config
#' @importFrom RcppParallel RcppParallelLibs
#' @importFrom grDevices colors
#' @importFrom graphics axis box legend lines par plot points text arrows grid rect
#' @importFrom parallel detectCores
#' @importFrom utils as.roman
#'
#' @import stats
#' @import Rcpp
#' @import methods
#'
#' @description
#'
#' This package contains the functions for implementing the \strong{visit} design for
#' Phase I cancer vaccine trials.
#'
#' @section Background:
#'
#' Phase I clinical trials are the first step in drug development to apply a new
#' drug or drug combination on humans. Typical designs of Phase I trials use
#' toxicity as the primary endpoint and aim to find the maximum tolerable
#' dosage. However, these designs are generally inapplicable for the development
#' of cancer vaccines because the primary objectives of a cancer vaccine Phase I
#' trial often include determining whether the vaccine shows biologic activity.
#'
#' The \strong{visit} design allows dose escalation to simultaneously account
#' for immunogenicity and toxicity. It uses lower dose levels as the reference
#' for determining if the current dose level is optimal in terms of immune
#' response. It also ensures subject safety by capping the toxicity rate with a
#' given upper bound. These two criteria are simultaneously evaluated using an
#' intuitive decision region that avoids complicated safety and immunogenicity
#' trade-off elicitation from physicians.
#'
#' There are several considerations that are clinically necessary for developing
#' the design algorithm. First, we assume that there is a non-decreasing
#' relationship that exists between toxicity and dosage, i.e., the toxicity risk
#' does not decrease as dose level increases. Second, the immune response rate
#' may reach a plateau or even start to decline as the dose level increases.
#'
#' @section Notation:
#'
#' For subject \eqn{s}, let \eqn{D_s=l} (\eqn{l=1,\ldots,L}) denote the received
#' dose level, \eqn{T_s=1} if any DLT event is observed from the subject and
#' \eqn{0} otherwise, \eqn{R_s=1} if immune response is achieved for the subject
#' and \eqn{0} otherwise.
#'
#' Let \eqn{\theta^{(l)}_{ij}=P(T=i, R=j|D=l)} for \eqn{i,j=0,1},
#' \eqn{\theta^{(l)}=\{\theta_{ij}^{(l)}:i,j=0,1\}} and \eqn{\Theta =
#' \{\theta^{(l)}: l=1,\ldots,L\}}. Furthermore, for dose level \eqn{l}, let
#' \eqn{p^{(l)}=P(T=1|D=l)=\theta_{10}^{(l)}+\theta_{11}^{(l)}} be the DLT risk,
#' \eqn{q^{(l)}=P(R=1|D=l)=\theta_{01}^{(l)}+\theta_{11}^{(l)}} be the immune
#' response probability, and
#' \eqn{r^{(l)}=\theta_{00}^{(l)}\theta_{11}^{(l)}/\theta_{01}^{(l)}\theta_{10}^{(l)}}
#' be the odds ratio. Let \eqn{n_{ij}^{(l)}} be the observed number of subjects
#' with \eqn{T=i} and \eqn{R=j} at dose level \eqn{l},
#' \eqn{n^{(l)}=\{n_{ij}^{(l)}:i,j=0,1\}} and \eqn{H} denote all the data
#' observed by the time the current analysis is conducted.
#'
#' @section Dose escalation algorithm:
#'
#' The dose escalation algorithm is based on the posterior probability
#' distribution of \eqn{\pi(p^{(l)}, q^{(l)}|H)}, where \eqn{p^{(l)}} and
#' \eqn{q^{(l)}} represent the DLT risk and immune response rate, respectively,
#' of the current dose level \eqn{l}, and \eqn{H} denotes the cumulative data at
#' the time of interim analysis.
#'
#' Let \eqn{p_l} denote the lower boundary of DLT risk below which the dose is
#' considered absolutely safe, \eqn{p_u} denote the upper boundary of DLT risk
#' above which the dose is considered toxic. \strong{visit} implements a sequential
#' identification approach based on conditional probabilities derived from
#' \eqn{\pi(p^{(l)}, q^{(l)}|H)}. Let \eqn{C_1, C_2, C_3} be fixed cut-off
#' values in \eqn{[0,1]}. The steps are as follows:
#'
#' \describe{
#'
#' \item{Step 1.}{If \eqn{Prob(p^{(l)} > p_U|H) > C_1}, then the current dose level is
#' considered to be \strong{too toxic}. The trial should be stopped and the next lower
#' dose level should be reported as the recommended dose.}
#'
#' \item{Step 2.}{\eqn{Prob(q^{(l)} \leq q_L| p^{(l)} \leq p_U, H) > C_2}, then the
#' current dose level is considered to be \strong{no more effective than its lower dose}
#' levels. The trial should be stopped and the next lower dose level should be
#' reported as the recommended dose.}
#'
#' \item{Step 3.}{If \eqn{Prob(p^{(l)} \leq p_L| p^{(l)} \leq p_U, q^{(l)} >
#' q_L, H) > C_3}, then the current dose level is considered to be \strong{safe and
#' effective}. The trial will escalate to dose level \eqn{l+1}.}
#'
#' \item{Step 4.}{The current dose level is considered to be \strong{uncertain}. The
#' trial should continue to treat more patients at dose level \eqn{l}.}
#'
#' }
#'
#' The values of \eqn{C_1, C_2, C_3} should be chosen prior to study initiation
#' and reflect the considerations of the investigators and patients. These
#' thresholds should also give reasonable overall study operating
#' characteristics.
#'
#' We can see that, based on the posterior distribution of \eqn{\pi(p^{(l)},
#' q^{(l)}|H)}, the current dose level is in one of the four regions:
#' \strong{1: too toxic}, \strong{2: no more effective than its lower dose},
#' \strong{3: safe and effective}, and \strong{4: uncertain}. These regions are termed
#' as a \code{Decision Map}.
#'
#'
#' @section Probability models:
#'
#' \strong{visit} provides several options for the probability models that can
#' be considered for Bayesian inference. The models are non-decreasing with
#' respect to the dose-toxicity relationship and avoid monotonic assumptions for
#' the dose-immune response curve.
#'
#' \subsection{Non-parametric model}{As one of the simplest models, we posit
#' no assumptions on the dose-toxicity or dose-immune response relationships and
#' assume the outcome data \eqn{n_{00}, n_{01}, n_{10}, n_{11}} follow a
#' multinomial distribution.
#' }
#'
#' \subsection{Non-parametric+ model}{This is the simplified
#' \strong{non-parametric} model with the odds ratios \eqn{r=1}. }
#'
#' \subsection{Partially parametric model}{Compared to non-parametric models, a
#' parametric model may allow the incorporation of dose-toxicity, dose-efficacy,
#' and toxicity-efficacy relationships in dose escalation. In the context of
#' evaluating cancer vaccines, however, it is difficult to posit assumptions on
#' the dose-efficacy relationship, since the immune response rate may even
#' decrease as the dose level increases. On the other hand, it remains
#' reasonable to assume that the dose-toxicity curve is non-decreasing.
#' Therefore, we propose a partially parametric model that only makes
#' assumptions about dose-toxicities but leaves the dose-immune response
#' relationship unspecified.
#'
#' Specifically, we construct the dose-toxicity model as: \deqn{ \log p^{(l)}=
#' e^\alpha \log \tau^{(l)}. } The \eqn{\tau^{(l)}}'s are deterministic design
#' parameters reflecting the expectation of the DLT risk at dose level \eqn{l}
#' with \eqn{\tau^{(l)} > \tau^{(l')}} for \eqn{l> l'}.
#'
#' For the immune response and the odds ratio, we assume \eqn{q^{(l)}} and
#' \eqn{r^{(l)}} at different dose levels are independent a priori.
#' }
#'
#' \subsection{Partially parametric+ model}{This is the simplified
#' \strong{partially parametric} model with the odds ratios \eqn{r=1}.}
#'
#' @section Graphical user interface:
#'
#' This package provides a web-based graphical user interface developed using R
#' Shiny. See \code{\link{vtShiny}} for details.
#'
#' @references
#'
#' Wang, C., Rosner, G. L., & Roden, R. B. (2019). A Bayesian design for phase I cancer
#' therapeutic vaccine trials. Statistics in medicine, 38(7), 1170-1189.
NULL
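## The block below is an illustrative sketch only (it is not part of the package
## API and is never executed): it spells out the sequential decision rule from the
## "Dose escalation algorithm" section above for a set of hypothetical posterior
## region probabilities, mirroring the logic implemented in vtDecMap().
if (FALSE) {
    probs <- c(0.10, 0.15, 0.50, 0.25)  ## P(region 1..4 | H), hypothetical values
    C     <- c(0.65, 0.65, 0.65)        ## cut-offs C1, C2, C3
    cond1 <- probs[2] / (1 - probs[1])             ## P(region 2 | not region 1)
    cond2 <- probs[3] / (1 - probs[1] - probs[2])  ## P(region 3 | not regions 1, 2)
    if (probs[1] > C[1]) {
        region <- 1  ## too toxic: stop, report the next lower dose
    } else if (cond1 > C[2]) {
        region <- 2  ## no more effective: stop, report the next lower dose
    } else if (cond2 > C[3]) {
        region <- 3  ## safe and effective: escalate
    } else {
        region <- 4  ## uncertain: continue at the current dose
    }
}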
#' Parameters
#'
#' Parameters that are shared by multiple functions
#'
#' @name parameters
#'
#' @param obs.y Observed data matrix with \eqn{l} rows and 4 columns. Row \eqn{k} in the matrix
#' represents the observed data from dose level \eqn{k}. The columns are
#'
#' \itemize{
#' \item{column 1: }{number of patients with no DLT, no immune response}
#' \item{column 2: }{number of patients with no DLT, immune response}
#' \item{column 3: }{number of patients with DLT, no immune response}
#' \item{column 4: }{number of patients with DLT, immune response}
#' }
#'
#' @param prob.mdl Option of the probability models:
#'
#' \itemize{\item{NONPARA: }{non-parametric+ model}
#'
#' \item{NONPARA+: }{non-parametric model}
#'
#' \item{PARA: }{partially parametric model}
#'
#' \item{PARA+: }{partially parametric+ model} }
#'
#' Default value is \code{NONPARA}. See \code{\link{visit}} for details.
#'
#' @param priors A class \code{VTPRIOR} object created by
#' \code{\link{vtPriorPar}} for \code{PARA} and \code{PARA+} model.
#'
#'
#' @param etas Vector of length 2 representing \eqn{(p_L, p_U)}. \eqn{p_L}: lower
#' bound of DLT risk, below which the current dose is considered absolutely
#' safe; \eqn{p_U}: upper bound of DLT risk above which the current dose is
#' considered too toxic
#'
#' @param prev.res Response rate from the next lower dose level, say, \eqn{l-1}.
#' This can be a scalar representing the mean of the response rate
#' \eqn{E(q^{(l-1)})}, or a vector of posterior samples of the response rate
#' \eqn{q^{(l-1)}}. For \eqn{l=1}, this value is set to \eqn{0}.
#'
#' @param dec.cut Thresholds \eqn{C_1,C_2,C_3}. If the vector length is shorter
#' than \eqn{3}, it is repeated to have \eqn{3} elements. See
#' \code{\link{visit}} for details.
#'
#' @param digits Digits for print
#'
#' @param seed Random seed
#'
#' @param ... Reserved parameters
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/visit/R/visit_package.R
|
##constants in the package
get.const <- function() {
rst <- list(REGIONS = c("Toxic", "Ineffective", "Safe,Effective", "Effective,Safety concern"),
THETA = c("No DLT, No Response", "No DLT, Response",
"DLT, No Response", "DLT, Response"),
CLSPRIOR = "VTPRIOR",
CLSPOST = "VTPOST",
CLSTRUEPS = "VTTRUEPS",
CLSSIMU = "VTSIMU",
CLSDEC = "VTDEC");
rst$DENLEGEND <- c("Toxicity Rate", "Response Rate", rst$THETA);
rst
}
##set options
set.option <- function(x, opt.x) {
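    ## keep only the elements of x that are valid options in opt.x;
    ## fall back to opt.x when x is NULL or nothing valid remains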
if (is.null(x)) {
x <- opt.x;
} else {
x <- x[x %in% opt.x];
if (0 == length(x)) {
x <- opt.x;
}
}
x
}
## p = P(tox);
## q = P(response);
## rho = odds ratio p00p11/p10/p01
get.p11 <- function(pqr) {
p <- pqr[1];
q <- pqr[2];
rho <- pqr[3];
if (1 == rho) {
p11 = p*q;
p01 = (1-p)*q;
p10 = p*(1-q);
p00 = (1-p)*(1-q);
} else {
p11 = -(sqrt((p+q-p*rho-q*rho-1)^2-4*(rho-1)*p*q*rho)+(p+q-p*rho-q*rho-1))/2/(rho-1);
p01 = q-p11;
p10 = p-p11;
p00 = p01*p10*rho/p11;
}
##check
if (0) {
cp <- p11 + p10;
cq <- p11 + p01;
crho <- p11*p00/p01/p10;
print(c(cp, cq, crho));
}
rst <- c(p00,p01,p10,p11);
rst <- rst/sum(rst);
rst
}
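## Illustrative check (a minimal sketch, not executed by the package): get.p11()
## returns c(theta00, theta01, theta10, theta11) for the given marginals and odds
## ratio; the cells should sum to 1 and reproduce the requested marginals.
if (FALSE) {
    theta <- get.p11(c(0.2, 0.5, 2));  ## p = 0.2, q = 0.5, rho = 2
    sum(theta);                        ## should be 1
    theta[3] + theta[4];               ## should equal p = 0.2
    theta[2] + theta[4];               ## should equal q = 0.5
}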
rdirichlet <- function (n, alpha) {
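    ## draw n samples from a Dirichlet(alpha) distribution via normalised Gamma draws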
l <- length(alpha);
x <- matrix(rgamma(l * n, alpha), ncol = l, byrow = TRUE);
sm <- x %*% rep(1, l);
return(x/as.vector(sm))
}
##get samples using the priors
get.prior.smp <- function(p.prior, model = 0, n=10000) {
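    ## for each dose level, draw prior samples of the DLT rate (via tau and alpha),
    ## the response rate (Beta), the odds ratio (log-normal), and the implied
    ## theta_ij cell probabilities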
stopifnot(get.const()$CLSPRIOR %in% class(p.prior));
ndose <- nrow(p.prior);
last.alpha <- 0;
rst <- list(NULL);
for (dose.level in 1:ndose) {
beta <- p.prior[dose.level,];
        ## toxicity rate
tau <- beta["TAU"];
if (0 == model) {
alpha <- rnorm(n, last.alpha, beta["SDALPHA"]);
last.alpha <- alpha;
} else {
alpha <- rnorm(n, beta["MEANALPHA"], beta["SDALPHA"]);
}
smp.tox <- tau^exp(alpha);
## response
smp.rep <- rbeta(n, beta["A"], beta["B"]);
## odds ratio
smp.r <- exp(rnorm(n, beta["C"], beta["D"]));
## theta ij
smp.theta <- apply(cbind(smp.tox, smp.rep, smp.r), 1, get.p11);
rst[[dose.level]] <- list(smp.tox=smp.tox,
smp.rep=smp.rep,
smp.r=smp.r,
smp.theta=t(smp.theta));
}
rst
}
##combine simu results
lst.combine <- function(lst, var) {
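    ## extract element `var` from every replicate in lst and combine into an array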
simplify2array(lapply(lst, function(x) {x[[var]]}));
}
##---------------------------------------------------
## project specic functions
##---------------------------------------------------
plot.densities <- function(x, draw.curves = 1:6, draw.levels=NULL,
legends=NULL, mfrow=NULL,
ltys = c(1,1,2,2,2,2),
cols = c("red", "blue", "brown", "black", "gray", "green"),
max.y.adjust = 1.1,
...) {
lst.dens <- x;
if (is.null(legends)) {
legends <- get.const()$DENLEGEND;
}
draw.levels <- set.option(draw.levels, 1:length(lst.dens));
draw.curves <- set.option(draw.curves, 1:6);
if (is.null(mfrow)) {
mfrow <- c(ceiling(length(draw.levels)/2), min(2, length(draw.levels)));
}
##get all densities
max.y <- 0;
for (l in draw.levels) {
cur.den <- lst.dens[[l]];
for (k in draw.curves) {
max.y <- max(max.y, cur.den[[k]]$y);
}
}
## plot
par(mfrow = mfrow);
for (l in draw.levels) {
plot(NULL, xlim=c(0,1), ylim=c(0, max.y.adjust*max(max.y)),
xlab="Rate", ylab="Density", main=paste("Level", l, sep=" "),
bty="n");
for (k in draw.curves) {
lines(lst.dens[[l]][[k]], col = cols[k], lty = ltys[k]);
}
if (draw.levels[1] == l) {
legend("topright", bty="n", legend = legends[draw.curves],
lty = ltys[draw.curves], col = cols[draw.curves]);
}
}
}
plot.VTPRIOR <- function(x, model = 0, n = 10000, draw.levels = NULL, adjust = 1.2,
...) {
##get samples
priors <- x;
draw.levels <- set.option(draw.levels, 1:nrow(priors));
prior.smps <- get.prior.smp(priors, model = model, n = n);
##get all densities
lst.dens <- list(NULL);
for (l in draw.levels) {
cur.den <- list(NULL);
cur.den[[1]] <- density(prior.smps[[l]]$smp.tox, adjust = adjust, na.rm = TRUE);
cur.den[[2]] <- density(prior.smps[[l]]$smp.rep, adjust = adjust, na.rm = TRUE);
for (k in 1:4) {
cur.den[[k+2]] <- density(prior.smps[[l]]$smp.theta[,k], adjust=adjust, na.rm = TRUE);
}
lst.dens[[l]] <- cur.den;
}
## plot
plot.densities(lst.dens, draw.levels = draw.levels, ...);
}
summary.VTPRIOR <- function(object, qs = c(0.025,0.5, 0.975), model = 0, n = 10000, ...) {
f.sum <- function(l, lbl, smp) {
c("Level" = l,
"Prob" = lbl,
"Mean" = mean(smp, na.rm = TRUE),
quantile(smp, qs, na.rm = TRUE));
}
legends <- get.const()$DENLEGEND;
##get samples
priors <- object;
prior.smps <- get.prior.smp(priors, model = model, n = n);
##get all densities
rst <- NULL;
for (l in 1:nrow(priors)) {
rst <- rbind(rst, f.sum(l, legends[1], prior.smps[[l]]$smp.tox));
rst <- rbind(rst, f.sum(l, legends[2], prior.smps[[l]]$smp.rep));
for (k in 1:4) {
rst <- rbind(rst,
f.sum(l, legends[k+2], prior.smps[[l]]$smp.theta[,k]));
}
}
## plot
data.frame(rst);
}
|
/scratch/gouwar.j/cran-all/cranData/visit/R/visit_private.R
|
#' Run Web-Based \code{visit} application
#'
#' Call Shiny to run \code{visit} as a web-based application.
#'
#' @details
#'
#' A web browser will be brought up for users to access the GUI of \code{\link{visit}}.
#'
#' @examples
#' \dontrun{
#' vtShiny()}
#'
#' @export
#'
vtShiny <- function() {
req.pkgs <- c("shiny", "xtable", "knitr", "rmarkdown", "pander");
chk.uninstalled <- sapply(req.pkgs, function(x) {!requireNamespace(x, quietly = TRUE)});
chk.inx <- which(chk.uninstalled);
if (0 < length(chk.inx)) {
msg <- paste("For the GUI to work, please install ",
ifelse(1 < length(chk.inx), "packages ", "package "),
paste(req.pkgs[chk.inx], collapse = ", "),
" by \n install.packages(",
paste(paste("'", req.pkgs[chk.inx], "'", sep = ""), collapse = ", "),
") \n ",
sep = "");
stop(msg, call. = FALSE);
}
appDir <- system.file("shiny", package = "visit")
if (appDir == "") {
stop("Could not find Shiny directory. Try re-installing `visit`.",
call. = FALSE)
}
shiny::runApp(appDir, display.mode = "normal");
}
|
/scratch/gouwar.j/cran-all/cranData/visit/R/visit_shiny.R
|
#' S3 Summary function
#'
#' @param x object
#' @param ... reserved parameters
#'
#' @export
summary2 <- function (x, ...) {
UseMethod("summary2", x)
}
#' Set simulation scenario
#'
#' Simulation function. Get true \eqn{\theta}'s using marginal probabilities and
#' odds ratio \eqn{\rho} for all dose levels.
#'
#' @rdname vtScenario
#'
#' @param tox Vector of marginal DLT risk rates for all levels
#' @param res Vector of marginal immune response rates for all levels
#' @param rho Vector of odds ratio for all levels. If length of \code{rho} is
#' shorter than the length of \code{tox} or \code{res}, vector \code{rho} is
#' repeated to have the same length as \code{tox} and \code{res}.
#'
#' @details
#'
#' The calculation is as follows. If \eqn{\rho = 1}, then \eqn{\theta_{11} =
#' pq}, \eqn{\theta_{01} = (1-p)q}, \eqn{\theta_{10} = p(1-q)}, and
#' \eqn{\theta_{00} = (1-p)(1-q)}. Otherwise,
#' \eqn{\theta_{11} = -(\sqrt{A}+B)/2/(\rho-1)},
#' \eqn{\theta_{01} = q-\theta_{11}}, \eqn{\theta_{10} = p-\theta_{11}}, and
#' \eqn{\theta_{00} = \theta_{01}\theta_{10}\rho/\theta_{11}}, where
#' \eqn{A=(p+q-p\rho-q\rho-1)^2-4(\rho-1)pq\rho} and
#' \eqn{B=p+q-p\rho-q\rho-1}.
#'
#'
#' @return a \code{VTTRUEPS} object containing all \eqn{\theta}'s in a matrix
#' with its number of rows equaling the number of dose levels and its number
#' of columns being 4.
#'
#' @examples
#' rst.sce <- vtScenario(tox=c(0.05, 0.05, 0.08), res=c(0.2, 0.3, 0.5), rho=1)
#'
#' @export
#'
vtScenario <- function(tox = c(0.05, 0.05, 0.08),
res = c(0.2, 0.3, 0.5),
rho = 1) {
    if (length(rho) < length(tox)) {
        ## repeat rho so that it matches the number of dose levels, as documented
        rho <- rep(rho, length.out = length(tox));
    }
true.theta <- apply(cbind(tox, res, rho),
1,
function(x) {
tt <- get.p11(x);
tt[4] <- 1 - sum(tt[1:3]);
tt
});
rst <- t(true.theta);
colnames(rst) <- get.const()$THETA;
class(rst) <- get.const()$CLSTRUEPS;
invisible(rst)
}
#' Get prior distribution parameters
#'
#' Get prior distribution parameters for partially parametric or partially parametric+ models
#'
#' @rdname vtPriorPar
#'
#' @param prior.y Historical data for generating prior parameters. It has the
#' same structure as \code{obs.y} in \code{\link{vtPost}}.
#' @param tau Vector of \eqn{\tau} values. See \code{\link{visit}} for details.
#' Can not be \code{NULL} if \code{prior.y} is \code{NULL}.
#' @param sdalpha \eqn{\sigma_\alpha}. See \code{\link{visit}} for details.
#' @param sdrho \eqn{\sigma_\rho}.
#' @param vtheta Additional variance term for eliciting prior parameters from
#' \code{prior.y}
#'
#' @details
#'
#' The priors are specified as \eqn{q^{(l)} \sim Beta(a_q^{(l)}, b_q^{(l)})},
#' and \eqn{\log\rho^{(l)} \sim N(0, \sigma_\rho^2)}.
#'
#' @return A \code{VTPRIOR} list with
#'
#' \itemize{
#'
#' \item{TAU:}{vector of \eqn{\tau}'s for each level}
#'
#' \item{ABCD:}{A matrix of 4 columns: \eqn{a_q}, \eqn{b_q}, \eqn{a_\rho},
#' \eqn{{\sigma_\rho}}. Each row represents a dose level.}}
#'
#' @examples
#'
#' par.prior <- vtPriorPar(tau = c(0.2, 0.4, 0.6), sdalpha = 10);
#'
#' @export
#'
vtPriorPar <- function(prior.y = NULL, tau = NULL, sdalpha=10, sdrho=10, vtheta=NULL) {
if (!is.null(prior.y)) {
beta <- NULL;
for (i in 1:nrow(prior.y)) {
pt <- prior.y[i,];
if (!is.null(vtheta))
pt <- vtheta * pt/sum(pt);
        ## toxicity rate
p.tox <- sum(pt[3:4])/sum(pt);
## response
a.rep <- sum(pt[c(2,4)]);
b.rep <- sum(pt[c(1,3)]);
## odds ratio
a.lr <- log(pt[1]*pt[4]/pt[2]/pt[3]);
b.lr <- sqrt(sum(1/pt));
beta <- rbind(beta, c(p.tox, a.rep, b.rep, a.lr, b.lr));
}
colnames(beta) <- c("TAU","A", "B","C","D");
rst <- list(TAU = beta[,1],
ABCD = beta[,2:5]);
} else {
if (is.null(tau))
stop("Please provide prior.y or tau");
abcd <- array(0, dim=c(length(tau), 4));
abcd[,1:2] <- 0.5;
abcd[,4] <- sdrho;
rst <- list(TAU = tau,
ABCD = abcd);
}
rst$SDALPHA <- sdalpha;
##return
class(rst) <- get.const()$CLSPRIOR;
rst
}
#' Simulate a single trial
#'
#' Simulation function for simulating a single trial
#'
#' @rdname vtSingleTrial
#'
#' @inheritParams parameters
#'
#' @param trueps True \eqn{\theta}'s. A \code{VTTRUEPS} object made from
#' \code{\link{vtScenario}}
#' @param size.cohort Size of each cohort
#' @param size.level Maximum number of patients for each dose level
#' @param ... Optional arguments for \code{vtPost}
#'
#'
#' @return
#' \itemize{
#' \item{\code{dose}: }{Optimal dose level}
#' \item{\code{n.patients}: }{Number of patients for each dose level and each cohort}
#' \item{\code{ptox}: }{Posterior mean of DLT risk rate after each interim analysis}
#' \item{\code{pres}: }{Posterior mean of immune response rate after each interim analysis}
#' \item{\code{region}: }{Identified region in the decision map after each interim analysis}
#' \item{\code{prob}: }{Posterior mean of \eqn{\theta}'s after each interim analysis}
#' \item{\code{smps}: }{Observed data after each cohort}
#' }
#'
#' @examples
#' rst.sce <- vtScenario(tox = c(0.05, 0.05, 0.08),
#' res = c(0.2, 0.3, 0.5),
#' rho = 1)
#' rst.simu <- vtSingleTrial(trueps = rst.sce, size.cohort=3, size.level=12,
#' prob.mdl="NONPARA");
#'
#' @export
#'
vtSingleTrial <- function(trueps,
size.cohort = 3,
size.level = NULL,
etas = c(0.1,0.3),
dec.cut = 0.65,
prob.mdl = c("NONPARA", "NONPARA+", "PARA", "PARA+"),
priors = NULL,
...) {
## parameters
ndose <- nrow(trueps);
if (is.null(size.level))
size.level <- rep(size.cohort * 2, ndose);
if (length(size.level) < ndose) {
size.level <- c(size.level,
rep(size.level[length(size.level)], ndose - length(size.level)));
}
prob.mdl <- match.arg(prob.mdl);
nstages <- ceiling(max(size.level)/size.cohort);
## prepare returns
rst.np <- array(0, dim=c(ndose, nstages)); ##n patients treated on each level
rst.smp <- array(0, dim=c(ndose, nstages, 4)); ##simulated data
    rst.ptox   <- array(NA, dim=c(ndose, nstages)); ##posterior mean dlt rate at each interim
    rst.pres   <- array(NA, dim=c(ndose, nstages)); ##posterior mean response rate at each interim
rst.prob <- array(NA, dim=c(ndose, nstages, 4)); ##region prob
rst.region <- array(NA, dim=c(ndose, nstages)); ##region selected
## simulation
smps <- NULL;
    action <- 1; ##1 escalate; 0 stay; negative values end the trial
while (action >= 0) {
if (1 == action) {
smps <- rbind(smps, rep(0,4));
cur.dose <- nrow(smps);
cur.stage <- 1;
if (1 == cur.dose) {
prev.res <- 0;
} else {
prev.res <- apply(post.smp[,c(2,4)],1,sum);
}
} else {
cur.stage <- cur.stage + 1;
}
##sample next cohort
cur.smp <- rmultinom(1,
min(size.cohort, size.level[cur.dose] - sum(smps[cur.dose,])),
trueps[cur.dose,]);
smps[cur.dose,] <- smps[cur.dose,] + cur.smp;
##bayesian
post.smp <- vtPost(smps, prob.mdl, priors, ...);
##decision
cur.dec <- vtDecMap(post.smp, etas, prev.res=prev.res, dec.cut=dec.cut);
##keep records
rst.np[cur.dose, cur.stage] <- sum(cur.smp);
rst.smp[cur.dose, cur.stage,] <- cur.smp;
rst.ptox[cur.dose, cur.stage] <- cur.dec$ptox;
rst.pres[cur.dose, cur.stage] <- cur.dec$pres;
rst.prob[cur.dose, cur.stage,] <- cur.dec$prob;
rst.region[cur.dose, cur.stage] <- cur.region <- cur.dec$region;
##decision
action <- c(-1,-1,1,0)[cur.region];
##action
        if (0 == action &&
size.level[cur.dose] == sum(smps[cur.dose,])) {
action <- 1;
}
        if (ndose == cur.dose && 1 == action) {
rst.dose <- ndose;
action <- -2;
} else if (-1 == action) {
rst.dose <- cur.dose - 1;
}
}
##return
list(dose=rst.dose,
n.patients=rst.np,
ptox=rst.ptox,
pres=rst.pres,
region=rst.region,
prob=rst.prob,
smps=rst.smp);
}
#' Conduct simulation study
#'
#' Simulate clinical trials with given settings for multiple times to evaluate
#' the study operating characteristics.
#'
#' @rdname vtSimu
#'
#' @param n.rep Number of repetitions
#' @param seed Seed
#' @param ... Optional parameters for \code{\link{vtSingleTrial}}
#' @param n.cores Number of cores for parallel computations
#' @param update.progress Reserved parameter for Shiny GUI
#'
#' @return
#'
#' A class \code{VTSIMU} list with length \code{n.rep} of results. Each item is
#' a list return from \code{\link{vtSingleTrial}}.
#'
#' @examples
#'
#' rst.sce <- vtScenario(tox = c(0.05, 0.05, 0.08),
#' res = c(0.2, 0.3, 0.5),
#' rho = 1)
#'
#' rst.simu <- vtSimu(n.rep = 100, n.cores = 2, trueps = rst.sce,
#' size.cohort=3, size.level=12, prob.mdl="NONPARA");
#'
#'
#' @export
#'
vtSimu <- function(n.rep=100, seed=NULL, ..., n.cores=1, update.progress=NULL) {
    if (!is.null(seed)) {
        ## keep the current RNG state so it can be restored afterwards
        old_seed <- if (exists(".Random.seed", envir = globalenv()))
            get(".Random.seed", envir = globalenv()) else NULL;
        set.seed(seed)
    }
if ("PROGRESS" %in% toupper(class(update.progress)))
update.progress$set(value=1, detail=paste(""));
rst <- parallel::mclapply(1:n.rep,
function(x) {
if ("PROGRESS" %in% toupper(class(update.progress))) {
update.progress$set(value=x/n.rep,
detail=paste("Replication", x, sep=" "));
} else {
print(x);
}
vtSingleTrial(...);
}, mc.cores=n.cores);
class(rst) <- get.const()$CLSSIMU;
    if (!is.null(seed) && !is.null(old_seed)) {
        ## restore the previous RNG state
        assign(".Random.seed", old_seed, envir = globalenv());
    }
rst
}
#' Summarize simulation results
#'
#' Summarize simulation results to get the frequency with which each dose level
#' is identified as the optimal dose level, and the numbers of DLT's and responses
#'
#' @rdname summary2.VTSIMU
#'
#' @param x A class \code{VTSIMU} list generated by \code{\link{vtSimu}}
#' @param ... Reserved parameters
#'
#'
#' @return A numeric vector containing, in order, the selection percentage of
#'     each dose level, the overall selection percentage, the average number of
#'     patients for each level, the average total number of patients, and the
#'     average numbers of DLT's (T) and responders (R) for each level and in
#'     total
#'
#' @examples
#'
#' rst.sce <- vtScenario(tox = c(0.05, 0.05, 0.08),
#' res = c(0.2, 0.3, 0.5),
#' rho = 1)
#' rst.simu <- vtSimu(n.rep = 20, n.cores = 2, trueps = rst.sce,
#' size.cohort=3, size.level=12,
#' prob.mdl="NONPARA");
#' sum.simu <- summary2(rst.simu)
#'
#'
#' @method summary2 VTSIMU
#'
#' @export
#'
summary2.VTSIMU <- function(x, ...) {
cur.rst <- summary(x);
rst <- NULL;
##dose selection percentage
rst <- c(rst, cur.rst$dose[2,]);
rst <- c(rst, sum(cur.rst$dose[2,]));
rst <- c(rst, cur.rst$npat[,"total"]);
rst <- c(rst, sum(cur.rst$npat[,"total"]));
ss <- 0;
for (j in 1:ncol(cur.rst$dose)) {
cur.ss <- cur.rst$samples[paste("level ",j," total", sep = ""), c("T", "R")];
ss <- ss + cur.ss;
rst <- c(rst, cur.ss);
}
rst <- c(rst, ss);
invisible(rst);
}
#' Plot true parameters
#'
#' Plot true DLT risk rates and response rates.
#'
#' @rdname plot.VTTRUEPS
#'
#' @param x A class \code{VTTRUEPS} matrix generated by \code{\link{vtScenario}}
#' @param draw.levels Select dose levels to draw. Default \code{NULL} draws all
#' levels.
#' @param draw.curves Indicate which curves to plot. The options are
#'
#' \itemize{
#' \item{1:}{p: DLT risk rate}
#' \item{2:}{q: Response rate}
#' \item{3:}{\eqn{\theta_{00}}}
#' \item{4:}{\eqn{\theta_{01}}}
#' \item{5:}{\eqn{\theta_{10}}}
#' \item{6:}{\eqn{\theta_{11}}}
#' }
#'
#' See \code{\link{visit}} for details.
#'
#' @param legends Line legends
#' @param ltys Line types
#' @param pch Line PCH
#' @param ylim Y limits
#' @param cols Line colors
#' @param add.legend Include legends (TRUE) or not (FALSE)
#' @param ... optional arguments for plot
#'
#'
#'
#' @examples
#' rst.sce <- vtScenario(tox = c(0.05, 0.05, 0.08),
#' res = c(0.2, 0.3, 0.5),
#' rho = 1)
#' plot(rst.sce, draw.levels = 1:2, draw.curves=1:6)
#'
#' @method plot VTTRUEPS
#'
#' @export
#'
plot.VTTRUEPS <- function(x, draw.levels = NULL, draw.curves = 1:6,
legends = NULL, ltys = c(1,1,2,2,2,2), pch=19:24, ylim = c(0,1),
cols = c("red", "blue", "brown", "black", "gray", "green"),
add.legend = TRUE, ...) {
## parameter checking
stopifnot(get.const()$CLSTRUEPS %in% class(x));
draw.levels <- set.option(draw.levels, 1:nrow(x));
draw.curves <- set.option(draw.curves, 1:6);
if (is.null(legends)) {
legends <- get.const()$DENLEGEND;
}
## append trueps
x <- cbind(x[,3] + x[,4],
x[,2] + x[,4],
x);
## plot
plot(NULL, xlim=range(draw.levels), xlab="Levels", ylab="Rate", ylim = ylim, ...);
for (k in draw.curves) {
lines(draw.levels, x[draw.levels,k], type = "b",
col = cols[k], lty = ltys[k], pch = pch[k]) ;
}
if (add.legend) {
legend("topleft", bty="n",
legend = legends[draw.curves],
pch = pch[draw.curves],
lty = ltys[draw.curves], col = cols[draw.curves]);
}
}
#' Print true probabilities
#'
#' Print the true probabilities, with probabilities of toxicity and response,
#' and \eqn{\rho}.
#'
#' @rdname summary.VTTRUEPS
#'
#' @param object A class \code{VTTRUEPS} matrix generated by \code{\link{vtScenario}}
#' @inheritParams parameters
#'
#'
#' @return
#'
#' A table showing the summary of the \code{VTTRUEPS} object. The first
#' four columns are the individual probabilities, the fifth and sixth are the
#' probabilities of toxicity and response, and the seventh is rho, the odds ratio.
#'
#' @examples
#' rst.sce <- vtScenario(tox = c(0.05, 0.05, 0.08),
#' res = c(0.2, 0.3, 0.5),
#' rho = 1)
#' summary(rst.sce)
#'
#' @method summary VTTRUEPS
#'
#' @export
#'
summary.VTTRUEPS <- function(object, digits = 2, ...) {
stopifnot(get.const()$CLSTRUEPS %in% class(object));
rst <- round(object, digits = digits);
rst2 <- cbind(object[,3]+object[,4],
object[,2]+object[,4]);
colnames(rst2) <- c("Toxicity Rate", "Response Rate");
rst3 <- object[,1]*object[,4]/object[,2]/object[,3];
rst3 <- cbind(rst3);
colnames(rst3) <- "Rho";
cbind(rst, rst2, rst3);
}
#' Print true probabilities in LaTeX format
#'
#' Print the true probabilities, with probabilities of toxicity and response,
#' and \eqn{\rho}, in LaTeX format
#'
#' @rdname summary2.VTTRUEPS
#'
#' @param x A class \code{VTTRUEPS} matrix generated by \code{\link{vtScenario}}
#' @inheritParams parameters
#' @param rp2d Columns to be in bold font
#'
#' @return
#'
#' A summary of the true probabilities in LaTeX format.
#'
#' @examples
#'
#' rst.sce <- vtScenario(tox = c(0.05, 0.05, 0.08),
#' res = c(0.2, 0.3, 0.5),
#' rho = 1)
#' ltx.ps <- summary2(rst.sce)
#'
#'
#' @method summary2 VTTRUEPS
#'
#' @export
#'
summary2.VTTRUEPS <- function(x, rp2d = -1, digits = 2, ...) {
fill.pq <- NULL;
cur.pq <- summary(x)[,5:6];
for (j in 1:nrow(cur.pq)) {
cur.pqnum <- round(cur.pq[j,], digits = digits);
cur.pqtxt <- paste("(", cur.pqnum[1], ",", cur.pqnum[2], ")", sep="");
if (j %in% rp2d) {
cur.pqtxt <- paste("\\\\textbf{", cur.pqtxt,"}");
}
fill.pq <- c(fill.pq, cur.pqtxt);
}
invisible(fill.pq);
}
#' Summarize simulation results
#'
#' Summarize the simulation results with numerous statistical measures
#'
#' @rdname summary.VTSIMU
#'
#' @param object A class \code{VTSIMU} list generated by \code{\link{vtSimu}}
#' @inheritParams parameters
#'
#' @return
#'
#' A list containing
#' \itemize{
#' \item{dose}{: Frequency for each dose level being selected as the optimal dose level}
#' \item{npat}{: Average number of patients for each cohort and each dose level}
#' \item{samples}{: Average numbers of DLT's and responses for each cohort on each dose level}
#' \item{decision}{: Frequency each region in the decision map is selected for each cohort on each dose level}
#'
#' \item{prob}{: Average conditional probabilities corresponding to each region in
#' the decision map for each cohort on each dose level}
#'
#' \item{ptox}{: Mean and credible interval of DLT risk rates for each cohort on each dose level}
#'
#' \item{pres}{: Mean and credible interval of immune response rates for each cohort on each dose level}
#' }
#'
#' @examples
#' rst.sce <- vtScenario(tox = c(0.05, 0.05, 0.08),
#' res = c(0.2, 0.3, 0.5),
#' rho = 1)
#' rst.simu <- vtSimu(n.rep = 50, n.cores = 2, trueps = rst.sce,
#' size.cohort=3, size.level=12,
#' prob.mdl="NONPARA");
#' sum.simu <- summary(rst.simu)
#'
#' @method summary VTSIMU
#'
#' @export
#'
summary.VTSIMU <- function(object, ...) {
f.tp <- function(var) {
rst.p <- lst.combine(object, var);
rst.p <- apply(rst.p, 1:2, function(x) {
if (all(is.na(x))) {
rst <- rep(NA, 3);
} else {
rst <- c(mean(x, na.rm = TRUE),
quantile(x, c(0.025,0.975), na.rm = TRUE))
};
rst
});
rst <- NULL;
for (i in 1:n.dose) {
for (j in 1:n.stage) {
rst <- rbind(rst, rst.p[,i,j]);
}
}
colnames(rst) <- c("Mean", "LB", "UB");
rst
}
chk.pts <- object[[1]]$n.patients;
n.dose <- nrow(chk.pts);
n.stage <- ncol(chk.pts);
n.reps <- length(object);
##selected dose
rst.dose <- lst.combine(object, "dose");
rst.dose <- table(factor(rst.dose, levels=1:n.dose));
rst.dose <- rbind(rst.dose, rst.dose/n.reps*100);
rownames(rst.dose) <- c("Frequency", "%");
##enrolled patients
rst.np <- lst.combine(object, "n.patients");
rst.np <- apply(rst.np, 1:2, mean);
rst.np <- cbind(rst.np, apply(rst.np, 1, sum));
rownames(rst.np) <- paste("level", 1:n.dose, sep = " ");
colnames(rst.np) <- c(paste("stage", 1:n.stage, sep = " "), "total");
##patients details
rst.smps <- lst.combine(object, "smps");
rst.smps <- apply(rst.smps, 1:3, mean);
s.smps <- NULL;
s.smps.names <- NULL;
for (i in 1:n.dose) {
for (j in 1:n.stage) {
s.smps.names <- c(s.smps.names,
paste("level", i, "stage", j, sep = " "));
cur.smps <- rst.smps[i,j,];
s.smps <- rbind(s.smps,
c(cur.smps,
cur.smps[3] + cur.smps[4],
cur.smps[2] + cur.smps[4]));
}
s.smps.names <- c(s.smps.names,
paste("level", i, "total", sep = " "));
cur.level <- apply(rst.smps[i,,], 2, sum);
s.smps <- rbind(s.smps,
c(cur.level,
cur.level[3] + cur.level[4],
cur.level[2] + cur.level[4]));
}
rownames(s.smps) <- s.smps.names;
colnames(s.smps) <- c("No T, No R", "No T, R", "T, No R", "T,R", "T", "R");
##probabilities
rst.prob <- lst.combine(object, "prob");
rst.prob <- apply(rst.prob, 1:3, mean, na.rm=TRUE);
s.probs <- NULL;
s.probs.names <- NULL;
for (i in 1:n.dose) {
for (j in 1:n.stage) {
s.probs.names <- c(s.probs.names,
paste("level", i, "stage", j, sep = " "));
s.probs <- rbind(s.probs, rst.prob[i,j,]);
}
}
rownames(s.probs) <- s.probs.names;
colnames(s.probs) <- get.const()$REGIONS;
##decision
rst.decision <- lst.combine(object, "region");
rst.decision <- apply(rst.decision, 1:2, function(x) {
if (all(is.na(x))) {
rst <- rep(NA, 4);
} else {
rst <- table(factor(x, levels=1:4))/sum(!is.na(x)) * 100;
};
c(sum(!is.na(x)), rst);
});
s.dec <- NULL;
s.dec.names <- NULL;
for (i in 1:n.dose) {
for (j in 1:n.stage) {
s.dec.names <- c(s.dec.names,
paste("level", i, "stage", j, sep = " "));
s.dec <- rbind(s.dec, rst.decision[,i,j]);
}
}
rownames(s.dec) <- s.dec.names;
colnames(s.dec) <- c("N", get.const()$REGIONS);
##p.tox
p.tox <- f.tp("ptox");
p.res <- f.tp("pres");
rownames(p.tox) <- s.dec.names;
rownames(p.res) <- s.dec.names;
##return
rst <- list(dose = rst.dose,
npat = rst.np,
samples = s.smps,
                decision = s.dec,
prob = s.probs,
ptox = p.tox,
pres = p.res);
}
|
/scratch/gouwar.j/cran-all/cranData/visit/R/visit_simu.R
|
#' Call STAN models for MCMC sampling
#'
#' Call STAN to draw posterior samples of the joint distribution of
#' immunogenicity rate and toxicity risk for parametric and parametric+ models
#'
#' @rdname vtStan
#'
#' @inheritParams parameters
#'
#' @param model option of the probability models:
#' \describe{
#' \item{0:}{parametric model}
#' \item{1:}{parametric+ model}
#' }
#' See \code{\link{visit}} for details.
#'
#' @param iter STAN option: number of iterations
#' @param chains STAN option: number of chains
#' @param warmup STAN option: number of warmup
#' @param ... additional parameters for package rstan's sampling method. These
#' options include \code{iter}, \code{warmup}, \code{thin},
#' \code{algorithm}. See \code{rstan::sampling} for details.
#'
#'
#' @return A \code{rstan} object that contains the posterior sampling results
#'
#'
vtStan <- function(obs.y,
priors,
model = 0,
iter = 4000,
chains = 4,
warmup = 2000,
...) {
##model
if (0 == model) {
##PARA
FIXRHOAT1 <- 0;
SINGLERHO <- 1;
SINGLEALPHA <- 1;
} else if (1 == model) {
##PARA+
FIXRHOAT1 <- 1;
SINGLERHO <- 1;
SINGLEALPHA <- 1;
} else if (2 == model) {
FIXRHOAT1 <- 0;
SINGLERHO <- 1;
SINGLEALPHA <- 0;
} else if (3 == model) {
FIXRHOAT1 <- 1;
SINGLERHO <- 1;
SINGLEALPHA <- 0;
}
##data
NDOSE <- length(priors$TAU);
SDALPHA <- priors$SDALPHA;
TAU <- priors$TAU;
PAR <- priors$ABCD;
OBSY <- array(0, dim=c(1,4)); ##pseudo record for stan
if (is.null(obs.y)) {
NOBSLEVEL <- 0;
} else {
NOBSLEVEL <- nrow(obs.y);
OBSY <- rbind(obs.y, OBSY);
}
##call stan
stan.rst <- rstan::sampling(stanmodels[["visit"]],
data=list(NDOSE=NDOSE,
TAU=as.array(TAU),
PAR=PAR,
SDALPHA=SDALPHA,
NOBSLEVEL=NOBSLEVEL,
OBSY=OBSY,
SINGLEALPHA=SINGLEALPHA,
SINGLERHO=SINGLERHO,
FIXRHOAT1=FIXRHOAT1),
iter=iter,
chains=chains,
warmup=warmup,
...);
##return
stan.rst
}
#' Posterior sampling for given observed samples
#'
#' Call STAN to draw posterior samples of the joint distribution of
#' immunogenicity rate and toxicity risk
#'
#' @rdname vtPost
#'
#' @inheritParams parameters
#'
#'
#' @param nsmp number of iterations
#'
#' @param ... additional parameters for package rstan's sampling method. These
#' options include \code{warmup}, \code{thin}, \code{algorithm}. See
#' \code{rstan::sampling} for details.
#'
#' @param prior.const Specify \eqn{\alpha} for a Beta(\eqn{\alpha},
#' \eqn{\alpha}) prior. The Beta prior is used for \code{NONPARA} and
#' \code{NONPARA+} models. Default value \eqn{0.5}.
#'
#' @return A class \code{VTPOST} matrix of posterior samples with \code{nsmp}
#'     rows and 4 columns. Columns 1-4 correspond to \eqn{\theta^{(l)}_{00},
#' \theta^{(l)}_{01}, \theta^{(l)}_{10}, \theta^{(l)}_{11}}. See
#' \code{\link{visit}} for details about \eqn{\theta}'s.
#'
#'
#'
#' @examples
#' obs.y <- rbind(c(5, 2, 0, 0), c(3, 4, 0, 0), c(1, 6, 0, 0))
#' prior <- vtPriorPar(prior.y = NULL, tau = c(0.1, 0.3, 0.6), sdalpha=10, sdrho=10, vtheta=NULL)
#' rst.post <- vtPost(obs.y, priors = prior, warmup = 100, prob.mdl = "PARA", nsmp = 200)
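#'
#' ## a non-parametric sketch: no priors object is needed and prior.const
#' ## sets the Beta(0.5, 0.5) prior
#' rst.np <- vtPost(obs.y, prob.mdl = "NONPARA", nsmp = 200, prior.const = 0.5)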
#'
#' @export
#'
vtPost <- function(obs.y,
prob.mdl = c("NONPARA", "NONPARA+", "PARA", "PARA+"),
priors = NULL,
...,
nsmp = 4000,
prior.const = 0.5) {
stopifnot(4 == ncol(obs.y));
## parameter
prob.mdl <- match.arg(prob.mdl);
if ("PARA" == prob.mdl |
"PARA+" == prob.mdl) {
if (is.null(priors))
stop("Priors need to be specified for parametric models.")
}
cur.level <- nrow(obs.y);
##posterior sampling
if ("NONPARA" == prob.mdl) {
post.smp <- rdirichlet(nsmp, obs.y[cur.level,] + prior.const);
} else if ("NONPARA+" == prob.mdl) {
post.dlt <- rbeta(nsmp,
sum(obs.y[cur.level,c(3,4)])+prior.const,
sum(obs.y[cur.level,c(1,2)])+prior.const);
post.rep <- rbeta(nsmp,
sum(obs.y[cur.level,c(2,4)])+prior.const,
sum(obs.y[cur.level,c(1,3)])+prior.const);
post.smp <- cbind((1-post.dlt)*(1-post.rep), (1-post.dlt)*post.rep,
post.dlt*(1-post.rep), post.dlt*post.rep);
} else {
if ("PARA" == prob.mdl) {
post.rst <- vtStan(obs.y, priors, model=0, iter = nsmp, ...);
} else if ("PARA+" == prob.mdl){
post.rst <- vtStan(obs.y, priors, model=1, iter = nsmp, ...);
}
post.theta <- extract(post.rst)$theta;
post.smp <- post.theta[,cur.level,];
}
##return
class(post.smp) <- get.const()$CLSPOST;
post.smp
}
|
/scratch/gouwar.j/cran-all/cranData/visit/R/visit_stan.R
|
.onLoad <- function(libname, pkgname) {
if (!("methods" %in% .packages()))
attachNamespace("methods");
}
|
/scratch/gouwar.j/cran-all/cranData/visit/R/zzz.R
|
## ---- eval = TRUE, echo = FALSE, message = FALSE------------------------------
require(visit);
set.seed(10000);
## ---- eval = FALSE, echo = TRUE-----------------------------------------------
# install.packages("visit");
# require(visit);
## ---- eval = TRUE, echo = TRUE------------------------------------------------
tox <- c(0.07, 0.23, 0.66);
res <- c(0.50, 0.17, 0.59);
rho <- c(0.98, 0.40, 0.46);
scenario <- vtScenario(tox = tox, res = res, rho = rho);
summary(scenario);
## ---- eval = TRUE-------------------------------------------------------------
summary(scenario);
## ---- eval = TRUE, echo = TRUE, fig.height = 4, fig.width = 8-----------------
par(mfrow = c(1,2));
plot(scenario, draw.curves = 1:2, main = "Marginal DLT Risk and Response Rates");
plot(scenario, draw.curves = 3:6, main = "Joint DLT Risk and Response Rates");
## ---- eval = TRUE, echo = TRUE------------------------------------------------
tau <- c(0.39, 0.87, 0.49);
prior <- vtPriorPar(tau = tau, sdalpha = 10, sdrho = 10);
## ---- eval = TRUE, results = 'hide'-------------------------------------------
simu <- vtSimu(n.rep = 100, trueps = scenario,
size.cohort = 5, size.level = 10,
etas = c(0.3, 0.7), dec.cut = c(0.45, 0.55, 0.75),
prob.mdl = "NONPARA+");
## ---- eval = TRUE, echo = TRUE------------------------------------------------
sum.1 <- summary(simu);
print(sum.1);
## ---- eval = TRUE, echo = TRUE------------------------------------------------
sum.2 <- summary2(simu);
print(sum.2);
## ---- eval = TRUE, echo = TRUE, fig.height = 5, fig.width = 5-----------------
etas <- c(0.1, 0.3)
dec.cut <- c(0.6,0.6,0.6)
cur.obs.y <- c(3, 2, 1, 1)
prev.obs.y <- c(5, 2, 0, 0)
rst.inter <- vtInterim(cur.obs.y, prev.obs.y = prev.obs.y,
prob.mdl = "NONPARA", etas = etas, dec.cut = dec.cut,
nsmp = 2000);
plot(rst.inter);
## ---- eval = TRUE, echo = TRUE, fig.height = 4, fig.width = 7-----------------
obs <- rbind(c(1, 6, 4, 3, 6), c(2, 4, 9, 3, 3), c(3, 2, 6, 6, 5));
vtTrack(obs, end.width = 0.8);
## ---- echo=TRUE, eval=FALSE---------------------------------------------------
# vtShiny();
|
/scratch/gouwar.j/cran-all/cranData/visit/inst/doc/vignette.R
|
---
title: "visit: Vaccine Phase I Design with Simultaneous Evaluation of Immnunogeneicity and Toxicity"
author: "Chenguang Wang"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{visit: Vaccine Phase I Design with Simultaneous Evaluation of Immunogenicity and Toxicity}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, eval = TRUE, echo = FALSE, message = FALSE}
require(visit);
set.seed(10000);
```
# Introduction
Phase I clinical trials are the first step in drug development in which a new
drug or drug combination is applied to humans. Typical designs of Phase I trials use
toxicity as the primary endpoint and aim to find the maximum tolerated dose.
However, these designs are generally inapplicable for the development of cancer
vaccines because the primary objectives of a cancer vaccine Phase I trial often
include determining whether the vaccine shows biologic activity.
R package **visit** implements a dose escalation algorithm that simultaneously
accounts for immunogenicity and toxicity. It uses lower dose levels as the
reference for determining if the current dose level is optimal in terms of
immune response. It also ensures subject safety by capping the toxicity rate
with a given upper bound. These two criteria are simultaneously evaluated using
an intuitive decision region.
Users are referred to the following paper for details of the **visit** design:
Wang, C., Rosner, G. L., & Roden, R. B. (2019). A Bayesian design for phase I cancer
therapeutic vaccine trials. Statistics in medicine, 38(7), 1170-1189.
# Installation
The package **visit** can be installed directly from *CRAN*:
```{r, eval = FALSE, echo = TRUE}
install.packages("visit");
require(visit);
```
Some packages (e.g., *shiny*) are required to run the graphical user interface for **visit**,
but are not required to run **visit** through an *R* terminal.
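If the GUI will be used, its dependencies can be installed separately, e.g.,
```{r, eval = FALSE, echo = TRUE}
install.packages("shiny");
```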
# Conduct Simulation Studies During Study Design
Simulation studies are necessary for evaluating study characteristics for a
specific study design. **visit** provides functions for conducting simulation
studies and summarizing the simulation results.
## Simulation Scenarios
The first step in the simulation studies is usually to specify the simulation
scenarios. This is done in **visit** by the function *vtScenario*:
```{r, eval = TRUE, echo = TRUE}
tox <- c(0.07, 0.23, 0.66);
res <- c(0.50, 0.17, 0.59);
rho <- c(0.98, 0.40, 0.46);
scenario <- vtScenario(tox = tox, res = res, rho = rho);
summary(scenario);
```
The simulation scenarios are constructed using the level-specific DLT risk rates (*tox*),
immune response rates (*res*), and odds ratios (*rho*). The result is a class *VTTRUEPS* object
which has S3 methods *summary* and *plot*.
```{r, eval = TRUE}
summary(scenario);
```
```{r, eval = TRUE, echo = TRUE, fig.height = 4, fig.width = 8}
par(mfrow = c(1,2));
plot(scenario, draw.curves = 1:2, main = "Marginal DLT Risk and Response Rates");
plot(scenario, draw.curves = 3:6, main = "Joint DLT Risk and Response Rates");
```
## Incorporate Prior Knowledge and Specify Priors
Prior knowledge about the DLT risk rates and the prior choices of the parametric
model parameters should be encapsulated into a class
*VTPRIOR* object by the function *vtPriorPar*. Details of the parameters can be
found in Wang et al. (2019).
```{r, eval = TRUE, echo = TRUE}
tau <- c(0.39, 0.87, 0.49);
prior <- vtPriorPar(tau = tau, sdalpha = 10, sdrho = 10);
```
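When a parametric model is selected, this prior object can be passed on to the
simulation or posterior sampling functions. A minimal sketch (not run here,
since MCMC sampling for the parametric models is time-consuming):
```{r, eval = FALSE, echo = TRUE}
simu.para <- vtSimu(n.rep = 10, trueps = scenario,
                    size.cohort = 5, size.level = 10,
                    etas = c(0.3, 0.7), dec.cut = c(0.45, 0.55, 0.75),
                    prob.mdl = "PARA", priors = prior);
```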
## Conduct Simulation
The main simulation function in the **visit** package is the *vtSimu* function.
The function requires a class *VTTRUEPS* object for the simulation scenarios and,
when a parametric model is used, a class *VTPRIOR* object for the priors, along with
other design parameters including *etas* for the lower and upper boundaries of the
DLT risk, *size.cohort* for the cohort size, etc. The probability model is specified
by *prob.mdl*. The options are *NONPARA* for the non-parametric model, *NONPARA+* for
the non-parametric model with $\rho=1$, *PARA* for the partially parametric model,
and *PARA+* for the partially parametric model with $\rho=1$.
```{r, eval = TRUE, results = 'hide'}
simu <- vtSimu(n.rep = 100, trueps = scenario,
size.cohort = 5, size.level = 10,
etas = c(0.3, 0.7), dec.cut = c(0.45, 0.55, 0.75),
prob.mdl = "NONPARA+");
```
The result is a class *VTSIMU* object with S3 methods *summary* and *summary2*.
```{r, eval = TRUE, echo = TRUE}
sum.1 <- summary(simu);
print(sum.1);
```
```{r, eval = TRUE, echo = TRUE}
sum.2 <- summary2(simu);
print(sum.2);
```
# Conduct Data Analysis for Ongoing Phase I Studies
During the conduct of a Phase I study, the **visit** package provides functions
to carry out interim analyses and support dose escalation decisions.
## Interim Analysis
To perform the interim analysis, the observed data at the current dose level and at
the previous dose level are needed. Decision cuts, the lower and upper boundaries of
the DLT risk, *prob.mdl*, and *priors* are all optional arguments. Please refer to the
**visit** package manual for details on the resulting *decision map*.
```{r, eval = TRUE, echo = TRUE, fig.height = 5, fig.width = 5}
etas <- c(0.1, 0.3)
dec.cut <- c(0.6,0.6,0.6)
cur.obs.y <- c(3, 2, 1, 1)
prev.obs.y <- c(5, 2, 0, 0)
rst.inter <- vtInterim(cur.obs.y, prev.obs.y = prev.obs.y,
prob.mdl = "NONPARA", etas = etas, dec.cut = dec.cut,
nsmp = 2000);
plot(rst.inter);
```
## Track of Study History
A function *vtTrack* is provided to visualize the entire progress of the study,
including the observed data and dose escalation decisions. The required data is
a five-column matrix, with the first column indicating the dose level and the
remaining columns giving the observed numbers of patients with *No DLT, No
Response*, *No DLT, Response*, *DLT, No Response*, and *DLT, Response*.
```{r, eval = TRUE, echo = TRUE, fig.height = 4, fig.width = 7}
obs <- rbind(c(1, 6, 4, 3, 6), c(2, 4, 9, 3, 3), c(3, 2, 6, 6, 5));
vtTrack(obs, end.width = 0.8);
```
# Graphical User Interface (GUI)
The **visit** package provides a web-based GUI for study design, simulation, and interim data analysis.
The GUI can be accessed by
```{r, echo=TRUE, eval=FALSE}
vtShiny();
```
|
/scratch/gouwar.j/cran-all/cranData/visit/inst/doc/vignette.Rmd
|
---
title: "visit Simulation Report"
author: "visit package"
date: "`r Sys.Date()`"
toc: true
output:
pdf_document:
number_sections: true
---
```{r, echo=FALSE, eval=TRUE}
library(xtable);
data.all <- get.data();
```
\clearpage
# Simulation Parameters
Variables | Values
------------------|--------------------------
Number of doses | `r data.all$ndose`
Decision cuts | `r data.all$dec.cut[1:3]`
DLT boundaries (etas) | `r data.all$etas[1:2]`
Probability model | `r data.all$probmdl`
Cohort size | `r data.all$size.cohort`
Level size | `r data.all$size.level`
Number of replications | `r data.all$n.rep`
Number of cores | `r data.all$n.cores`
\clearpage
# Trueps Plots
## Trueps Plots 1
Marginal DLT risk and immune response rates:
```{r TRUEPS1, echo=FALSE, fig.cap="Scenario Plots", fig.height=6, fig.width=8, warning=FALSE}
if (!is.null(data.all$sm)) {
plot(data.all$sm, draw.curves = 1:2, bg = 'transparent')
} else {
"No scenario plot was generated from this session."
};
```
\clearpage
## Trueps Plots 2
Joint outcome probabilities ($\theta$'s) for each dose level:
```{r TRUEPS2, echo=FALSE, fig.cap="Scenario Plots", fig.height=6, fig.width=8, warning=FALSE, fig.pos="down"}
if (!is.null(data.all$sm)) {
plot(data.all$sm, draw.curves = 3:6, bg = 'transparent')
} else {
"No scenario plot was generated from this session."
};
```
\clearpage
# Simulation Results
<center>
```{r, echo=FALSE, eval=TRUE, results='asis'}
if (!is.null(data.all$rst)) {
for (i in 1:length(summary(data.all$rst))) {
print(xtable(summary(data.all$rst)[[i]]), comment = FALSE)
}
} else {
"No simulation result was generated from this session."
}
```
</center>
\clearpage
# Observation
vtTrack Plots
```{r, fig.cap="track plot",echo=FALSE, eval=TRUE, results='asis'}
if(!is.null(data.all$om)) {
vtTrack(data.all$om, end.width = 0.8, max.level = data.all$currentLevel)
print(xtable(data.all$om, digits = 0), comment=FALSE, include.rownames=FALSE);
} else {
"No observation plot was generated from this session."
}
```
\clearpage
# Observation Interim
```{r, echo=FALSE, eval=TRUE}
par(pin=c(3,3));
if(!is.null(data.all$om)) {
for (i in 1:data.all$ndose) {
cur.obs.y <- data.all$om[i, -1];
if (1 == i) {
prev.obs.y <- NULL;
prev.res <- 0;
} else {
      prev.obs.y <- data.all$om[i-1,-1];
prev.res <- NULL;
}
cat(paste("\nLevel", i))
plot(vtInterim(cur.obs.y, prev.obs.y = prev.obs.y, prev.res = prev.res, dec.cut = data.all$dec.cut[1:3], etas = data.all$etas[1:2]));
}
} else {
"No observation plot was generated from this session."
}
```
|
/scratch/gouwar.j/cran-all/cranData/visit/inst/shiny/report/report.Rmd
|
library(devtools);
load_all();
library(xtable);
shinyServer(function(input, output, session) {
source("visit_ui.R");
output$mainpage <- renderUI({
tabpanel.all();
})
output$simulationStart <- renderUI ({
actionButton(
inputId = "simu",
label = "Start simulation"
)
})
observeEvent(input$close, {
stopApp()
});
p.data <- reactiveValues();
observeEvent(input$scenarioButton, {
if (is.null(input$scenarioButton)) return(NULL);
if (input$scenarioButton == 0) return(NULL);
error <- list();
# Probability
if (input$scenarioInput == 'Probability') {
sm <- array(0, dim = c(input$ndose, 4));
for (i in 1:input$ndose) {
for (j in 1:4) {
current <- paste0("predictedOcc", i, j);
if (is.null(input[[current]]) || is.na(input[[current]])) {
sm[i,j] <- 0;
} else {
sm[i,j] <- input[[current]];
}
}
}
if (any(sm < 0)) {
error <- append(error, "Error: Incorrect values for probability. All values should be nonnegative integers.");
} else {
for (i in 1:NROW(sm)) {
if (sum(sm[i,]) > 0) {
sm[i,] <- sm[i,]/sum(sm[i,]);
} else {
error <- append(error, "Error: Incorrect values for probability. Each level should contain at least one patient.");
break;
}
}
}
colnames(sm) <- get.shinyConst()$THETA;
class(sm) <- get.shinyConst()$CLSTRUEPS;
# Probability by Odds Ratio
} else if (input$scenarioInput == 'Probability by Odds Ratio') {
sm <- array(0, dim = c(input$ndose, 3));
for (i in 1:input$ndose) {
for (j in 1:3) {
current <- paste0("predictedProb", i, j);
if (j != 3) {
if (is.null(input[[current]]) || is.na(input[[current]])) {
sm[i,j] <- NA;
} else {
sm[i,j] <- input[[current]];
}
} else {
if (input$scenarioRho == 'Multiple') {
if (is.null(input[[current]]) || is.na(input[[current]])) {
sm[i,j] <- NA;
} else {
sm[i,j] <- input[[current]];
}
} else if (input$scenarioRho == 'Single') {
if (is.null(input$rho) || is.na(input$rho)) {
sm[i,j] <- NA;
} else {
sm[i,j] <- input$rho;
}
}
}
}
}
# Check if p is NA
if(any(is.na(sm[,1:2]))) {
error <- append(error, "Error: Missing values for probabilities.");
}
# Check for 0 <= p <= 1
if(any(sm[,1:2] < 0, na.rm = TRUE) || any(sm[,1:2] > 1, na.rm = TRUE)) {
error <- append(error, "Error: Incorrect values for probabilities. Please make sure all probabilities values are between 0 and 1, inclusive.");
}
# Check if rho is NA
if(any(is.na(sm[,3]))) {
error <- append(error, "Error: Missing values for rhos.");
}
# Check for 0 <= rho
if(any(sm[,3] < 0, na.rm = TRUE)) {
error <- append(error, "Error: Incorrect values for rhos. Please make sure all rhos are greater than or equal to 0.");
}
if (length(error) == 0) {
sm <- vtScenario(tox = sm[,1], res = sm[,2], rho = sm[,3]);
}
}
if (length(error) == 0) {
# Store for rmarkdown
p.data$sm <- sm;
output$scenario <- renderUI({
fluidRow(
column(12,
wellPanel(
fluidRow(
h4("Scenarios"),
style = 'margin-left: 20px; border-bottom: 2px solid #E3E3E3; margin-right: 20px;'
),
fluidRow(
column(6,
renderPlot({
plot(sm, draw.curves = 1:2);
}, bg = 'transparent')
),
column(6,
renderPlot({
plot(sm, draw.curves = 3:6);
}, bg = 'transparent')
)
)
)
)
)
})
} else {
output$scenario <- renderUI({
return (NULL);
})
writeError(error)
}
})
observeEvent(input$simu, {
if (is.null(input$simu)) return(NULL);
if (input$simu == 0) return(NULL);
error <- list();
# Check for NA
decisions <- c(input$dec.cut1, input$dec.cut2, input$dec.cut3, input$etas1, input$etas2)
if(any(is.na(decisions))) {
error <- append(error, "Error: Missing values for C1, C2, C3, or DLT boundary values.");
}
# Check for 0 < d < 1
if(any(decisions < 0, na.rm = TRUE) || any(decisions > 1, na.rm = TRUE)) {
error <- append(error, "Error: Incorrect values for C1, C2, C3, or DLT boundary values. Please make sure all values are between 0 and 1, exclusive.");
}
# Unlikely to Happen, but a check for NA probmdl
if (any(is.na(input$probmdl)) || any(is.null(input$probmdl))) {
error <- append(error, "Error: No probability model chosen. Please selecte one from Non-Parametric and Non-Parametric+");
}
# Check for PARA and PARA+
if (input$probmdl == 'PARA' || input$probmdl == 'PARA+') {
error <- append(error, "Error: Parametric and Parametric+ are unsupported as of this version of shiny website. Please perform the computation through R to bypass this issue.");
}
# Check for size.level and size.cohort
if (is.na(input$size.cohort) ||
is.null(input$size.cohort) ||
input$size.cohort <= 0 ||
is.na(input$size.level) ||
is.null(input$size.level) ||
input$size.level <= 0) {
error <- append(error, "Error: Cohort size and level size should be positive integers.");
} else {
if (input$size.level <= input$size.cohort) {
error <- append(error, "Error: Size level should be greater than cohort size.");
}
}
# Check for n.rep
if (is.na(input$n.rep) || is.null(input$n.rep) || input$n.rep <= 0) {
error <- append(error, "Error: Number of replication should be a positive integer.");
}
# Check for Number of Cores
n.cores <- input$n.cores;
if (is.na(input$n.cores) || input$n.cores <= 0 || input$n.cores > parallel::detectCores()) {
n.cores <- parallel::detectCores() - 1;
}
# Probability
if (input$scenarioInput == 'Probability') {
sm <- array(0, dim = c(input$ndose, 4));
for (i in 1:input$ndose) {
for (j in 1:4) {
current <- paste0("predictedOcc", i, j);
if (is.null(input[[current]]) || is.na(input[[current]])) {
sm[i,j] <- 0;
} else {
sm[i,j] <- input[[current]];
}
}
}
if (any(sm < 0)) {
error <- append(error, "Error: Incorrect values for probability. All values should be nonnegative integers.");
} else {
for (i in 1:NROW(sm)) {
if (sum(sm[i,]) > 0) {
sm[i,] <- sm[i,]/sum(sm[i,]);
} else {
error <- append(error, "Error: Incorrect values for probability. Each level should contain at least one patient.");
break;
}
}
}
colnames(sm) <- get.shinyConst()$THETA;
class(sm) <- get.shinyConst()$CLSTRUEPS;
# Probability by Odds Ratio
} else if (input$scenarioInput == 'Probability by Odds Ratio') {
sm <- array(0, dim = c(input$ndose, 3));
for (i in 1:input$ndose) {
for (j in 1:3) {
current <- paste0("predictedProb", i, j);
if (j != 3) {
if (is.null(input[[current]]) || is.na(input[[current]])) {
sm[i,j] <- NA;
} else {
sm[i,j] <- input[[current]];
}
} else {
if (input$scenarioRho == 'Multiple') {
if (is.null(input[[current]]) || is.na(input[[current]])) {
sm[i,j] <- NA;
} else {
sm[i,j] <- input[[current]];
}
} else if (input$scenarioRho == 'Single') {
if (is.null(input$rho) || is.na(input$rho)) {
sm[i,j] <- NA;
} else {
sm[i,j] <- input$rho;
}
}
}
}
}
# Check if p is NA
if(any(is.na(sm[,1:2]))) {
error <- append(error, "Error: Missing values for probabilities.");
}
# Check for 0 <= p <= 1
if(any(sm[,1:2] < 0, na.rm = TRUE) || any(sm[,1:2] > 1, na.rm = TRUE)) {
error <- append(error, "Error: Incorrect values for probabilities. Please make sure all probabilities values are between 0 and 1, inclusive.");
}
# Check if rho is NA
if(any(is.na(sm[,3]))) {
error <- append(error, "Error: Missing values for rhos.");
}
# Check for 0 <= rho
if(any(sm[,3] < 0, na.rm = TRUE)) {
error <- append(error, "Error: Incorrect values for rhos. Please make sure all rhos are greater than or equal to 0.");
}
if (length(error) == 0) {
sm <- vtScenario(tox = sm[,1], res = sm[,2], rho = sm[,3]);
}
}
if (length(error) == 0) {
progress <- shiny::Progress$new(session, min = 0, max = 1, style = 'old');
progress$set(message = "Progress", value = 0);
on.exit(progress$close());
print(paste0("dec.cut: ", decisions[1:3]));
print(paste0("etas: ", decisions[4:5]));
print(paste0("probmdl: ", input$probmdl));
print(paste0("size.cohort: ", input$size.cohort));
print(paste0("size.level: ", input$size.level));
print(paste0("n.rep: ", input$n.rep));
print(paste0("n.core: ", n.cores));
print(paste0("seed: ", input$seed1))
print(paste0("trueps:"));
print(sm);
showModal(
modalDialog(
id = "prog",
title = NULL,
size = 's',
footer = NULL,
fade = FALSE
)
)
rst <- vtSimu(
n.rep = input$n.rep,
seed = input$seed1,
trueps = sm,
size.cohort = input$size.cohort,
size.level = input$size.level,
etas = decisions[4:5],
dec.cut = decisions[1:3],
prob.mdl = input$probmdl,
priors = NULL,
n.cores = n.cores,
update.progress = progress
)
# Store for rmarkdown
p.data$rst <- rst;
p.data$ndose <- input$ndose;
p.data$dec.cut <- c(input$dec.cut1, input$dec.cut2, input$dec.cut3);
p.data$etas <- c(input$etas1, input$etas2);
p.data$probmdl <- input$probmdl;
p.data$size.cohort <- input$size.cohort;
p.data$size.level <- input$size.level;
p.data$n.rep <- input$n.rep;
p.data$n.cores <- n.cores;
p.data$seed <- input$seed1
output$simulationResult <- renderUI({
fluidRow(
fluidRow(
h4("Simulation Result"),
style = 'margin-left: 20px; border-bottom: 2px solid #E3E3E3; margin-right: 20px; text-align: left;'
),
fluidRow(
page.simu_output(length(summary(rst)))
)
)
})
output$simulationStart <- renderUI({
fluidRow(
actionButton(
inputId = "simu",
label = "Restart simulation"
),
align = 'left',
style = 'margin-left: 25px; margin-top: 20px !important;'
)
})
title <- c("dose: Frequency for each dose level being selected as the optimal dose level",
"npat: Average number of patients for each cohort and each dose level",
"samples: Average number of DLT risks and responses for each cohort on each dose level",
"decision: Frequency each region in the decision map is selected for each cohort on each dose level",
"prob: Average conditional probabilities corresponding to each region in the decision map for each cohort on each dose level",
"ptox: Mean and credible interval of DLT risk rates for each cohort on each dose level",
"pres: Mean and credible interval of immune response rates for each cohort on each dose level");
for (i in 1:(NROW(rst))) {
local({
s <- i;
table.name <- paste0("rst.", s);
output[[table.name]] <- renderUI ({
fluidRow(
h3(
title[s],
style = 'text-align: left;'
),
renderTable({
xtable(summary(rst)[[s]]);
})
)
})
})
}
removeModal()
} else {
writeError(error)
}
})
observeEvent(input$realButton, {
if (is.null(input$realButton)) return(NULL);
if (input$realButton == 0) return(NULL);
error <- list();
# Check for NA
decisions <- c(input$dec.cut1, input$dec.cut2, input$dec.cut3, input$etas1, input$etas2)
if(any(is.na(decisions))) {
error <- append(error, "Error: Missing values for C1, C2, C3, or DLT boundary values.");
}
# Check for 0 < d < 1
if(any(decisions < 0, na.rm = TRUE) || any(decisions > 1, na.rm = TRUE)) {
error <- append(error, "Error: Incorrect values for C1, C2, C3, or DLT boundary values. Please make sure all values are between 0 and 1, exclusive.");
}
# Get the matrix m
om <- array(0, dim = c(input$ndose, 5));
for (i in 1:input$ndose) {
om[i,1] <- i;
for (j in 2:5) {
current <- paste0("realData", i, j-1);
if (is.null(input[[current]]) || is.na(input[[current]])) {
om[i,j] <- 0;
} else {
om[i,j] <- input[[current]];
}
}
}
# Check for negative numbers in m
if (any(om < 0)) {
error <- append(error, "Error: Incorrect values for observed data. Values should be positive integers. ");
}
# Check if input currentLevel is NA
if (is.na(input$currentLevel) || is.null(input$currentLevel) || input$currentLevel > input$ndose) {
currentLevel <- input$ndose;
} else {
currentLevel <- input$currentLevel;
}
if (length(error) == 0) {
# Store for rmarkdown
p.data$om <- om;
p.data$currentLevel <- currentLevel;
# Generate plotTrack
output$plotTrack <- renderUI({
fluidRow(
column(8,
wellPanel(
fluidRow(
h4("Track Plot"),
style = 'margin-left: 20px; border-bottom: 2px solid #E3E3E3; margin-right: 20px;'
),
fluidRow(
renderPlot({
vtTrack(om, end.width = 0.8, max.level = currentLevel);
}, bg = 'transparent')
)
),
offset = 2
)
)
})
# Generate plotInterims
for (i in 1:(NROW(om))) {
local({
s <- i;
plot.name <- paste0("plotInterim", s);
output[[plot.name]] <- renderPlot({
cur.obs.y <- om[s,-1];
if (1 == s) {
prev.obs.y <- NULL;
prev.res <- 0;
} else {
prev.obs.y <- om[s-1,-1];
prev.res <- NULL;
}
plot(vtInterim(cur.obs.y, prev.obs.y = prev.obs.y, prev.res = prev.res, dec.cut = decisions[1:3], etas = decisions[4:5], seed = input$seed2));
}, height = 400, width = 400, bg = 'transparent')
})
}
output$plotInterim <- renderUI({
fluidRow(
column(8,
wellPanel(
fluidRow(
h4("Decision Maps"),
style = 'margin-left: 20px; border-bottom: 2px solid #E3E3E3; margin-right: 20px;'
),
fluidRow(
ui.plotInterim(NROW(om))
)
),
offset = 2
)
)
})
} else {
output$plotTrack <- renderUI({
return (NULL);
})
for (i in 1:(NROW(om))) {
local({
s <- i;
plot.name <- paste0("plotInterim", s);
output[[plot.name]] <- renderPlot({
return (NULL);
})
})
}
output$plotInterim <- renderUI({
return (NULL);
})
writeError(error)
}
})
output$export <- downloadHandler(
filename = function() {
paste0('export-',
format(Sys.time(), "%m%d%Y%H%M%S"),
'.RData'
)
},
content = function(file) {
export <- get.export();
save(export, file = file);
}
)
get.export <- reactive({
export <- list();
export$ndose <- input$ndose;
export$dec.cut <- c(input$dec.cut1, input$dec.cut2, input$dec.cut3);
export$etas <- c(input$etas1, input$etas2);
export$probmdl <- input$probmdl;
export$size.cohort <- input$size.cohort;
export$size.level <- input$size.level;
export$n.rep <- input$n.rep;
export$n.cores <- input$n.cores;
export$currentLevel <- input$currentLevel;
export$scenarioInput <- input$scenarioInput;
export$scenarioRho <- input$scenarioRho;
export$rho <- input$rho;
export$seed1 <- input$seed1;
export$seed2 <- input$seed2;
osm <- array(0, dim = c(10, 4));
om <- array(0, dim = c(10, 4));
for (i in 1:10) {
for (j in 1:4) {
current <- paste0("predictedOcc", i, j);
osm[i,j] <- input[[current]];
current <- paste0("realData", i, j);
om[i,j] <- input[[current]];
}
}
export$osm <- osm;
export$om <- om;
psm <- array(0, dim = c(10, 3));
for (i in 1:10) {
for (j in 1:3) {
current <- paste0("predictedProb", i, j);
psm[i,j] <- input[[current]];
}
}
export$psm <- psm;
class(export) <- 'visit.export';
return(export);
})
observeEvent(input$import, {
if (is.null(input$import)) return (NULL);
if (endsWith(input$import$name, "RData")) {
load(input$import$datapath)
if (exists('export') && class(export) == 'visit.export') {
updateSliderInput(
session,
inputId = "ndose",
label = "Number of doses",
min = 1,
max = 10,
value = export$ndose,
step = 1
)
updateNumericInput(
session,
inputId = "dec.cut1",
label = "C1",
value = export$dec.cut[1],
min = 0,
max = 1,
step = 0.05
)
updateNumericInput(
session,
inputId = "dec.cut2",
label = "C2",
value = export$dec.cut[2],
min = 0,
max = 1,
step = 0.05
)
updateNumericInput(
session,
inputId = "dec.cut3",
label = "C3",
value = export$dec.cut[3],
min = 0,
max = 1,
step = 0.05
)
updateNumericInput(
session,
inputId = "etas1",
label = "Lower boundary of DLT risk",
value = export$etas[1],
min = 0,
max = 1,
step = 0.05
)
updateNumericInput(
session,
inputId = "etas2",
label = "Upper boundary of DLT risk",
value = export$etas[2],
min = 0,
max = 1,
step = 0.05
)
updateRadioButtons(
session,
inputId = "probmdl",
label = "",
choices = c("Non-Parametric" = "NONPARA",
"Non-Parametric+" = "NONPARA+",
"Parametric" = "PARA",
"Parametric+" = "PARA+"),
selected = export$probmdl
)
updateNumericInput(
session,
inputId = "size.cohort",
label = "Cohort Size",
value = export$size.cohort,
min = 0,
step = 1
)
updateNumericInput(
session,
inputId = "size.level",
label = "Level Size",
value = export$size.level,
min = 0,
step = 1
)
updateNumericInput(
session,
inputId = "n.rep",
label = "Number of Replications",
value = export$n.rep,
min = 1,
step = 1
)
updateNumericInput(
session,
inputId = "n.cores",
label = "Number of Cores",
value = export$n.cores,
min = 1,
step = 1
)
updateRadioButtons(
session,
inputId = "scenarioInput",
label = "Type",
choices = c("Probability by Odds Ratio", "Probability"),
selected = export$scenarioInput
)
updateRadioButtons(
session,
inputId = "scenarioRho",
label = "",
choices = c("Single", "Multiple"),
selected = export$scenarioRho
)
updateNumericInput(
session,
inputId = "rho",
label = "",
value = export$rho,
min = 0,
step = 1
)
updateNumericInput(
session,
inputId = "currentLevel",
label = "Current dose level",
value = export$currentLevel,
min = 1,
max = 10,
step = 1
)
updateNumericInput(
session,
inputId = "seed1",
label = "Seed",
value = export$seed1,
step = 1
)
updateNumericInput(
session,
inputId = "seed2",
label = "Seed",
value = export$seed2,
step = 1
)
for (i in 1:10) {
for (j in 1:3) {
updateNumericInput(
session,
inputId = paste0("predictedProb", i, j),
label = "",
value = export$psm[i,j],
min = 0,
max = 1,
step = 0.05
)
}
}
for (i in 1:10) {
for (j in 1:4) {
updateNumericInput(
session,
inputId = paste0("predictedOcc", i, j),
label = "",
value = export$osm[i,j],
min = 0,
step = 1
)
updateNumericInput(
session,
inputId = paste0("realData", i, j),
label = "",
value = export$om[i,j],
min = 0,
max = 100,
step = 1
)
}
}
updateTabsetPanel(session, "mainpanel", selected = "About")
} else {
writeError("Wrong .RData file. Please upload an file that has been exported from VISIT. ")
}
} else {
writeError("Unexpected file. Please import a .RData file.")
}
})
output$downloadButton <- downloadHandler(
filename = function() {
paste('report_',
format(Sys.time(), "%m%d%Y%H%M%S"),
'.',
switch(input$format,
PDF = 'pdf',
HTML = 'html',
Word = 'docx'
),
sep = ""
)
},
content = function(file) {
out <- rmarkdown::render('report/report.Rmd',
switch(input$format,
PDF = rmarkdown::pdf_document(),
HTML = rmarkdown::html_document(),
Word = rmarkdown::word_document()
)
);
bytes <- readBin(out, "raw", file.info(out)$size);
writeBin(bytes, file);
}
)
get.data <- reactive({
result <- list();
result$ndose <- p.data$ndose;
result$dec.cut <- p.data$dec.cut;
result$etas <- p.data$etas;
result$probmdl <- p.data$probmdl;
result$size.cohort <- p.data$size.cohort;
result$size.level <- p.data$size.level;
result$n.rep <- p.data$n.rep;
result$n.cores <- p.data$n.cores;
result$om <- p.data$om;
result$currentLevel <- p.data$currentLevel;
result$sm <- p.data$sm;
result$rst <- p.data$rst;
result$seed1 <- p.data$seed1
return(result);
})
})
|
/scratch/gouwar.j/cran-all/cranData/visit/inst/shiny/server.R
|
shinyUI(
fluidPage(
# CSS
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "styles.css"),
tags$title("Phase I: Vaccine Dose Selection Design and Analysis"),
tags$style(".shiny-file-input-progress {display: none}"),
tags$script(src="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML", type="text/javascript")
),
withTags({
div(class = "cheader",
h5("Phase I: Vaccine Dose Selection Design and Analysis"),
tags$button(
id = 'close',
type = "button",
class = "btn action-button",
onclick = "setTimeout(function(){window.close();},500);",
"Exit",
style="float: right;"
)
)
}),
# Main Page
uiOutput("mainpage")
)
)
|
/scratch/gouwar.j/cran-all/cranData/visit/inst/shiny/ui.R
|
# Constants in the package
get.shinyConst <- reactive ({
rst <- list(REGIONS = c("Toxic", "Ineffective", "Safe,Effective", "Effective,Safety concern"),
THETA = c("No DLT, No Response", "No DLT, Response",
"DLT, No Response", "DLT, Response"),
CLSPRIOR = "VTPRIOR",
CLSPOST = "VTPOST",
CLSTRUEPS = "VTTRUEPS",
CLSSIMU = "VTSIMU",
CLSDEC = "VTDEC"
);
rst$DENLEGEND <- c("Toxicity Rate", "Response Rate", rst$THETA);
return(rst);
})
##----------------------------------------------------------------------
## MAINPAGE UI
##----------------------------------------------------------------------
tabpanel.all <- function() {
tabsetPanel(type = "pills",
id = "mainpanel",
page.about(),
page.design(),
page.simu_options(),
page.simu_result(),
page.analysis(),
page.report()
)
}
##----------------------------------------------------------------------
## DISPLAY ERROR
##----------------------------------------------------------------------
writeError <- function(errors) {
rst <- '';
for (i in 1:length(errors)) {
rst <- paste(rst, '<h6>', errors[[i]], '</h6>');
}
showModal(
modalDialog(
title = "Error",
footer = NULL,
size = 'm',
easyClose = TRUE,
(HTML(rst))
)
)
}
##----------------------------------------------------------------------
## ABOUT UI
##----------------------------------------------------------------------
page.about <- function() {
tabPanel(
title = "About",
fluidRow(
column(8,
wellPanel(
fluidRow(
withMathJax(includeHTML('www/text.HTML')),
style = 'padding-left: 30px; padding-right: 30px;'
),
style = 'padding-left: 30px;'
),
offset = 2
)
)
)
}
##----------------------------------------------------------------------
## ABOUT UI
##----------------------------------------------------------------------
page.design <- function() {
tabPanel(
title = "Design Options",
fluidRow(
column(8,
page.param(),
page.prior(),
offset = 2
)
)
)
}
# Parameters
page.param <- function() {
wellPanel(
fluidRow(
h4("Design Parameters"),
style = 'margin-left: 20px; border-bottom: 2px solid #E3E3E3; margin-right: 20px; margin-bottom: 20px'
),
fluidRow(
column(6,
fluidRow(
column(10,
sliderInput(
inputId = "ndose",
label = "Number of doses",
min = 1,
max = 10,
value = 3,
step = 1
),
offset = 1
),
style = 'margin-top: 127px; margin-bottom: 127px;'
),
style = 'border-right: 2px solid #E3E3E3;'
),
column(6,
fluidRow(
column(5,
numericInput(
inputId = "size.cohort",
label = "Cohort Size",
value = 5,
min = 0,
step = 1
),
style = 'padding: 20px; margin-left: 24px; padding-bottom: 0px; padding-top: 10px;'
),
column(5,
numericInput(
inputId = "size.level",
label = "Level Size",
value = 10,
min = 0,
step = 1
),
style = 'padding: 20px; margin-left: 17px; padding-bottom: 0px; padding-top: 10px;'
)
),
fluidRow(
column(5,
numericInput(
inputId = "etas1",
label = "Lower boundary of DLT risk",
value = 0.1,
min = 0,
max = 1,
step = 0.05
),
style = 'padding: 20px; margin-left: 24px; padding-bottom: 5px;'
),
column(5,
numericInput(
inputId = "etas2",
label = "Upper boundary of DLT risk",
value = 0.3,
min = 0,
max = 1,
step = 0.05
),
style = 'padding: 20px; margin-left: 17px; padding-bottom: 5px;'
),
style = 'text-align: center;'
),
fluidRow(
column(5,
numericInput(
inputId = "dec.cut1",
label = "C1",
value = 0.65,
min = 0,
max = 1,
step = 0.05
),
numericInput(
inputId = "dec.cut3",
label = "C3",
value = 0.65,
min = 0,
max = 1,
step = 0.05
),
style = 'padding: 20px; margin-left: 24px; padding-top: 5px;'
),
column(5,
numericInput(
inputId = "dec.cut2",
label = "C2",
value = 0.65,
min = 0,
max = 1,
step = 0.05
),
style = 'padding: 20px; margin-left: 17px;padding-top: 5px;'
)
)
)
)
)
}
page.prior <- function() {
wellPanel(
fluidRow(
h4("Probability Model"),
style = 'margin-left: 20px; border-bottom: 2px solid #E3E3E3; margin-right: 20px;'
),
fluidRow(
column(4,
radioButtons(
inputId = "probmdl",
label = "",
choices = c("Non-Parametric" = "NONPARA",
"Non-Parametric+" = "NONPARA+",
"Parametric" = "PARA",
"Parametric+" = "PARA+"),
selected = "NONPARA"
),
style = 'margin-left: 40px; margin-bottom:40px; margin-top: 20px;'
),
conditionalPanel(
condition = "(input.probmdl == \"PARA\") || (input.probmdl == \"PARA+\")",
column(7,
fluidRow(
column(6,
withMathJax(
numericInput(
inputId = "vtheta",
label = "$$\\mbox{Variance } \\theta$$",
value = NULL,
min = 1,
step = 1,
width = '90%'
)
),
withMathJax(
numericInput(
inputId = "sdalpha",
label = "$$\\mbox{Standard Deviation } \\alpha$$",
value = NULL,
min = 1,
step = 1,
width = '90%'
)
),
style = 'margin-top: 12px;'
),
column(5,
fluidRow(
checkboxInput(
inputId = "priory",
label = "Include a prior.y",
value = FALSE
),
style = 'margin-top: 33px;'
),
offset = 1
)
)
)
),
style = 'margin-top: 40px; margin-bottom: 20px; '
),
conditionalPanel(
condition = "(input.probmdl == \"PARA\") || (input.probmdl == \"PARA+\")",
fluidRow(
style = 'border-top: 2px solid #E3E3E3; margin-left: 20px; margin-right: 20px; margin-bottom: 20px;'
),
fluidRow(
column(2,
lapply(1:10, function(i) {
fluidRow(
conditionalPanel(
condition = paste("input.ndose >=", i),
column(12,
h3(
paste("Level", i),
style = 'margin-bottom: 23px; margin-top: 24px; padding-top: 0px;'
)
)
),
style = 'margin-left: 20px;'
)
}),
style = 'margin-top: 54px; margin-left: 0px;'
),
column(10,
fluidRow(
column(2,
h3(
paste0(strsplit(get.shinyConst()$THETA[1], ",")[[1]][1], ","),
style = 'padding-top: 0px; margin-bottom: 0px;'
),
h3(
strsplit(get.shinyConst()$THETA[1], ",")[[1]][2],
style = 'padding-top: 0px; margin-bottom: 0px;'
),
style = 'text-align: center; margin-top: 10px;'
),
column(2,
h3(
paste0(strsplit(get.shinyConst()$THETA[2], ",")[[1]][1], ","),
style = 'padding-top: 0px; margin-bottom: 0px;'
),
h3(
strsplit(get.shinyConst()$THETA[2], ",")[[1]][2],
style = 'padding-top: 0px; margin-bottom: 0px;'
),
style = 'text-align: center; margin-top: 10px;'
),
column(2,
h3(
paste0(strsplit(get.shinyConst()$THETA[3], ",")[[1]][1], ","),
style = 'padding-top: 0px; margin-bottom: 0px;'
),
h3(
strsplit(get.shinyConst()$THETA[3], ",")[[1]][2],
style = 'padding-top: 0px; margin-bottom: 0px;'
),
style = 'text-align: center; margin-top: 10px;'
),
column(2,
h3(
paste0(strsplit(get.shinyConst()$THETA[4], ",")[[1]][1], ","),
style = 'padding-top: 0px; margin-bottom: 0px;'
),
h3(
strsplit(get.shinyConst()$THETA[4], ",")[[1]][2],
style = 'padding-top: 0px; margin-bottom: 0px;'
),
style = 'text-align: center; margin-top: 10px;'
),
column(2,
"$$\\rho$$",
style = 'text-align: center; margin-top: 30px; margin-bottom: 0px;'
),
style = 'height: 54px;'
),
lapply(1:10, function(i) {
fluidRow(
conditionalPanel(
condition = paste("input.ndose >=", i),
lapply(1:4, function(j) {
column(2,
numericInput(
inputId = paste0("prior", i, j),
label = "",
value = NULL,
min = 0,
step = 1)
)
}),
conditionalPanel(
condition = "(input.probmdl == \"PARA\") || (input.probmdl == \"PARA+\")",
column(2,
numericInput(
inputId = paste0("prior", i, 5),
label = "",
value = NULL,
min = 0,
step = 1
)
)
)
)
)
})
)
)
)
)
}
page.simu_options <- function() {
tabPanel(
title = "Simulation Settings",
fluidRow(
column(8,
wellPanel(
fluidRow(
h4("Settings"),
style = 'margin-left: 20px; border-bottom: 2px solid #E3E3E3; margin-right: 20px;'
),
fluidRow(
column(4,
numericInput(
inputId = "n.rep",
label = "Number of Replications",
value = 100,
min = 1,
step = 1
)
),
column(4,
numericInput(
inputId = "n.cores",
label = "Number of Cores",
value = 1,
min = 1,
step = 1
)
),
column(4,
numericInput(
inputId = "seed1",
label = "Seed",
value = 10000,
step = 1
)
),
style = 'margin-top: 20px; margin-left: 29px; margin-bottom: 8px; margin-right: 29px;'
)
),
wellPanel(
fluidRow(
withMathJax(HTML("<h4>Specify true probabilities: \\(θ_{00}, θ_{11}, θ_{01} , θ_{10}\\)</h4>")),
style = 'margin-left: 20px; border-bottom: 2px solid #E3E3E3; margin-right: 20px;'
),
fluidRow(
column(4,
radioButtons(
inputId = "scenarioInput",
label = "Type",
choices = c("Probability by Odds Ratio", "Probability"),
selected = "Probability by Odds Ratio"
),
style = 'margin-left: 110px; margin-right: 20px;',
offset = 1
),
column(4,
conditionalPanel(
condition = "input.scenarioInput == \"Probability by Odds Ratio\"",
fluidRow(
column(2,
withMathJax(
h3(
"$$\\mbox{Number of } \\rho$$",
style = 'margin-top: 20px;'
)
),
radioButtons(
inputId = "scenarioRho",
label = "",
choices = c("Single", "Multiple"),
selected = "Single"
),
offset = 5
)
)
),
style = 'margin-left: 0px; margin-right: 20px;'
),
style = 'margin-bottom: 20px;'
),
fluidRow(
column(2,
lapply(1:10, function(i) {
fluidRow(
conditionalPanel(
condition = paste("input.ndose >=", i),
column(12,
h3(
paste("Level", i),
style = 'margin-bottom: 23px; margin-top: 24px; padding-top: 0px;'
)
)
),
style = 'margin-left: 20px;'
)
}),
style = 'margin-top: 54px;'
),
column(9,
conditionalPanel(
condition = "input.scenarioInput == \"Probability by Odds Ratio\"",
fluidRow(
column(4,
h3(
"DLT Risk",
style = 'text-align: center; margin-top: 20px; margin-bottom: 0px;'
)
),
column(4,
h3(
"Immune Response",
style = 'text-align: center; margin-top: 20px; margin-bottom: 0px;'
)
),
conditionalPanel(
condition = "input.scenarioRho == \"Multiple\"",
withMathJax(
column(4,
"$$\\rho$$",
style = 'text-align: center; margin-top: 30px; margin-bottom: 0px;'
)
)
),
style = 'height: 54px;'
),
lapply(1:10, function(i) {
fluidRow(
conditionalPanel(
condition = paste("input.ndose >=", i),
lapply(1:2, function(j) {
column(4,
numericInput(
inputId = paste0("predictedProb", i, j),
label = "",
value = 0.2,
min = 0,
max = 1,
step = 0.05
)
)
}),
column(4,
conditionalPanel(
condition = "input.scenarioRho == \"Multiple\"",
numericInput(
inputId = paste0("predictedProb", i, 3),
label = "",
value = NULL,
min = 0,
step = 0.05
)
)
)
)
)
}),
fluidRow(
column(4,
conditionalPanel(
condition = "input.scenarioRho == \"Single\" && input.scenarioInput == \"Probability by Odds Ratio\"",
h3(
withMathJax("$$\\rho$$"),
style = 'text-align: center; margin-top: 10px; margin-bottom: 0px;'
),
numericInput(
inputId = "rho",
label = "",
value = 1,
min = 0,
step = 1
)
)
)
)
),
conditionalPanel(
condition = "input.scenarioInput == \"Probability\"",
fluidRow(
column(3,
h3(
paste0(strsplit(get.shinyConst()$THETA[1], ",")[[1]][1], ","),
style = 'padding-top: 0px; margin-bottom: 0px;'
),
h3(
strsplit(get.shinyConst()$THETA[1], ",")[[1]][2],
style = 'padding-top: 0px; margin-bottom: 0px;'
),
style = 'text-align: center; margin-top: 10px;'
),
column(3,
h3(
paste0(strsplit(get.shinyConst()$THETA[2], ",")[[1]][1], ","),
style = 'padding-top: 0px; margin-bottom: 0px;'
),
h3(
strsplit(get.shinyConst()$THETA[2], ",")[[1]][2],
style = 'padding-top: 0px; margin-bottom: 0px;'
),
style = 'text-align: center; margin-top: 10px;'
),
column(3,
h3(
paste0(strsplit(get.shinyConst()$THETA[3], ",")[[1]][1], ","),
style = 'padding-top: 0px; margin-bottom: 0px;'
),
h3(
strsplit(get.shinyConst()$THETA[3], ",")[[1]][2],
style = 'padding-top: 0px; margin-bottom: 0px;'
),
style = 'text-align: center; margin-top: 10px;'
),
column(3,
h3(
paste0(strsplit(get.shinyConst()$THETA[4], ",")[[1]][1], ","),
style = 'padding-top: 0px; margin-bottom: 0px;'
),
h3(
strsplit(get.shinyConst()$THETA[4], ",")[[1]][2],
style = 'padding-top: 0px; margin-bottom: 0px;'
),
style = 'text-align: center; margin-top: 10px;'
),
style = 'height: 54px;'
),
lapply(1:10, function(i) {
fluidRow(
conditionalPanel(
condition = paste("input.ndose >=", i),
lapply(1:4, function(j) {
column(3,
numericInput(
inputId = paste0("predictedOcc", i, j),
label = "",
value = NULL,
min = 0,
step = 1
)
)
})
)
)
})
)
)
),
fluidRow(
column(2,
actionButton(
inputId = "scenarioButton",
label = "Simulation Scenario",
width = '150px'
),
offset = 2
),
style = 'margin-top: 30px; margin-bottom: 10px;'
)
),
uiOutput("scenario"),
offset = 2
)
)
)
}
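## UI tab "Simulation Result": server-rendered placeholders for the
## simulation results and the simulation start section.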
page.simu_result <- function() {
tabPanel(
title = "Simulation Result",
fluidRow(
column(8,
wellPanel(
uiOutput("simulationResult"),
uiOutput("simulationStart"),
align = 'center'
),
offset = 2
)
)
)
}
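## Builds a navigation list with one "Result" tab per simulation scenario;
## each tab holds a server-rendered output placeholder.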
page.simu_output <- function(l) {
result <- list();
for (i in 1:l) {
result[[i]] <- tabPanel(paste("Result", i), uiOutput(paste0("rst.", i)));
}
result <- do.call(navlistPanel, c(id = 'r', "", result, well = FALSE, list(widths = c(3, 7))));
result;
}
##----------------------------------------------------------------------
## REAL DATA ANALYSIS
##----------------------------------------------------------------------
page.analysis <- function() {
tabPanel(
title = "Real Data Analysis",
fluidRow(
column(8,
wellPanel(
fluidRow(
h4("Interim Data Analysis"),
style = 'margin-left: 20px; border-bottom: 2px solid #E3E3E3; margin-right: 20px;'
),
fluidRow(
column(4,
numericInput(
inputId = "currentLevel",
label = "Current dose level",
value = NULL,
min = 1,
max = 10,
step = 1
),
numericInput(
inputId = "seed2",
label = "Seed",
value = 10000,
step = 1
),
style = 'margin-left: 20px;'
),
style = 'margin-top: 25px; margin-bottom: 20px;'
),
fluidRow(
column(2,
lapply(1:10, function(i) {
fluidRow(
conditionalPanel(
condition = paste("input.ndose >=", i),
column(12,
h3(
paste("Level", i),
style = 'margin-bottom: 23px; margin-top: 24px; padding-top: 0px;'
)
)
),
style = 'margin-left: 20px;'
)
}),
style = 'margin-top: 54px;'
),
column(9,
fluidRow(
column(3,
h3(
paste0(strsplit(get.shinyConst()$THETA[1], ",")[[1]][1], ","),
style = 'padding-top: 0px; margin-bottom: 0px;'
),
h3(
strsplit(get.shinyConst()$THETA[1], ",")[[1]][2],
style = 'padding-top: 0px; margin-bottom: 0px;'
),
style = 'text-align: center; margin-top: 10px;'
),
column(3,
h3(
paste0(strsplit(get.shinyConst()$THETA[2], ",")[[1]][1], ","),
style = 'padding-top: 0px; margin-bottom: 0px;'
),
h3(
strsplit(get.shinyConst()$THETA[2], ",")[[1]][2],
style = 'padding-top: 0px; margin-bottom: 0px;'
),
style = 'text-align: center; margin-top: 10px;'
),
column(3,
h3(
paste0(strsplit(get.shinyConst()$THETA[3], ",")[[1]][1], ","),
style = 'padding-top: 0px; margin-bottom: 0px;'
),
h3(
strsplit(get.shinyConst()$THETA[3], ",")[[1]][2],
style = 'padding-top: 0px; margin-bottom: 0px;'
),
style = 'text-align: center; margin-top: 10px;'
),
column(3,
h3(
paste0(strsplit(get.shinyConst()$THETA[4], ",")[[1]][1], ","),
style = 'padding-top: 0px; margin-bottom: 0px;'
),
h3(
strsplit(get.shinyConst()$THETA[4], ",")[[1]][2],
style = 'padding-top: 0px; margin-bottom: 0px;'
),
style = 'text-align: center; margin-top: 10px;'
),
style = 'height: 54px;'
),
lapply(1:10, function(i) {
fluidRow(
conditionalPanel(
condition = paste("input.ndose >=", i),
lapply(1:4, function(j) {
column(3,
numericInput(
inputId = paste0("realData", i, j),
label = "",
value = 0,
min = 0,
max = 100,
step = 1
)
)
})
)
)
})
)
),
fluidRow(
column(2,
actionButton(
inputId = "realButton",
label = "Conduct Analysis",
width = '135px'
),
offset = 2
),
style = 'margin-top: 20px;'
)
),
offset = 2
)
),
uiOutput("plotTrack"),
uiOutput("plotInterim")
)
}
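## Builds a navigation list with one plot tab per dose level
## for the interim analysis plots.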
ui.plotInterim <- function(l){
result <- list();
for (i in 1:l) {
result[[i]] <- tabPanel(paste("Level", i), plotOutput(paste0("plotInterim", i)));
}
result <- do.call(navlistPanel, c(id = "t", "", result, well = FALSE, list(widths = c(2, 9))));
result;
}
##----------------------------------------------------------------------
## REPORT
##----------------------------------------------------------------------
page.report <- function() {
tabPanel(
title = "Report",
fluidRow(
column(8,
wellPanel(
fluidRow(
h4("Export and Import Current Settings"),
style = 'margin-left: 20px; border-bottom: 2px solid #E3E3E3; margin-right: 20px;'
),
fluidRow(
column(4,
downloadButton(
outputId = "export",
label = "Export",
style = 'margin-top: 20px;'
),
offset = 2
),
column(4,
fileInput(
inputId = "import",
label = "",
buttonLabel = "Import"
)
)
)
),
wellPanel(
fluidRow(
h4("Download Report Analysis"),
style = 'margin-left: 20px; border-bottom: 2px solid #E3E3E3; margin-right: 20px;'
),
fluidRow(
radioButtons(
inputId = "format",
label = "",
choices = c('PDF', 'HTML', 'Word')
),
downloadButton(
outputId = "downloadButton"
),
style = 'margin-left: 30px;'
)
),
offset = 2
)
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/visit/inst/shiny/visit_ui.R
|
---
title: "visit: Vaccine Phase I Design with Simultaneous Evaluation of Immnunogeneicity and Toxicity"
author: "Chenguang Wang"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{visit: Vaccine Phase I Design with Simultaneous Evaluation of Immunogenicity and Toxicity}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, eval = TRUE, echo = FALSE, message = FALSE}
require(visit);
set.seed(10000);
```
# Introduction
Phase I clinical trials are the first step in drug development to apply a new
drug or drug combination on humans. Typical designs of Phase I trials use
toxicity as the primary endpoint and aim to find the maximum tolerable dosage.
However, these designs are generally inapplicable for the development of cancer
vaccines because the primary objectives of a cancer vaccine Phase I trial often
include determining whether the vaccine shows biologic activity.
R package **visit** implements a dose escalation algorithm that simultaneously
accounts for immunogenicity and toxicity. It uses lower dose levels as the
reference for determining if the current dose level is optimal in terms of
immune response. It also ensures subject safety by capping the toxicity rate
with a given upper bound. These two criteria are simultaneously evaluated using
an intuitive decision region.
Users are referred to the following paper for details of the **visit** design:
Wang, C., Rosner, G. L., & Roden, R. B. (2019). A Bayesian design for phase I cancer
therapeutic vaccine trials. Statistics in medicine, 38(7), 1170-1189.
# Installation
The package **visit** can be installed directly from *CRAN*:
```{r, eval = FALSE, echo = TRUE}
install.packages("visit");
require(visit);
```
Some packages (e.g., *shiny*) are required to run the graphical user interface for **visit**,
but are not required to run **visit** through an *R* terminal.
# Conduct Simulation Studies During Study Design
Simulation studies are necessary for evaluating the operating characteristics of a
specific study design. **visit** provides functions for conducting simulation
studies and summarizing the simulation results.
## Simulation Scenarios
The first step in the simulation studies is usually to specify the simulation
scenarios. This is done in **visit** by the function *vtScenario*:
```{r, eval = TRUE, echo = TRUE}
tox <- c(0.07, 0.23, 0.66);
res <- c(0.50, 0.17, 0.59);
rho <- c(0.98, 0.40, 0.46);
scenario <- vtScenario(tox = tox, res = res, rho = rho);
```
The simulation scenarios are constructed using the level-specific DLT risk rates (*tox*),
immune response rates (*res*), and odds ratios (*rho*). The result is a class *VTTRUEPS* object
which has S3 methods *summary* and *plot*.
```{r, eval = TRUE}
summary(scenario);
```
```{r, eval = TRUE, echo = TRUE, fig.height = 4, fig.width = 8}
par(mfrow = c(1,2));
plot(scenario, draw.curves = 1:2, main = "Marginal DLT Risk and Response Rates");
plot(scenario, draw.curves = 3:6, main = "Joint DLT Risk and Response Rates");
```
## Incorporate Prior Knowledge and Specify Priors
Prior knowledge about the DLT risk rates and the prior choices of the parametric
model parameters should be encapsulated into a class
*VTPRIOR* object by the function *vtPriorPar*. Details of the parameters can be
found in Wang et al. (2019).
```{r, eval = TRUE, echo = TRUE}
tau <- c(0.39, 0.87, 0.49);
prior <- vtPriorPar(tau = tau, sdalpha = 10, sdrho = 10);
```
## Conduct Simulation
The main simulation function in the **visit** package is the *vtSimu* function.
The function requires a class *VTTRUEPS* object for simulation scenarios and a
class *VTPRIOR* object for priors, as well as other design parameters including
*etas* for the lower and upper boundaries of DLT risks, *size.cohort* for cohort
sizes, etc. The probability model is specified by *prob.mdl*. The options
are *NONPARA* for the non-parametric model, *NONPARA+* for the non-parametric model with
$\rho=1$, *PARA* for the partially parametric model, and *PARA+* for the partially
parametric model with $\rho=1$.
```{r, eval = TRUE, results = 'hide'}
simu <- vtSimu(n.rep = 100, trueps = scenario,
size.cohort = 5, size.level = 10,
etas = c(0.3, 0.7), dec.cut = c(0.45, 0.55, 0.75),
prob.mdl = "NONPARA+");
```
The result is a class *VTSIMUT* object with S3 methods *summary* and *summary2*.
```{r, eval = TRUE, echo = TRUE}
sum.1 <- summary(simu);
print(sum.1);
```
```{r, eval = TRUE, echo = TRUE}
sum.2 <- summary2(simu);
print(sum.2);
```
# Conduct Data Analysis for Ongoing Phase I Studies
During the conduct of a Phase I study, the **visit** package provides functions
to carry out the interim analysis and support decisions about dose escalation.
## Interim Analysis
To perform the interim analysis, the observations from the current dose level and the
previous dose level are needed. Decision cuts, the lower and upper bounds of the
DLT risk, prob.mdl, and priors are all optional arguments. Please refer to the
**visit** package PDF for details on the resulting *decision map*.
```{r, eval = TRUE, echo = TRUE, fig.height = 5, fig.width = 5}
etas <- c(0.1, 0.3)
dec.cut <- c(0.6,0.6,0.6)
cur.obs.y <- c(3, 2, 1, 1)
prev.obs.y <- c(5, 2, 0, 0)
rst.inter <- vtInterim(cur.obs.y, prev.obs.y = prev.obs.y,
prob.mdl = "NONPARA", etas = etas, dec.cut = dec.cut,
nsmp = 2000);
plot(rst.inter);
```
## Track of Study History
A function *vtTrack* is provided to visualize the entire progress of the study,
including the observed data and dose escalation decisions. The required data is
a five-column matrix, with the first column indicating the dose level and the
remaining columns indicating the observed numbers of patients with *No DLT, No
Response*, *No DLT, Response*, *DLT, No Response*, and *DLT, Response*.
```{r, eval = TRUE, echo = TRUE, fig.height = 4, fig.width = 7}
obs <- rbind(c(1, 6, 4, 3, 6), c(2, 4, 9, 3, 3), c(3, 2, 6, 6, 5));
vtTrack(obs, end.width = 0.8);
```
# Graphical User Interface (GUI)
The **visit** package provides a web-based GUI for conducting the simulation studies and data analyses described above.
The GUI can be accessed by
```{r, echo=TRUE, eval=FALSE}
vtShiny();
```
|
/scratch/gouwar.j/cran-all/cranData/visit/vignettes/vignette.Rmd
|
#' @name PER_2015_rates
#' @docType data
#' @keywords data
#' @title Visual rate observations of Perseids from 2015
#' @description
#' Visual rate and magnitude observations of the Perseid shower from 2015.
#' @details
#' `PER_2015_rates` are rate observations loaded with [vismeteor::load_vmdb_rates].
#' @seealso [vismeteor::load_vmdb]
NULL
#' @name PER_2015_magn
#' @docType data
#' @keywords data
#' @title Visual magnitude observations of Perseids from 2015
#' @description
#' Visual magnitude observations of the Perseid shower from 2015.
#' @details
#' `PER_2015_magn` are magnitude observations loaded with
#' [vismeteor::load_vmdb_magnitudes].
#' @seealso [vismeteor::load_vmdb]
NULL
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/R/data.R
|
#' @title Quantiles with a minimum frequency
#' @description
#' This function generates quantiles with a minimum frequency.
#' These quantiles are formed from a vector `freq` of frequencies.
#' Each quantile then has the minimum total frequency `min`.
#' @param freq integer; A vector of frequencies.
#' @param min integer; Minimum total frequency per quantile.
#' @details
#' The frequencies `freq` are grouped in the order in which they
#' are passed as a vector.
#' The minimum `min` must be greater than `0`.
#' @return
#' A factor of indices is returned.
#' The index references the corresponding passed frequency `freq`.
#' @examples
#' freq <- c(1,2,3,4,5,6,7,8,9)
#' cumsum(freq)
#' (f <- freq.quantile(freq, 10))
#' tapply(freq, f, sum)
#' @export
freq.quantile <- function(freq, min) {
if (0 >= min) {
stop(paste0('min must be greater than 0 instead of "', min, '"!'))
}
n.sum <- 0
id.last <- 1L
id <- integer(length(freq))
for (i in seq_along(freq)) {
n.i <- freq[i]
if ((n.i + n.sum) < min) {
id[i] <- id.last
n.sum <- n.i + n.sum
} else {
id[i] <- id.last
n.sum <- 0
id.last <- id.last + 1L
}
}
if (0 == n.sum) {
id.last <- id.last - 1L
}
id.max <- id[length(id)]
if (id.max > 1 & n.sum > 0 & sum(freq[id == id.last]) < min) {
id[id == id.max] <- id.max - 1L
}
factor(id, ordered = TRUE)
}
#' @title Rounds a contingency table of meteor magnitude frequencies
#' @description
#' The meteor magnitude contingency table of VMDB contains half meteor counts (e.g. `3.5`).
#' This function converts these frequencies to integer values.
#' @param mt table; A two-dimensional contingency table of meteor magnitude frequencies.
#' @details
#' The contingency table of meteor magnitudes `mt` must be two-dimensional.
#' The row names refer to the magnitude observations.
#' Column names must be integer meteor magnitude values.
#' Also, the columns must be sorted in ascending or descending order of meteor magnitude.
#'
#' A sum-preserving algorithm is used for rounding.
#' It ensures that the total frequency of meteors per observation is preserved.
#' The marginal frequencies of the magnitudes are also preserved with
#' the restriction that the deviation is at most \eqn{\pm 0.5}.
#' If the total sum of a meteor magnitude is integer,
#' then the deviation is \eqn{\pm 0}.
#'
#' The algorithm is asymptotic. This means that the more meteors the table contains,
#' the more unbiased is the result of the rounding.
#' @return
#' A rounded contingency table of meteor magnitudes is returned.
#' @examples
#' # For example, create a contingency table of meteor magnitudes
#' mt <- as.table(matrix(
#' c(
#' 0.0, 0.0, 2.5, 0.5, 0.0, 1.0,
#' 0.0, 1.5, 2.0, 0.5, 0.0, 0.0,
#' 1.0, 0.0, 0.0, 3.0, 2.5, 0.5
#' ), nrow = 3, ncol = 6, byrow = TRUE
#' ))
#' colnames(mt) <- seq(6)
#' rownames(mt) <- c('A', 'B', 'C')
#' mt
#' margin.table(mt, 1)
#' margin.table(mt, 2)
#'
#' # contingency table with integer values
#' (mt.int <- vmtable(mt))
#' margin.table(mt.int, 1)
#' margin.table(mt.int, 2)
#' @export
vmtable <- function(mt) {
if (! methods::is(mt, 'table')) {
stop(paste0('Magnitude table is not a table!'))
}
if (2L != length(dim(mt))) {
stop(paste0('Magnitude table is not two-dimensional!'))
}
mt.m <- as.matrix(mt)
ncol.mt <- ncol(mt.m)
# "fair" rounding. Determinate the direction
from.right <- 0L != sum(mt.m) %% 2L
if (from.right) {
mt.m <- t(apply(mt.m, 1L, rev))
}
mt2c <- round(2L * mt.m) # "half" meteors
# phase 1: round column margin and add dummy row
margin.v <- as.integer(as.vector(margin.table(mt2c, 2L)))
dummy.row <- diff(c(0L, sapply(cumsum(margin.v), function(freq) {
2L * (freq %/% 2L) # round down
}))) - margin.v
mt2c <- rbind(mt2c, dummy.row) # add dummy row
nrow.mt2c <- nrow(mt2c)
# phase 2: cumsum column-wise and round
mt2c.v <- as.integer(as.vector(mt2c)) # column-wise
mt2c.cs <- cumsum(mt2c.v) # due to sum preserving rounding
mt2c.cs <- mapply(function(freq, row.id) { # add row index
c(freq = freq, row.id = row.id)
}, mt2c.cs, rep(seq_len(nrow.mt2c), times=ncol.mt), SIMPLIFY = FALSE)
rows.reminder <- rep(0L, nrow.mt2c) # row reminder
# apply each frequency column-wise, starting from upper left
mt2c.f <- sapply(mt2c.cs, function(cs) {
row.id <- cs['row.id']
freq <- cs['freq']
freq.quotient <- freq %/% 2L
freq.reminder <- freq %% 2L
freq <- 2L * freq.quotient
if (0L == freq.reminder) {
return(freq)
}
row.reminder.org <- rows.reminder[row.id]
row.reminder.new <- 1L
if (1L == row.reminder.org) {
freq <- freq + 2L
row.reminder.new <- 0L
}
if (row.reminder.org != row.reminder.new) {
rows.reminder[row.id] <<- row.reminder.new
}
return(freq)
})
mt2c.v <- diff(append(mt2c.f, 0L, after = 0L)) # inverse of cumsum
mt2c.m <- matrix(mt2c.v, ncol = ncol.mt)
mt2c.m <- utils::head(mt2c.m, -1L) # remove dummy.row
if (from.right) {
mt2c.m <- t(apply(mt2c.m, 1L, rev))
}
result <- as.table(mt2c.m %/% 2L)
dimnames(result) <- dimnames(mt) # restore dimnames
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/R/freq.R
|
#' @name load_vmdb
#' @aliases load_vmdb_rates
#' @aliases load_vmdb_magnitudes
#' @import DBI
#' @title Loading visual meteor observations from the database
#' @description
#' Loads the data of visual meteor observations from a database created with
#' \href{https://pypi.org/project/imo-vmdb/}{imo-vmdb}.
#' @note Angle values are expected and returned in degrees.
#' @param dbcon database connection.
#' @param shower character; selects by meteor shower codes.
#' `NA` loads sporadic meteors.
#' @param period time; selects a time range by minimum/maximum.
#' @param sl numeric; selects a range of solar longitudes by minimum/maximum.
#' @param lim.magn numeric; selects a range of limiting magnitudes by minimum/maximum.
#' @param sun.alt.max numeric; selects the maximum altitude of the sun.
#' @param moon.alt.max numeric; selects the maximum altitude of the moon.
#' @param session.id integer; selects by session ids.
#' @param rate.id integer; selects rate observations by ids.
#' @param magn.id integer; selects magnitude observations by ids.
#' @param withSessions logical; if `TRUE`, also load the corresponding session data.
#' @param withMagnitudes logical; if `TRUE`, also load the corresponding magnitude observations.
#' @details
#' `sl`, `period` and `lim.magn` expect a vector with successive minimum and maximum values.
#' `sun.alt.max` and `moon.alt.max` are expected to be scalar values.
#' @return
#' Both functions return a list, with
#'
#' \tabular{ll}{
#' `observations` \tab data frame, rate or magnitude observations,\cr
#' `sessions` \tab data frame; session data of observations,\cr
#' `magnitudes` \tab table; contingency table of meteor magnitude frequencies.
#' }
#'
#' `observations` depends on the function call. `load_vmdb_rates` returns a data frame, with
#'
#' \tabular{ll}{
#' `rate.id` \tab unique identifier of the rate observation,\cr
#' `shower.code` \tab IAU code of the shower. It is `NA` in case of sporadic meteors.\cr
#' `period.start` \tab start of observation,\cr
#' `period.end` \tab end of observation,\cr
#' `sl.start` \tab solarlong at start of observation,\cr
#' `sl.end` \tab solarlong at end of observation,\cr
#' `session.id` \tab reference to the session,\cr
#' `freq` \tab count of observed meteors,\cr
#' `lim.magn` \tab limiting magnitude,\cr
#' `t.eff` \tab net observed time in hours,\cr
#' `f` \tab correction factor of cloud cover,\cr
#' `time.sidereal` \tab sidereal time,\cr
#' `sun.alt` \tab altitude of the sun,\cr
#' `sun.az` \tab azimuth of the sun,\cr
#' `moon.alt` \tab altitude of the moon,\cr
#' `moon.az` \tab azimuth of the moon,\cr
#' `moon.illum` \tab illumination of the moon (`0.0 .. 1.0`),\cr
#' `field.alt` \tab altitude of the field of view (optional),\cr
#' `field.az` \tab azimuth of the field of view (optional),\cr
#' `radiant.alt` \tab altitude of the radiant (optional). The zenith attraction is already applied.\cr
#' `radiant.az` \tab azimuth of the radiant (optional),\cr
#' `magn.id` \tab reference to the magnitude observations (optional).
#' }
#'
#' `load_vmdb_magnitudes` returns a `observations` data frame, with
#'
#' \tabular{ll}{
#' `magn.id` \tab unique identifier of the magnitude observation,\cr
#' `shower.code` \tab IAU code of the shower. It is `NA` in case of sporadic meteors.\cr
#' `period.start` \tab start of observation,\cr
#' `period.end` \tab end of observation,\cr
#' `sl.start` \tab solarlong at start of observation,\cr
#' `sl.end` \tab solarlong at end of observation,\cr
#' `session.id` \tab reference to the session,\cr
#' `freq` \tab count of observed meteors,\cr
#' `magn.mean` \tab mean of magnitudes,\cr
#' `lim.magn` \tab limiting magnitude (optional).
#' }
#'
#' The `sessions` data frame contains
#'
#' \tabular{ll}{
#' `session.id` \tab unique identifier of the session,\cr
#' `longitude` \tab location’s longitude,\cr
#' `latitude` \tab location’s latitude,\cr
#' `elevation` \tab height above mean sea level in km,\cr
#' `country` \tab country name,\cr
#' `location.name` \tab location name,\cr
#' `observer.id` \tab observer id (optional),\cr
#' `observer.name` \tab observer name (optional).
#' }
#'
#' `magnitudes` is a contingency table of meteor magnitude frequencies.
#' The row names refer to the id of magnitude observations.
#' The column names refer to the magnitude.
#'
#' @references \url{https://pypi.org/project/imo-vmdb/}
#' @examples
#' \dontrun{
#' # create a connection to the data base
#' con <- dbConnect(
#' PostgreSQL(),
#' dbname = "vmdb",
#' host = "localhost",
#' user = "vmdb"
#' )
#'
#' # load rate observations including
#' # session data and magnitude observations
#' data <- load_vmdb_rates(
#' con,
#' shower = 'PER',
#' sl = c(135.5, 145.5),
#' period = c('2015-08-01', '2015-08-31'),
#' lim.magn = c(5.3, 6.7),
#' withMagnitudes = TRUE,
#' withSessions = TRUE
#' )
#'
#' # load magnitude observations including
#' # session data and magnitude observations
#' data <- load_vmdb_magnitudes(
#' con,
#' shower = 'PER',
#' sl = c(135.5, 145.5),
#' period = c('2015-08-01', '2015-08-31'),
#' lim.magn = c(5.3, 6.7),
#' withMagnitudes = TRUE,
#' withSessions = TRUE
#' )
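#'
#' # The returned contingency table of magnitudes may contain half counts.
#' # As an illustration, it could be rounded to integer frequencies with
#' # vmtable() from this package:
#' mt.int <- vmtable(data$magnitudes)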
#' }
#' @rdname load_vmdb
#' @export
load_vmdb_rates <- function(
dbcon,
shower = NULL,
period = NULL,
sl = NULL,
lim.magn = NULL,
sun.alt.max = NULL,
moon.alt.max = NULL,
session.id = NULL,
rate.id = NULL,
withSessions = FALSE,
withMagnitudes = FALSE
) {
shower.filter <- ''
if (!is.null(shower)) {
shower <- unique(shower)
shower.is_na <- is.na(shower)
with_spo <- length(shower[shower.is_na]) > 0
shower <- shower[!shower.is_na]
with_showers <- length(shower) > 0
if (with_spo) {
spo.filter <- "r.shower IS NULL"
} else {
spo.filter <- "FALSE"
}
if (with_showers) {
shower.filter <- paste(shower, collapse="','")
shower.filter <- paste0("r.shower IN ('", shower.filter , "')" )
} else {
shower.filter <- "FALSE"
}
if (with_showers | with_spo) {
shower.filter <- paste(
'(', shower.filter, 'OR', spo.filter , ') AND '
)
} else {
shower.filter <- ''
}
}
period.filter <- ''
if (!is.null(period)) {
period <- matrix(period, ncol=2)
period.filter <- apply(period, 1, function(period){
paste0(
"r.period_start >= '", period[1] ,
"' AND r.period_end <= '", period[2] , "'"
)
})
period.filter <- paste('(', paste(period.filter, collapse=') OR ('), ') AND ')
}
sl.filter <- ''
if (!is.null(sl)) {
sl <- matrix(sl, ncol=2)
sl.filter <- apply(sl, 1, function(sl){
if (sl[1] > sl[2]) {
paste0(
"r.sl_start BETWEEN ", sl[1], " AND 360.0 AND ",
"r.sl_end BETWEEN 0.0 AND ", sl[2]
)
} else {
paste0("r.sl_start > ", sl[1], " AND r.sl_end <= ", sl[2])
}
})
sl.filter <- paste('(', paste(sl.filter, collapse=') OR ('), ') AND ')
}
lim.magn.filter <- ''
if (!is.null(lim.magn)) {
lim.magn <- matrix(lim.magn, ncol=2)
lim.magn.filter <- apply(lim.magn, 1, function(lim.magn){
paste0("r.lim_mag BETWEEN ", lim.magn[1], " AND ", lim.magn[2])
})
lim.magn.filter <- paste('(', paste(lim.magn.filter, collapse=') OR ('), ') AND ')
}
sun_alt.filter <- ''
if (!is.null(sun.alt.max)) {
sun_alt.filter <- paste0("r.sun_alt <= " , sun.alt.max , " AND " )
}
moon_alt.filter <- ''
if (!is.null(moon.alt.max)) {
moon_alt.filter <- paste0("r.moon_alt <= " , moon.alt.max , " AND " )
}
session.id.filter <- ''
if (!is.null(session.id)) {
session.id.filter <- paste(session.id, collapse=",")
session.id.filter <- paste0(
"r.session_id IN (", session.id.filter , ") AND "
)
}
rate.id.filter <- ''
if (!is.null(rate.id)) {
rate.id.filter <- paste(rate.id, collapse=",")
rate.id.filter <- paste0("r.id IN (", rate.id.filter , ") AND " )
}
# query the data from SQL-Database
with_query <- paste0("
WITH selection as (
SELECT
r.id as \"rate.id\",
r.shower as \"shower.code\",
r.period_start as \"period.start\",
r.period_end as \"period.end\",
r.sl_start as \"sl.start\",
r.sl_end as \"sl.end\",
r.session_id as \"session.id\",
r.freq as \"freq\",
r.lim_mag as \"lim.magn\",
r.t_eff as \"t.eff\",
r.f as \"f\",
r.sidereal_time as \"time.sidereal\",
r.sun_alt as \"sun.alt\",
r.sun_az as \"sun.az\",
r.moon_alt as \"moon.alt\",
r.moon_az as \"moon.az\",
r.moon_illum as \"moon.illum\",
r.field_alt as \"field.alt\",
r.field_az as \"field.az\",
r.rad_alt as \"radiant.alt\",
r.rad_az as \"radiant.az\",
rm.magn_id as \"magn.id\"
FROM rate as r
LEFT JOIN rate_magnitude rm ON r.id = rm.rate_id
WHERE
", period.filter, "
", sl.filter, "
", lim.magn.filter, "
", sun_alt.filter, "
", moon_alt.filter, "
", session.id.filter, "
", rate.id.filter, "
", shower.filter, " TRUE
)
")
query <- paste0(with_query, "SELECT * FROM selection")
observations <- DBI::dbGetQuery(dbcon, query)
row.names(observations) <- observations$rate.id
observations$shower.code <- factor(observations$shower.code)
observations$session.id <- factor(observations$session.id)
observations$magn.id <- factor(observations$magn.id)
magnitudes <- NULL
if (withMagnitudes) {
query <- paste0(with_query, "
SELECT
id as \"magn.id\",
magn as \"magn\",
freq as \"freq\"
FROM magnitude_detail
WHERE id IN (
SELECT \"magn.id\" FROM selection
WHERE \"magn.id\" IS NOT NULL
)
")
magnitudes <- DBI::dbGetQuery(dbcon, query)
magnitudes$magn <- factor(
magnitudes$magn,
levels = sort(unique(magnitudes$magn), decreasing = TRUE),
ordered = TRUE
)
magnitudes <- stats::xtabs(freq ~ magn.id + magn, data = magnitudes)
}
sessions <- NULL
if (withSessions) {
query <- paste0(with_query, "
SELECT
id AS \"session.id\",
longitude,
latitude,
elevation,
country,
city as \"location.name\",
observer_id AS \"observer.id\",
observer_name AS \"observer.name\"
FROM obs_session
WHERE \"id\" IN (
SELECT \"session.id\" FROM selection
WHERE \"session.id\" IS NOT NULL
)
")
sessions <- DBI::dbGetQuery(dbcon, query)
row.names(sessions) <- sessions$session.id
sessions$country <- factor(sessions$country)
sessions$location.name <- factor(sessions$location.name)
sessions$observer.id <- factor(sessions$observer.id)
sessions$observer.name <- factor(sessions$observer.name)
}
list(observations=observations, sessions=sessions, magnitudes=magnitudes)
}
#' @rdname load_vmdb
#' @export
load_vmdb_magnitudes <- function(
dbcon,
shower = NULL,
period = NULL,
sl = NULL,
lim.magn = NULL,
session.id = NULL,
magn.id = NULL,
withSessions = FALSE,
withMagnitudes = TRUE
) {
shower.filter <- ''
if (!is.null(shower)) {
shower <- unique(shower)
shower.is_na <- is.na(shower)
with_spo <- length(shower[shower.is_na]) > 0
shower <- shower[!shower.is_na]
with_showers <- length(shower) > 0
if (with_spo) {
spo.filter <- "shower IS NULL"
} else {
spo.filter <- "FALSE"
}
if (with_showers) {
shower.filter <- paste(shower, collapse="','")
shower.filter <- paste0("shower IN ('", shower.filter , "')" )
} else {
shower.filter <- "FALSE"
}
if (with_showers | with_spo) {
shower.filter <- paste(
'(', shower.filter, 'OR', spo.filter , ') AND '
)
} else {
shower.filter <- ''
}
}
period.filter <- ''
if (!is.null(period)) {
period <- matrix(period, ncol=2)
period.filter <- apply(period, 1, function(period){
paste0(
"period_start >= '", period[1] ,
"' AND period_end <= '", period[2] , "'"
)
})
period.filter <- paste('(', paste(period.filter, collapse=') OR ('), ') AND ')
}
sl.filter <- ''
if (!is.null(sl)) {
sl <- matrix(sl, ncol=2)
sl.filter <- apply(sl, 1, function(sl){
if (sl[1] > sl[2]) {
paste0(
"sl_start BETWEEN ", sl[1], " AND 360.0 AND ",
"sl_end BETWEEN 0.0 AND ", sl[2]
)
} else {
paste0("sl_start > ", sl[1], " AND sl_end <= ", sl[2])
}
})
sl.filter <- paste('(', paste(sl.filter, collapse=') OR ('), ') AND ')
}
lim.magn.filter <- ''
if (!is.null(lim.magn)) {
lim.magn <- matrix(lim.magn, ncol=2)
lim.magn.filter <- apply(lim.magn, 1, function(lim.magn){
paste0("lim_mag BETWEEN ", lim.magn[1], " AND ", lim.magn[2])
})
lim.magn.filter <- paste('(', paste(lim.magn.filter, collapse=') OR ('), ') AND ')
}
session.id.filter <- ''
if (!is.null(session.id)) {
session.id.filter <- paste(session.id, collapse=",")
session.id.filter <- paste0(
"\"session_id\" IN (", session.id.filter , ") AND "
)
}
magn.id.filter <- ''
if (!is.null(magn.id)) {
magn.id.filter <- paste(magn.id, collapse=",")
magn.id.filter <- paste0("id IN (", magn.id.filter , ") AND " )
}
# query the data from PostgreSQL
with_query <- paste0("
WITH selection as (
SELECT
id as \"magn.id\",
shower as \"shower.code\",
period_start as \"period.start\",
period_end as \"period.end\",
sl_start as \"sl.start\",
sl_end as \"sl.end\",
session_id as \"session.id\",
freq as \"freq\",
mean as \"magn.mean\",
lim_mag as \"lim.magn\"
FROM magnitude
WHERE
", period.filter, "
", sl.filter, "
", lim.magn.filter, "
", session.id.filter, "
", magn.id.filter, "
", shower.filter, " TRUE
)
")
query <- paste0(with_query, "SELECT * FROM selection")
observations <- DBI::dbGetQuery(dbcon, query)
row.names(observations) <- observations$magn.id
observations$shower.code <- factor(observations$shower.code)
observations$session.id <- factor(observations$session.id)
magnitudes <- NULL
if (withMagnitudes) {
query <- paste0(with_query, "
SELECT
id as \"magn.id\",
magn as \"magn\",
freq as \"freq\"
FROM magnitude_detail
WHERE id IN (
SELECT \"magn.id\" FROM selection
)
")
magnitudes <- DBI::dbGetQuery(dbcon, query)
magnitudes$magn <- factor(
magnitudes$magn,
levels = sort(unique(magnitudes$magn), decreasing = TRUE),
ordered = TRUE
)
magnitudes <- stats::xtabs(freq ~ magn.id + magn, data = magnitudes)
}
sessions <- NULL
if (withSessions) {
query <- paste0(with_query, "
SELECT
id AS \"session.id\",
longitude,
latitude,
elevation,
country,
city as \"location.name\",
observer_id AS \"observer.id\",
observer_name AS \"observer.name\"
FROM obs_session
WHERE \"id\" IN (
SELECT \"session.id\" FROM selection
WHERE \"session.id\" IS NOT NULL
)
")
sessions <- DBI::dbGetQuery(dbcon, query)
row.names(sessions) <- sessions$session.id
sessions$country <- factor(sessions$country)
sessions$location.name <- factor(sessions$location.name)
sessions$observer.id <- factor(sessions$observer.id)
sessions$observer.name <- factor(sessions$observer.name)
}
list(observations=observations, sessions=sessions, magnitudes=magnitudes)
}
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/R/load_data.R
|
#' @name mideal
#' @aliases dmideal
#' @title Ideal distributed meteor magnitudes
#' @description
#' Density, distribution function, quantile function and random generation
#' of ideal distributed meteor magnitudes.
#' @param psi numeric; the location parameter of a probability distribution.
#' It is the only parameter of the distribution.
#' @param m numeric; meteor magnitude.
#' @param p numeric; probability.
#' @param n numeric; count of meteor magnitudes.
#' @param log logical; if `TRUE`, probabilities p are given as `log(p)`.
#' @param lower.tail logical; if `TRUE` (default) probabilities are
#' \eqn{P[M \le m]}, otherwise, \eqn{P[M > m]}.
#' @details
#' The density of an ideal magnitude distribution is
#' \deqn{
#' {\displaystyle \frac{\mathrm{d}p}{\mathrm{d}m} = \frac{3}{2} \, \log(r) \sqrt{\frac{r^{3 \, \psi + 2 \, m}}{(r^\psi + r^m)^5}}}
#' }
#' where \eqn{m} is the meteor magnitude, \eqn{r = 10^{0.4} \approx 2.51189 \dots} is a constant and
#' \eqn{\psi} is the only parameter of this magnitude distribution.
#' @return
#' `dmideal` gives the density, `pmideal` gives the distribution function,
#' `qmideal` gives the quantile function and `rmideal` generates random deviates.
#'
#' The length of the result is determined by `n` for `rmideal`, and is the maximum
#' of the lengths of the numerical vector arguments for the other functions.
#'
#' `qmideal` can return `NaN` values with a warning.
#' @references Richter, J. (2018) \emph{About the mass and magnitude distributions of meteor showers}.
#' WGN, Journal of the International Meteor Organization, vol. 46, no. 1, p. 34-38
#' @examples
#' old_par <- par(mfrow = c(2,2))
#' psi <- 5.0
#' plot(
#' function(m) dmideal(m, psi, log = FALSE),
#' -5, 10,
#' main = paste0('density of ideal meteor magnitude\ndistribution (psi = ', psi, ')'),
#' col = "blue",
#' xlab = 'm',
#' ylab = 'dp/dm'
#' )
#' abline(v=psi, col="red")
#'
#' plot(
#' function(m) dmideal(m, psi, log = TRUE),
#' -5, 10,
#' main = paste0('density of ideal meteor magnitude\ndistribution (psi = ', psi, ')'),
#' col = "blue",
#' xlab = 'm',
#' ylab = 'log( dp/dm )'
#' )
#' abline(v=psi, col="red")
#'
#' plot(
#' function(m) pmideal(m, psi),
#' -5, 10,
#' main = paste0('probability of ideal meteor magnitude\ndistribution (psi = ', psi, ')'),
#' col = "blue",
#' xlab = 'm',
#' ylab = 'p'
#' )
#' abline(v=psi, col="red")
#'
#' plot(
#' function(p) qmideal(p, psi),
#' 0.01, 0.99,
#' main = paste('quantile of ideal meteor magnitude\n distribution (psi = ', psi, ')'),
#' col = "blue",
#' xlab = 'p',
#' ylab = 'm'
#' )
#' abline(h=psi, col="red")
#'
#' # generate random meteor magnitudes
#' m <- rmideal(1000, psi)
#'
#' # log likelihood function
#' llr <- function(psi) {
#' -sum(dmideal(m, psi, log=TRUE))
#' }
#'
#' # maximum likelihood estimation (MLE) of psi
#' est <- optim(2, llr, method='Brent', lower=0, upper=8, hessian=TRUE)
#'
#' # estimations
#' est$par # mean of psi
#' sqrt(1/est$hessian[1][1]) # standard deviation of psi
#'
#' par(old_par)
#' @rdname mideal
#' @export
dmideal <- function(m, psi = 0.0, log = FALSE) {
a <- -base::log(10.0)/2.5
d <- rep(NA, length(m))
psi.exp <- 10.0
m <- m - psi
idx <- m > psi.exp
if (any(idx)) {
if (log) {
d[idx] <- 1.5 * a * m[idx]
} else {
d[idx] <- exp(1.5 * a * m[idx])
}
}
idx <- m < -psi.exp
if (any(idx)) {
if (log) {
d[idx] <- -a * m[idx]
} else {
d[idx] <- exp(-a * m[idx])
}
}
idx <- is.na(d)
if (any(idx)) {
if (log) {
d[idx] <- (3 * a * m[idx] - 5 * base::log(1.0 + exp(a * m[idx])))/2
} else {
d[idx] <- base::sqrt(
exp(3 * a * m[idx])/(1.0 + exp(a * m[idx]))^5
)
}
}
if (log) {
base::log(-1.5 * a) + d
} else {
-1.5 * a * d
}
}
#' @rdname mideal
#' @export
pmideal <- function(m, psi = 0.0, lower.tail = TRUE, log = FALSE) {
a <- -base::log(10.0)/2.5
psi.exp <- 10.0
m <- m - psi
p <- rep(NA, length(m))
if (lower.tail) {
spline.knods <- c(
-8.80472534014006,
-8.34423787637494,
-7.88372873094501,
-7.42318695592341,
-6.96263200116049,
-6.50203076601891,
-6.04137424073419,
-5.58062400802751,
-5.11973760793805,
-4.6586468529202,
-4.19721109282873,
-3.73524195963578,
-3.27243690786416,
-2.8083150258617,
-2.34214577748568,
-1.87281940429522,
-1.39869703344889,
-0.917450131705818,
-0.425957810918723,
0.0796223098768092,
0.603456223455557,
1.14927014152791,
1.71941908808972,
2.31420877744153,
2.93185193543927,
3.56905016441712,
4.22186644876613,
4.88646802251895,
5.55957886852415,
6.23863980114079,
6.92171415166587,
7.6074921875779,
8.2951330347783,
8.98380534340058,
9.67360021970155,
10.3632095823678,
11.0538798267455,
11.7465047916797,
12.4480963334632,
13.1258002217638,
13.8198202144629
)
} else {
spline.knods <- c(
8.80485039014547,
8.34431865416611,
7.88377975423457,
7.42322504002181,
6.96264953286627,
6.50204026882691,
6.04137719922142,
5.5806286403783,
5.11974504501854,
4.65864760617632,
4.19721233788351,
3.73524403458982,
3.27243684618656,
2.80831497655707,
2.34214575402348,
1.87281941733144,
1.3986971549163,
0.917450465736558,
0.425957597606353,
-0.0796226279169917,
-0.603456102786167,
-1.14927012452963,
-1.71941902164115,
-2.31420847179972,
-2.93185085210491,
-3.56905007596158,
-4.22186620307173,
-4.88646699897379,
-5.55957402041309,
-6.23861581194743,
-6.92169294002751,
-7.60746668941095,
-8.295014234359,
-8.98371489435258,
-9.67316619598767,
-10.3630974814401,
-11.053325888328,
-11.7437374092862,
-12.4343089812847,
-13.1248984689238,
-13.8155074574103
)
}
f.spline <- stats::splinefun(seq(-psi.exp, psi.exp, 0.5), spline.knods, method = "hyman")
idx <- lower.tail & m < -psi.exp
if (any(idx)) {
p.max <- 1/(1 + exp(-f.spline(-psi.exp)))
if (log) {
p[idx] <- base::log(p.max) + stats::pexp(-psi.exp - m[idx], -a, lower.tail = FALSE, log = TRUE)
} else {
p[idx] <- p.max * stats::pexp(-psi.exp - m[idx], -a, lower.tail = FALSE, log = FALSE)
}
}
idx <- !lower.tail & m > psi.exp
if (any(idx)) {
p.max <- 1/(1 + exp(-f.spline(psi.exp)))
if (log) {
p[idx] <- base::log(p.max) + stats::pexp(m[idx] - psi.exp, -1.5 * a, lower.tail = FALSE, log = TRUE)
} else {
p[idx] <- p.max * stats::pexp(m[idx] - psi.exp, -1.5 * a, lower.tail = FALSE, log = FALSE)
}
}
idx <- is.na(p)
if (any(idx)) {
p[idx] <- 1/(1 + exp(-f.spline(m[idx])))
if (log) {
p[idx] <- base::log(p[idx])
}
}
p
}
#' @rdname mideal
#' @export
qmideal <- function(p, psi = 0.0, lower.tail = TRUE) {
a <- -base::log(10.0)/2.5
psi.exp <- 10.0
m <- rep(NA, length(p))
apply.idx <- !is.na(p) & p>0.0 & p < 1.0
if (lower.tail) {
m[0.0 == p] <- -Inf
m[(p + 1e-07) >= 1.0 & p <= 1.0] <- Inf
spline.knods <- c(
-8.80472534014006,
-8.34423787637494,
-7.88372873094501,
-7.42318695592341,
-6.96263200116049,
-6.50203076601891,
-6.04137424073419,
-5.58062400802751,
-5.11973760793805,
-4.6586468529202,
-4.19721109282873,
-3.73524195963578,
-3.27243690786416,
-2.8083150258617,
-2.34214577748568,
-1.87281940429522,
-1.39869703344889,
-0.917450131705818,
-0.425957810918723,
0.0796223098768092,
0.603456223455557,
1.14927014152791,
1.71941908808972,
2.31420877744153,
2.93185193543927,
3.56905016441712,
4.22186644876613,
4.88646802251895,
5.55957886852415,
6.23863980114079,
6.92171415166587,
7.6074921875779,
8.2951330347783,
8.98380534340058,
9.67360021970155,
10.3632095823678,
11.0538798267455,
11.7465047916797,
12.4480963334632,
13.1258002217638,
13.8198202144629
)
p.min <- 1/(1 + exp(-spline.knods[1]))
} else {
m[0.0 == p] <- Inf
m[(p + 1e-07) >= 1.0 & p <= 1.0] <- -Inf
spline.knods <- c(
8.80485039014547,
8.34431865416611,
7.88377975423457,
7.42322504002181,
6.96264953286627,
6.50204026882691,
6.04137719922142,
5.5806286403783,
5.11974504501854,
4.65864760617632,
4.19721233788351,
3.73524403458982,
3.27243684618656,
2.80831497655707,
2.34214575402348,
1.87281941733144,
1.3986971549163,
0.917450465736558,
0.425957597606353,
-0.0796226279169917,
-0.603456102786167,
-1.14927012452963,
-1.71941902164115,
-2.31420847179972,
-2.93185085210491,
-3.56905007596158,
-4.22186620307173,
-4.88646699897379,
-5.55957402041309,
-6.23861581194743,
-6.92169294002751,
-7.60746668941095,
-8.295014234359,
-8.98371489435258,
-9.67316619598767,
-10.3630974814401,
-11.053325888328,
-11.7437374092862,
-12.4343089812847,
-13.1248984689238,
-13.8155074574103
)
p.min <- 1/(1 + exp(-spline.knods[length(spline.knods)]))
}
f.spline <- stats::splinefun(spline.knods, seq(-psi.exp, psi.exp, 0.5), method = "hyman")
idx <- apply.idx & lower.tail & p <= p.min
if (any(idx)) {
m[idx] <- -psi.exp - stats::qexp(p[idx]/p.min, -a, lower.tail = FALSE, log = FALSE)
}
idx <- apply.idx & !lower.tail & p <= p.min
if (any(idx)) {
m[idx] <- psi.exp + stats::qexp(p[idx]/p.min, -1.5 * a, lower.tail = FALSE, log = FALSE)
}
idx <- apply.idx & is.na(m)
if (any(idx)) {
p.logit <- base::log(p[idx]/(1-p[idx]))
m[idx] <- f.spline(p.logit)
}
if (anyNA(m)) {
warning('NaNs produced')
}
    m + psi
}
#' @rdname mideal
#' @export
rmideal <- function(n, psi = 0.0) {
p <- stats::runif(n)
m <- rep(NA, n)
idx <- p < 0.5
if (any(idx)) {
m[idx] <- vismeteor::qmideal(p[idx], psi, lower.tail = TRUE)
}
if (any(!idx)) {
m[!idx] <- vismeteor::qmideal(1.0 - p[!idx], psi, lower.tail = FALSE)
}
m
}
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/R/mideal.R
|
#' @details
#' The data used in this package can created and provided by
#' [imo-vmdb](https://pypi.org/project/imo-vmdb/).
"_PACKAGE"
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/R/package.R
|
#' @title Perception Probabilities of Visual Meteor Magnitudes
#' @description
#' Provides the perception probability of visual meteor magnitudes and its first derivative.
#' @param m numerical; difference between the limiting magnitude and the meteor magnitude.
#' @param deriv.degree integer; degree of derivative of the perception probability.
#' Currently, valid values of `deriv.degree` are `0`, `1` and `2`.
#' @details
#' The perception probabilities of _Koschack R., Rendtel J., 1990b_
#' are estimated with the formula
#' \deqn{
#' p(m) = \begin{cases}
#' 1.0 - \exp\left(-z(m + 0.5)\right)\ & \text{ if } m > -0.5,\\
#' 0.0 \ & \text{ otherwise,}
#' \end{cases}
#' }
#' where
#' \deqn{
#' z(x) = 0.003 \, x + 0.0056 \, x^2 + 0.0014 \, x^4
#' }
#' and `m` is the difference between the limiting magnitude and the meteor magnitude.
#' @return This function returns the visual perception probabilities.
#' If `deriv.degree` is specified, it will return the `deriv.degree`-th order derivative
#' of the perception probability.
#' @references Koschack R., Rendtel J., 1990b _Determination of spatial number density and mass index from visual meteor observations (II)._ WGN 18, 119–140.
#' @examples
#' # Perception probability of visually estimated meteor of magnitude 3.0
#' # with a limiting magnitude of 5.6.
#' vmperception(5.6 - 3.0)
#'
#' # plot
#' old_par <- par(mfrow = c(1,1))
#' plot(
#' vmperception,
#' -0.5, 8,
#' main = paste(
#' 'perception probability of',
#' 'visual meteor magnitudes'
#' ),
#' col = "blue",
#' xlab = 'm',
#' ylab = 'p'
#' )
#' plot(
#' function(m) {
#' vmperception(m, deriv.degree=1L)/vmperception(m)
#' },
#' -0.3, 8,
#' main = paste(
#' 'q-values of',
#' 'visual meteor magnitudes'
#' ),
#' col = "blue",
#' log = 'y',
#' xlab = 'm',
#' ylab = 'q'
#' )
#'
#' par(old_par)
#' @export
vmperception <- function(m, deriv.degree = 0L) {
poly.coef <- c(0.0, 0.003, 0.0056, 0, 0.0014)
names(poly.coef) <- seq(along = poly.coef) - 1 # exponents
m <- m + 0.5
p <- rep(0.0, length(m))
idx <- m > .Machine$double.eps
if (any(idx)) {
f0 <- f.polynomial(m[idx], poly.coef)
if (0L == deriv.degree) {
# 1 - exp(-f(x))
p[idx] <- 1.0 - exp(-f0)
} else if (1L == deriv.degree) {
# f'(x) * exp(-f(x))
poly.coef1 <- f.polynomial.coef(poly.coef, deriv.degree = 1L)
f1 <- f.polynomial(m[idx], poly.coef1)
p[idx] <- exp(-f0) * f1
} else if (2L == deriv.degree) {
# (f''(x) -f'(x)^2) * exp(-f(x))
poly.coef1 <- f.polynomial.coef(poly.coef, deriv.degree = 1L)
f1 <- f.polynomial(m[idx], poly.coef1)
poly.coef2 <- f.polynomial.coef(poly.coef, deriv.degree = 2L)
f2 <- f.polynomial(m[idx], poly.coef2)
p[idx] <- exp(-f0) * (f2 - f1^2)
} else {
stop(paste('deriv.degree', deriv.degree, 'not implemented!'))
}
}
p
}
#' @title Laplace-Transformed Perception Probabilities of Visual Meteor Magnitudes
#' @description
#' Provides the Laplace-transformed perception probability of visual meteor magnitudes
#' and its first derivative.
#' @param s numerical; Real (non-complex) parameter for the Laplace transformation.
#' @param deriv.degree integer; degree of derivative of the transformation.
#' Currently, valid values of `deriv.degree` are `0`, `1` and `2`.
#' @details
#' The Laplace-transformed [perception probabilities][vismeteor::vmperception] `F(s)`, given as
#' \deqn{
#' F(s) = \mathcal{L} \left\{p\right\}(s)
#' = \int_{-0.5}^{\infty} \, f(m) \, \mathrm e^{-s \, m} \,\mathrm{d}m \,,
#' }
#' are approximately
#' \deqn{
#' P(s) = \begin{cases}
#' s^{-1} \, \exp\left(-4.11 \, s + 1.32 \, s^2 - 0.15 \, s^3\right)\ & \text{ if } s > 0,\\
#' \text{undefined} \ & \text{ otherwise.}
#' \end{cases}
#' }
#' Here, `m` is the difference between the limiting magnitude and the meteor magnitude,
#' and `f(m)` denotes the perception probabilities as a function of `m`.
#' \eqn{\mathcal{L}} denotes the one-sided Laplace transform.
#'
#' The Laplace transform is notably effective for determining the mean and variance
#' of observed meteor magnitudes, which are measured relative to the limiting magnitude.
#' This is just one example of its application.
#' This approach is valid only when the actual magnitude distribution adheres
#' to \eqn{p(m) \sim r^{-m}}, where \eqn{s = \log(r)}.
#' In this scenario, the mean of the observable meteor magnitudes is given by
#' \eqn{-\mathcal{L}'/\mathcal{L}}, and their variance is calculated as
#' \eqn{\mathcal{L}''/\mathcal{L} - (\mathcal{L}'/\mathcal{L})^2}.
#' @return returns the Laplace-transformed perception probabilities.
#' If `deriv.degree` is specified, it will return the `deriv.degree`-th order derivative
#' of these Laplace-transformed values.
#' @seealso
#' [vismeteor::vmperception]
#' [vismeteor::vmgeom]
#' @examples
#' r <- 2.0
#' s <- log(r)
#' F0 <- vmperception.l(s)
#' F1 <- vmperception.l(s, deriv.degree=1L)
#' # magnitude mean
#' -F1/F0
#' F2 <- vmperception.l(s, deriv.degree=2L)
#' # magnitude variance
#' F2/F0 - (F1/F0)^2
#' # plot the Laplace-transformed perception probabilities
#' old_par <- par(mfrow = c(1,1))
#' plot(
#' vmperception.l,
#' 0.2, 1.1,
#' main = paste(
#' 'Laplace-transformed perception',
#' 'probability of visual meteor magnitudes'
#' ),
#' col = "blue",
#' log = 'y',
#' xlab = 's',
#' ylab = 'L'
#' )
#' par(old_par)
#' @export
vmperception.l <- function(s, deriv.degree = 0L) {
poly.coef <- c(0.0, -4.11, 1.32, -0.15)
names(poly.coef) <- seq(along = poly.coef) - 1 # exponents
f0 <- f.polynomial(s, poly.coef)
if (0L == deriv.degree) {
# exp(f(s))/s
exp(f0)/s
} else if (1L == deriv.degree) {
# e^f(s) ((f'(s))/s - 1/s^2)
poly.coef1 <- f.polynomial.coef(poly.coef, deriv.degree = 1L)
f1 <- f.polynomial(s, poly.coef1)
exp(f0) * (f1/s - s^-2)
} else if (2L == deriv.degree) {
# e^f(s) ((f''(s))/s - (2 f'(s))/s^2 + f'(s)^2/s + 2/s^3)
poly.coef1 <- f.polynomial.coef(poly.coef, deriv.degree = 1L)
f1 <- f.polynomial(s, poly.coef1)
poly.coef2 <- f.polynomial.coef(poly.coef, deriv.degree = 2L)
f2 <- f.polynomial(s, poly.coef2)
exp(f0) * ( f2/s - 2*f1*s^-2 + (f1^2)/s + 2*(s^-3))
} else {
stop(paste('deriv.degree', deriv.degree, 'not implemented!'))
}
}
#' build polynomial sum
#'
#' @noRd
f.polynomial <- function(m, poly.coef) {
exponents <- as.numeric(names(poly.coef))
margin.table(poly.coef * t(outer(m, exponents, "^")), 2)
}
#' returns polynomial coefficients
#'
#' @noRd
f.polynomial.coef <- function(poly.coef, deriv.degree = 1L) {
if (0L == deriv.degree)
return(poly.coef)
if (1 == length(poly.coef))
return(0)
exponents <- as.numeric(names(poly.coef))
poly.coef <- poly.coef * exponents
if (0L %in% exponents) {
intercept.idx <- 0L == exponents
poly.coef <- poly.coef[! intercept.idx]
exponents <- exponents[! intercept.idx]
}
exponents <- exponents - 1L
names(poly.coef) <- exponents
f.polynomial.coef(poly.coef, deriv.degree - 1L)
}
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/R/perception.R
|
#' @name vmgeom
#' @aliases dvmgeom
#' @aliases pvmgeom
#' @aliases qvmgeom
#' @aliases rvmgeom
#' @title Visual magnitude distribution of geometric distributed meteor magnitudes
#' @description
#' Density, distribution function, quantile function and random generation for the
#' visual magnitude distribution of geometric distributed meteor magnitudes.
#' @param m numeric; the meteor magnitude.
#' @param p numeric; probability.
#' @param lm numeric; limiting magnitude.
#' @param r numeric; the population index. It is the only parameter of the distribution.
#' @param n numeric; count of meteor magnitudes.
#' @param log logical; if `TRUE`, probabilities p are given as `log(p)`.
#' @param lower.tail logical; if `TRUE` (default) probabilities are
#' \eqn{P[M < m]}, otherwise, \eqn{P[M \ge m]}.
#' @param perception.fun function; perception probability function (optional).
#' Default is [vismeteor::vmperception].
#' @details
#' In visual meteor observation, it is common to estimate meteor magnitudes in integer values.
#' Hence, this distribution is discrete and has the density
#' \deqn{
#' {\displaystyle P[X = x] \sim f(x) \, \mathrm r^{-x}} \,\mathrm{,}
#' }
#' where \eqn{x \ge -0.5} is the difference between the limiting magnitude `lm`
#' and the meteor magnitude `m` and \eqn{f(x)} is the perception probability function.
#' This distribution is thus a product of the
#' [perception probabilities][vismeteor::vmperception] and the
#' actual [geometric distribution][stats::Geometric] of the meteor magnitudes.
#' Therefore, the parameter `p` of the geometric distribution is `p = 1 - 1/r`.
#'
#' The parameter `lm` specifies the limiting magnitude to which the meteor magnitude `m` refers.
#' `m` must be an integer meteor magnitude.
#' The length of the vector `lm` must then be equal to the length of the vector `m`
#' or `lm` is a scalar value.
#' In case of `rvmgeom`, the length of the vector `lm` must be `n` or `lm` is a scalar value.
#'
#' If the perception probabilities function `perception.fun` is given,
#' it must have the signature `function(x)` and must return the perception probabilities of
#' the difference `x` between the limiting magnitude and the meteor magnitude.
#' If `x >= 15.0`, the `perception.fun` function should return the perception probability of `1.0`.
#' If `log = TRUE` is given, the logarithm value of the perception probabilities
#' must be returned. `perception.fun` is resolved using [match.fun].
#' @return
#' `dvmgeom` gives the density, `pvmgeom` gives the distribution function,
#' `qvmgeom` gives the quantile function, and `rvmgeom` generates random deviates.
#'
#' The length of the result is determined by `n` for `rvmgeom`, and is the maximum
#' of the lengths of the numerical vector arguments for the other functions.
#'
#' Since the distribution is discrete, `qvmgeom` and `rvmgeom` always return integer values.
#' `qvmgeom` can return `NaN` values with a warning.
#' @seealso [vismeteor::vmperception]
#' [stats::Geometric]
#' @examples
#' N <- 100
#' r <- 2.0
#' limmag <- 6.5
#' (m <- seq(6, -7))
#'
#' # discrete density of `N` meteor magnitudes
#' (freq <- round(N * dvmgeom(m, limmag, r)))
#'
#' # log likelihood function
#' lld <- function(r) {
#' -sum(freq * dvmgeom(m, limmag, r, log=TRUE))
#' }
#'
#' # maximum likelihood estimation (MLE) of r
#' est <- optim(2, lld, method='Brent', lower=1.1, upper=4)
#'
#' # estimations
#' est$par # mean of r
#'
#' # generate random meteor magnitudes
#' m <- rvmgeom(N, r, lm=limmag)
#'
#' # log likelihood function
#' llr <- function(r) {
#' -sum(dvmgeom(m, limmag, r, log=TRUE))
#' }
#'
#' # maximum likelihood estimation (MLE) of r
#' est <- optim(2, llr, method='Brent', lower=1.1, upper=4, hessian=TRUE)
#'
#' # estimations
#' est$par # mean of r
#' sqrt(1/est$hessian[1][1]) # standard deviation of r
#'
#' m <- seq(6, -4, -1)
#' p <- vismeteor::dvmgeom(m, limmag, r)
#' barplot(
#' p,
#' names.arg = m,
#' main = paste0('Density (r = ', r, ', limmag = ', limmag, ')'),
#' col = "blue",
#' xlab = 'm',
#' ylab = 'p',
#' border = "blue",
#' space = 0.5
#' )
#' axis(side = 2, at = pretty(p))
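#'
#' # A user-defined perception probability function can be supplied as well.
#' # The ramp below is only a hypothetical illustration; it reaches 1.0
#' # well before x = 15, as required for `perception.fun`.
#' perc.ramp <- function(x) pmax(0.0, pmin(1.0, 0.1 * (x + 0.5)))
#' dvmgeom(m, limmag, r, perception.fun = perc.ramp)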
#' @rdname vmgeom
#' @export
dvmgeom <- function(m, lm, r, log = FALSE, perception.fun = NULL) {
if (anyNA(m) | anyNA(lm) | anyNA(r)) {
stop("NA's are not allowed!")
}
if (any(is.infinite(lm))) {
stop("Infinite limiting magnitudes are not allowed!")
}
if (any(is.infinite(r))) {
stop("Infinite r values are not allowed!")
}
if (any(1.0 > r)) {
stop(paste0('r must be greater than 1.0 instead of "', r, '"!'))
}
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) all(is.infinite(x) | abs(x - round(x)) < tol)
if (! is.wholenumber(m)) {
stop("magnitudes must be integer values!")
}
if (is.null(perception.fun)) {
perception.fun <- vismeteor::vmperception
} else {
perception.fun <- match.fun(perception.fun)
}
p.geom <- 1.0 - 1.0/r
if (1 == length(r)) {
r <- rep(r, length(m))
p.geom <- rep(p.geom, length(m))
}
offset <- 0.0
list2env(vmgeom.std(m, lm), environment())
f.density <- function(m, offset, p.geom) {
m.max <- 15L
idx <- m <= m.max
d <- stats::dgeom(m, p.geom, log = TRUE)
if (any(idx)) {
d[idx] <- d[idx] + base::log(perception.fun(m[idx] + offset))
}
d - base::log(vmgeom.norm(offset, p.geom, m.max, perception.fun))
}
arg.data <- data.frame(
m = m,
offset = offset,
p.geom = p.geom
)
data.f <- as.factor(paste0(offset, '/', p.geom))
data.s <- split(arg.data, data.f)
d <- lapply(data.s, function(data) {
m <- data$m
offset <- data$offset[1]
p.geom <- data$p.geom[1]
d <- rep(-Inf, length(m))
idx <- m > -1
if (any(idx)) {
d[idx] <- f.density(m[idx], offset, p.geom)
}
if (! log) {
d[idx] <- exp(d[idx])
d[!idx] <- 0.0
}
d
})
unsplit(d, data.f)
}
#' @rdname vmgeom
#' @export
pvmgeom <- function(m, lm, r, lower.tail = TRUE, log = FALSE, perception.fun = NULL) {
if (anyNA(m) | anyNA(lm) | anyNA(r)) {
stop("NA's are not allowed!")
}
if (any(is.infinite(lm))) {
stop("Infinite limiting magnitudes are not allowed!")
}
if (any(is.infinite(r))) {
stop("Infinite r values are not allowed!")
}
if (any(1.0 > r)) {
stop(paste0('r must be greater than 1.0 instead of "', r, '"!'))
}
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) all(is.infinite(x) | abs(x - round(x)) < tol)
if (! is.wholenumber(m)) {
stop("magnitudes must be integer values!")
}
if (is.null(perception.fun)) {
perception.fun <- vismeteor::vmperception
} else {
perception.fun <- match.fun(perception.fun)
}
p.geom <- 1.0 - 1.0/r
if (1 == length(r)) {
r <- rep(r, length(m))
p.geom <- rep(p.geom, length(m))
}
offset <- 0.0
list2env(vmgeom.std(m, lm), environment())
f.density <- function(m, offset, p.geom) {
stats::dgeom(m, p.geom) * perception.fun(m + offset)
}
f.sum <- Vectorize(function(m, offset, p.geom) {
m <- as.integer(seq(0, m))
sum(f.density(m, offset, p.geom))
})
f.prob <- function(m, offset, p.geom) {
m.max <- 15L
norm <- vmgeom.norm(offset, p.geom, m.max, perception.fun)
p <- rep(0.0, length(m))
if (lower.tail) {
idx <- m <= m.max
if(any(idx)) {
p[idx] <- 1.0 - f.sum(m[idx], offset, p.geom)/norm
}
idx <- m > m.max
if(any(idx)) {
p[idx] <- stats::pgeom(m[idx], p.geom, lower.tail = FALSE)/norm
}
} else {
idx <- m <= m.max
if(any(idx)) {
p[idx] <- f.sum(m[idx], offset, p.geom)/norm
}
idx <- m > m.max
if(any(idx)) {
p[idx] <- 1.0 - stats::pgeom(m[idx], p.geom, lower.tail = FALSE)/norm
}
}
p
}
arg.data <- data.frame(
m = m,
offset = offset,
p.geom = p.geom
)
data.f <- as.factor(paste0(offset, '/', p.geom))
data.s <- split(arg.data, data.f)
p <- lapply(data.s, function(data) {
m <- data$m
offset <- data$offset[1]
p.geom <- data$p.geom[1]
if (lower.tail) {
p <- rep(1.0, length(m))
} else {
p <- rep(0.0, length(m))
}
idx <- m > -1
if(any(idx)) {
p[idx] <- f.prob(m[idx], offset, p.geom)
}
if (log) {
p[idx] <- base::log(p[idx])
if (lower.tail) {
p[!idx] <- 0.0
} else {
p[!idx] <- -Inf
}
}
p
})
unsplit(p, data.f)
}
#' @rdname vmgeom
#' @export
qvmgeom <- function(p, lm, r, lower.tail = TRUE, perception.fun = NULL) {
if (anyNA(p) | anyNA(lm) | anyNA(r)) {
stop("NA's are not allowed!")
}
if (any(is.infinite(lm))) {
stop("Infinite limiting magnitudes are not allowed!")
}
if (any(is.infinite(r))) {
stop("Infinite r values are not allowed!")
}
if (any(1.0 > r)) {
stop(paste0('r must be greater than 1.0 instead of "', r, '"!'))
}
if (is.null(perception.fun)) {
perception.fun <- vismeteor::vmperception
} else {
perception.fun <- match.fun(perception.fun)
}
p.geom <- 1.0 - 1.0/r
if (1 == length(r)) {
r <- rep(r, length(p))
p.geom <- rep(p.geom, length(p))
}
lm.round <- round(lm)
offset <- lm - lm.round
lm <- lm.round
idx <- -0.5 == offset
lm[idx] <- lm[idx] - 1L
offset[idx] <- offset[idx] + 1.0
if (1 == length(offset)) {
offset <- rep(offset, length(p))
}
arg.data <- data.frame(
p = p,
offset = offset,
p.geom = p.geom,
r = r
)
data.f <- as.factor(paste0(offset, '/', p.geom))
data.s <- split(arg.data, data.f)
m <- lapply(data.s, function(data) {
m.max <- 15L
p <- data$p
offset <- data$offset[1]
p.geom <- data$p.geom[1]
r <- data$r[1]
m <- rep(NA, length(p))
if(lower.tail) {
m[1.0 == p] <- 0L
p.max <- 1.0 - vismeteor::pvmgeom(0, m.max + offset, r, lower.tail = FALSE, perception.fun = perception.fun)
idx <- p>=0.0 & p<p.max
if (any(idx)) {
m[idx] <- m.max + 1 + stats::qgeom(p[idx]/p.max, p.geom, lower.tail = FALSE)
}
idx <- p>=p.max & p<1.0
if (any(idx)) {
m0 <- seq(-m.max, 0, 1)
p0 <- c(vismeteor::pvmgeom(m0, offset, r, lower.tail = TRUE, perception.fun = perception.fun), 1.0)
m0 <- c(m0, 1L)
p.idx <- findInterval(p[idx], p0, left.open = FALSE)
m[idx] <- -m0[p.idx]
}
} else {
m[0.0 == p] <- 0L
p.max <- vismeteor::pvmgeom(0, m.max + offset, r, lower.tail = FALSE, perception.fun = perception.fun)
idx <- p>p.max & p<=1.0
if (any(idx)) {
m[idx] <- m.max + 1L + stats::qgeom((p[idx] - p.max)/(1.0 - p.max), p.geom)
}
idx <- p>0.0 & p<=p.max
if (any(idx)) {
m0 <- seq(1, -m.max, -1)
p0 <- vismeteor::pvmgeom(m0, offset, r, lower.tail = FALSE, perception.fun = perception.fun)
p.idx <- findInterval(p[idx], p0, left.open = TRUE) + 1
m[idx] <- -m0[p.idx]
m[m<0] <- NA
}
}
m
})
m <- lm - unsplit(m, data.f)
if (anyNA(m)) {
warning('NaNs produced')
}
as.numeric(m)
}
#' @rdname vmgeom
#' @export
rvmgeom <- function(n, lm, r, perception.fun = NULL) {
if (anyNA(lm) | anyNA(r)) {
stop("NA's are not allowed!")
}
if (any(is.infinite(lm))) {
stop("Infinite limiting magnitudes are not allowed!")
}
if (any(is.infinite(r))) {
stop("Infinite r values are not allowed!")
}
    if (any(1.0 >= r)) {
stop(paste0('r must be greater than 1.0 instead of "', r, '"!'))
}
if (is.null(perception.fun)) {
perception.fun <- vismeteor::vmperception
} else {
perception.fun <- match.fun(perception.fun)
}
p <- stats::runif(n)
m <- rep(NA, n)
idx <- p < 0.5
if (any(idx)) {
m[idx] <- vismeteor::qvmgeom(p[idx], lm, r, lower.tail = TRUE, perception.fun = perception.fun)
}
if (any(!idx)) {
m[!idx] <- vismeteor::qvmgeom(1.0 - p[!idx], lm, r, lower.tail = FALSE, perception.fun = perception.fun)
}
m
}
#' standardization
#'
#' @noRd
vmgeom.std <- function(m, lm) {
m <- lm - m
m.round <- round(m)
offset <- rep(0.0, length(m))
idx <- is.infinite(m)
offset[! idx] <- m[! idx] - m.round[! idx]
m <- m.round
idx <- -0.5 == offset
m[idx] <- m[idx] - 1L
offset[idx] <- offset[idx] + 1.0
list(
m = m,
offset = offset
)
}
#' normalization
#'
#' @noRd
vmgeom.norm <- function(offset, p.geom, m.max, perception.fun){
m <- as.integer(seq(0, m.max))
sum(stats::dgeom(m, p.geom) * perception.fun(m + offset)) +
stats::pgeom(m.max, p.geom, lower.tail = FALSE)
}
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/R/vmgeom.R
|
#' @name vmideal
#' @aliases dvmideal
#' @aliases pvmideal
#' @aliases qvmideal
#' @aliases rvmideal
#' @aliases cvmideal
#' @title Visual magnitude distribution of ideal distributed meteor magnitudes
#' @description
#' Density, distribution function, quantile function and random generation for the
#' visual magnitude distribution of ideal distributed meteor magnitudes.
#' @param psi numeric; the location parameter of a probability distribution.
#' It is the only parameter of the distribution.
#' @param m integer; visual meteor magnitude.
#' @param lm numeric; limiting magnitude.
#' @param p numeric; probability.
#' @param n numeric; count of meteor magnitudes.
#' @param log logical; if `TRUE`, probabilities p are given as `log(p)`.
#' @param lower.tail logical; if `TRUE` (default) probabilities are
#' \eqn{P[M < m]}, otherwise, \eqn{P[M \ge m]}.
#' @param perception.fun function; perception probability function (optional).
#' Default is [vismeteor::vmperception].
#' @details
#' The density of an [ideal magnitude distribution][vismeteor::mideal] is
#' \deqn{
#' {\displaystyle f(m) = \frac{\mathrm{d}p}{\mathrm{d}m} = \frac{3}{2} \, \log(r) \sqrt{\frac{r^{3 \, \psi + 2 \, m}}{(r^\psi + r^m)^5}}}
#' }
#' where \eqn{m} is the meteor magnitude, \eqn{r = 10^{0.4} \approx 2.51189 \dots} is a constant and
#' \eqn{\psi} is the only parameter of this magnitude distribution.
#'
#' In visual meteor observation, it is common to estimate meteor magnitudes in integer values.
#' Hence, this distribution is discrete and has the density
#' \deqn{
#' {\displaystyle P[M = m] \sim g(m) \, \int_{m-0.5}^{m+0.5} f(x) \, \mathrm{d}x} \, \mathrm{,}
#' }
#' where \eqn{g(m)} is the perception probability.
#' This distribution is thus a product of the
#' [perception probabilities][vismeteor::vmperception] and the
#' actual [ideal distribution][vismeteor::mideal] of the meteor magnitudes.
#'
#' If the perception probabilities function `perception.fun` is given,
#' it must have the signature `function(M)` and must return the perception probabilities of
#' the difference `M` between the limiting magnitude and the meteor magnitude.
#' If `M >= 15.0`, the `perception.fun` function should return the perception probability of `1.0`.
#' If `log = TRUE` is given, the logarithm value of the perception probabilities
#' must be returned. `perception.fun` is resolved using [match.fun].
#' @return
#' `dvmideal` gives the density, `pvmideal` gives the distribution function,
#' `qvmideal` gives the quantile function, and `rvmideal` generates random deviates.
#' `cvmideal` gives the partial convolution of the ideal meteor magnitude distribution
#' with the perception probabilities.
#'
#' The length of the result is determined by `n` for `rvmideal`, and is the maximum
#' of the lengths of the numerical vector arguments for the other functions.
#'
#' Since the distribution is discrete, `qvmideal` and `rvmideal` always return integer values.
#' `qvmideal` can return `NaN` values with a warning.
#' @seealso [vismeteor::mideal]
#' [vismeteor::vmperception]
#'
#' @references Richter, J. (2018) \emph{About the mass and magnitude distributions of meteor showers}.
#' WGN, Journal of the International Meteor Organization, vol. 46, no. 1, p. 34-38
#' @examples
#' N <- 100
#' psi <- 5.0
#' limmag <- 6.5
#' (m <- seq(6, -4))
#'
#' # discrete density of `N` meteor magnitudes
#' (freq <- round(N * dvmideal(m, limmag, psi)))
#'
#' # log likelihood function
#' lld <- function(psi) {
#' -sum(freq * dvmideal(m, limmag, psi, log=TRUE))
#' }
#'
#' # maximum likelihood estimation (MLE) of psi
#' est <- optim(2, lld, method='Brent', lower=0, upper=8, hessian=TRUE)
#'
#' # estimations
#' est$par # mean of psi
#'
#' # generate random meteor magnitudes
#' m <- rvmideal(N, limmag, psi)
#'
#' # log likelihood function
#' llr <- function(psi) {
#' -sum(dvmideal(m, limmag, psi, log=TRUE))
#' }
#'
#' # maximum likelihood estimation (MLE) of psi
#' est <- optim(2, llr, method='Brent', lower=0, upper=8, hessian=TRUE)
#'
#' # estimations
#' est$par # mean of psi
#' sqrt(1/est$hessian[1][1]) # standard deviation of psi
#'
#' m <- seq(6, -4, -1)
#' p <- vismeteor::dvmideal(m, limmag, psi)
#' barplot(
#' p,
#' names.arg = m,
#' main = paste0('Density (psi = ', psi, ', limmag = ', limmag, ')'),
#' col = "blue",
#' xlab = 'm',
#' ylab = 'p',
#' border = "blue",
#' space = 0.5
#' )
#' axis(side = 2, at = pretty(p))
#'
#' plot(
#' function(lm) vismeteor::cvmideal(lm, psi, log = TRUE),
#' -5, 10,
#' main = paste0(
#' 'Partial convolution of the ideal meteor magnitude distribution\n',
#' 'with the perception probabilities (psi = ', psi, ')'
#' ),
#' col = "blue",
#' xlab = 'lm',
#' ylab = 'log(rate)'
#' )
#' @rdname vmideal
#' @export
dvmideal <- function(m, lm, psi, log = FALSE, perception.fun = NULL) {
if (anyNA(m) | anyNA(lm) | anyNA(psi)) {
stop("NA's are not allowed!")
}
if (any(is.infinite(lm))) {
stop("Infinite limiting magnitudes are not allowed!")
}
if (any(is.infinite(psi))) {
stop("Infinite psi values are not allowed!")
}
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) all(is.infinite(x) | abs(x - round(x)) < tol)
if (! is.wholenumber(m)) {
stop("magnitudes must be integer values!")
}
if (is.null(perception.fun)) {
perception.fun <- vismeteor::vmperception
} else {
perception.fun <- match.fun(perception.fun)
}
if (1 == length(lm)) {
lm <- rep(lm, length(m))
}
if (1 == length(psi)) {
psi <- rep(psi, length(m))
}
# density function
f.density <- function(m, lm, psi, log) {
psi.exp <- 10.0
if (lm + psi.exp < psi) {
return(vismeteor::dvmgeom(m, lm, 10^0.4, log = log))
}
norm.res <- vmideal.norm(lm, psi, perception.fun)
p <- norm.res$p
m.lower <- norm.res$m.lower
m.upper <- norm.res$m.upper
if (log) {
d <- rep(-Inf, length(m))
} else {
d <- rep(0.0, length(m))
}
idx <- m >= m.lower & m <= m.upper
if (any(idx)) {
d.tmp <- p[as.character(m[idx])]
if (log) {
d.tmp[0.0 == d.tmp] <- -Inf
log.idx <- -Inf != d.tmp
if (any(log.idx)) {
d.tmp[log.idx] <- base::log(d.tmp[log.idx])
}
}
d[idx] <- d.tmp
}
idx <- m > -Inf & m < m.lower
if (any(idx)) {
if (log) {
d[idx] <- dmideal.int(m[idx], psi, log = TRUE) - base::log(norm.res$norm)
} else {
d[idx] <- dmideal.int(m[idx], psi)/norm.res$norm
}
}
d
}
arg.data <- data.frame(
m = m,
lm = lm,
psi = psi
)
data.f <- as.factor(paste0(lm, '/', psi))
data.s <- split(arg.data, data.f)
d <- lapply(data.s, function(data) {
m <- data$m
lm <- data$lm[1]
psi <- data$psi[1]
f.density(m, lm, psi, log = log)
})
unsplit(d, data.f)
}
#' @rdname vmideal
#' @export
pvmideal <- function(m, lm, psi, lower.tail = TRUE, log = FALSE, perception.fun = NULL) {
if (anyNA(m) | anyNA(lm) | anyNA(psi)) {
stop("NA's are not allowed!")
}
if (any(is.infinite(lm))) {
stop("Infinite limiting magnitudes are not allowed!")
}
if (any(is.infinite(psi))) {
stop("Infinite psi values are not allowed!")
}
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) all(is.infinite(x) | abs(x - round(x)) < tol)
if (! is.wholenumber(m)) {
stop("magnitudes must be integer values!")
}
if (is.null(perception.fun)) {
perception.fun <- vismeteor::vmperception
} else {
perception.fun <- match.fun(perception.fun)
}
if (1 == length(lm)) {
lm <- rep(lm, length(m))
}
if (1 == length(psi)) {
psi <- rep(psi, length(m))
}
# probability function
f.prob <- function(m, lm, psi, log) {
psi.exp <- 10.0
if (lm + psi.exp < psi) {
return(vismeteor::pvmgeom(m, lm, 10^0.4, lower.tail = lower.tail, log = log))
}
norm.res <- vmideal.norm(lm, psi, perception.fun)
m.lower <- norm.res$m.lower
m.upper <- norm.res$m.upper
if (lower.tail) {
if (log) {
p <- rep(0.0, length(m))
p[-Inf == m] <- -Inf
} else {
p <- rep(1.0, length(m))
p[-Inf == m] <- 0.0
}
idx <- m > -Inf & m <= m.lower
if (any(idx)) {
if (log) {
p[idx] <- vismeteor::pmideal(m[idx] - 0.5, psi, lower.tail = TRUE, log = TRUE) -
base::log(norm.res$norm)
} else {
p[idx] <- vismeteor::pmideal(m[idx] - 0.5, psi, lower.tail = TRUE, log = FALSE) / norm.res$norm
}
}
idx <- m > m.lower & m <= m.upper
if (any(idx)) {
p.gen <- cumsum(norm.res$p)
p[idx] <- norm.res$p.lower.tail + p.gen[as.character(m[idx] - 1)]
p[p>1.0] <- 1.0
if (log) {
log.idx <- p > 0.0
if (any(log.idx)) {
p[log.idx] <- base::log(p[log.idx])
}
}
}
} else {
p <- rep(0.0, length(m))
p[-Inf == m] <- 1.0
idx <- m > -Inf & m < m.lower
if (any(idx)) {
p[idx] <- 1.0 - vismeteor::pmideal(m[idx] - 0.5, psi, lower.tail = TRUE) / norm.res$norm
}
idx <- m >= m.lower & m <= m.upper
if (any(idx)) {
p.gen <- base::rev(cumsum(base::rev(norm.res$p)))
p[idx] <- p.gen[as.character(m[idx])]
}
p[m > 0.0 & is.infinite(m)] <- 0.0
p[p>1.0] <- 1.0
if (log) {
log.idx <- p > 0.0
if (any(log.idx)) {
p[log.idx] <- base::log(p[log.idx])
}
p[!log.idx] <- -Inf
}
}
p
}
arg.data <- data.frame(
m = m,
lm = lm,
psi = psi
)
data.f <- as.factor(paste0(lm, '/', psi))
data.s <- split(arg.data, data.f)
p <- lapply(data.s, function(data) {
m <- data$m
lm <- data$lm[1]
psi <- data$psi[1]
f.prob(m, lm, psi, log = log)
})
unsplit(p, data.f)
}
#' @rdname vmideal
#' @export
qvmideal <- function(p, lm, psi, lower.tail = TRUE, perception.fun = NULL) {
if (anyNA(p) | anyNA(lm) | anyNA(psi)) {
stop("NA's are not allowed!")
}
if (any(is.infinite(lm))) {
stop("Infinite limiting magnitudes are not allowed!")
}
if (any(is.infinite(psi))) {
stop("Infinite psi values are not allowed!")
}
if (is.null(perception.fun)) {
perception.fun <- vismeteor::vmperception
} else {
perception.fun <- match.fun(perception.fun)
}
if (1 == length(lm)) {
lm <- rep(lm, length(p))
}
if (1 == length(psi)) {
psi <- rep(psi, length(p))
}
# quantile function
f.q <- function(p, lm, psi) {
m.max <- 15L
psi.exp <- 10.0
if (lm + psi.exp < psi) {
r.lower <- 10^0.4
return(vismeteor::qvmgeom(p, lm, r.lower, lower.tail = lower.tail))
}
m.upper <- vmideal.upper.lm(lm)
m.lower <- m.upper - m.max
m <- rep(NA, length(p))
if(lower.tail) {
m[0.0 == p] <- -Inf
m[1.0 == p] <- m.upper
p.max <- vismeteor::pvmideal(m.lower, lm, psi, lower.tail = TRUE, perception.fun = perception.fun)
idx <- p > 0.0 & p < p.max
if (any(idx)) {
p.max.ideal <- vismeteor::pmideal(m.lower - 0.5, psi, lower.tail = TRUE)
m[idx] <- floor(0.5 + vismeteor::qmideal(
p.max.ideal * p[idx] / p.max,
psi,
lower.tail = TRUE
))
}
idx <- p>=p.max & p<1.0
if (any(idx)) {
m.gen <- seq(m.lower, m.upper + 1)
p.gen <- vismeteor::pvmideal(m.gen, lm, psi, lower.tail = lower.tail, perception.fun = perception.fun)
p.idx <- findInterval(p[idx], p.gen, left.open = FALSE)
m[idx] <- m.gen[p.idx]
}
} else {
m[0.0 == p] <- m.upper
m[1.0 == p] <- -Inf
p.max <- vismeteor::pvmideal(m.lower, lm, psi, lower.tail = FALSE, perception.fun = perception.fun)
idx <- p > p.max & p < 1.0
if (any(idx)) {
p.max.ideal <- vismeteor::pmideal(m.lower - 0.5, psi, lower.tail = FALSE)
m[idx] <- floor(0.5 + vismeteor::qmideal(
1.0 - (1.0 - p[idx]) * ((1.0 - p.max.ideal) / (1.0 - p.max)),
psi,
lower.tail = FALSE
))
}
idx <- p>0.0 & p<=p.max
if (any(idx)) {
m.gen <- seq(m.upper + 1, m.lower)
p.gen <- vismeteor::pvmideal(m.gen, lm, psi, lower.tail = FALSE, perception.fun = perception.fun)
p.idx <- findInterval(p[idx], p.gen, left.open = TRUE) + 1
m[idx] <- m.gen[p.idx]
}
}
m
}
arg.data <- data.frame(
p = p,
lm = lm,
psi = psi
)
data.f <- as.factor(paste0(lm, '/', psi))
data.s <- split(arg.data, data.f)
m <- lapply(data.s, function(data) {
p <- data$p
lm <- data$lm[1]
psi <- data$psi[1]
f.q(p, lm, psi)
})
m <- unsplit(m, data.f)
if (anyNA(m)) {
warning('NaNs produced')
}
m
}
#' @rdname vmideal
#' @export
rvmideal <- function(n, lm, psi, perception.fun = NULL) {
if (anyNA(lm) | anyNA(psi)) {
stop("NA's are not allowed!")
}
if (any(is.infinite(lm))) {
stop("Infinite limiting magnitudes are not allowed!")
}
if (any(is.infinite(psi))) {
stop("Infinite psi values are not allowed!")
}
if (is.null(perception.fun)) {
perception.fun <- vismeteor::vmperception
} else {
perception.fun <- match.fun(perception.fun)
}
p <- stats::runif(n)
m <- rep(NA, n)
idx <- p < 0.5
if (any(idx)) {
m[idx] <- vismeteor::qvmideal(p[idx], lm, psi, lower.tail = TRUE, perception.fun = perception.fun)
}
if (any(!idx)) {
m[!idx] <- vismeteor::qvmideal(1.0 - p[!idx], lm, psi, lower.tail = FALSE, perception.fun = perception.fun)
}
m
}
#' @rdname vmideal
#' @export
cvmideal <- function(lm, psi, log = FALSE, perception.fun = NULL) {
if (anyNA(lm) | anyNA(psi)) {
stop("NA's are not allowed!")
}
if (is.null(perception.fun)) {
perception.fun <- vismeteor::vmperception
} else {
perception.fun <- match.fun(perception.fun)
}
if (1 == length(psi)) {
psi <- rep(psi, length(lm))
}
# Integration - similar to vmideal.norm()
f.integrate <- function(lm, psi) {
m.max <- 15L
m.upper <- vmideal.upper.lm(lm)
m.lower <- m.upper - m.max
m <- as.integer(seq(m.lower, m.upper))
p <- dmideal.int(m, psi) * perception.fun(lm - m)
p.lower.tail <- vismeteor::pmideal(m.lower - 0.5, psi, lower.tail = TRUE)
sum(p) + p.lower.tail
}
p <- mapply(function(lm, psi){
if (Inf == lm & Inf == psi) return(NA)
if (-Inf == lm & -Inf == psi) return(NA)
if (Inf == lm & -Inf == psi) return(if(log) 0.0 else 1.0)
if (Inf == psi & -Inf == lm) return(if(log) -Inf else 0.0)
if (log) {
base::log(f.integrate(lm, psi))
} else {
f.integrate(lm, psi)
}
}, lm, psi, SIMPLIFY = TRUE)
if (anyNA(p)) {
warning('NaNs produced')
}
p
}
#' upper available magnitude
#'
#' @noRd
vmideal.upper.lm <- function(lm) {
lm.round <- round(lm)
offset <- lm - lm.round
if (-0.5 == offset) {
lm.round <- lm.round - 1L
}
lm.round
}
#' density of ideal integer magnitude distribution
#'
#' @noRd
dmideal.int <- function(m, psi, log = FALSE) {
psi.exp <- 10.0
r.lower <- 10^0.4
a <- -base::log(r.lower)
d <- rep(NA, length(m))
idx <- m > (psi + psi.exp)
if (any(idx)) {
if (log) {
d[idx] <- base::log(1 - base::exp(1.5 * a)) + a * (1.5 * (m[idx] - psi) - 0.75)
} else {
d[idx] <- base::exp(a * (1.5 * (m[idx] - psi) - 0.75)) -
base::exp(a * (1.5 * (m[idx] - psi) + 0.75))
}
}
idx <- m < (psi - psi.exp)
if (any(idx)) {
if (log) {
d[idx] <- base::log(1.5) + a * (psi - m[idx] - 0.5) + base::log(1 - base::exp(a))
} else {
d[idx] <- 1.5 * base::exp(a * (psi - m[idx] - 0.5)) -
1.5 * base::exp(a * (psi - m[idx] + 0.5))
}
}
idx <- is.na(d)
if (any(idx)) {
d[idx] <- vismeteor::pmideal(m[idx] + 0.5, psi, lower.tail = TRUE) -
vismeteor::pmideal(m[idx] - 0.5, psi, lower.tail = TRUE)
if (log) {
d[idx] <- base::log(d[idx])
}
}
d
}
#' normalization
#'
#' @noRd
vmideal.norm <- function(lm, psi, perception.fun) {
m.max <- 15L
m.upper <- vmideal.upper.lm(lm)
m.lower <- m.upper - m.max
m <- as.integer(seq(m.lower, m.upper))
p <- dmideal.int(m, psi) * perception.fun(lm - m)
names(p) <- as.character(m)
p.lower.tail <- vismeteor::pmideal(m.lower - 0.5, psi, lower.tail = TRUE)
norm <- sum(p) + p.lower.tail
list(
m.lower = m.lower,
m.upper = m.upper,
norm = norm,
p = p/norm,
p.lower.tail = p.lower.tail/norm
)
}
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/R/vmideal.R
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup, include = FALSE---------------------------------------------------
library(vismeteor)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----echo=TRUE, results='hide'------------------------------------------------
m <- seq(6, -4, -1)
limmag <- 6.5
r <- 2.0
p <- vismeteor::dvmgeom(m, limmag, r)
barplot(
p,
names.arg = m,
main = paste0('Density (r = ', r, ', limmag = ', limmag, ')'),
col = "blue",
xlab = 'm',
ylab = 'p',
border = "blue",
space = 0.5
)
axis(side = 2, at = pretty(p))
## ----echo=TRUE, results='hide'------------------------------------------------
m <- seq(6, -4, -1)
psi <- 5.0
limmag <- 6.5
p <- vismeteor::dvmideal(m, limmag, psi)
barplot(
p,
names.arg = m,
main = paste0('Density (psi = ', psi, ', limmag = ', limmag, ')'),
col = "blue",
xlab = 'm',
ylab = 'p',
border = "blue",
space = 0.5
)
axis(side = 2, at = pretty(p))
## ----echo=TRUE----------------------------------------------------------------
mt <- as.table(matrix(
c(
0.0, 0.0, 2.5, 0.5, 0.0, 1.0,
0.0, 1.5, 2.0, 0.5, 0.0, 0.0,
1.0, 0.0, 0.0, 3.0, 2.5, 0.5
), nrow = 3, ncol = 6, byrow = TRUE
))
colnames(mt) <- seq(6)
rownames(mt) <- c('A', 'B', 'C')
margin.table(mt, 1)
margin.table(mt, 2)
# contingency table with integer values
(mt.int <- vmtable(mt))
margin.table(mt.int, 1)
margin.table(mt.int, 2)
## ----echo=TRUE----------------------------------------------------------------
freq <- c(1,8,3,3,4,9,5,0,0,2,7,8,2,6,4)
f <- freq.quantile(freq, 10)
print(f)
print(tapply(freq, f, sum))
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/inst/doc/vismeteor.R
|
---
title: "vismeteor"
output:
rmarkdown::html_vignette:
toc: true
fig_width: 6
fig_height: 4
vignette: >
%\VignetteIndexEntry{vismeteor}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup, include = FALSE}
library(vismeteor)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Introduction
`vismeteor` was developed to provide a comprehensive tool for analyzing visually observed
meteors. It takes into account human perception of different meteor magnitudes through
perception probabilities. Therefore, this package provides methods that incorporate
these perception probabilities in the evaluation of meteor magnitudes.
Perception probabilities are crucial in analyzing the observed magnitude distributions,
leading to specific magnitude distributions unique to visual meteor observations.
A popular distribution is the Visual Geometric Magnitude Distribution.
This is a standard geometric distribution adjusted by multiplying its densities with
the perception probabilities. The result is a distribution whose sole parameter is
the Population Index r.
This package enables users to work with this specially adjusted distribution.
Additionally, this package offers tools to facilitate the evaluation, recognizing that
visual meteor observations typically report only total counts over a specified period,
rather than individual meteor events.
For visual observations of meteor magnitudes, the observed counts can be specified in
fractional values (half meteors). This package includes a function for unbiased rounding
of these fractional values to use standard tools in R that only allow integer values.
## Perception probabilities
Meteors appear randomly distributed across the sky. The perception of an observer can
vary significantly for each meteor, influenced largely by the region of the sky where
the meteor appears. Depending on its position, a meteor might be perceived or missed entirely,
leading to the concept of perception probabilities.
Perception probabilities quantify the likelihood of an observer detecting a meteor.
They provide a statistical measure reflecting the chance of seeing or not seeing a meteor
under various conditions. In statistical modeling of visual observations, these probabilities
are commonly expressed in relation to the difference between the meteor's magnitude and the
limiting magnitude of the observation.
This package includes the function `vmperception()` to return perception probabilities for
visual meteor magnitudes. For advanced analytical purposes, `vmperception.l()` is provided
to return the Laplace-transformed perception probabilities.
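For a quick impression, the built-in perception probabilities can be evaluated directly. The
following minimal sketch tabulates them for meteors that are 0.5 to 6.5 magnitudes brighter than
the limiting magnitude (the argument of `vmperception()` is this magnitude difference; the chosen
values are purely illustrative):
```{r, echo=TRUE}
m <- seq(0.5, 6.5, 1.0) # difference: limiting magnitude - meteor magnitude
data.frame(m = m, p = round(vismeteor::vmperception(m), 3))
```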
Custom perception probability functions can be passed to most functions of this package.
This allows users to tailor the behavior of the functions to their specific needs and
conduct their own studies on the appearance of observable meteor magnitudes.
This flexibility is particularly valuable for researchers wishing to incorporate
their own empirical data or models into the analysis.
See `?vmperception` and `?vmperception.l` for more information.
## Geometric magnitude distribution
In visual meteor observation, it is common to estimate meteor magnitudes in integer values.
Hence, the geometric magnitude distribution is discrete and has the density:
$$
{\displaystyle P[X = x] \sim f(x) \, \mathrm r^{-x}} \,\mathrm{,}
$$
where $x \ge -0.5$ represents the difference between the limiting magnitude and the
meteor magnitude, and $f(x)$ is the perception probability function. This distribution
is a product of the perception probabilities with the actual geometric distribution of
the meteor magnitudes. Therefore, the parameter $p$ of the geometric distribution is $p = 1 - 1/r$.
The advantage of this model is its simplicity. When the number of observed meteors is low,
it can often be shown that the distribution of meteor magnitudes corresponds to this model.
```{r, echo=TRUE, results='hide'}
m <- seq(6, -4, -1)
limmag <- 6.5
r <- 2.0
p <- vismeteor::dvmgeom(m, limmag, r)
barplot(
p,
names.arg = m,
main = paste0('Density (r = ', r, ', limmag = ', limmag, ')'),
col = "blue",
xlab = 'm',
ylab = 'p',
border = "blue",
space = 0.5
)
axis(side = 2, at = pretty(p))
```
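In addition to the density `dvmgeom()`, the distribution function `pvmgeom()`, the quantile
function `qvmgeom()` and random generation `rvmgeom()` are available. A brief sketch of their use;
the parameter values below are purely illustrative:
```{r, echo=TRUE}
limmag <- 6.5
r <- 2.0
# probability of observing a meteor brighter than magnitude 3, i.e. P[M < 3]
vismeteor::pvmgeom(3, limmag, r, lower.tail = TRUE)
# 0.5 quantile (median) of the observable meteor magnitudes
vismeteor::qvmgeom(0.5, limmag, r)
# draw 10 random observable meteor magnitudes
vismeteor::rvmgeom(10, limmag, r)
```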
See `?vmgeom` for more information.
## Ideal magnitude distribution
The ideal magnitude distribution is an alternative model to the geometric magnitude distribution.
It more accurately reflects the finiteness of meteor magnitudes across the entire magnitude
spectrum, drawing upon the model of an ideal gas from theoretical physics as a conceptual analogy.
This model is particularly useful when, compared to the geometric distribution, fewer faint meteor
magnitudes have been observed.
The density of an ideal magnitude distribution is
$$
{\displaystyle \frac{\mathrm{d}p}{\mathrm{d}m} = \frac{3}{2} \, \log(r) \sqrt{\frac{r^{3 \, \psi + 2 \, m}}{(r^\psi + r^m)^5}}}
$$
where $m$ is the meteor magnitude, $r = 10^{0.4} \approx 2.51189 \dots$ is a constant and
$\psi$ is the only parameter of this magnitude distribution.
```{r, echo=TRUE, results='hide'}
m <- seq(6, -4, -1)
psi <- 5.0
limmag <- 6.5
p <- vismeteor::dvmideal(m, limmag, psi)
barplot(
p,
names.arg = m,
main = paste0('Density (psi = ', psi, ', limmag = ', limmag, ')'),
col = "blue",
xlab = 'm',
ylab = 'p',
border = "blue",
space = 0.5
)
axis(side = 2, at = pretty(p))
```
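The corresponding distribution function `pvmideal()`, quantile function `qvmideal()` and random
generation `rvmideal()` exist for this model as well; `cvmideal()` additionally gives the partial
convolution of the ideal magnitude distribution with the perception probabilities. A brief sketch
with purely illustrative parameter values:
```{r, echo=TRUE}
psi <- 5.0
limmag <- 6.5
# probability of observing a meteor brighter than magnitude 3, i.e. P[M < 3]
vismeteor::pvmideal(3, limmag, psi, lower.tail = TRUE)
# draw 10 random observable meteor magnitudes
vismeteor::rvmideal(10, limmag, psi)
# partial convolution of the ideal distribution with the perception probabilities
vismeteor::cvmideal(limmag, psi)
```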
See `?mideal` and `?vmideal` for more information.
## Fractional Counting
In the statistical analysis of visual meteor observations, a method known as "count data"
in categorical time series is used. Observers record the number of meteors in each category
over specific time intervals.
In visual meteor observation, observers estimate integer meteor magnitudes and tally the counts
per magnitude class, uniquely allowing "half" counts when indecisive between two adjacent values.
This fractional counting reflects measurement uncertainty, splitting counts between adjacent magnitude categories.
While fractional counting accommodates measurement uncertainty with "half" counts, some tools
cannot process these fractional values and require integer rounding.
The function `vmtable()` addresses this by rounding the magnitudes in a contingency table to whole
numbers. It ensures that the rounding process only alters the marginal totals when necessary,
preserving the overall count integrity. This means both the grand total and the intermediate
sums of meteors observed remain consistent, ensuring accurate and usable data for
subsequent analysis.
Example:
```{r, echo=TRUE}
mt <- as.table(matrix(
c(
0.0, 0.0, 2.5, 0.5, 0.0, 1.0,
0.0, 1.5, 2.0, 0.5, 0.0, 0.0,
1.0, 0.0, 0.0, 3.0, 2.5, 0.5
), nrow = 3, ncol = 6, byrow = TRUE
))
colnames(mt) <- seq(6)
rownames(mt) <- c('A', 'B', 'C')
margin.table(mt, 1)
margin.table(mt, 2)
# contingency table with integer values
(mt.int <- vmtable(mt))
margin.table(mt.int, 1)
margin.table(mt.int, 2)
```
See `?vmtable` for more information.
## Quantile Analysis with Minimum Meteor Count
The function `freq.quantile()` is tailored for analyzing time series data in visual
meteor observation, where the count of meteors is recorded over specific time intervals.
Unlike traditional methods that sort quantiles based on time and percent, which often result
in some quantiles having fewer meteors than desired, `freq.quantile()` constructs quantiles
with a focus on ensuring a minimum number of meteors in each quantile.
This method addresses the challenge of varying meteor counts in each interval, including those
with zero meteors. By utilizing `freq.quantile()`, users can effectively divide the time series
into quantiles that are both time-based and density-based, enhancing the understanding of meteor
occurrence and distribution over the observed period while ensuring each quantile meets the
minimum count criterion.
This approach provides a more nuanced and reliable analysis, especially vital when dealing with
the inherent variability in meteor observations.
The following example represents a time-ordered list of observed meteor counts.
The objective is to group these counts into quantiles while maintaining their chronological order,
ensuring that each quantile contains at least 10 meteors.
```{r, echo=TRUE}
freq <- c(1,8,3,3,4,9,5,0,0,2,7,8,2,6,4)
f <- freq.quantile(freq, 10)
print(f)
print(tapply(freq, f, sum))
```
See `?freq.quantile` for more information.
## Interfacing VMDB Data with `load_vmdb()`
The `load_vmdb()` function is designed to interface with the
[imo-vmdb](https://pypi.org/project/imo-vmdb/)
application, processing data specifically exported from the Visual Meteor Database (VMDB) in
CSV format. After these CSV exports are verified and validated with the _imo-vmdb_ application,
they are stored in a relational database, which enhances the accessibility and usability of the
data compared to its original CSV format. This storage method establishes relationships between
data records and enriches them with additional information derived from the original data records.
Utilizing `load_vmdb()`, users can efficiently query and retrieve specific datasets relevant
to their meteor observation analysis. This streamlined access facilitates more comprehensive
and targeted research.
Within this package, `PER_2015_rates` and `PER_2015_magn` are provided as example data sets.
They are included for testing and training purposes, allowing users to
understand and utilize the full functionality of the entire package.
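The structure of the bundled example data can be inspected directly; this quick look is only
meant to show what the objects contain:
```{r, echo=TRUE}
# observations of the example magnitude data set
str(PER_2015_magn$observations)
# observed magnitude frequencies as a data frame
head(as.data.frame(PER_2015_magn$magnitudes))
```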
See `?load_vmdb`, `?PER_2015_rates` and `?PER_2015_magn` for more information.
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/inst/doc/vismeteor.Rmd
|
## ----setup, include = FALSE---------------------------------------------------
library(vismeteor)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----echo=TRUE, results='hide'------------------------------------------------
observations <- with(PER_2015_magn$observations, {
idx <- !is.na(lim.magn) & sl.start > 135.81 & sl.end < 135.87
data.frame(
magn.id = magn.id[idx],
lim.magn = lim.magn[idx]
)
})
head(observations, 5) # Example values
## ----echo=FALSE, results='asis'-----------------------------------------------
knitr::kable(head(observations, 5))
## ----echo=TRUE, results='hide'------------------------------------------------
magnitudes <- merge(
observations,
as.data.frame(PER_2015_magn$magnitudes),
by = 'magn.id'
)
magnitudes$magn <- as.integer(as.vector(magnitudes$magn))
head(magnitudes[magnitudes$Freq>0,], 5) # Example values
## ----echo=FALSE, results='asis'-----------------------------------------------
knitr::kable(head(magnitudes[magnitudes$Freq>0,], 5))
## ----echo=TRUE, results='hide'------------------------------------------------
# maximum likelihood estimation (MLE) of r
result <- with(subset(magnitudes, (magnitudes$lim.magn - magnitudes$magn) > -0.5), {
# log likelihood function
ll <- function(r) -sum(Freq * dvmgeom(magn, lim.magn, r, log=TRUE))
r.start <- 2.0 # starting value
r.lower <- 1.2 # lowest expected value
r.upper <- 4.0 # highest expected value
# find minimum
optim(r.start, ll, method='Brent', lower=r.lower, upper=r.upper, hessian=TRUE)
})
## ----echo=TRUE----------------------------------------------------------------
r.mean <- result$par # mean of r
print(r.mean)
r.var <- 1/result$hessian[1][1] # variance of r
print(r.var)
## ----echo=TRUE----------------------------------------------------------------
m.mean <- with(magnitudes, sum((lim.magn - magn) * Freq)/sum(Freq))
print(m.mean)
## ----echo=TRUE----------------------------------------------------------------
m.var <- with(magnitudes, {
n <- sum(Freq)
sum((lim.magn - magn - m.mean)^2 * Freq)/((n-1) * n)
})
print(m.var)
## ----echo=TRUE, results='hide'------------------------------------------------
r.mean.fun <- with(new.env(), {
r <- seq(1.3, 3.5, 0.1)
s <- log(r)
m.mean <- -vmperception.l(s, deriv.degree = 1L)/vmperception.l(s)
splinefun(m.mean, r)
})
## ----echo=TRUE----------------------------------------------------------------
r.mean <- r.mean.fun(m.mean)
print(r.mean)
## ----echo=TRUE----------------------------------------------------------------
r.var <- r.mean.fun(m.mean, deriv = 1L)^2 * m.var
print(r.var)
## ----echo=TRUE, results='hide'------------------------------------------------
magnitudes$p <- with(magnitudes, dvmgeom(m = magn, lm = lim.magn, r.mean))
## ----echo=TRUE, results='hide'------------------------------------------------
magn.min <- min(magnitudes$magn)
## ----echo=TRUE, results='asis'------------------------------------------------
idx <- magnitudes$magn == magn.min
magnitudes$p[idx] <- with(
magnitudes[idx,],
pvmgeom(m = magn + 1L, lm = lim.magn, r.mean, lower.tail = TRUE)
)
## ----echo=TRUE----------------------------------------------------------------
magnitutes.observed <- xtabs(Freq ~ magn.id + magn, data = magnitudes)
magnitutes.observed.mt <- margin.table(magnitutes.observed, margin = 2)
print(magnitutes.observed.mt)
## ----echo=TRUE----------------------------------------------------------------
magnitudes$magn[magnitudes$magn <= 0] <- '0-'
magnitudes$magn[magnitudes$magn >= 4] <- '4+'
magnitutes.observed <- xtabs(Freq ~ magn.id + magn, data = magnitudes)
print(margin.table(magnitutes.observed, margin = 2))
## ----echo=TRUE----------------------------------------------------------------
magnitutes.expected <- xtabs(p ~ magn.id + magn, data = magnitudes)
magnitutes.expected <- magnitutes.expected/nrow(magnitutes.expected)
print(sum(magnitudes$Freq) * margin.table(magnitutes.expected, margin = 2))
## ----echo=TRUE, results='asis'------------------------------------------------
chisq.test.result <- chisq.test(
x = margin.table(magnitutes.observed, margin = 2),
p = margin.table(magnitutes.expected, margin = 2)
)
## ----echo=TRUE----------------------------------------------------------------
print(chisq.test.result$p.value)
## ----fig.show='hold'----------------------------------------------------------
chisq.test.residuals <- with(new.env(), {
chisq.test.residuals <- residuals(chisq.test.result)
v <- as.vector(chisq.test.residuals)
names(v) <- rownames(chisq.test.residuals)
v
})
plot(
chisq.test.residuals,
main="Residuals of the chi-square goodness-of-fit test",
xlab="m",
ylab="Residuals",
ylim=c(-3, 3),
xaxt = "n"
)
abline(h=0.0, lwd=2)
axis(1, at = seq_along(chisq.test.residuals), labels = names(chisq.test.residuals))
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/inst/doc/vmgeom.R
|
---
title: "Parameter Estimation of the Geometric Magnitude Distribution"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
fig_width: 6
fig_height: 4
vignette: >
%\VignetteIndexEntry{Parameter Estimation of the Geometric Magnitude Distribution}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
library(vismeteor)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Introduction
The geometric distribution of meteor magnitudes is a frequently used statistical model to describe
the real magnitude distribution of a meteor shower. The observable magnitude distribution of meteors
is then
$$
{\displaystyle P[M = m] \sim f(m) \, \mathrm r^{-m}} \,\mathrm{,}
$$
where $m \ge -0.5$ is the difference between the limiting magnitude
and the meteor magnitude, and $f(m)$ is the perception probability function.
The estimation of the population index r, briefly called the r-value,
is a common task in the evaluation of meteor magnitudes.
Here we demonstrate two methods for unbiased estimation of this parameter.
First, we obtain some magnitude observations from the example data set,
which also includes the limiting magnitude.
```{r, echo=TRUE, results='hide'}
observations <- with(PER_2015_magn$observations, {
idx <- !is.na(lim.magn) & sl.start > 135.81 & sl.end < 135.87
data.frame(
magn.id = magn.id[idx],
lim.magn = lim.magn[idx]
)
})
head(observations, 5) # Example values
```
```{r, echo=FALSE, results='asis'}
knitr::kable(head(observations, 5))
```
Next, the observed meteor magnitudes are matched with the corresponding observations.
This is necessary as we need the limiting magnitudes of the observations to determine
the r-value.
Using
```{r, echo=TRUE, results='hide'}
magnitudes <- merge(
observations,
as.data.frame(PER_2015_magn$magnitudes),
by = 'magn.id'
)
magnitudes$magn <- as.integer(as.vector(magnitudes$magn))
head(magnitudes[magnitudes$Freq>0,], 5) # Example values
```
we obtain a data frame with the absolute observed frequencies `Freq` for each
observation of a magnitude class:
```{r, echo=FALSE, results='asis'}
knitr::kable(head(magnitudes[magnitudes$Freq>0,], 5))
```
This data frame contains a total of `r sum(magnitudes$Freq)` meteors.
This is a sufficiently large number to estimate the r-value.
## Maximum Likelihood Method I
The maximum likelihood method can be used to estimate the r-value in an unbiased manner.
For this, the function `dvmgeom()` is needed, which returns the probability density of
the observable meteor magnitudes when the r-value and the limiting magnitudes are known.
The following algorithm estimates the r-value by maximizing the likelihood with
the `optim()` function. The function `ll` returns the negative log-likelihood,
as `optim()` identifies a minimum. The expression
`subset(magnitudes, (magnitudes$lim.magn - magnitudes$magn) > -0.5)`
ensures that meteors fainter than the limiting magnitude are not used if they exist.
```{r, echo=TRUE, results='hide'}
# maximum likelihood estimation (MLE) of r
result <- with(subset(magnitudes, (magnitudes$lim.magn - magnitudes$magn) > -0.5), {
# log likelihood function
ll <- function(r) -sum(Freq * dvmgeom(magn, lim.magn, r, log=TRUE))
r.start <- 2.0 # starting value
r.lower <- 1.2 # lowest expected value
r.upper <- 4.0 # highest expected value
# find minimum
optim(r.start, ll, method='Brent', lower=r.lower, upper=r.upper, hessian=TRUE)
})
```
This gives the expected value and the variance of the r-value:
```{r, echo=TRUE}
r.mean <- result$par # mean of r
print(r.mean)
r.var <- 1/result$hessian[1][1] # variance of r
print(r.var)
```
## Maximum Likelihood Method II
With the maximum likelihood method, it can be demonstrated that the mean difference
between meteor magnitudes and the limiting magnitude is an unbiased estimator for
the r-value. This mean is straightforward to calculate:
```{r, echo=TRUE}
m.mean <- with(magnitudes, sum((lim.magn - magn) * Freq)/sum(Freq))
print(m.mean)
```
Similarly, its variance is:
```{r, echo=TRUE}
m.var <- with(magnitudes, {
n <- sum(Freq)
sum((lim.magn - magn - m.mean)^2 * Freq)/((n-1) * n)
})
print(m.var)
```
We can easily determine the mean magnitude difference for a given r-value using the Laplace transform
of the perception probabilities by setting `s = log(r)`. However, since we aim
to inversely determine the r-value from the mean value, we first generate
the necessary values and then employ the `splinefun()` function for interpolation:
```{r, echo=TRUE, results='hide'}
r.mean.fun <- with(new.env(), {
r <- seq(1.3, 3.5, 0.1)
s <- log(r)
m.mean <- -vmperception.l(s, deriv.degree = 1L)/vmperception.l(s)
splinefun(m.mean, r)
})
```
This approach yields the r-value as follows:
```{r, echo=TRUE}
r.mean <- r.mean.fun(m.mean)
print(r.mean)
```
Assuming that the mean is normally distributed and that the variance of
magnitudes `m.var` is small, we can obtain the variance of the r-value:
```{r, echo=TRUE}
r.var <- r.mean.fun(m.mean, deriv = 1L)^2 * m.var
print(r.var)
```
The method described herein for estimating the r-value offers an advantage over
the previous method. It is not only more straightforward to execute but also
less computationally demanding.
## Residual Analysis
So far, we have operated under the assumption that the real distribution of meteor magnitudes
is exponential and that the perception probabilities are accurate.
We now use the Chi-Square goodness-of-fit test to check whether the observed frequencies match
the expected frequencies. Then, using the estimated r-value, we retrieve the relative
frequencies `p` for each observation and add them to the data frame `magnitudes`:
```{r, echo=TRUE, results='hide'}
magnitudes$p <- with(magnitudes, dvmgeom(m = magn, lm = lim.magn, r.mean))
```
We must also consider the probabilities for the magnitude class with the brightest meteors.
```{r, echo=TRUE, results='hide'}
magn.min <- min(magnitudes$magn)
```
The smallest magnitude class `magn.min` is `r magn.min`. In calculating the probabilities,
we assume that the magnitude class `r magn.min` contains all meteors that are at least as bright
as `r magn.min` and thus use the function `pvmgeom()` to determine
their probability.
```{r, echo=TRUE, results='asis'}
idx <- magnitudes$magn == magn.min
magnitudes$p[idx] <- with(
magnitudes[idx,],
pvmgeom(m = magn + 1L, lm = lim.magn, r.mean, lower.tail = TRUE)
)
```
This ensures that the probability of observing a meteor of any given magnitude is 100%.
This is known as the normalization condition. Accordingly, the Chi-Square goodness-of-fit test
will fail if this condition is not met.
We now create the contingency table `magnitutes.observed` for the observed meteor magnitudes
and its margin table.
```{r, echo=TRUE}
magnitutes.observed <- xtabs(Freq ~ magn.id + magn, data = magnitudes)
magnitutes.observed.mt <- margin.table(magnitutes.observed, margin = 2)
print(magnitutes.observed.mt)
```
Next, we check which magnitude classes need to be aggregated so that each contains
at least 10 meteors, allowing us to perform a Chi-Square goodness-of-fit test.
The last output shows that meteors of magnitude class `0` or brighter must be combined into
a magnitude class `0-`. Meteors of magnitude class `4` or fainter are grouped here in the
magnitude class `4+`, and a new contingency table `magnitutes.observed` is created:
```{r, echo=TRUE}
magnitudes$magn[magnitudes$magn <= 0] <- '0-'
magnitudes$magn[magnitudes$magn >= 4] <- '4+'
magnitutes.observed <- xtabs(Freq ~ magn.id + magn, data = magnitudes)
print(margin.table(magnitutes.observed, margin = 2))
```
We now need the corresponding expected relative frequencies
```{r, echo=TRUE}
magnitutes.expected <- xtabs(p ~ magn.id + magn, data = magnitudes)
magnitutes.expected <- magnitutes.expected/nrow(magnitutes.expected)
print(sum(magnitudes$Freq) * margin.table(magnitutes.expected, margin = 2))
```
and then carry out the Chi-Square goodness-of-fit test:
```{r, echo=TRUE, results='asis'}
chisq.test.result <- chisq.test(
x = margin.table(magnitutes.observed, margin = 2),
p = margin.table(magnitutes.expected, margin = 2)
)
```
As a result, we obtain the p-value:
```{r, echo=TRUE}
print(chisq.test.result$p.value)
```
If we set the level of significance at 5 percent, then it is clear that the p-value of
`r chisq.test.result$p.value` is greater than 0.05. Thus, under the assumption that the
magnitude distribution follows a geometric meteor magnitude distribution and assuming that
the perception probabilities are correct (i.e., error-free or precisely known),
the assumptions cannot be rejected. However, the converse is not true; the assumptions
may not necessarily be correct. The total count of meteors here is too small for such
a conclusion.
To verify the p-value, we also graphically represent the Pearson residuals:
```{r, fig.show='hold'}
chisq.test.residuals <- with(new.env(), {
chisq.test.residuals <- residuals(chisq.test.result)
v <- as.vector(chisq.test.residuals)
names(v) <- rownames(chisq.test.residuals)
v
})
plot(
chisq.test.residuals,
main="Residuals of the chi-square goodness-of-fit test",
xlab="m",
ylab="Residuals",
ylim=c(-3, 3),
xaxt = "n"
)
abline(h=0.0, lwd=2)
axis(1, at = seq_along(chisq.test.residuals), labels = names(chisq.test.residuals))
```
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/inst/doc/vmgeom.Rmd
|
## ----setup, include = FALSE---------------------------------------------------
library(vismeteor)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----echo=TRUE, results='hide'------------------------------------------------
observations <- with(PER_2015_magn$observations, {
idx <- !is.na(lim.magn) & sl.start > 135.81 & sl.end < 135.87
data.frame(
magn.id = magn.id[idx],
lim.magn = lim.magn[idx]
)
})
head(observations, 5) # Example values
## ----echo=FALSE, results='asis'-----------------------------------------------
knitr::kable(head(observations, 5))
## ----echo=TRUE, results='hide'------------------------------------------------
magnitudes <- with(new.env(), {
magnitudes <- merge(
observations,
as.data.frame(PER_2015_magn$magnitudes),
by = 'magn.id'
)
magnitudes$magn <- as.integer(as.character(magnitudes$magn))
magnitudes
})
head(magnitudes[magnitudes$Freq>0,], 5) # Example values
## ----echo=FALSE, results='asis'-----------------------------------------------
knitr::kable(head(magnitudes[magnitudes$Freq>0,], 5))
## ----echo=TRUE, results='hide'------------------------------------------------
# maximum likelihood estimation (MLE) of psi
result <- with(subset(magnitudes, (magnitudes$lim.magn - magnitudes$magn) > -0.5), {
# log likelihood function
ll <- function(psi) -sum(Freq * dvmideal(magn, lim.magn, psi, log=TRUE))
psi.start <- 5.0 # starting value
psi.lower <- 0.0 # lowest expected value
psi.upper <- 10.0 # highest expected value
# find minimum
optim(psi.start, ll, method='Brent', lower=psi.lower, upper=psi.upper, hessian=TRUE)
})
## ----echo=TRUE----------------------------------------------------------------
psi.mean <- result$par # mean of psi
print(psi.mean)
psi.var <- 1/result$hessian[1][1] # variance of psi
print(psi.var)
## ----echo=TRUE, results='asis'------------------------------------------------
magnitudes$p <- with(magnitudes, dvmideal(m = magn, lm = lim.magn, psi.mean))
## ----echo=TRUE, results='hide'------------------------------------------------
magn.min <- min(magnitudes$magn)
## ----echo=TRUE, results='asis'------------------------------------------------
idx <- magnitudes$magn == magn.min
magnitudes$p[idx] <- with(
magnitudes[idx,],
pvmideal(m = magn + 1L, lm = lim.magn, psi.mean, lower.tail = TRUE)
)
## ----echo=TRUE----------------------------------------------------------------
magnitutes.observed <- xtabs(Freq ~ magn.id + magn, data = magnitudes)
magnitutes.observed.mt <- margin.table(magnitutes.observed, margin = 2)
print(magnitutes.observed.mt)
## ----echo=TRUE----------------------------------------------------------------
magnitudes$magn[magnitudes$magn <= 0] <- '0-'
magnitudes$magn[magnitudes$magn >= 4] <- '4+'
magnitutes.observed <- xtabs(Freq ~ magn.id + magn, data = magnitudes)
print(margin.table(magnitutes.observed, margin = 2))
## ----echo=TRUE----------------------------------------------------------------
magnitutes.expected <- xtabs(p ~ magn.id + magn, data = magnitudes)
magnitutes.expected <- magnitutes.expected/nrow(magnitutes.expected)
print(sum(magnitudes$Freq) * margin.table(magnitutes.expected, margin = 2))
## ----echo=TRUE, results='asis'------------------------------------------------
chisq.test.result <- chisq.test(
x = margin.table(magnitutes.observed, margin = 2),
p = margin.table(magnitutes.expected, margin = 2)
)
## ----echo=TRUE----------------------------------------------------------------
print(chisq.test.result$p.value)
## ----fig.show='hold'----------------------------------------------------------
chisq.test.residuals <- with(new.env(), {
chisq.test.residuals <- residuals(chisq.test.result)
v <- as.vector(chisq.test.residuals)
names(v) <- rownames(chisq.test.residuals)
v
})
plot(
chisq.test.residuals,
main="Residuals of the chi-square goodness-of-fit test",
xlab="m",
ylab="Residuals",
ylim=c(-3, 3),
xaxt = "n"
)
abline(h=0.0, lwd=2)
axis(1, at = seq_along(chisq.test.residuals), labels = names(chisq.test.residuals))
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/inst/doc/vmideal.R
|
---
title: "Parameter Estimation of the Ideal Magnitude Distribution"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
fig_width: 6
fig_height: 4
vignette: >
%\VignetteIndexEntry{Parameter Estimation of the Ideal Magnitude Distribution}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
library(vismeteor)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Introduction
The density of an ideal magnitude distribution is
$$
{\displaystyle f(m) = \frac{\mathrm{d}p}{\mathrm{d}m} = \frac{3}{2} \, \log(r) \sqrt{\frac{r^{3 \, \psi + 2 \, m}}{(r^\psi + r^m)^5}}}
$$
where $m$ is the meteor magnitude, $r = 10^{0.4} \approx 2.51189 \dots$ is a constant and
$\psi$ is the only parameter of this magnitude distribution.
In visual meteor observation, it is common to estimate meteor magnitudes in integer values.
Hence, this distribution is discrete and has the density
$$
{\displaystyle P[M = m] \sim g(m) \, \int_{m-0.5}^{m+0.5} f(x) \, \mathrm{d}x} \, \mathrm{,}
$$
where $g(m)$ is the perception probability function. This distribution is thus a product of the
perception probabilities and the actual ideal distribution of the meteor magnitudes.
Here we demonstrate a method for an unbiased estimation of $\psi$.
First, we obtain some magnitude observations from the example data set,
which also includes the limiting magnitude.
```{r, echo=TRUE, results='hide'}
observations <- with(PER_2015_magn$observations, {
idx <- !is.na(lim.magn) & sl.start > 135.81 & sl.end < 135.87
data.frame(
magn.id = magn.id[idx],
lim.magn = lim.magn[idx]
)
})
head(observations, 5) # Example values
```
```{r, echo=FALSE, results='asis'}
knitr::kable(head(observations, 5))
```
Next, the observed meteor magnitudes are matched with the corresponding observations.
This is necessary as we need the limiting magnitudes of the observations to determine
the parameter.
Using
```{r, echo=TRUE, results='hide'}
magnitudes <- with(new.env(), {
magnitudes <- merge(
observations,
as.data.frame(PER_2015_magn$magnitudes),
by = 'magn.id'
)
magnitudes$magn <- as.integer(as.character(magnitudes$magn))
magnitudes
})
head(magnitudes[magnitudes$Freq>0,], 5) # Example values
```
we obtain a data frame with the absolute observed frequencies `Freq` for each
observation of a magnitude class:
```{r, echo=FALSE, results='asis'}
knitr::kable(head(magnitudes[magnitudes$Freq>0,], 5))
```
This data frame contains a total of `r sum(magnitudes$Freq)` meteors.
This is a sufficiently large number to estimate the parameter.
## Maximum Likelihood Method
The maximum likelihood method can be used to estimate the parameter in an unbiased manner.
For this, the function `dvmideal()` is needed, which returns the probability density of
the observable meteor magnitudes when the parameter and the limiting magnitudes are known.
The following algorithm estimates the parameter by maximizing the likelihood with
the `optim()` function. The function `ll` returns the negative log-likelihood,
as `optim()` identifies a minimum. The expression
`subset(magnitudes, (magnitudes$lim.magn - magnitudes$magn) > -0.5)`
ensures that meteors fainter than the limiting magnitude are not used if they exist.
```{r, echo=TRUE, results='hide'}
# maximum likelihood estimation (MLE) of psi
result <- with(subset(magnitudes, (magnitudes$lim.magn - magnitudes$magn) > -0.5), {
# log likelihood function
ll <- function(psi) -sum(Freq * dvmideal(magn, lim.magn, psi, log=TRUE))
psi.start <- 5.0 # starting value
psi.lower <- 0.0 # lowest expected value
psi.upper <- 10.0 # highest expected value
# find minimum
optim(psi.start, ll, method='Brent', lower=psi.lower, upper=psi.upper, hessian=TRUE)
})
```
This gives the expected value and the variance of the parameter:
```{r, echo=TRUE}
psi.mean <- result$par # mean of psi
print(psi.mean)
psi.var <- 1/result$hessian[1][1] # variance of psi
print(psi.var)
```
## Residual Analysis
So far, we have operated under the assumption that the real distribution of meteor magnitudes
is an ideal magnitude distribution and that the perception probabilities are accurate.
We now use the Chi-Square goodness-of-fit test to check whether the observed frequencies match
the expected frequencies. Then, using the estimated parameter, we retrieve the relative
frequencies `p` for each observation and add them to the data frame `magnitudes`:
```{r, echo=TRUE, results='asis'}
magnitudes$p <- with(magnitudes, dvmideal(m = magn, lm = lim.magn, psi.mean))
```
We must also consider the probabilities for the magnitude class with the brightest meteors.
```{r, echo=TRUE, results='hide'}
magn.min <- min(magnitudes$magn)
```
The smallest magnitude class `magn.min` is `r magn.min`. In calculating the probabilities,
we assume that the magnitude class `r magn.min` contains all meteors that are at least as bright
as `r magn.min` and thus use the function `pvmideal()` to determine
their probability.
```{r, echo=TRUE, results='asis'}
idx <- magnitudes$magn == magn.min
magnitudes$p[idx] <- with(
magnitudes[idx,],
pvmideal(m = magn + 1L, lm = lim.magn, psi.mean, lower.tail = TRUE)
)
```
This ensures that the probability of observing a meteor of any given magnitude is 100%.
This is known as the normalization condition. Accordingly, the Chi-Square goodness-of-fit test
will fail if this condition is not met.
We now create the contingency table `magnitutes.observed` for the observed meteor magnitudes
and its margin table.
```{r, echo=TRUE}
magnitutes.observed <- xtabs(Freq ~ magn.id + magn, data = magnitudes)
magnitutes.observed.mt <- margin.table(magnitutes.observed, margin = 2)
print(magnitutes.observed.mt)
```
Next, we check which magnitude classes need to be aggregated so that each contains
at least 10 meteors, allowing us to perform a Chi-Square goodness-of-fit test.
The last output shows that meteors of magnitude class `0` or brighter must be combined into
a magnitude class `0-`. Meteors of magnitude class `4` or fainter are grouped here in the
magnitude class `4+`, and a new contingency table `magnitutes.observed` is created:
```{r, echo=TRUE}
magnitudes$magn[magnitudes$magn <= 0] <- '0-'
magnitudes$magn[magnitudes$magn >= 4] <- '4+'
magnitutes.observed <- xtabs(Freq ~ magn.id + magn, data = magnitudes)
print(margin.table(magnitutes.observed, margin = 2))
```
We now need the corresponding expected relative frequencies
```{r, echo=TRUE}
magnitutes.expected <- xtabs(p ~ magn.id + magn, data = magnitudes)
magnitutes.expected <- magnitutes.expected/nrow(magnitutes.expected)
print(sum(magnitudes$Freq) * margin.table(magnitutes.expected, margin = 2))
```
and then carry out the Chi-Square goodness-of-fit test:
```{r, echo=TRUE, results='asis'}
chisq.test.result <- chisq.test(
x = margin.table(magnitutes.observed, margin = 2),
p = margin.table(magnitutes.expected, margin = 2)
)
```
As a result, we obtain the p-value:
```{r, echo=TRUE}
print(chisq.test.result$p.value)
```
If we set the level of significance at 5 percent, then it is clear that the p-value of
`r chisq.test.result$p.value` is greater than 0.05. Thus, under the assumption that the
magnitude distribution follows an ideal meteor magnitude distribution and assuming that
the perception probabilities are correct (i.e., error-free or precisely known),
the assumptions cannot be rejected. However, the converse is not true; the assumptions
may not necessarily be correct. The total count of meteors here is too small for such
a conclusion.
To verify the p-value, we also graphically represent the Pearson residuals:
```{r, fig.show='hold'}
chisq.test.residuals <- with(new.env(), {
chisq.test.residuals <- residuals(chisq.test.result)
v <- as.vector(chisq.test.residuals)
names(v) <- rownames(chisq.test.residuals)
v
})
plot(
chisq.test.residuals,
main="Residuals of the chi-square goodness-of-fit test",
xlab="m",
ylab="Residuals",
ylim=c(-3, 3),
xaxt = "n"
)
abline(h=0.0, lwd=2)
axis(1, at = seq_along(chisq.test.residuals), labels = names(chisq.test.residuals))
```
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/inst/doc/vmideal.Rmd
|
---
title: "vismeteor"
output:
rmarkdown::html_vignette:
toc: true
fig_width: 6
fig_height: 4
vignette: >
%\VignetteIndexEntry{vismeteor}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup, include = FALSE}
library(vismeteor)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Introduction
`vismeteor` was developed to provide a comprehensive tool for analyzing visually observed
meteors. It takes into account human perception of different meteor magnitudes through
perception probabilities. Therefore, this package provides methods that incorporate
these perception probabilities in the evaluation of meteor magnitudes.
Perception probabilities are crucial in analyzing the observed magnitude distributions,
leading to specific magnitude distributions unique to visual meteor observations.
A popular distribution is the Visual Geometric Magnitude Distribution.
This is a standard geometric distribution adjusted by multiplying its densities with
the perception probabilities. The result is a distribution whose sole parameter is
the Population Index r.
This package enables users to work with this specially adjusted distribution.
Additionally, this package offers tools to facilitate the evaluation, recognizing that
visual meteor observations typically report only total counts over a specified period,
rather than individual meteor events.
For visual observations of meteor magnitudes, the observed counts can be specified in
fractional values (half meteors). This package includes a function for unbiased rounding
of these fractional values so that standard R tools that only accept integer counts can be used.
## Perception probabilities
Meteors appear randomly distributed across the sky. The perception of an observer can
vary significantly for each meteor, influenced largely by the region of the sky where
the meteor appears. Depending on its position, a meteor might be perceived or missed entirely,
leading to the concept of perception probabilities.
Perception probabilities quantify the likelihood of an observer detecting a meteor.
They provide a statistical measure reflecting the chance of seeing or not seeing a meteor
under various conditions. In statistical modeling of visual observations, these probabilities
are commonly expressed in relation to the difference between the meteor's magnitude and the
limiting magnitude of the observation.
This package includes the function `vmperception()` to return perception probabilities for
visual meteor magnitudes. For advanced analytical purposes, `vmperception.l()` is provided
to return the Laplace-transformed perception probabilities.
Custom perception probability functions can be passed to most functions of this package.
This allows users to tailor the behavior of the functions to their specific needs and
conduct their own studies on the appearance of observable meteor magnitudes.
This flexibility is particularly valuable for researchers wishing to incorporate
their own empirical data or models into the analysis.
See `?vmperception` and `?vmperception.l` for more information.
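As a brief illustration, the sketch below plots the perception probabilities over a range of
magnitude differences. It assumes that `vmperception()` accepts the difference between the
limiting magnitude and the meteor magnitude as its first argument.
```{r, echo=TRUE, results='hide'}
# sketch: perception probabilities as a function of the magnitude difference
# (assumption: vmperception() takes this difference as its first argument)
m <- seq(-0.5, 8, 0.5)
p <- vmperception(m)
plot(
    m, p,
    type = 'l',
    main = 'Perception probabilities',
    xlab = 'magnitude difference',
    ylab = 'perception probability'
)
```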
## Geometric magnitude distribution
In visual meteor observation, it is common to estimate meteor magnitudes in integer values.
Hence, the geometric magnitude distribution is discrete and has the density:
$$
{\displaystyle P[X = x] \sim f(x) \, \mathrm r^{-x}} \,\mathrm{,}
$$
where $x \ge -0.5$ represents the difference between the limiting magnitude and the
meteor magnitude, and $f(x)$ is the perception probability function. This distribution
is a product of the perception probabilities with the actual geometric distribution of
the meteor magnitudes. Therefore, the parameter $p$ of the geometric distribution is $p=1−1/r$.
The advantage of this model is its simplicity. When the number of observed meteors is low,
it can often be shown that the distribution of meteor magnitudes corresponds to this model.
```{r, echo=TRUE, results='hide'}
m <- seq(6, -4, -1)
limmag <- 6.5
r <- 2.0
p <- vismeteor::dvmgeom(m, limmag, r)
barplot(
p,
names.arg = m,
main = paste0('Density (r = ', r, ', limmag = ', limmag, ')'),
col = "blue",
xlab = 'm',
ylab = 'p',
border = "blue",
space = 0.5
)
axis(side = 2, at = pretty(p))
```
See `?vmgeom` for more information.
## Ideal magnitude distribution
The ideal magnitude distribution is an alternative model to the geometric magnitude distribution.
It more accurately reflects the finiteness of meteor magnitudes across the entire magnitude
spectrum, drawing upon the model of an ideal gas from theoretical physics as a conceptual analogy.
This model is particularly useful when, compared to the geometric distribution, fewer faint meteor
magnitudes have been observed.
The density of an ideal magnitude distribution is
$$
{\displaystyle \frac{\mathrm{d}p}{\mathrm{d}m} = \frac{3}{2} \, \log(r) \sqrt{\frac{r^{3 \, \psi + 2 \, m}}{(r^\psi + r^m)^5}}}
$$
where $m$ is the meteor magnitude, $r = 10^{0.4} \approx 2.51189 \dots$ is a constant and
$\psi$ is the only parameter of this magnitude distribution.
```{r, echo=TRUE, results='hide'}
m <- seq(6, -4, -1)
psi <- 5.0
limmag <- 6.5
p <- vismeteor::dvmideal(m, limmag, psi)
barplot(
p,
names.arg = m,
main = paste0('Density (psi = ', psi, ', limmag = ', limmag, ')'),
col = "blue",
xlab = 'm',
ylab = 'p',
border = "blue",
space = 0.5
)
axis(side = 2, at = pretty(p))
```
See `?mideal` and `?vmideal` for more information.
## Fractional Counting
In the statistical analysis of visual meteor observations, a method known as "count data"
in categorical time series is used. Observers record the number of meteors in each category
over specific time intervals.
In visual meteor observation, observers record integer meteor magnitudes and tally the counts
per magnitude class, uniquely allowing "half" counts when an observer is undecided between two
adjacent values. This fractional
counting reflects measurement uncertainty, splitting counts between adjacent magnitude categories.
While fractional counting accommodates measurement uncertainty with "half" counts, some tools
cannot process these fractional values and require integer rounding.
The function `vmtable()` addresses this by rounding the magnitudes in a contingency table to whole
numbers. It ensures that the rounding process only alters the marginal totals when necessary,
preserving the overall count integrity. This means both the grand total and the intermediate
sums of meteors observed remain consistent, ensuring accurate and usable data for
subsequent analysis.
Example:
```{r, echo=TRUE}
mt <- as.table(matrix(
c(
0.0, 0.0, 2.5, 0.5, 0.0, 1.0,
0.0, 1.5, 2.0, 0.5, 0.0, 0.0,
1.0, 0.0, 0.0, 3.0, 2.5, 0.5
), nrow = 3, ncol = 6, byrow = TRUE
))
colnames(mt) <- seq(6)
rownames(mt) <- c('A', 'B', 'C')
margin.table(mt, 1)
margin.table(mt, 2)
# contingency table with integer values
(mt.int <- vmtable(mt))
margin.table(mt.int, 1)
margin.table(mt.int, 2)
```
See `?vmtable` for more information.
## Quantile Analysis with Minimum Meteor Count
The function `freq.quantile()` is tailored for analyzing time series data in visual
meteor observation, where the count of meteors is recorded over specific time intervals.
Unlike traditional methods that sort quantiles based on time and percent, which often result
in some quantiles having fewer meteors than desired, `freq.quantile()` constructs quantiles
with a focus on ensuring a minimum number of meteors in each quantile.
This method addresses the challenge of varying meteor counts in each interval, including those
with zero meteors. By utilizing `freq.quantile()`, users can effectively divide the time series
into quantiles that are both time-based and density-based, enhancing the understanding of meteor
occurrence and distribution over the observed period while ensuring each quantile meets the
minimum count criterion.
This approach provides a more nuanced and reliable analysis, especially vital when dealing with
the inherent variability in meteor observations.
The following example represents a time-ordered list of observed meteor counts.
The objective is to group these counts into quantiles while maintaining their chronological order,
ensuring that each quantile contains at least 10 meteors.
```{r, echo=TRUE}
freq <- c(1,8,3,3,4,9,5,0,0,2,7,8,2,6,4)
f <- freq.quantile(freq, 10)
print(f)
print(tapply(freq, f, sum))
```
See `?freq.quantile` for more information.
## Interfacing VMDB Data with `load_vmdb()`
The `load_vmdb()` function is designed to interface with the
[imo-vmdb](https://pypi.org/project/imo-vmdb/)
application, processing data specifically exported from the Visual Meteor Database (VMDB) in
CSV format. After these CSV exports are verified and validated with the _imo-vmdb_ application,
they are stored in a relational database, which enhances the accessibility and usability of the
data compared to its original CSV format. This storage method establishes relationships between
data records and enriches them with additional information derived from the original data records.
Utilizing `load_vmdb()`, users can efficiently query and retrieve specific datasets relevant
to their meteor observation analysis. This streamlined access facilitates more comprehensive
and targeted research.
Within this package, `PER_2015_rates` and `PER_2015_magn` are provided as example data sets.
They are included for testing and training purposes, allowing users to
understand and utilize the full functionality of the entire package.
See `?load_vmdb`, `?PER_2015_rates` and `?PER_2015_magn` for more information.
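The structure of the bundled example data can be inspected directly. The sketch below assumes
that `load_vmdb()` returns data organized in the same way as `PER_2015_magn`.
```{r, echo=TRUE}
# sketch: inspect the bundled example magnitude data
# (assumption: load_vmdb() returns data organized like PER_2015_magn)
names(PER_2015_magn)
head(PER_2015_magn$observations, 3)
```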
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/vignettes/vismeteor.Rmd
|
---
title: "Parameter Estimation of the Geometric Magnitude Distribution"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
fig_width: 6
fig_height: 4
vignette: >
%\VignetteIndexEntry{Parameter Estimation of the Geometric Magnitude Distribution}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
library(vismeteor)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Introduction
The geometric distribution of meteor magnitudes is a frequently used statistical model to describe
the real magnitude distribution of a meteor shower. The observable magnitude distribution of meteors
is then
$$
{\displaystyle P[M = m] \sim f(m) \, \mathrm r^{-m}} \,\mathrm{,}
$$
where `m >= -0.5` is the difference between the limiting magnitude
and the meteor magnitude. `f(m)` is the perception probability function.
The estimation of the population index r, briefly called the r-value,
is a common task in the evaluation of meteor magnitudes.
Here we demonstrate two methods for unbiased estimation of this parameter.
First, we obtain some magnitude observations from the example data set,
which also includes the limiting magnitude.
```{r, echo=TRUE, results='hide'}
observations <- with(PER_2015_magn$observations, {
idx <- !is.na(lim.magn) & sl.start > 135.81 & sl.end < 135.87
data.frame(
magn.id = magn.id[idx],
lim.magn = lim.magn[idx]
)
})
head(observations, 5) # Example values
```
```{r, echo=FALSE, results='asis'}
knitr::kable(head(observations, 5))
```
Next, the observed meteor magnitudes are matched with the corresponding observations.
This is necessary as we need the limiting magnitudes of the observations to determine
the r-value.
Using
```{r, echo=TRUE, results='hide'}
magnitudes <- merge(
observations,
as.data.frame(PER_2015_magn$magnitudes),
by = 'magn.id'
)
magnitudes$magn <- as.integer(as.vector(magnitudes$magn))
head(magnitudes[magnitudes$Freq>0,], 5) # Example values
```
we obtain a data frame with the absolute observed frequencies `Freq` for each
observation of a magnitude class:
```{r, echo=FALSE, results='asis'}
knitr::kable(head(magnitudes[magnitudes$Freq>0,], 5))
```
This data frame contains a total of `r sum(magnitudes$Freq)` meteors.
This is a sufficiently large number to estimate the r-value.
## Maximum Likelihood Method I
The maximum likelihood method can be used to estimate the r-value in an unbiased manner.
For this, the function `dvmgeom()` is needed, which returns the probability density of
the observable meteor magnitudes when the r-value and the limiting magnitudes are known.
The following algorithm estimates the r-value by maximizing the likelihood with
the `optim()` function. The function `ll` returns the negative log-likelihood,
as `optim()` identifies a minimum. The expression
`subset(magnitudes, (magnitudes$lim.magn - magnitudes$magn) > -0.5)`
ensures that meteors fainter than the limiting magnitude are not used if they exist.
```{r, echo=TRUE, results='hide'}
# maximum likelihood estimation (MLE) of r
result <- with(subset(magnitudes, (magnitudes$lim.magn - magnitudes$magn) > -0.5), {
# log likelihood function
ll <- function(r) -sum(Freq * dvmgeom(magn, lim.magn, r, log=TRUE))
r.start <- 2.0 # starting value
r.lower <- 1.2 # lowest expected value
r.upper <- 4.0 # highest expected value
# find minimum
optim(r.start, ll, method='Brent', lower=r.lower, upper=r.upper, hessian=TRUE)
})
```
This gives the expected value and the variance of the r-value:
```{r, echo=TRUE}
r.mean <- result$par # mean of r
print(r.mean)
r.var <- 1/result$hessian[1][1] # variance of r
print(r.var)
```
## Maximum Likelihood Method II
With the maximum likelihood method, it can be demonstrated that the mean difference
between meteor magnitudes and the limiting magnitude is an unbiased estimator for
the r-value. This mean is straightforward to calculate:
```{r, echo=TRUE}
m.mean <- with(magnitudes, sum((lim.magn - magn) * Freq)/sum(Freq))
print(m.mean)
```
Similarly, its variance is:
```{r, echo=TRUE}
m.var <- with(magnitudes, {
n <- sum(Freq)
sum((lim.magn - magn - m.mean)^2 * Freq)/((n-1) * n)
})
print(m.var)
```
We can easily determine the mean for an r-value using the Laplace transform
of the perception probabilities by setting `s=log(r)`. However, since we aim
to inversely determine the r-value from the mean value, we first generate
the necessary values and then employ the `splinefun()` function for interpolation:
```{r, echo=TRUE, results='hide'}
r.mean.fun <- with(new.env(), {
r <- seq(1.3, 3.5, 0.1)
s <- log(r)
m.mean <- -vmperception.l(s, deriv.degree = 1L)/vmperception.l(s)
splinefun(m.mean, r)
})
```
This approach yields the r-value as follows:
```{r, echo=TRUE}
r.mean <- r.mean.fun(m.mean)
print(r.mean)
```
Assuming that the mean is normally distributed and that the variance of
magnitudes `m.var` is small, we can obtain the variance of the r-value:
```{r, echo=TRUE}
r.var <- r.mean.fun(m.mean, deriv = 1L)^2 * m.var
print(r.var)
```
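As a quick cross-check, the mean-based estimate can be compared with the maximum likelihood
estimate obtained above; the two values are expected to be close.
```{r, echo=TRUE}
# compare the mean-based estimate with the MLE from Method I
print(c(mle = result$par, mean.based = r.mean))
```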
The method described herein for estimating the r-value offers an advantage over
the previous method. It is not only more straightforward to execute but also
less computationally demanding.
## Residual Analysis
So far, we have operated under the assumption that the real distribution of meteor magnitudes
is exponential and that the perception probabilities are accurate.
We now use the Chi-Square goodness-of-fit test to check whether the observed frequencies match
the expected frequencies. Then, using the estimated r-value, we retrieve the relative
frequencies `p` for each observation and add them to the data frame `magnitudes`:
```{r, echo=TRUE, results='hide'}
magnitudes$p <- with(magnitudes, dvmgeom(m = magn, lm = lim.magn, r.mean))
```
We must also consider the probabilities for the magnitude class with the brightest meteors.
```{r, echo=TRUE, results='hide'}
magn.min <- min(magnitudes$magn)
```
The smallest magnitude class `magn.min` is `r magn.min`. In calculating the probabilities,
we assume that the magnitude class `r magn.min` contains meteors that are either brighter
or equally bright as `r magn.min` and thus use the function `pvmgeom()` to determine
their probability.
```{r, echo=TRUE, results='asis'}
idx <- magnitudes$magn == magn.min
magnitudes$p[idx] <- with(
magnitudes[idx,],
pvmgeom(m = magn + 1L, lm = lim.magn, r.mean, lower.tail = TRUE)
)
```
This ensures that the probability of observing a meteor of any given magnitude is 100%.
This is known as the normalization condition. Accordingly, the Chi-Square goodness-of-fit test
will fail if this condition is not met.
We now create the contingency table `magnitutes.observed` for the observed meteor magnitudes
and its margin table.
```{r, echo=TRUE}
magnitutes.observed <- xtabs(Freq ~ magn.id + magn, data = magnitudes)
magnitutes.observed.mt <- margin.table(magnitutes.observed, margin = 2)
print(magnitutes.observed.mt)
```
Next, we check which magnitude classes need to be aggregated so that each contains
at least 10 meteors, allowing us to perform a Chi-Square goodness-of-fit test.
The last output shows that meteors of magnitude class `0` or brighter must be combined into
a magnitude class `0-`. Meteors of magnitude class `4` or fainter are grouped here into the
magnitude class `4+`, and a new contingency table `magnitutes.observed` is created:
```{r, echo=TRUE}
magnitudes$magn[magnitudes$magn <= 0] <- '0-'
magnitudes$magn[magnitudes$magn >= 4] <- '4+'
magnitutes.observed <- xtabs(Freq ~ magn.id + magn, data = magnitudes)
print(margin.table(magnitutes.observed, margin = 2))
```
We now need the corresponding expected relative frequencies
```{r, echo=TRUE}
magnitutes.expected <- xtabs(p ~ magn.id + magn, data = magnitudes)
magnitutes.expected <- magnitutes.expected/nrow(magnitutes.expected)
print(sum(magnitudes$Freq) * margin.table(magnitutes.expected, margin = 2))
```
and then carry out the Chi-Square goodness-of-fit test:
```{r, echo=TRUE, results='asis'}
chisq.test.result <- chisq.test(
x = margin.table(magnitutes.observed, margin = 2),
p = margin.table(magnitutes.expected, margin = 2)
)
```
As a result, we obtain the p-value:
```{r, echo=TRUE}
print(chisq.test.result$p.value)
```
If we set the level of significance at 5 percent, then it is clear that the p-value of
`r chisq.test.result$p.value` is greater than 0.05. Thus, under the assumption that the
magnitude distribution follows a geometric meteor magnitude distribution and assuming that
the perception probabilities are correct (i.e., error-free or precisely known),
the assumptions cannot be rejected. However, the converse is not true; the assumptions
may not necessarily be correct. The total count of meteors here is too small for such
a conclusion.
To verify the p-value, we also graphically represent the Pearson residuals:
```{r, fig.show='hold'}
chisq.test.residuals <- with(new.env(), {
chisq.test.residuals <- residuals(chisq.test.result)
v <- as.vector(chisq.test.residuals)
names(v) <- rownames(chisq.test.residuals)
v
})
plot(
chisq.test.residuals,
main="Residuals of the chi-square goodness-of-fit test",
xlab="m",
ylab="Residuals",
ylim=c(-3, 3),
xaxt = "n"
)
abline(h=0.0, lwd=2)
axis(1, at = seq_along(chisq.test.residuals), labels = names(chisq.test.residuals))
```
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/vignettes/vmgeom.Rmd
|
---
title: "Parameter Estimation of the Ideal Magnitude Distribution"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
fig_width: 6
fig_height: 4
vignette: >
%\VignetteIndexEntry{Parameter Estimation of the Ideal Magnitude Distribution}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
library(vismeteor)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Introduction
The density of an ideal magnitude distribution is
$$
{\displaystyle f(m) = \frac{\mathrm{d}p}{\mathrm{d}m} = \frac{3}{2} \, \log(r) \sqrt{\frac{r^{3 \, \psi + 2 \, m}}{(r^\psi + r^m)^5}}}
$$
where $m$ is the meteor magnitude, $r = 10^{0.4} \approx 2.51189 \dots$ is a constant and
$\psi$ is the only parameter of this magnitude distribution.
In visual meteor observation, it is common to estimate meteor magnitudes in integer values.
Hence, this distribution is discrete and has the density
$$
{\displaystyle P[M = m] \sim g(m) \, \int_{m-0.5}^{m+0.5} f(x) \, \mathrm{d}x} \, \mathrm{,}
$$
where $g(m)$ is the perception probability function. This distribution is thus a product of the
perception probabilities and the actual ideal distribution of the meteor magnitudes.
Here we demonstrate a method for an unbiased estimation of $\psi$.
First, we obtain some magnitude observations from the example data set,
which also includes the limiting magnitude.
```{r, echo=TRUE, results='hide'}
observations <- with(PER_2015_magn$observations, {
idx <- !is.na(lim.magn) & sl.start > 135.81 & sl.end < 135.87
data.frame(
magn.id = magn.id[idx],
lim.magn = lim.magn[idx]
)
})
head(observations, 5) # Example values
```
```{r, echo=FALSE, results='asis'}
knitr::kable(head(observations, 5))
```
Next, the observed meteor magnitudes are matched with the corresponding observations.
This is necessary as we need the limiting magnitudes of the observations to determine
the parameter.
Using
```{r, echo=TRUE, results='hide'}
magnitudes <- with(new.env(), {
magnitudes <- merge(
observations,
as.data.frame(PER_2015_magn$magnitudes),
by = 'magn.id'
)
magnitudes$magn <- as.integer(as.character(magnitudes$magn))
magnitudes
})
head(magnitudes[magnitudes$Freq>0,], 5) # Example values
```
we obtain a data frame with the absolute observed frequencies `Freq` for each
observation of a magnitude class:
```{r, echo=FALSE, results='asis'}
knitr::kable(head(magnitudes[magnitudes$Freq>0,], 5))
```
This data frame contains a total of `r sum(magnitudes$Freq)` meteors.
This is a sufficiently large number to estimate the parameter.
## Maximum Likelihood Method
The maximum likelihood method can be used to estimate the parameter in an unbiased manner.
For this, the function `dvmideal()` is needed, which returns the probability density of
the observable meteor magnitudes when the parameter and the limiting magnitudes are known.
The following algorithm estimates the parameter by maximizing the likelihood with
the `optim()` function. The function `ll` returns the negative log-likelihood,
as `optim()` identifies a minimum. The expression
`subset(magnitudes, (magnitudes$lim.magn - magnitudes$magn) > -0.5)`
ensures that meteors fainter than the limiting magnitude are not used if they exist.
```{r, echo=TRUE, results='hide'}
# maximum likelihood estimation (MLE) of psi
result <- with(subset(magnitudes, (magnitudes$lim.magn - magnitudes$magn) > -0.5), {
# log likelihood function
ll <- function(psi) -sum(Freq * dvmideal(magn, lim.magn, psi, log=TRUE))
psi.start <- 5.0 # starting value
psi.lower <- 0.0 # lowest expected value
psi.upper <- 10.0 # highest expected value
# find minimum
optim(psi.start, ll, method='Brent', lower=psi.lower, upper=psi.upper, hessian=TRUE)
})
```
This gives the expected value and the variance of the parameter:
```{r, echo=TRUE}
psi.mean <- result$par # mean of psi
print(psi.mean)
psi.var <- 1/result$hessian[1][1] # variance of psi
print(psi.var)
```
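As an informal check of the fit, we can plot the magnitude densities implied by the estimated
parameter. The sketch below uses the mean limiting magnitude of the observations as a
representative value.
```{r, echo=TRUE, results='hide'}
# sketch: density of the fitted ideal distribution at the mean limiting magnitude
m <- seq(6, -4, -1)
limmag.mean <- mean(magnitudes$lim.magn)
p <- dvmideal(m, limmag.mean, psi.mean)
barplot(
    p,
    names.arg = m,
    main = paste0('Fitted density (psi = ', round(psi.mean, 2), ')'),
    col = "blue",
    xlab = 'm',
    ylab = 'p',
    border = "blue",
    space = 0.5
)
```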
## Residual Analysis
So far, we have operated under the assumption that the real distribution of meteor magnitudes
follows the ideal magnitude distribution and that the perception probabilities are accurate.
We now use the Chi-Square goodness-of-fit test to check whether the observed frequencies match
the expected frequencies. Then, using the estimated parameter, we retrieve the relative
frequencies `p` for each observation and add them to the data frame `magnitudes`:
```{r, echo=TRUE, results='asis'}
magnitudes$p <- with(magnitudes, dvmideal(m = magn, lm = lim.magn, psi.mean))
```
We must also consider the probabilities for the magnitude class with the brightest meteors.
```{r, echo=TRUE, results='hide'}
magn.min <- min(magnitudes$magn)
```
The smallest magnitude class `magn.min` is `r magn.min`. In calculating the probabilities,
we assume that the magnitude class `r magn.min` contains meteors that are either brighter
or equally bright as `r magn.min` and thus use the function `pvmideal()` to determine
their probability.
```{r, echo=TRUE, results='asis'}
idx <- magnitudes$magn == magn.min
magnitudes$p[idx] <- with(
magnitudes[idx,],
pvmideal(m = magn + 1L, lm = lim.magn, psi.mean, lower.tail = TRUE)
)
```
This ensures that the probability of observing a meteor of any given magnitude is 100%.
This is known as the normalization condition. Accordingly, the Chi-Square goodness-of-fit test
will fail if this condition is not met.
We now create the contingency table `magnitutes.observed` for the observed meteor magnitudes
and its margin table.
```{r, echo=TRUE}
magnitutes.observed <- xtabs(Freq ~ magn.id + magn, data = magnitudes)
magnitutes.observed.mt <- margin.table(magnitutes.observed, margin = 2)
print(magnitutes.observed.mt)
```
Next, we check which magnitude classes need to be aggregated so that each contains
at least 10 meteors, allowing us to perform a Chi-Square goodness-of-fit test.
The last output shows that meteors of magnitude class `0` or brighter must be combined into
a magnitude class `0-`. Meteors of magnitude class `4` or fainter are grouped here into the
magnitude class `4+`, and a new contingency table `magnitutes.observed` is created:
```{r, echo=TRUE}
magnitudes$magn[magnitudes$magn <= 0] <- '0-'
magnitudes$magn[magnitudes$magn >= 4] <- '4+'
magnitutes.observed <- xtabs(Freq ~ magn.id + magn, data = magnitudes)
print(margin.table(magnitutes.observed, margin = 2))
```
We now need the corresponding expected relative frequencies
```{r, echo=TRUE}
magnitutes.expected <- xtabs(p ~ magn.id + magn, data = magnitudes)
magnitutes.expected <- magnitutes.expected/nrow(magnitutes.expected)
print(sum(magnitudes$Freq) * margin.table(magnitutes.expected, margin = 2))
```
and then carry out the Chi-Square goodness-of-fit test:
```{r, echo=TRUE, results='asis'}
chisq.test.result <- chisq.test(
x = margin.table(magnitutes.observed, margin = 2),
p = margin.table(magnitutes.expected, margin = 2)
)
```
As a result, we obtain the p-value:
```{r, echo=TRUE}
print(chisq.test.result$p.value)
```
If we set the level of significance at 5 percent, then it is clear that the p-value of
`r chisq.test.result$p.value` is greater than 0.05. Thus, under the assumption that the
magnitude distribution follows an ideal meteor magnitude distribution and assuming that
the perception probabilities are correct (i.e., error-free or precisely known),
the assumptions cannot be rejected. However, the converse is not true; the assumptions
may not necessarily be correct. The total count of meteors here is too small for such
a conclusion.
To verify the p-value, we also graphically represent the Pearson residuals:
```{r, fig.show='hold'}
chisq.test.residuals <- with(new.env(), {
chisq.test.residuals <- residuals(chisq.test.result)
v <- as.vector(chisq.test.residuals)
names(v) <- rownames(chisq.test.residuals)
v
})
plot(
chisq.test.residuals,
main="Residuals of the chi-square goodness-of-fit test",
xlab="m",
ylab="Residuals",
ylim=c(-3, 3),
xaxt = "n"
)
abline(h=0.0, lwd=2)
axis(1, at = seq_along(chisq.test.residuals), labels = names(chisq.test.residuals))
```
|
/scratch/gouwar.j/cran-all/cranData/vismeteor/vignettes/vmideal.Rmd
|
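# Computes 'conditional'-type results for a visreg plot: predictions with
# standard errors over the conditioning grid x$DD, confidence limits on the
# (possibly transformed) scale, and partial residuals formed by adding the
# model residuals to the predictions at the observed data x$D.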
Response <- function(fit, x, trans, alpha, ...) {
## Calculate partial residuals
rr <- visregResid(fit)
nr <- if (is.matrix(rr)) nrow(rr) else length(rr)
if (nr>0 && nrow(x$D) != nr) warning("Residuals do not match data; have you changed the original data set? If so, visreg is probably not displaying the residuals for the data set that was actually used to fit the model.")
y <- visregPred(fit, x$D, ...)
if (is.null(rr)) {
r <- NULL
} else {
r <- y + rr
}
# Calculate predictions
p <- visregPred(fit, x$DD, se.fit=TRUE, ...)
## Format output
if (inherits(p, "svystat")) {
p <- list(fit=as.double(p), se.fit=sqrt(attr(p,"var")))
} else if (inherits(fit, "rq")) {
p <- list(fit=as.double(p[,1]), se.fit=as.double(p[,3]-p[,2])/(2*qnorm(.975)))
} else if (inherits(fit, "rms")) {
p$fit <- p$linear.predictors
} else if (is.double(p)) {
p <- list(fit=p, se.fit=NA)
}
m <- ifelse(identical(class(fit), "lm"), qt(1-alpha/2, fit$df.residual), qnorm(1-alpha/2))
upr <- p$fit + m*p$se.fit
lwr <- p$fit - m*p$se.fit
if (is.matrix(p$fit)) {
if (length(r)==0) {
R <- matrix(NA, nrow(x$D), ncol=ncol(p$fit))
} else {
R <- matrix(trans(r), ncol=ncol(p$fit))
}
val <- list(fit=matrix(trans(p$fit), ncol=ncol(p$fit)), lwr=matrix(trans(lwr), ncol=ncol(p$fit)), upr=matrix(trans(upr), ncol=ncol(p$fit)), r=R)
val$name <- colnames(val$fit) <- colnames(p$fit)
} else {
if (length(r)==0) r <- rep(NA_real_, nrow(x$D))
val <- list(fit=as.double(trans(p$fit)), lwr=as.double(trans(lwr)), upr=as.double(trans(upr)), r=as.double(trans(r)), name=as.character(formula(fit)[2]))
}
val$pos <- rr>0
if (length(val$pos)==0) {
if (is.matrix(p$fit)) {
val$pos <- matrix(NA, nrow(x$D), ncol(p$fit))
} else {
val$pos <- rep(NA_real_, nrow(x$D))
}
}
val$n <- if (is.matrix(p$fit)) ncol(p$fit) else 1
val
}
|
/scratch/gouwar.j/cran-all/cranData/visreg/R/Response.R
|
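# Computes 'contrast'-type results for a visreg plot: the linear predictor
# x$XX %*% b with standard errors derived from vcov(fit), confidence limits,
# and partial residuals formed by adding the model residuals to x$X %*% b.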
Terms <- function(fit, f, x, trans, alpha, ...) {
if (inherits(fit, "lme")) {
b <- nlme::fixed.effects(fit)
} else if (inherits(fit, "merMod")) {
b <- fit@beta
} else {
b <- coef(fit)
}
if (inherits(fit, "mlm")) {
summ <- summary(fit)
n.y <- length(summ)
yy <- SE <- matrix(NA, nrow=nrow(x$XX), ncol=n.y)
r <- rr <- matrix(NA, nrow=nrow(x$X), ncol=n.y)
for (i in 1:n.y) {
V <- summ[[i]]$sigma^2 * summ[[i]]$cov.unscaled
SE[,i] <- sqrt(apply(x$XX * (x$XX %*% V), 1, sum))
ind <- is.finite(b[,i])
yy[,i] <- x$XX%*%b[ind,i]
rr[,i] <- visregResid(fit)[,i]
r[,i] <- x$X%*%b[ind,i] + rr[,i]
}
} else {
V <- vcov(fit)
dg <- if (inherits(V, 'Matrix')) Matrix::diag(V) else diag(V)
if (inherits(fit, 'polr')) {
remove <- grep("|", colnames(V), fixed=TRUE)
V <- V[-remove,-remove, drop=FALSE]
}
if (any(is.na(dg))) {
remove <- which(is.na(dg))
V <- V[-remove,-remove, drop=FALSE]
}
SE <- sqrt(apply(x$XX * (x$XX %*% V), 1, sum))
yy <- drop(x$XX%*%b[is.finite(b)])
rr <- visregResid(fit)
if (is.null(rr)) rr <- rep(NA, nrow(x$X))
r <- drop(x$X%*%b[is.finite(b)]) + rr
if (nrow(x$X) != length(rr)) warning("Residuals do not match data; have you changed the original data set? If so, visreg is probably not displaying the residuals for the data set that was actually used to fit the model.")
}
if (!all(is.finite(b))) warning("prediction from a rank-deficient fit may be misleading")
m <- ifelse(identical(class(fit), "lm") || identical(class(fit), "mlm"), qt(1-alpha/2, fit$df.residual), qnorm(1-alpha/2))
lwr <- yy - m*SE
upr <- yy + m*SE
if (inherits(fit, "mlm")) {
val <- list(fit=matrix(as.double(trans(yy)), ncol=n.y), lwr=matrix(as.double(trans(lwr)), ncol=n.y), upr=matrix(as.double(trans(upr)), ncol=n.y), r=matrix(as.double(trans(r)), ncol=n.y))
val$name <- colnames(val$fit) <- colnames(fit$fitted.values)
} else {
val <- list(fit=as.double(trans(yy)), lwr=as.double(trans(lwr)), upr=as.double(trans(upr)), r=as.double(trans(r)), name=as.character(formula(fit)[2]))
}
val$pos <- rr>0
val$n <- if (inherits(fit, "mlm")) n.y else 1
val
}
|
/scratch/gouwar.j/cran-all/cranData/visreg/R/Terms.R
|
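# Formats the unique values of a numeric vector for use as axis labels,
# choosing the number of decimal places from the spread (sd) of the values.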
abbrNum <- function(xx) {
x <- unique(xx)
l <- format.info(x)
if (length(l) == 1) {
lab <- x
} else if (l[2] > 0) {
digits <- max(ceiling(-log10(sd(x))) + 1, 0)
lab <- formatC(x, digits=digits, format="f")
} else {
lab <- x
}
lab
}
|
/scratch/gouwar.j/cran-all/cranData/visreg/R/abbrNum.R
|
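# Combines a list of visreg objects into a single visreg object by stacking
# their fit and residual data frames and labelling each source in a
# 'visregCollapse' column, which is then used as the 'by' variable.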
collapse.visregList <- function(obj, labels, ...) {
fit <- res <- NULL
if (missing(labels)) {
l <- sapply(obj, function(v) v$meta$y)
if (any(duplicated(l))) {
labels <- paste0("Y", 1:length(obj))
} else {
labels <- l
}
}
if (length(labels) != length(obj)) stop("labels do not match list", call.=FALSE)
for (i in 1:length(obj)) {
fit <- rbind(fit, data.frame(obj[[i]]$fit, visregCollapse=labels[i]))
res <- rbind(res, data.frame(obj[[i]]$res, visregCollapse=labels[i]))
}
meta <- obj[[1]]$meta
meta$by <- "visregCollapse"
structure(list(fit=fit, res=res, meta=meta), class="visreg")
}
|
/scratch/gouwar.j/cran-all/cranData/visreg/R/collapseVisregList.R
|
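# Draws the x-axis for a factor predictor, spacing the K levels according to
# the whitespace fraction w; factorAxis2d computes the corresponding positions
# and level indices for two-dimensional (image/contour) plots.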
factorAxis <- function(x, w, new.args) {
K <- length(levels(x))
len <- K*(1-w)+(K-1)*w
axis.args <- list(side=1, at=(0:(K-1))/len+(1-w)/(2*len), labels=levels(x))
if (length(new.args)) axis.args[names(new.args)] <- new.args
do.call("axis", axis.args)
}
factorAxis2d <- function(xx, w, nn) {
l <- levels(xx)
K <- length(levels(xx))
len <- K*(1-w)+(K-1)*w
m <- ((0:(K-1))/len+(1-w)/(2*len))
ind <- integer(nn)
for(k in 1:K) {
i1 <- ceiling(nn*(k-1)/len)
i2 <- ceiling(nn*((k-1)/len + (1-w)/len))
i3 <- ceiling(nn*k/len)
ind[i1:i2] <- k
if (k!=K) ind[(i2+1):i3] <- NA
}
list(x=seq(0, 1, length=nn), m=m, l=l, ind=ind)
}
|
/scratch/gouwar.j/cran-all/cranData/visreg/R/factorAxis.R
|
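# Draws the factor version of a visreg panel: confidence bands, fitted line
# segments, partial-residual points, and rugs, one segment per factor level,
# spaced by the whitespace fraction w.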
factorPlot <- function(v, partial, band, rug, w, top, line.par, fill.par, points.par, ...) {
if (band) fp_bands(v, w, fill.par)
if (!partial) {
fp_lines(v, w, line.par)
} else {
if (top=='line') {
fp_points(v, w, points.par)
fp_lines(v, w, line.par)
} else {
fp_lines(v, w, line.par)
fp_points(v, w, points.par)
}
}
fp_rug(v, w, rug, line.par)
}
fp_lines <- function(v, w, line.par) {
xx <- v$fit[, v$meta$x]
K <- length(levels(xx))
len <- K*(1-w)+(K-1)*w
yy <- v$fit$visregFit
for(k in 1:K) {
x1 <- (k-1)/len
x2 <- (k-1)/len + (1-w)/len
xx <- c(x1, x2)
line.args <- list(x=c(x1, x2), y=rep(yy[k], 2), lwd=3, col="#008DFFFF")
if (length(line.par)) line.args[names(line.par)] <- line.par
do.call("lines", line.args)
}
}
fp_bands <- function(v, w, fill.par) {
xx <- v$fit[, v$meta$x]
K <- length(levels(xx))
len <- K*(1-w)+(K-1)*w
lwr <- v$fit$visregLwr
upr <- v$fit$visregUpr
for(k in 1:K) {
x1 <- (k-1)/len
x2 <- (k-1)/len + (1-w)/len
xx <- c(x1, x2)
    fill.args <- list(x=c(xx, rev(xx)), y=c(rep(lwr[k], 2), rev(rep(upr[k], 2))), col="gray85", border=FALSE)
if (length(fill.par)) fill.args[names(fill.par)] <- fill.par
do.call("polygon", fill.args)
}
}
fp_points <- function(v, w, points.par) {
x <- v$res[, v$meta$x]
y <- v$res$visregRes
K <- length(levels(x))
len <- K*(1-w)+(K-1)*w
for(k in 1:K) {
x1 <- (k-1)/len
x2 <- (k-1)/len + (1-w)/len
ind <- x==levels(x)[k]
rx <- seq(x1, x2, len=sum(ind)+2)[c(-1, -(sum(ind)+2))]
points.args <- list(x=rx, y=y[ind], pch=19, cex=0.4, col="gray50")
if (length(points.par)) points.args[names(points.par)] <- points.par
do.call("points", points.args)
}
}
fp_rug <- function(v, w, rug, line.args) {
x <- v$res[, v$meta$x]
y <- v$res$visregRes
K <- length(levels(x))
len <- K*(1-w)+(K-1)*w
for(k in 1:K) {
x1 <- (k-1)/len
x2 <- (k-1)/len + (1-w)/len
ind <- x==levels(x)[k]
rx <- seq(x1, x2, len=sum(ind)+2)[c(-1, -(sum(ind)+2))]
if (!all(is.na(v$res$visregPos))) {
if (rug==1) rug(rx, col=line.args$col)
if (rug==2) {
ind1 <- ind & !v$res$visregPos
ind2 <- ind & v$res$visregPos
rx1 <- seq(x1, x2, len=sum(ind1)+2)[c(-1,-(sum(ind1)+2))]
rx2 <- seq(x1, x2, len=sum(ind2)+2)[c(-1,-(sum(ind2)+2))]
rug(rx1, col=line.args$col)
rug(rx2, side=3, col=line.args$col)
}
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/visreg/R/factorPlot.R
|
fillFrame <- function(f, x, cond) {
## x = data frame of x variable(s) being changed
## x2 = variables being filled by median
## x3 = variables specified by cond
if (missing(cond)) cond <- NULL
for (j in 1:ncol(f)) {
if (is.factor(f[,j]) && !is.element(names(f)[j], names(cond)) && !is.element(names(f)[j], names(x))) {
mode = names(sort(-table(f[j])))[1]
eval(parse(text=c('cond=c(cond, list(', names(f)[j],'=factor(mode, levels=levels(f[, names(f)[j]]))))')))
}
}
exclude <- c(names(x), names(cond), names(which(sapply(f, function(x) inherits(x, "Surv")))))
x2 <- lapply(as.data.frame(f[, setdiff(names(f), exclude)]), median)
names(x2) <- setdiff(names(f), exclude)
x3 <- cond
for (j in seq_along(x3)) {
if (is.character(x3[[j]])) x3[[j]] <- factor(x3[[j]], levels=levels(f[, names(x3)[j]]))
}
if (length(x2)>0 & length(x3)>0) newdf <- data.frame(x, x2, x3, check.names=FALSE)
else if (length(x2)>0 & length(x3)==0) newdf <- data.frame(x, x2, check.names=FALSE)
else if (length(x2)==0 & length(x3)>0) newdf <- data.frame(x, x3, check.names=FALSE)
else newdf <- x
newdf
}
|
/scratch/gouwar.j/cran-all/cranData/visreg/R/fillFrame.R
|
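# Builds the x component (conditioning grid and observed values) and the y
# component (fit, confidence limits, residuals) for a single predictor,
# dispatching to Response() for type='conditional' and to Terms() for
# type='contrast'.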
getXY <- function(fit, f, name, nn, cond, type, trans, xtrans, alpha, jitter, ...) {
if (type=="conditional") {
x <- setupD(fit, f, name, nn, cond, ...)
y <- Response(fit, x, trans, alpha, ...)
} else if (type=="contrast") {
x <- setupX(fit, f, name, nn, cond, ...)
y <- Terms(fit, f, x, trans, alpha, ...)
x <- setupD(fit, f, name, nn, cond, ...)
}
if (jitter && is.numeric(x$x)) x$x <- jitter(x$x)
if (!missing(xtrans)) {
x$xx <- xtrans(x$xx)
x$x <- xtrans(x$x)
}
list(x=x, y=y)
}
|
/scratch/gouwar.j/cran-all/cranData/visreg/R/getXY.R
|
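# Builds the ggplot2 version of a visreg panel for a continuous predictor:
# confidence band, fitted line, partial-residual points, rugs, and optional
# 'by'-variable overlay or faceting.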
ggContPlot <- function(v, partial, band, rug, whitespace, strip.names, overlay, top, line.par, fill.par, points.par, ...) {
# Setup data frames
xx <- v$fit[, v$meta$x]
fillData <- data.frame(x = c(xx, rev(xx)),
y = c(v$fit$visregLwr, rev(v$fit$visregUpr)))
lineData <- data.frame(x = xx,
y = v$fit$visregFit)
pointData <- data.frame(x = v$res[, v$meta$x],
y = v$res$visregRes)
if ("by" %in% names(v$meta)) {
bb <- factor(v$fit[, v$meta$by])
fillData$z <- factor(c(bb, rev(bb)), labels=levels(bb))
lineData$z <- bb
pointData$z <- factor(v$res[, v$meta$by])
names(fillData)[3] <- names(lineData)[3] <- names(pointData)[3] <- v$meta$by
}
# Plotting defaults
dots <- list(...)
xlab <- if ("xlab" %in% names(dots)) dots$xlab else v$meta$x
if ("ylab" %in% names(dots)) {
ylab <- dots$ylab
} else {
ylab <- if (is.null(v$meta$yName)) paste("f(", v$meta$x, ")", sep="") else v$meta$yName
}
# Base gg object and aesthetic defaults
if ("by" %in% names(v$meta) & overlay){
p <- ggplot2::ggplot(pointData, ggplot2::aes_string('x', 'y', group=v$meta$by))
fill.args <- list(mapping=ggplot2::aes_string(fill=v$meta$by))
line.args <- list(mapping=ggplot2::aes_string(color=v$meta$by), size=1)
point.args <- list(mapping=ggplot2::aes_string(color=v$meta$by), size=0.8)
acol <- pal(length(levels(bb)), alpha=0.3)
col <- pal(length(levels(bb)))
if (length(fill.par)) fill.args[names(fill.par)] <- fill.par
if (length(line.par)) line.args[names(line.par)] <- line.par
if (length(points.par)) point.args[names(points.par)] <- points.par
p <- p + ggplot2::scale_fill_manual(values=acol) + ggplot2::scale_color_manual(values=col)
} else {
p <- ggplot2::ggplot(pointData, ggplot2::aes_string('x', 'y'))
fill.args <- list(fill="gray85")
line.args <- list(size=1, col="#008DFFFF")
point.args <- list(size=0.8, col="gray50")
if (length(fill.par)) fill.args[names(fill.par)] <- fill.par
if (length(line.par)) line.args[names(line.par)] <- line.par
if (length(points.par)) point.args[names(points.par)] <- points.par
}
p <- p + ggplot2::xlab(xlab) + ggplot2::ylab(ylab)
# Add geoms
if (band) {
fill.args$data <- fillData
p <- p + do.call("geom_polygon", fill.args, envir=asNamespace("ggplot2"))
}
line.args$data <- lineData
if (!partial) {
p <- p + do.call("geom_line", line.args, envir=asNamespace("ggplot2"))
} else {
point.args$data <- pointData
if (top == 'line') {
p <- p + do.call("geom_point", point.args, envir=asNamespace("ggplot2"))
p <- p + do.call("geom_line", line.args, envir=asNamespace("ggplot2"))
} else {
p <- p + do.call("geom_line", line.args, envir=asNamespace("ggplot2"))
p <- p + do.call("geom_point", point.args, envir=asNamespace("ggplot2"))
}
}
if (rug==1) {
rug.args <- point.args
rug.args$sides <- 'b'
p <- p + do.call("geom_rug", point.args, envir=asNamespace("ggplot2"))
}
if (rug==2) {
top.args <- bot.args <- point.args
top.args$sides <- 't'
bot.args$sides <- 'b'
top.args$data <- pointData[v$res$visregPos,]
bot.args$data <- pointData[!v$res$visregPos,]
p <- p + do.call("geom_rug", top.args, envir=asNamespace("ggplot2"))
p <- p + do.call("geom_rug", bot.args, envir=asNamespace("ggplot2"))
}
# Facet
if ("by" %in% names(v$meta) & !overlay) {
form <- as.formula(paste("~", v$meta$by))
K <- length(levels(bb))
if (identical(strip.names, TRUE)) {
p <- p + ggplot2::facet_grid(form, labeller=ggplot2::label_both)
} else if (identical(strip.names, FALSE)) {
p <- p + ggplot2::facet_grid(form)
} else if (is.character(strip.names) & length(strip.names) == K) {
names(strip.names) <- levels(bb)
args <- list(strip.names)
names(args) <- v$meta$by
lbl <- do.call(ggplot2::labeller, args)
p <- p + ggplot2::facet_grid(form, labeller=lbl)
} else {
stop('strip.names must either be logical or a character vector with length equal to the number of facets', call.=FALSE)
}
}
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/visreg/R/ggContPlot.R
|