#' Cancer survival data
#'
#' Creation script in data-raw
#'
"brca_cohort"

#' @title adtte - CDISC ADaM compliant time to event data set
#'
#' @description ADTTE data copied from the 2013 CDISC Pilot
#' @source CDISC SDTM/ADAM Pilot Project.
#'   \url{https://github.com/phuse-org/phuse-scripts/tree/master/data}
#'
#' @format A data frame with 254 rows and 26 variables:
#' \describe{
#'   \item{STUDYID}{Study Identifier}
#'   \item{SITEID}{Study Site Identifier}
#'   \item{USUBJID}{Unique Subject Identifier}
#'   \item{AGE}{Age}
#'   \item{AGEGR1}{Pooled Age Group 1}
#'   \item{AGEGR1N}{Pooled Age Group 1 (N)}
#'   \item{RACE}{Race}
#'   \item{RACEN}{Race (N)}
#'   \item{SEX}{Sex}
#'   \item{TRTSDT}{Date of First Exposure to Treatment}
#'   \item{TRTEDT}{Date of Last Exposure to Treatment}
#'   \item{TRTDUR}{Duration of treatment (days)}
#'   \item{TRTP}{Planned Treatment}
#'   \item{TRTA}{Actual Treatment}
#'   \item{TRTAN}{Actual Treatment (N)}
#'   \item{PARAM}{Parameter Description}
#'   \item{PARAMCD}{Parameter Code}
#'   \item{AVAL}{Analysis Value}
#'   \item{STARTDT}{Time to Event Origin Date for Subject}
#'   \item{ADT}{Analysis Date}
#'   \item{CNSR}{Censor}
#'   \item{EVNTDESC}{Event or Censoring Description}
#'   \item{SRCDOM}{Source Domain}
#'   \item{SRCVAR}{Source Variable}
#'   \item{SRCSEQ}{Source Sequence Number}
#'   \item{SAFFL}{Safety Population Flag}
#' }
#' @keywords datasets CDISC adtte
#' @name adtte
#' @examples
#' data("adtte")
"adtte"
/scratch/gouwar.j/cran-all/cranData/visR/R/data.R
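# Illustrative usage sketch, not part of the package source: the roxygen block
# above documents `adtte` as a one-record-per-subject CDISC ADaM ADTTE data set.
# A quick look at the variables used throughout the package examples:
library(visR)
data("adtte")
str(adtte[, c("USUBJID", "TRTP", "AVAL", "CNSR", "SAFFL")])
table(adtte$TRTP, adtte$CNSR) # CNSR = 1 marks censoring, CNSR = 0 the event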
#' @title Provides a simple wrapper for themes
#'
#' @description
#' `r lifecycle::badge("experimental")`
#' This function collects several lists if they are present. If absent, reasonable defaults are used.
#' When strata are not defined in the theme, they default to grey50 and will not be presented in the legend.
#' @param strata named list containing the different strata and name:colour value pairs
#' @param fontsizes named list containing the font sizes for different options
#' @param fontfamily string with the name of a supported font
#' @param grid boolean that specifies whether the major and minor grid should be drawn. The drawing of major and minor
#'   gridlines can be manipulated separately by using a boolean indicator in a named `list` with elements `major`
#'   and `minor`.
#' @param bg string defining the colour for the background of the plot
#' @param legend_position string defining the legend position. Valid options are NULL, 'top' 'bottom' 'right' 'left'
#'
#' @return Nested list with styling preferences for a ggplot object
#'
#' @examples
#'
#' theme <- visR::define_theme(
#'   strata = list("SEX" = list(
#'     "F" = "red",
#'     "M" = "blue"
#'   )),
#'   fontsizes = list(
#'     "axis" = 12,
#'     "ticks" = 10,
#'     "legend_title" = 10,
#'     "legend_text" = 8
#'   ),
#'   fontfamily = "Helvetica",
#'   grid = list(
#'     "major" = FALSE,
#'     "minor" = FALSE
#'   ),
#'   bg = "transparent",
#'   legend_position = "top"
#' )
#'
#' @export
define_theme <- function(strata = NULL,
                         fontsizes = NULL,
                         fontfamily = "Helvetica",
                         grid = FALSE,
                         bg = "transparent",
                         legend_position = NULL) {
  theme <- list()

  if (!base::is.null(strata)) {
    if (base::is.list(strata)) {
      if (base::length(base::names(strata)) > 0) {
        theme[["strata"]] <- strata
      } else {
        base::warning("Invalid argument for `strata`. Please provide a named list as described in the documentation. Setting strata to `NULL` (which results in no specific theming for stratification).")
        theme[["strata"]] <- NULL
      }
    }
  }

  if (!base::is.null(fontsizes)) {
    if (base::is.list(fontsizes)) {
      if (base::length(base::names(fontsizes)) > 0) {
        theme[["fontsizes"]] <- fontsizes
      } else {
        base::warning("Invalid argument for `fontsizes`. Please provide a named list for the individual plot elements as described in the documentation. Setting fontsizes to `NULL`.")
        theme[["fontsizes"]] <- NULL
      }
    } else if (base::is.numeric(fontsizes)) {
      base::message("Setting all fontsizes to the provided numeric value. It is recommended to use a named list as described in the documentation.")
      theme[["fontsizes"]] <- fontsizes
    } else {
      base::warning("Invalid argument for `fontsizes`. Please provide a named list as described in the documentation. Setting fontsizes to `NULL`.")
      theme[["fontsizes"]] <- NULL
    }
  }

  if (!base::is.character(fontfamily)) {
    base::warning("Invalid argument for `fontfamily`. Please provide the name of a valid font family as a string. Setting to default `Helvetica`.")
    theme[["fontfamily"]] <- "Helvetica"
  } else if (base::is.character(fontfamily) & (base::length(fontfamily) > 1)) {
    base::warning(paste0("Invalid amount of arguments for `fontfamily`. Using the first one: ", fontfamily[[1]]))
    theme[["fontfamily"]] <- fontfamily[[1]]
  } else if (base::is.character(fontfamily) & (base::length(fontfamily) == 1) & (base::nchar(fontfamily) == 0)) {
    base::warning("Invalid argument for `fontfamily`. Please provide the name of a valid font family as a string. Setting to default `Helvetica`.")
    theme[["fontfamily"]] <- "Helvetica"
  } else {
    theme[["fontfamily"]] <- fontfamily
  }

  if (base::is.logical(grid)) {
    if (grid == TRUE) {
      theme[["grid"]] <- list(
        "major" = TRUE,
        "minor" = FALSE
      )
    } else {
      theme[["grid"]] <- grid
    }
  } else if (is.list(grid)) {
    if (("major" %in% names(grid)) | ("minor" %in% names(grid))) {
      theme[["grid"]] <- grid
    } else {
      base::warning("Invalid argument for `grid`. Please use a boolean or a list to indicate whether you want a background grid. Setting to default `FALSE`.")
      theme[["grid"]] <- FALSE
    }
  } else {
    base::warning("Invalid argument for `grid`. Please use a boolean or a list to indicate whether you want a background grid. Setting to default `FALSE`.")
    theme[["grid"]] <- FALSE
  }

  if (!base::is.character(bg)) {
    base::warning("Invalid argument for `bg`. Please provide the name of a valid colour as a string. Setting to default `transparent`.")
    theme[["bg"]] <- "transparent"
  } else {
    theme[["bg"]] <- bg
  }

  if (base::is.null(legend_position) | isTRUE(legend_position %in% c("top", "right", "left", "bottom"))) {
    theme[["legend_position"]] <- legend_position
  } else {
    base::warning("Invalid argument for `legend_position`. Setting it to default \"right\".")
    theme[["legend_position"]] <- "right"
  }

  base::class(theme) <- c("visR_theme", class(theme))

  return(theme)
}

# END OF CODE -------------------------------------------------------------
/scratch/gouwar.j/cran-all/cranData/visR/R/define_theme.R
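# Illustrative usage sketch, not part of the package source: `define_theme()`
# only builds a nested, classed list; inspecting the result shows the validated
# slots. Applying it to a plot is assumed to go through visR's theming helpers
# (e.g. `visR::apply_theme()`), which live elsewhere in the package.
theme <- visR::define_theme(
  strata = list("SEX" = list("F" = "red", "M" = "blue")),
  grid = TRUE, # expands to list(major = TRUE, minor = FALSE)
  legend_position = "top"
)
class(theme) # "visR_theme" "list"
str(theme, max.level = 2)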
#' @title Wrapper for Kaplan-Meier Time-to-Event analysis
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#'
#' This function is a wrapper around `survival::survfit.formula()`
#' to perform a Kaplan-Meier analysis, assuming right-censored data.
#' The result is an object of class \code{survfit} which can be used in
#' downstream functions and methods that rely on the \code{survfit} class.
#'
#' The function can leverage the conventions and controlled vocabulary from
#' [CDISC ADaM ADTTE data model](https://www.cdisc.org/standards/foundational/adam/adam-basic-data-structure-bds-time-event-tte-analyses-v1-0),
#' and also works with standard, non-CDISC datasets through the `formula` argument.
#'
#' @section Estimation of 'survfit' object:
#'
#' The `estimate_KM()` function utilizes the defaults in `survival::survfit()`:
#' \itemize{
#'   \item{The Kaplan-Meier estimate is estimated directly (stype = 1).}
#'   \item{The cumulative hazard is estimated using the Nelson-Aalen estimator (ctype = 1): H.tilde = cumsum(x$n.event/x$n.risk).
#'     The MLE (H.hat(t) = -log(S.hat(t))) can't be requested.}
#'   \item{A two-sided pointwise 0.95 confidence interval is estimated using a log transformation (conf.type = "log").}
#' }
#'
#' When strata are present, the returned survfit object is supplemented with
#' a named list of the stratum and associated label.
#' To support full traceability, the data set name is captured in the named
#' list and the call is captured within its corresponding environment.
#'
#' @section PARAM/PARAMCD and CDISC:
#'
#' If the data frame includes columns PARAM/PARAMCD (part of the CDISC format),
#' the function expects the data has been filtered on the parameter of interest.
#'
#' @seealso \code{\link[survival]{survfit.formula} \link[survival]{survfitCI}}
#'
#' @param data A data frame. The dataset is expected to have
#'   one record per subject per analysis parameter. Rows with missing observations included in the analysis are removed.
#' @param AVAL,CNSR,strata These arguments are used to construct a formula to be passed to
#'   `survival::survfit(formula=Surv(AVAL, 1-CNSR)~strata)`. These arguments' default values follow the naming conventions in CDISC.
#'   - `AVAL` Analysis value for Time-to-Event analysis. Default is `"AVAL"`, as per CDISC ADaM guiding principles.
#'   - `CNSR` Censor for Time-to-Event analysis. Default is `"CNSR"`, as per CDISC ADaM guiding principles. It is expected that CNSR = 1
#'     for censoring and CNSR = 0 for the event of interest.
#'   - `strata` Character vector, representing the strata for Time-to-Event analysis. When NULL, an overall analysis is performed.
#'     Default is `NULL`.
#' @param ... additional arguments passed on to the ellipsis of the call `survival::survfit.formula(...)`.
#'   Use \code{?survival::survfit.formula} and \code{?survival::survfitCI} for more information.
#' @param formula `r lifecycle::badge('experimental')` formula with `Surv()` on the LHS and stratifying variables on the RHS. Use
#'   `~1` on the RHS for unstratified estimates. This argument will be passed to `survival::survfit(formula=)`. When this argument is
#'   used, arguments AVAL, CNSR, and strata are ignored.
#'
#' @return survfit object ready for downstream processing in estimation or visualization functions and methods.
#'
#' @references \url{https://github.com/therneau/survival}
#'
#' @export
#'
#' @examples
#'
#' ## No stratification
#' visR::estimate_KM(data = adtte)
#'
#' ## Stratified Kaplan-Meier analysis by `TRTP`
#' visR::estimate_KM(data = adtte, strata = "TRTP")
#'
#' ## Stratified Kaplan-Meier analysis by `TRTP` and `SEX`
#' visR::estimate_KM(data = adtte, strata = c("TRTP", "SEX"))
#'
#' ## Stratification with one level
#' visR::estimate_KM(data = adtte, strata = "PARAMCD")
#'
#' ## Analysis on subset of adtte
#' visR::estimate_KM(data = adtte[adtte$SEX == "F", ])
#'
#' ## Modify the default analysis by using the ellipsis
#' visR::estimate_KM(
#'   data = adtte, strata = NULL,
#'   type = "kaplan-meier", conf.int = FALSE, timefix = TRUE
#' )
#'
#' ## Example working with non-CDISC data
#' head(survival::veteran[c("time", "status", "trt")])
#'
#' # Using non-CDISC data
#' visR::estimate_KM(data = survival::veteran, formula = Surv(time, status) ~ trt)
#'
estimate_KM <- function(data = NULL,
                        strata = NULL,
                        CNSR = "CNSR",
                        AVAL = "AVAL",
                        formula = NULL,
                        ...) {
  lifecycle::deprecate_warn(
    when = "0.4.0",
    what = "visR::estimate_KM()",
    details = "Please use `ggsurvfit::ggsurvfit()` instead."
  )

  # Capture input to validate user input for data argument ---------------------
  dots <- rlang::dots_list(...)

  # Validate argument inputs ---------------------------------------------------
  if (is.null(data)) {
    stop(paste0("Data can't be NULL."))
  }

  if (!is.data.frame(data)) {
    stop("Data does not have class `data.frame`.")
  }

  if (is.null(formula)) {
    reqcols <- c(strata, CNSR, AVAL)
    if (!all(reqcols %in% colnames(data))) {
      stop(paste0("Following columns are missing from `data`: ", paste(setdiff(reqcols, colnames(data)), collapse = " "), "."))
    }

    if (!is.numeric(data[[AVAL]])) {
      stop("Analysis variable (AVAL) is not numeric.")
    }

    if (!is.numeric(data[[CNSR]])) {
      stop("Censor variable (CNSR) is not numeric.")
    }
  } else if (!inherits(formula, "formula")) {
    stop("Argument `formula=` must be class 'formula'.")
  }

  # Check formula arguments and add strata object if user passes formula -------
  if (!is.null(formula)) {
    if (any(!all.vars(formula) %in% names(data))) {
      vars_missing_in_data <-
        all.vars(formula) %>%
        setdiff(names(data)) %>%
        {
          paste(shQuote(., type = "csh"), collapse = ", ")
        }
      paste(
        "The following columns found in `formula=` are missing from the data frame:",
        vars_missing_in_data
      ) %>%
        stop(call. = FALSE)
    }

    # extract strata
    formula_rhs <- formula
    rlang::f_lhs(formula_rhs) <- NULL
    strata <-
      stats::get_all_vars(formula = formula_rhs, data = data) %>%
      names() %>%
      switch(!rlang::is_empty(.), .) # convert empty string to NULL
  }

  # construct formula if not passed by user ------------------------------------
  if (is.null(formula)) {
    formula <- stats::as.formula(paste0(
      "survival::Surv(", AVAL, ", 1-", CNSR, ") ~ ",
      ifelse(is.null(strata), "1", paste(strata, collapse = " + "))
    ))
  }

  # Remove NA from the analysis ------------------------------------------------
  data <- tidyr::drop_na(data, dplyr::all_of(all.vars(formula)))

  # Ensure the presence of at least one strata ----------------------------------
  formula_rhs <- ifelse(is.null(strata), "1", paste(strata, collapse = " + "))

  # Calculate survival and add time = 0 to survfit object -----------------------
  survfit_object <-
    rlang::inject(survival::survfit(!!formula, data = data, !!!dots)) %>% # immediately resolves call arguments
    survival::survfit0(start.time = 0)

  # convert survfit() call to quo with attached envir ---------------------------
  survfit_object$call[[1]] <- rlang::expr(survival::survfit) # adding `survival::` prefix
  survfit_object$call <- rlang::quo(!!survfit_object$call)

  # Add additional metadata ------------------------------------------------------
  if ("PARAM" %in% colnames(data) && length(setdiff(c("PARAMCD", "PARAM"), strata)) == 2) {
    # we expect only one unique value => catch mistakes
    survfit_object[["PARAM"]] <- paste(unique(data[["PARAM"]]), collapse = ", ")
  }

  if ("PARAMCD" %in% colnames(data) && length(setdiff(c("PARAMCD", "PARAM"), strata)) == 2) {
    # we expect only one unique value => catch mistakes
    survfit_object[["PARAMCD"]] <- paste(unique(data[["PARAMCD"]]), collapse = ", ")
  }

  survfit_object$data_name <- .call_list_to_name(as.list(match.call()))

  # Artificial strata for easy downstream processing when strata=NULL ------------
  if (is.null(survfit_object[["strata"]])) {
    survfit_object[["strata"]] <- as.vector(length(survfit_object[["time"]]))

    if (is.null(strata)) {
      # overall analysis
      attr(survfit_object[["strata"]], "names") <- "Overall"
    } else {
      # ~ x with one level in variable present
      attr(survfit_object[["strata"]], "names") <- as.character(paste0(strata, "=", data[1, formula_rhs]))
    }
  }

  # add strata labels - main goal is for populating legend in visR():
  #   label -- level1 strata -- levelx strata
  # these are the LABEL attributes of the stratifying variables
  # (separate from above, which are the levels of the variables)
  # is null, when no stratifying variables present so legend title is not populated as Overall -- overall
  if (!is.null(strata)) {
    survfit_object[["strata_lbls"]] <-
      lapply(as.list(strata), function(x) attr(data[[x]], "label") %||% x) %>%
      rlang::set_names(strata)
  }

  # Return ------------------------------------------------------------------------
  survfit_object
}
/scratch/gouwar.j/cran-all/cranData/visR/R/estimate_KM.R
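# Illustrative usage sketch, not part of the package source: the CDISC-style
# arguments and the experimental `formula=` interface documented above are
# assumed to build the same model, Surv(AVAL, 1 - CNSR) ~ TRTP, for the bundled
# `adtte` data set.
library(visR)
km_cdisc <- visR::estimate_KM(data = adtte, strata = "TRTP")
km_formula <- visR::estimate_KM(
  data = adtte,
  formula = survival::Surv(AVAL, 1 - CNSR) ~ TRTP
)
summary(km_cdisc, times = c(30, 60, 90))$surv
names(km_cdisc$strata) # strata names reused by downstream visR methods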
#' @title Competing Events Cumulative Incidence
#'
#' @description Function creates a cumulative incidence object using the
#' `tidycmprsk::cuminc()` function.
#'
#' @param AVAL,CNSR,strata These arguments are used to construct a formula to be passed to `tidycmprsk::cuminc(formula=)`.
#' - `AVAL` Analysis value for Time-to-Event analysis. Default is `"AVAL"`, as per CDISC ADaM guiding principles.
#' - `CNSR` Column name indicating the outcome and censoring statuses.
#'   Column must be a factor and the first level indicates censoring, the
#'   next level is the outcome of interest, and the remaining levels are the
#'   competing events. Default is `"CNSR"`
#' - `strata` Character vector, representing the strata for Time-to-Event analysis. When NULL, an overall analysis is performed.
#'   Default is `NULL`.
#' @param conf.int Confidence interval level. Default is 0.95. Parameter is passed to `tidycmprsk::cuminc(conf.level=)`
#' @param ... Additional arguments passed to `tidycmprsk::cuminc()`
#' @inheritParams estimate_KM
#' @inheritParams visr
#' @inheritParams add_CI.ggsurvfit
#' @inheritParams add_risktable.ggsurvfit
#' @inheritParams get_risktable
#'
#' @return A cumulative incidence object as explained at https://mskcc-epi-bio.github.io/tidycmprsk/reference/cuminc.html
#'
#' @export
#'
#' @examples
#' cuminc <-
#'   visR::estimate_cuminc(
#'     data = tidycmprsk::trial,
#'     strata = "trt",
#'     CNSR = "death_cr",
#'     AVAL = "ttdeath"
#'   )
#' cuminc
#'
#' cuminc %>%
#'   visR::visr() %>%
#'   visR::add_CI() %>%
#'   visR::add_risktable(statlist = c("n.risk", "cum.event"))
estimate_cuminc <- function(data = NULL,
                            strata = NULL,
                            CNSR = "CNSR",
                            AVAL = "AVAL",
                            conf.int = 0.95,
                            ...) {
  # check for installation of tidycmprsk package -------------------------
  rlang::check_installed("tidycmprsk", version = "0.1.1")

  dots <- rlang::dots_list(...)

  # Validate data ---------------------------------------------------------
  if (is.null(data)) stop(paste0("Data can't be NULL."))
  if (!is.numeric(conf.int)) stop(paste0("conf.int needs to be numeric."))
  if (!(0 <= conf.int & conf.int <= 1)) stop(paste0("conf.int needs to be between 0 and 1."))

  # Validate columns --------------------------------------------------------
  reqcols <- c(strata, CNSR, AVAL)
  if (!all(reqcols %in% colnames(data))) {
    stop(paste0("Following columns are missing from `data`: ", paste(setdiff(reqcols, colnames(data)), collapse = " "), "."))
  }

  if (!is.numeric(data[[AVAL]])) {
    stop("Analysis variable (AVAL) is not numeric.")
  }

  if (!is.factor(data[[CNSR]])) {
    stop("Censor variable (CNSR) is not a factor.")
  }

  # Remove NA from the analysis ---------------------------------------------
  data <- data %>%
    tidyr::drop_na(AVAL, CNSR)
  if (!is.null(strata)) {
    data <- data %>%
      tidyr::drop_na(any_of({{ strata }}))
  }

  # Ensure the presence of at least one strata --------------------------------
  strata <- ifelse(is.null(strata), 1, strata %>% paste(collapse = " + "))

  # cuminc ---------------------------------------------------------------------
  cuminc <-
    tidycmprsk::cuminc(
      formula = stats::as.formula(paste0("survival::Surv(", AVAL, ", ", CNSR, ") ~ ", strata)),
      data = data,
      conf.level = conf.int,
      ...
    )

  cuminc
}

# this function runs `tidy()` and puts it in the visR format
# 1. only keeps the first outcome
# 2. renames estimate and CI columns
# 3. adds a strata column if not already present
visr_tidy_tidycuminc <- function(x, times = NULL) {
  df_visr_tidy <-
    tidycmprsk::tidy(x, times = times) %>%
    dplyr::filter(.data[["outcome"]] %in% names(x$failcode)[1]) %>%
    # renaming to match column name in the survfit equivalent of these functions
    dplyr::rename(
      est = .data[["estimate"]],
      est.lower = .data[["conf.low"]],
      est.upper = .data[["conf.high"]]
    )

  # adding strata column if not already present
  if (!"strata" %in% names(df_visr_tidy)) {
    df_visr_tidy <- dplyr::mutate(df_visr_tidy, strata = "Overall")
  }

  as.data.frame(df_visr_tidy)
}
/scratch/gouwar.j/cran-all/cranData/visR/R/estimate_cuminc.R
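# Illustrative usage sketch, not part of the package source: unlike
# `estimate_KM()`, the status column here must be a factor whose first level
# means "censored"; `death_cr` in `tidycmprsk::trial` is assumed to follow that
# convention, based on the example above.
levels(tidycmprsk::trial$death_cr)
cuminc <- visR::estimate_cuminc(
  data   = tidycmprsk::trial,
  AVAL   = "ttdeath",
  CNSR   = "death_cr",
  strata = "trt"
)
cuminc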
#' @title Summarize Hazard Ratio from a survival object using S3 method
#'
#' @description S3 method for extracting information regarding Hazard Ratios.
#' The function allows the survival object's formula to be updated.
#' No default method is available at the moment.
#'
#' @seealso \code{\link[survival]{coxph}} \code{\link[stats]{update.formula}}
#'
#' @param x An object of class \code{survfit}
#' @param ... other arguments passed on to the method survival::coxph
#'
#' @rdname get_COX_HR
#' @export
get_COX_HR <- function(x, ...) {
  UseMethod("get_COX_HR", x)
}

#' @param update_formula Template which specifies how to update the formula of the survfit object \code{\link[stats]{update.formula}}
#'
#' @examples
#' ## treatment effect
#' survfit_object_trt <- visR::estimate_KM(data = adtte, strata = c("TRTP"))
#' visR::get_COX_HR(survfit_object_trt)
#'
#' ## treatment and gender effect
#' survfit_object_trt_sex <- visR::estimate_KM(data = adtte, strata = c("TRTP", "SEX"))
#' visR::get_COX_HR(survfit_object_trt_sex)
#'
#' ## update formula of KM estimates by treatment to include "SEX" for HR estimation
#' visR::get_COX_HR(survfit_object_trt, update_formula = ". ~ . + SEX")
#'
#' ## update formula of KM estimates by treatment to include "AGE" for
#' ## HR estimation with ties considered via the efron method
#' visR::get_COX_HR(survfit_object_trt,
#'   update_formula = ". ~ . + survival::strata(AGE)", ties = "efron"
#' )
#'
#' @return A tidied object of class \code{coxph} containing Hazard Ratios
#'
#' @rdname get_COX_HR
#' @method get_COX_HR survfit
#' @export
get_COX_HR.survfit <- function(x, update_formula = NULL, ...) {
  # Update formula ----------------------------------------------------------
  updated_call <- rlang::quo_squash(x$call)
  updated_call[["data"]] <- rlang::inject(!!updated_call[["data"]], env = attr(x$call, ".Environment"))
  updated_object <- eval(updated_call, envir = attr(x$call, ".Environment"))

  if (!is.null(update_formula)) {
    updated_object <- stats::update(updated_object, formula = stats::as.formula(update_formula), evaluate = TRUE)
  }

  # Change Call ---------------------------------------------------------------
  SurvCall <- as.list(updated_object$call)
  CoxArgs <- base::formals(survival::coxph)
  CoxCall <- append(quote(survival::coxph), SurvCall[names(SurvCall) %in% names(CoxArgs)])
  CoxCall <- append(CoxCall, list(...))

  # Tidy output ----------------------------------------------------------------
  cox <- eval(as.call(CoxCall), envir = attr(x$call, ".Environment")) %>%
    tidyme()

  return(cox)
}

# END OF CODE -------------------------------------------------------------
/scratch/gouwar.j/cran-all/cranData/visR/R/get_COX_HR.R
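# Illustrative usage sketch, not part of the package source: `get_COX_HR()`
# rebuilds the survfit call, optionally updates its formula, and refits it with
# `survival::coxph()`, so the result is a tidied coxph fit rather than a survfit.
library(visR)
km <- visR::estimate_KM(data = adtte, strata = "TRTP")
hr <- visR::get_COX_HR(km, update_formula = ". ~ . + SEX")
hr # tidied coxph output; column naming assumed to follow the tidyme() tidier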
#' Generate cohort attrition table
#'
#' @description
#' `r lifecycle::badge("questioning")`
#' This is an experimental function that may be developed over time.
#'
#' This function calculates the subject counts excluded and included
#' for each step of the cohort selection process.
#'
#' @param data Dataframe. It is used as the input data to count the subjects
#' that meet the criteria of interest
#' @param criteria_descriptions \code{character} It contains the descriptions
#' of the inclusion/exclusion criteria.
#' Each element of the vector corresponds to the description of each criterion.
#' @param criteria_conditions \code{character} It contains the corresponding
#' conditions of the criteria.
#' These conditions will be used in the table to compute the counts of the
#' subjects.
#' @param subject_column_name \code{character} The column name of the table that
#' contains the subject id.
#'
#' @usage get_attrition(data, criteria_descriptions, criteria_conditions,
#' subject_column_name)
#' @return The counts and percentages of the remaining and excluded subjects
#' for each step of the cohort selection in a table format.
#'
#' @details criteria_descriptions and criteria_conditions need to be of same length
#'
#' @examples
#' visR::get_attrition(adtte,
#'   criteria_descriptions =
#'     c(
#'       "1. Placebo Group", "2. Be 75 years of age or older.",
#'       "3. White", "4. Site 709"
#'     ),
#'   criteria_conditions = c(
#'     "TRTP=='Placebo'", "AGE>=75",
#'     "RACE=='WHITE'", "SITEID==709"
#'   ),
#'   subject_column_name = "USUBJID"
#' )
#' @export
get_attrition <- function(data, criteria_descriptions, criteria_conditions, subject_column_name) {
  if (!inherits(subject_column_name, "character") || length(subject_column_name) > 1) {
    stop("The 'subject_column_name' argument has to be a string. Please correct the 'subject_column_name' and re-run the function")
  }

  if (!subject_column_name %in% names(data)) {
    stop("The 'subject_column_name' argument doesn't correspond to a column name. Please correct the 'subject_column_name' and re-run the function")
  }

  if (length(criteria_descriptions) != length(criteria_conditions)) {
    stop("Vectors 'criteria_descriptions' and 'criteria_conditions' must have the same length.")
  }

  if (!NA %in% criteria_conditions) {
    criteria_map <- data.frame(cbind(criteria_descriptions, criteria_conditions), stringsAsFactors = FALSE)

    final_cond <- c()
    person_count_master <- c()

    for (each_cond in criteria_map$criteria_conditions) {
      final_cond <- ifelse(is.null(final_cond),
        each_cond,
        paste(paste0("(", final_cond, ")"),
          paste0(paste0("(", each_cond), ")"),
          sep = " & "
        )
      )
      # print(final_cond)

      person_count_temp <- data %>%
        dplyr::filter(eval(parse(text = final_cond))) %>%
        dplyr::select(!!subject_column_name) %>%
        dplyr::n_distinct()
      # print(person_count_temp)

      person_count_master <- c(person_count_master, person_count_temp)
    }

    if (length(person_count_master) > 0) {
      count_master_table <- dplyr::tibble("Remaining N" = person_count_master)
      criterion_0 <- dplyr::tibble(
        criteria_conditions = "none",
        criteria_descriptions = "Total cohort size",
        `Remaining N` = dplyr::select(data, !!subject_column_name) %>% dplyr::n_distinct()
      )

      # generate attrition table
      attrition_table <-
        criterion_0 %>%
        dplyr::bind_rows(cbind(criteria_map, count_master_table)) %>%
        dplyr::mutate(
          `Remaining %` = 100 * `Remaining N` / max(`Remaining N`),
          `Excluded N` = dplyr::lag(`Remaining N`, n = 1L, default = max(`Remaining N`)) - `Remaining N`,
          `Excluded %` = 100 * `Excluded N` / max(`Remaining N`)
        ) %>%
        # rename columns
        dplyr::rename(
          Condition = criteria_conditions,
          Criteria = criteria_descriptions
        ) %>%
        # fix formatting
        dplyr::select(Criteria, Condition, dplyr::everything())

      class(attrition_table) <- c("attrition", class(attrition_table))

      return(attrition_table)
    }
  }
}
/scratch/gouwar.j/cran-all/cranData/visR/R/get_attrition.R
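# Illustrative usage sketch, not part of the package source: each condition
# string is AND-combined with all previous ones, so the criteria are applied
# cumulatively from top to bottom.
library(visR)
attrition <- visR::get_attrition(
  adtte,
  criteria_descriptions = c("1. Placebo Group", "2. Age >= 75"),
  criteria_conditions   = c("TRTP=='Placebo'", "AGE>=75"),
  subject_column_name   = "USUBJID"
)
attrition[, c("Criteria", "Remaining N", "Excluded N")]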
#' @title Summarize the test for equality across strata from a survival object using S3 method
#'
#' @description Wrapper around survival::survdiff that tests the null hypothesis of equality across strata.
#'
#' @seealso \code{\link[survival]{survdiff}}
#'
#' @param survfit_object An object of class \code{survfit}
#' @param ptype Character vector containing the type of p-value desired. Current options are "Log-Rank" "Wilcoxon" "Tarone-Ware" "Custom" "All".
#'   "Custom" allows the user to specify the weights on the Kaplan-Meier estimates using the argument `rho`.
#'   The default is "All" displaying all types possible. When `rho` is specified in context of "All", also a custom p-value is displayed.
#' @param statlist Character vector containing the desired information to be displayed. The order of the arguments determines the order in which
#'   they are displayed in the final result. Default is the test name ("test"), Chi-squared test statistic ("Chisq"), degrees of freedom ("df") and
#'   p-value ("pvalue").
#' @param ... other arguments passed on to the method
#'
#' @inheritParams survival::survdiff
#'
#' @return A data frame with summary measures for the Test of Equality Across Strata
#'
#' @examples
#'
#' ## general examples
#' survfit_object <- visR::estimate_KM(data = adtte, strata = "TRTP")
#' visR::get_pvalue(survfit_object)
#' visR::get_pvalue(survfit_object, ptype = "All")
#'
#' ## examples to obtain specific tests
#' visR::get_pvalue(survfit_object, ptype = "Log-Rank")
#' visR::get_pvalue(survfit_object, ptype = "Wilcoxon")
#' visR::get_pvalue(survfit_object, ptype = "Tarone-Ware")
#'
#' ## Custom example - obtain Harrington and Fleming test
#' visR::get_pvalue(survfit_object, ptype = "Custom", rho = 1)
#'
#' ## Get specific information and statistics
#' visR::get_pvalue(survfit_object, ptype = "Log-Rank", statlist = c("test", "Chisq", "df", "pvalue"))
#' visR::get_pvalue(survfit_object, ptype = "Wilcoxon", statlist = c("pvalue"))
#'
#' @export
get_pvalue <- function(survfit_object,
                       ptype = "All",
                       rho = NULL,
                       statlist = c("test", "Chisq", "df", "pvalue"),
                       ...) {
  # Input validation ------------------------------------------------------------
  if (!inherits(survfit_object, "survfit")) {
    stop("The function expects an object of class `survfit` as input.")
  }

  if (length(names(survfit_object[["strata"]])) <= 1) {
    stop("Main effect has only 1 level. Test of equality over strata can't be determined.")
  }

  if (!base::any(c("Log-Rank", "Wilcoxon", "Tarone-Ware", "Custom", "All") %in% ptype)) {
    stop("Specify a valid type")
  }

  if ("Custom" %in% ptype & is.null(rho)) {
    stop("ptype = `Custom`. Please, specify rho.")
  }

  if (is.null(statlist) | !base::all(statlist %in% c("test", "df", "Chisq", "pvalue"))) {
    stop("Specify valid `statlist` arguments.")
  }

  # Re-use Call from survival object ----------------------------------------------
  Call <- as.list(rlang::quo_squash(survfit_object$call))
  NewCall <- append(as.list(parse(text = "survival::survdiff")), Call[names(Call) %in% names(formals(survival::survdiff))])

  if ("All" %in% ptype) {
    ptype <- c("Log-Rank", "Wilcoxon", "Tarone-Ware")
    if (!is.null(rho)) {
      ptype <- c(ptype, "Custom")
    }
  }

  # Summary list --------------------------------------------------------------------
  survdifflist <- list(
    `Log-Rank` = rlang::expr(eval(as.call(
      append(!!NewCall, list(rho = 0))
    ))),
    `Wilcoxon` = rlang::expr(eval(as.call(
      append(!!NewCall, list(rho = 1))
    ))),
    `Tarone-Ware` = rlang::expr(eval(as.call(
      append(!!NewCall, list(rho = 1.5))
    ))),
    `Custom` = rlang::expr(eval(as.call(
      append(!!NewCall, list(rho = !!rho))
    )))
  )[ptype]

  survdifflist_eval <-
    lapply(
      survdifflist,
      function(x) {
        tryCatch(
          eval(x, envir = attr(survfit_object$call, ".Environment")),
          error = function(e) {
            error_msg <- as.character(e)
            if (!is_visr_survfit(survfit_object)) {
              error_msg <- paste0(
                "There was an error calculating the p-values.\n",
                "The 'survfit' object was not created with `visR::estimate_KM()`.\n",
                "The error will likely be resolved by re-estimating the ",
                "'survfit' object with visR.\n",
                error_msg
              )
            }
            stop(error_msg, call. = FALSE)
          }
        )
      }
    )

  # Statlist --------------------------------------------------------------------------
  statlist <- unique(statlist)
  statlist <- base::sub("test", "Equality across strata", statlist, fixed = TRUE)
  statlist <- base::sub("pvalue", "p-value", statlist, fixed = TRUE)
  Nms <- names(survdifflist_eval)

  stat_summary <- list(
    `Equality across strata` = rlang::expr(base::sub(
      "Custom",
      paste0("Harrington and Fleming test (rho = ", rho, ")"),
      Nms,
      fixed = TRUE
    )),
    `Chisq` = rlang::expr(unlist(
      lapply(survdifflist_eval, function(x) {
        format(round(x$chisq, 3), nsmall = 3, justify = "right", width = 6, scientific = FALSE)
      })
    )),
    df = rlang::expr(unlist(
      lapply(survdifflist_eval, function(x) {
        length(x$n) - 1
      })
    )),
    `p-value` = rlang::expr(unlist(
      lapply(survdifflist_eval, function(x) {
        .pvalformat(
          stats::pchisq(x$chisq, length(x$n) - 1, lower.tail = FALSE)
        )
      })
    ))
  )[statlist]

  # Output to dataframe ------------------------------------------------------------------
  equality <- data.frame(
    lapply(stat_summary, eval, env = environment()),
    check.names = FALSE,
    stringsAsFactors = FALSE,
    row.names = NULL
  )

  return(equality)
}
/scratch/gouwar.j/cran-all/cranData/visR/R/get_pvalue.R
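# Illustrative usage sketch, not part of the package source: the three named
# tests are survival::survdiff() refits with rho = 0, 1 and 1.5, while "Custom"
# passes a user-supplied rho (Harrington and Fleming weights).
library(visR)
km <- visR::estimate_KM(data = adtte, strata = "TRTP")
visR::get_pvalue(km, ptype = "Log-Rank")
visR::get_pvalue(km, ptype = "Custom", rho = 2, statlist = c("test", "pvalue"))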
#' @title Wrapper around quantile methods
#'
#' @description S3 method for extracting quantiles.
#' No default method is available at the moment.
#'
#' @seealso \code{\link[survival]{quantile.survfit}}
#'
#' @param x An object of class \code{survfit}
#' @param probs probabilities Default = c(0.25,0.50,0.75)
#' @inheritParams survival::quantile.survfit
#' @param ... other arguments passed on to the method
#'
#' @examples
#'
#' ## Kaplan-Meier estimates
#' survfit_object <- visR::estimate_KM(data = adtte, strata = c("TRTP"))
#'
#' ## visR quantiles
#' visR::get_quantile(survfit_object)
#'
#' ## survival quantiles
#' quantile(survfit_object)
#'
#' @return A data frame with quantiles of the object
#'
#' @rdname get_quantile
#' @export
#'
get_quantile <- function(x, ...) {
  UseMethod("get_quantile", x)
}

#' @rdname get_quantile
#' @method get_quantile survfit
#' @export
get_quantile.survfit <- function(x,
                                 ...,
                                 probs = c(0.25, 0.50, 0.75),
                                 conf.int = TRUE,
                                 tolerance = sqrt(.Machine$double.eps)) {
  # User input validation ------------------------------------------------------
  if (conf.int == TRUE & !base::all(c("lower", "upper") %in% names(x))) {
    stop("Confidence limits were not part of original estimation.")
  }

  if (!base::all(is.numeric(probs) == TRUE) | (!base::all(probs < 1))) {
    stop("probs should be a numeric vector.")
  }

  if (!is.numeric(tolerance)) {
    stop("tolerance should be numeric")
  }

  # Extract quantiles ------------------------------------------------------------
  q <- quantile(x,
    probs = probs,
    conf.int = conf.int,
    tolerance = tolerance,
    type = 3
  )

  qdf <- do.call(rbind.data.frame, q)

  strata <- as.character(unlist(lapply(q, rownames)))
  quantity <- unlist(lapply(strsplit(rownames(qdf), "\\.", fixed = FALSE), `[[`, 1))

  final <- data.frame(
    cbind(strata, quantity, qdf),
    row.names = NULL,
    check.names = FALSE
  )

  final <- final[order(final[, "strata"], final[, "quantity"]), ]

  return(final)
}
/scratch/gouwar.j/cran-all/cranData/visR/R/get_quantile.R
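# Illustrative usage sketch, not part of the package source: compared with
# survival::quantile.survfit(), the visR wrapper returns one long data frame
# with a `strata` column and a `quantity` column (quantile / lower / upper).
library(visR)
km <- visR::estimate_KM(data = adtte, strata = "TRTP")
q <- visR::get_quantile(km, probs = c(0.25, 0.50))
head(q)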
#' @title Obtain risk tables for tables and plots
#'
#' @description Create a risk table from an object using an S3 method.
#' Currently, no default method is defined.
#'
#' @seealso \code{\link[survival]{summary.survfit}}
#'
#' @param x an object of class `survfit` or `tidycuminc`
#' @param times Numeric vector indicating the times at which the risk set, censored subjects, events are calculated.
#' @param statlist Character vector indicating which summary data to present. Current choices are "n.risk" "n.event"
#'   "n.censor", "cum.event", "cum.censor".
#'   Default is "n.risk".
#' @param label Character vector with labels for the statlist. Default matches "n.risk" with "At risk", "n.event" with
#'   "Events", "n.censor" with "Censored", "cum.event" with "Cum. Event", and "cum.censor" with "Cum. Censor".
#' @param group String indicating the grouping variable for the risk tables.
#'   Current options are:
#'   \itemize{
#'     \item{"strata": groups the risk tables per stratum.
#'       The `label` specifies the label within each risk table. The strata levels
#'       are used for the titles of the risk tables. This is the default}
#'     \item{"statlist": groups the risk tables per statlist.
#'       The `label` specifies the title for each risk table. The strata levels
#'       are used for labeling within each risk table.}
#'   }
#'   Default is "strata".
#' @param collapse Boolean, indicates whether to present the data overall.
#'   Default is FALSE.
#' @param ... other arguments passed on to the method
#'
#' @return return list of attributes that form the risk table, i.e.
#'   number of patients at risk per strata
#' @rdname get_risktable
#' @export
#'
get_risktable <- function(x, ...) {
  UseMethod("get_risktable")
}

#' @rdname get_risktable
#' @method get_risktable survfit
#' @export
get_risktable.survfit <- function(x,
                                  times = NULL,
                                  statlist = "n.risk",
                                  label = NULL,
                                  group = c("strata", "statlist"),
                                  collapse = FALSE,
                                  ...) {
  # User input validation ------------------------------------------------------
  group <- match.arg(group)

  if (!base::all(statlist %in% c(
    "n.risk", "n.censor", "n.event",
    "cum.censor", "cum.event"
  ))) {
    stop("statlist argument not valid. Current options are n.risk, n.censor, n.event, cum.event, cum.censor")
  }

  if (!is.null(label) & !base::all(is.character(label)) & !base::inherits(label, "factor")) {
    stop("label arguments should be of class `character` or `factor`.")
  }

  if (!base::is.logical(collapse)) {
    stop("Error in get_risktable: collapse is expected to be boolean.")
  }

  if (base::any(times < 0)) {
    stop("Negative times are not valid.")
  }

  # Clean input ------------------------------------------------------------------
  tidy_object <- tidyme(x)
  statlist <- unique(statlist)

  # Match amount of elements in label with statlist -------------------------------
  if (length(label) <= length(statlist)) {
    vlookup <- data.frame(
      statlist = c(
        "n.risk", "n.censor", "n.event",
        "cum.censor", "cum.event"
      ),
      label = c(
        "At risk", "Censored", "Events",
        "Cum. Censored", "Cum. Events"
      ),
      check.names = FALSE,
      stringsAsFactors = FALSE
    )

    if (is.null(label)) label <- NA

    label <- c(label, rep(NA, length(statlist) - length(label)))
    have <- data.frame(cbind(label, statlist),
      check.names = FALSE,
      stringsAsFactors = TRUE
    )

    label_lookup <- vlookup %>%
      dplyr::right_join(have, by = "statlist") %>%
      dplyr::mutate(label = dplyr::coalesce(label.y, label.x)) %>%
      dplyr::select(-label.x, -label.y) %>%
      as.data.frame()
  } else if (length(label) > length(statlist)) {
    label_lookup <- data.frame(
      statlist = statlist,
      label = label[1:length(statlist)],
      check.names = FALSE,
      stringsAsFactors = TRUE
    )
  }

  # Ensure the order of the label corresponds to statlist order ---------------------
  statlist_order <- factor(statlist, levels = statlist)
  label_lookup[["statlist"]] <- factor(label_lookup[["statlist"]], levels = statlist)
  label_lookup <- label_lookup[order(label_lookup[["statlist"]]), ]

  # Generate time ticks ---------------------------------------------------------------
  if (is.null(times)) {
    times <- pretty(x$time, 10)
  } else {
    times <- times[order(unique(times))]
  }

  # Summary -----------------------------------------------------------------------------
  survfit_summary <- summary(x, times = times, extend = TRUE)

  # Risk table per statlist ---------------------------------------------------------------
  ## labels of risk table are strata, titles are specified through `label`
  per_statlist <- data.frame(
    time = survfit_summary$time,
    strata = base::factor(.get_strata(survfit_summary[["strata"]]),
      levels = unique(.get_strata(survfit_summary[["strata"]]))
    ),
    n.risk = survfit_summary[["n.risk"]],
    n.event = survfit_summary[["n.event"]],
    n.censor = survfit_summary[["n.censor"]]
  ) %>%
    dplyr::arrange(.data[["strata"]], .data[["time"]]) %>%
    dplyr::group_by(.data[["strata"]]) %>%
    dplyr::mutate(
      cum.event = cumsum(.data[["n.event"]]),
      cum.censor = cumsum(.data[["n.censor"]])
    ) %>%
    dplyr::ungroup() %>%
    dplyr::rename(y_values = strata) %>%
    as.data.frame()

  final <- per_statlist[, c("time", "y_values", levels(statlist_order))]

  attr(final, "time_ticks") <- times
  attr(final, "title") <- label_lookup[["label"]]
  attr(final, "statlist") <- levels(label_lookup[["statlist"]])

  # Organize the risk tables per strata => reorganize the data ---------------------------
  if (group == "strata" & collapse == FALSE) {
    per_strata <- per_statlist %>%
      dplyr::arrange(time) %>%
      tidyr::pivot_longer(
        cols = c(
          "n.risk", "n.censor", "n.event",
          "cum.censor", "cum.event"
        ),
        names_to = "statlist",
        values_to = "values"
      ) %>%
      tidyr::pivot_wider(names_from = "y_values", values_from = values) %>%
      dplyr::rename(y_values = statlist) %>%
      dplyr::filter(y_values %in% statlist) %>%
      as.data.frame()

    per_strata[["y_values"]] <- factor(per_strata[["y_values"]],
      levels = levels(label_lookup[["statlist"]]),
      labels = label_lookup[["label"]]
    )
    per_strata <- per_strata[order(per_strata[["y_values"]]), ]

    title <- levels(per_statlist[["y_values"]])

    final <- per_strata
    attr(final, "time_ticks") <- times
    attr(final, "title") <- title
    attr(final, "statlist") <- title
  }

  # Collapse: start from the group == "statlist" logic ------------------------------------
  if (collapse == TRUE) {
    collapsed <- per_statlist %>%
      dplyr::arrange(time) %>%
      dplyr::mutate(strata = "Overall") %>%
      dplyr::group_by(time, strata) %>%
      dplyr::summarise(
        n.risk = sum(n.risk),
        n.event = sum(n.event),
        n.censor = sum(n.censor),
        cum.event = sum(.data[["cum.event"]]),
        cum.censor = sum(.data[["cum.censor"]])
      ) %>%
      dplyr::ungroup() %>%
      dplyr::select(-strata) %>%
      tidyr::pivot_longer(
        cols = c(
          "n.risk", "n.censor", "n.event",
          "cum.censor", "cum.event"
        ),
        names_to = "y_values",
        values_to = "Overall"
      ) %>%
      dplyr::filter(y_values %in% statlist) %>%
      as.data.frame()

    collapsed[["y_values"]] <- factor(collapsed[["y_values"]],
      levels = label_lookup[["statlist"]],
      labels = label_lookup[["label"]]
    )

    collapsed <- collapsed %>%
      dplyr::arrange(y_values, time)

    final <- collapsed

    attr(final, "time_ticks") <- times
    attr(final, "title") <- "Overall"
    attr(final, "statlist") <- "Overall"
  }

  class(final) <- c("risktable", class(final))

  return(final)
}

#' @rdname get_risktable
#' @method get_risktable tidycuminc
#' @export
get_risktable.tidycuminc <- function(x,
                                     times = pretty(x$tidy$time, 10),
                                     statlist = "n.risk",
                                     label = NULL,
                                     group = c("strata", "statlist"),
                                     collapse = FALSE,
                                     ...) {
  # check for installation of tidycmprsk package
  rlang::check_installed("tidycmprsk", version = "0.1.1")
  group <- match.arg(group)

  # list of statistics and their default labels
  lst_stat_labels_default <- list(
    n.risk = "At Risk",
    n.event = "Events",
    n.censor = "Censored",
    cum.event = "Cum. Events",
    cum.censor = "Cum. Censored"
  )

  label <- .reconcile_statlist_and_labels(
    statlist = statlist,
    label = label,
    default_labels = lst_stat_labels_default
  )

  # named list of stats and labels
  lst_stat_labels <- as.list(label) %>% stats::setNames(statlist)

  tidy <- visr_tidy_tidycuminc(x, times = times) %>%
    dplyr::select(dplyr::any_of(c("time", "outcome", "strata", names(lst_stat_labels_default))))

  if (isTRUE(collapse)) {
    tidy <- tidy %>%
      dplyr::mutate(strata = "Overall") %>%
      dplyr::group_by(dplyr::across(dplyr::any_of(c("time", "outcome", "strata")))) %>%
      dplyr::mutate(
        dplyr::across(
          dplyr::any_of(c(
            "n.risk", "n.event", "cum.event",
            "n.censor", "cum.censor"
          )),
          ~ sum(., na.rm = TRUE)
        )
      ) %>%
      dplyr::filter(dplyr::row_number() == 1L) %>%
      dplyr::ungroup()
  }

  if (group == "strata" || isTRUE(collapse)) {
    strata_levels <- unique(tidy[["strata"]]) %>%
      sort() %>%
      as.character()

    result <- tidy %>%
      dplyr::select(dplyr::any_of(c(
        "time", "strata", "n.risk", "n.event",
        "cum.event", "n.censor", "cum.censor"
      ))) %>%
      tidyr::pivot_longer(cols = -c(.data[["time"]], .data[["strata"]])) %>%
      tidyr::pivot_wider(
        id_cols = c(.data[["time"]], .data[["name"]]),
        values_from = "value",
        names_from = "strata"
      ) %>%
      dplyr::relocate(dplyr::any_of(strata_levels), .after = dplyr::last_col()) %>%
      dplyr::mutate(
        y_values = dplyr::recode(.data[["name"]], !!!lst_stat_labels)
      ) %>%
      dplyr::filter(.data[["name"]] %in% .env[["statlist"]]) %>%
      dplyr::select(.data[["time"]], .data[["y_values"]], dplyr::everything(), -.data[["name"]]) %>%
      dplyr::mutate(y_values = factor(.data[["y_values"]], levels = .env[["label"]])) %>%
      dplyr::arrange(.data[["y_values"]], .data[["time"]]) %>%
      as.data.frame()

    attr(result, "title") <- names(result) %>% setdiff(c("time", "y_values"))
    attr(result, "statlist") <- names(result) %>% setdiff(c("time", "y_values"))
  } else if (group == "statlist") {
    result <- tidy %>%
      dplyr::select(.data[["time"]],
        y_values = .data[["strata"]],
        dplyr::any_of(c(
          "n.risk", "n.event", "cum.event",
          "n.censor", "cum.censor"
        ))
      ) %>%
      as.data.frame()

    attr(result, "statlist") <- names(lst_stat_labels_default[statlist])
    attr(result, "title") <- lst_stat_labels_default[statlist] %>%
      unlist() %>%
      unname()
  }

  attr(result, "time_ticks") <- unique(result$time) %>% sort()
  class(result) <- c("risktable", class(result))
  result
}

.reconcile_statlist_and_labels <- function(statlist, label, default_labels) {
  # return label as is if length matches statlist
  if (!is.null(label) && length(statlist) == length(label)) {
    return(label)
  }

  # initialize empty vector if NULL
  if (is.null(label)) {
    label <- character(0L)
  }

  # replace labels with defaults if not passed by user
  for (i in seq_along(statlist)) {
    label[i] <- dplyr::coalesce(
      label[i],
      default_labels[[statlist[i]]] %||% NA_character_,
      statlist[i]
    )
  }

  return(label[seq_along(statlist)])
}
/scratch/gouwar.j/cran-all/cranData/visR/R/get_risktable.R
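# Illustrative usage sketch, not part of the package source: `group=` decides
# whether rows are statistics within one table per stratum ("strata", the
# default) or strata within one table per statistic ("statlist"); `collapse=`
# pools the strata into a single "Overall" table.
library(visR)
km <- visR::estimate_KM(data = adtte, strata = "TRTP")
rt <- visR::get_risktable(
  km,
  times    = c(0, 50, 100, 150),
  statlist = c("n.risk", "n.event"),
  group    = "statlist"
)
rt
attr(rt, "title") # labels used when the table is drawn under a plot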
#' @title Summarize the descriptive statistics across strata from a survival object using S3 method
#'
#' @description S3 method for extracting descriptive statistics across strata.
#' No default method is available at the moment.
#'
#' @param x An object of class `survfit`
#' @param statlist Character vector containing the desired information to be displayed. The order of the arguments determines the order in which
#'   they are displayed in the final result. Default is the strata ("strata"), number of subjects ("records"), number of events ("events"),
#'   the median survival time ("median"), the Confidence Interval ("CI"), the Lower Confidence Limit ("LCL") and the Upper Confidence Limit ("UCL").
#' @param ... other arguments passed on to the method
#'
#' @return list of summary statistics from survfit object
#'
#' @rdname get_summary
#' @export
#'
get_summary <- function(x, ...) {
  UseMethod("get_summary", x)
}

#' @examples
#'
#' survfit_object <- survival::survfit(data = adtte, survival::Surv(AVAL, 1 - CNSR) ~ TRTP)
#' get_summary(survfit_object)
#'
#' @return A data frame with summary measures from a `survfit` object
#'
#' @rdname get_summary
#' @method get_summary survfit
#' @export
#'
get_summary.survfit <- function(x,
                                statlist = c("strata", "records", "events", "median", "LCL", "UCL", "CI"),
                                ...) {
  # User input validation ------------------------------------------------------
  statlist <- unique(statlist)
  if (is.null(statlist) |
    !base::all(statlist %in% c("strata", "records", "events", "median", "LCL", "UCL", "CI"))) {
    stop("Error in get_summary: Specify valid `statlist` arguments. Valid `statistic` arguments are: `strata`, `records`, `events`, `median`, `LCL`, `UCL` and `CI`.")
  }

  # Adjust CI based on conf.int in survfit ---------------------------------------
  if ("conf.int" %in% names(x) &
    x[["conf.type"]] != "none" &
    ((base::any(grepl("CL", statlist, fixed = TRUE))) |
      (base::any(grepl("CI", statlist, fixed = TRUE))))) {
    CI <- paste0(x[["conf.int"]], statlist[grepl("CL", statlist, fixed = TRUE)])
    statlist[grepl("CL", statlist, fixed = TRUE)] <- CI

    CI_Varname <- paste0(x[["conf.int"]], statlist[grepl("CI", statlist, fixed = TRUE)])
    statlist[grepl("CI", statlist, fixed = TRUE)] <- CI_Varname
  } else if (!"conf.int" %in% names(x) | x[["conf.type"]] == "none") {
    statlist <- statlist[-which(grepl("CL", statlist, fixed = TRUE))]
    statlist <- statlist[-which(grepl("CI", statlist, fixed = TRUE))]
    warning("No conf.int estimated in x.")
  } else if (!base::any(statlist %in% c("LCL", "UCL", "CI"))) {
    CI_Varname <- "conf.in"
  }

  # Summary list ------------------------------------------------------------------
  if ("strata" %in% names(x)) {
    strata <- names(x[["strata"]])
  } else {
    strata <- "Overall"
  }

  .CIpaste <- function(df) {
    if (base::any(grepl("CI", statlist, fixed = TRUE))) {
      paste0("(", apply(dplyr::select(df, matches(CI)), 1, paste, collapse = ";"), ")")
    } else {
      NULL
    }
  }

  summary_survfit <- as.data.frame(
    base::rbind(summary(x)[["table"]]),
    check.names = FALSE,
    stringsAsFactors = FALSE,
    row.names = NULL
  ) %>%
    dplyr::mutate(strata = strata) %>%
    dplyr::mutate(!!CI_Varname := .CIpaste(.)) %>%
    dplyr::select(dplyr::all_of(statlist))

  # Output to dataframe --------------------------------------------------------------
  statlist <- base::sub("records", "No. of subjects", statlist, fixed = TRUE)
  statlist <- base::sub("events", "No. of events", statlist, fixed = TRUE)
  statlist <- base::sub("median", "Median(surv.time)", statlist, fixed = TRUE)
  colnames(summary_survfit) <- statlist

  return(summary_survfit)
}
/scratch/gouwar.j/cran-all/cranData/visR/R/get_summary.R
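# Illustrative usage sketch, not part of the package source: the statlist
# entries are renamed on output, e.g. "records" becomes "No. of subjects" and
# "median" becomes "Median(surv.time)"; "CI" is expanded using the fit's
# conf.int level.
library(visR)
km <- visR::estimate_KM(data = adtte, strata = "TRTP")
visR::get_summary(km, statlist = c("strata", "records", "events", "median", "CI"))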
#' @title Calculate summary statistics
#'
#' @description
#' `r lifecycle::badge("questioning")`
#' S3 method for creating a table of summary statistics.
#' The summary statistics can be used for presentation in tables such as table one or baseline and demography tables.
#'
#' The summary statistics estimated are conditional on the variable type: continuous, binary, categorical, etc.
#'
#' By default the following summary stats are calculated:
#' * Numeric variables: mean, min, 25th-percentile, median, 75th-percentile, maximum, standard deviation
#' * Factor variables: proportion of each factor level in the overall dataset
#' * Default: number of unique values and number of missing values
#'
#' @param data The dataset to summarize as dataframe or tibble
#' @param strata Stratifying/Grouping variable name(s) as character vector. If NULL, only overall results are returned
#' @param overall If TRUE, the summary statistics for the overall dataset are also calculated
#' @param summary_function A function defining summary statistics for numeric and categorical values
#' @details It is possible to provide your own summary function. Please have a look at summary for inspiration.
#'
#' @note All columns in the table will be summarized. If only some columns shall be used, please select only those
#'   variables prior to creating the summary table by using dplyr::select()
#'
#' @examples
#'
#' # Example using the ovarian data set
#'
#' survival::ovarian %>%
#'   dplyr::select(-fustat) %>%
#'   dplyr::mutate(
#'     age_group = factor(
#'       dplyr::case_when(
#'         age <= 50 ~ "<= 50 years",
#'         age <= 60 ~ "<= 60 years",
#'         age <= 70 ~ "<= 70 years",
#'         TRUE ~ "> 70 years"
#'       )
#'     ),
#'     rx = factor(rx),
#'     ecog.ps = factor(ecog.ps)
#'   ) %>%
#'   dplyr::select(age, age_group, everything()) %>%
#'   visR::get_tableone()
#'
#' # Examples using ADaM data
#'
#' # display patients in an analysis set
#' adtte %>%
#'   dplyr::filter(SAFFL == "Y") %>%
#'   dplyr::select(TRTA) %>%
#'   visR::get_tableone()
#'
#' ## display overall summaries for demog
#' adtte %>%
#'   dplyr::filter(SAFFL == "Y") %>%
#'   dplyr::select(AGE, AGEGR1, SEX, RACE) %>%
#'   visR::get_tableone()
#'
#' ## By actual treatment
#' adtte %>%
#'   dplyr::filter(SAFFL == "Y") %>%
#'   dplyr::select(AGE, AGEGR1, SEX, RACE, TRTA) %>%
#'   visR::get_tableone(strata = "TRTA")
#'
#' ## By actual treatment, without overall
#' adtte %>%
#'   dplyr::filter(SAFFL == "Y") %>%
#'   dplyr::select(AGE, AGEGR1, SEX, EVNTDESC, TRTA) %>%
#'   visR::get_tableone(strata = "TRTA", overall = FALSE)
#'
#' @rdname get_tableone
#' @export
#'
get_tableone <- function(data, strata = NULL, overall = TRUE, summary_function = summarize_short) {
  UseMethod("get_tableone")
}

#' @rdname get_tableone
#' @method get_tableone default
#' @return object of class tableone. That is a list of data specified summaries
#'   for all input variables.
#' @export
get_tableone.default <- function(data, strata = NULL, overall = TRUE, summary_function = summarize_short) {
  summary_FUN <- match.fun(summary_function)

  if (overall & !is.null(strata)) {
    overall_table1 <- get_tableone(data, strata = NULL, overall = FALSE, summary_function = summary_function) %>%
      dplyr::filter(!(variable %in% strata))
    combine_dfs <- TRUE
  } else {
    combine_dfs <- FALSE
  }

  if (is.null(strata)) {
    data <- data %>%
      dplyr::mutate(all = "Total")
    strata <- c("all")
  }

  data <- data %>%
    dplyr::group_by(!!!dplyr::syms(strata))

  data_ns <- data %>%
    dplyr::summarise(summary = dplyr::n()) %>%
    tidyr::pivot_wider(names_from = dplyr::any_of(strata), values_from = "summary") %>%
    dplyr::mutate(variable = "Sample", summary_id = "N")

  data_summary <- data %>%
    dplyr::summarise_all(summary_FUN) %>%
    dplyr::ungroup() %>%
    tidyr::pivot_longer(cols = setdiff(names(.), strata), names_to = "variable", values_to = "summary") %>%
    tidyr::unnest_longer(summary) %>%
    tidyr::pivot_wider(names_from = dplyr::any_of(strata), values_from = "summary")

  data_table1 <- rbind(data_ns, data_summary) %>%
    dplyr::rename(statistic = summary_id) %>%
    dplyr::select(variable, statistic, everything())

  if (overall & combine_dfs) {
    data_table1 <- overall_table1 %>%
      dplyr::left_join(data_table1, by = c("variable", "statistic"))
  }

  class(data_table1) <- c("tableone", class(data_table1))

  return(data_table1)
}
/scratch/gouwar.j/cran-all/cranData/visR/R/get_tableone.R
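# Illustrative usage sketch, not part of the package source: with `strata=` and
# `overall = TRUE` (the default), the overall column is computed first and then
# joined to the per-stratum columns by variable and statistic.
library(visR) # attaches adtte and the re-exported %>%
adtte %>%
  dplyr::filter(SAFFL == "Y") %>%
  dplyr::select(AGE, SEX, TRTA) %>%
  visR::get_tableone(strata = "TRTA", overall = TRUE)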
#' \code{visR} package
#'
#' @docType package
#' @name visR Global
#' @keywords internal
#'
#' @description Set global variables
NULL

## Quiets concerns of R CMD check re: the .'s that appear in pipelines
## For "visible binding note solution" see
## here \href{https://github.com/STAT545-UBC/Discussion/issues/451}
## TODO: move to more maintainable solution
if (getRversion() >= "2.15.1") {
  utils::globalVariables(
    c(
      "est.lower", "est.upper", "strata", "n.censor", "time", "est", "unit",
      "LegendWidth", "any_of", "y_values", "statistic", "quantile", "surv",
      "std.err", "lower", "upper", "variable", "value", ".", "qmax", "tmax",
      "tmin", "quantity", "vmin", "n.risk", "label.y", "label.x", "min_time",
      "n.event", "values", "matches", ":=", "summary_id", "everything",
      "rename_at", "vars", "select", "N", "median", "sd", "asis_output",
      "write.table", "n.risk", "n.event", "n_distinct", "Remaining N",
      "Excluded N", "Criteria", "Condition"
    )
  )
}
/scratch/gouwar.j/cran-all/cranData/visR/R/global.R
# dplyr ------------------------------------------------------------------------

#' @export
#' @importFrom dplyr %>%
dplyr::`%>%`

# survival ---------------------------------------------------------------------

#' @export
#' @importFrom survival Surv
survival::Surv
/scratch/gouwar.j/cran-all/cranData/visR/R/reexports.R
#' @title Render a data.frame, risktable, or tableone object as a table
#'
#' @description
#' `r lifecycle::badge("questioning")`
#' Render a previously created data.frame, tibble or tableone object to html, rtf or latex
#'
#' @param data Input data.frame or tibble to visualize
#' @param title Specify the title as a text string to be displayed in the rendered table.
#'   Default is no title.
#' @param datasource String specifying the data source underlying the data set.
#'   Default is no title.
#' @param footnote String specifying additional information to be displayed as a footnote
#'   alongside the data source and specifications of statistical tests.
#' @param output_format Type of output that is returned, can be "html" or "latex".
#'   Default is "html".
#' @param engine If "html" is selected as `output_format`, one can choose between
#'   using `kable`, `gt` and `DT` as engine to render the output table. Default is "gt".
#' @param download_format Options formats generated for downloading the data.
#'   Default is a list "c('copy', 'csv', 'excel')".
#'
#' @return A table data structure with possible interactive functionality depending on the choice of the engine.
#' @rdname render
#' @export
render <- function(data,
                   title = "",
                   datasource,
                   footnote = "",
                   output_format = "html",
                   engine = "gt",
                   download_format = c("copy", "csv", "excel")) {
  UseMethod("render")
}

#' @inheritParams render
#' @export
#' @method render tableone
#'
render.tableone <- function(data,
                            title,
                            datasource,
                            footnote = "",
                            output_format = "html",
                            engine = "gt",
                            download_format = NULL) {
  if (!inherits(data, "tableone")) {
    stop("Please provide a valid `tableone` object.")
  }

  if (missing(title)) {
    stop("Please provide a valid `title`.")
  }

  if (missing(datasource)) {
    stop("Please provide a valid `datasource`.")
  }

  if (!(output_format %in% c("html", "latex"))) {
    stop("Invalid output_format. Currently, 'html' and 'latex' are supported.")
  } else if (output_format == "latex" & !(engine %in% c("gt", "kable"))) {
    stop("Currently, 'latex' output is only implemented with 'gt' or 'kable' as a table engine.")
  }

  download_formats <- c()
  if (engine %in% c("dt", "datatable", "datatables")) {
    if ("copy" %in% download_format) {
      download_formats <- c(download_formats, "copy")
    }
    if ("csv" %in% download_format) {
      download_formats <- c(download_formats, "csv")
    }
    if ("excel" %in% download_format) {
      download_formats <- c(download_formats, "excel")
    }

    for (f in download_format) {
      if (!(is.null(f)) & !(f %in% c("copy", "csv", "excel"))) {
        warning("Currently, only 'copy', 'csv' and 'excel' are supported as 'download_format'.")
      }
    }
  } else {
    if (!is.null(download_format)) {
      warning("Currently, 'download_format' is only supported for the following engines: dt, datatable, datatables.")
    }
  }

  if (!inherits(data, "risktable")) {
    sample <- data[data$variable == "Sample", ]
    sample <- sample[3:length(sample)]
    sample_names <- colnames(sample)
    new_sample_names <- sapply(1:length(sample), function(i) {
      vec <- c(sample_names[i], " (N=", sample[i], ")")
      paste(vec, collapse = "")
    })
    colnames(data) <- c(colnames(data)[1:2], new_sample_names)
    data <- data[data$variable != "Sample", ]
  }

  render.data.frame(
    data,
    title,
    datasource,
    footnote,
    output_format,
    engine,
    download_format
  )
}

#' @inheritParams render
#' @export
#'
#' @method render risktable
render.risktable <- function(data,
                             title,
                             datasource,
                             footnote = "",
                             output_format = "html",
                             engine = "gt",
                             download_format = NULL) {
  if (!inherits(data, "risktable")) {
    stop("Please provide a valid `risktable` object.")
  } else {
    # Many tidyr operations don't work on non-standard class objects. Therefore,
    # we remove the class and add it back later.
    class(data) <- class(data)[class(data) != "risktable"]
  }

  if (missing(title)) {
    stop("Please provide a valid `title`.")
  }

  if (missing(datasource)) {
    stop("Please provide a valid `datasource`.")
  }

  strata <- colnames(data)[3:ncol(data)]

  if (!is.null(attributes(data)$title) &
    length(attributes(data)$title) == length(strata)) {
    data <- data %>%
      dplyr::rename_at(dplyr::vars(strata), ~ attributes(data)$title)
    strata <- colnames(data)[3:ncol(data)]
  }

  y_lables <- unique(data$y_values)
  coln <- colnames(data)[1:2]

  complete_tab <- c()
  for (s in strata) {
    tab <- data[c(coln, s)] %>%
      tidyr::pivot_wider(names_from = "time", values_from = s)
    tab$variable <- s
    complete_tab <- rbind(complete_tab, tab)
  }

  colnames(complete_tab) <- c("statistic", colnames(tab)[2:ncol(tab)])

  class(complete_tab) <- c("tableone", class(complete_tab))
  class(complete_tab) <- c("risktable", class(complete_tab))

  complete_tab <- complete_tab %>%
    dplyr::select(variable, statistic, dplyr::everything())

  render.tableone(
    complete_tab,
    title,
    datasource,
    footnote,
    output_format,
    engine,
    download_format
  )
}

#' @inheritParams render
#'
#' @method render data.frame
#' @export
render.data.frame <- function(data,
                              title,
                              datasource,
                              footnote = "",
                              output_format = "html",
                              engine = "gt",
                              download_format = c("copy", "csv", "excel")) {
  # TODO: add code for rtf output
  # TODO: do we need a routine for falling back on minimal?
  # TODO: do we need features to further specify styling of the table?

  check_rendering_input(output_format, engine)

  # Kable output
  if (tolower(engine) == "kable") {
    if (tolower(output_format) %in% c("html", "latex")) {
      table_out <- data %>%
        knitr::kable(
          format = output_format,
          caption = title,
          digits = 2,
          booktabs = TRUE
        ) %>%
        kableExtra::collapse_rows(valign = "top") %>%
        kableExtra::footnote(
          general = footnote,
          general_title = "Additional Note:"
        ) %>%
        kableExtra::footnote(
          general = datasource,
          general_title = "Data Source:"
        )
    } else {
      # Currently can't be triggered due to check_rendering_input()
      # Uncommented for possible later use with rtf
      # warning(paste("Supported output format of the kable engine are html and latex and not", output_format, " - falling back to html"))
      # render(data = data,
      #        title = title,
      #        datasource = datasource,
      #        output_format = "html",
      #        engine = engine,
      #        download_format = download_format)
    }
  }

  #--------------------
  # GT output
  else if (tolower(engine) == "gt") {
    if (!tolower(output_format) %in% c("html", "latex")) {
      # Currently can't be triggered due to check_rendering_input()
      # Uncommented for possible later use with rtf
      # warning(paste("Supported output format of the gt engine are html and latex and not", output_format, " - falling back to html"))
    }

    table_out <- render_gt(data = data, title = title, datasource = datasource, footnote = footnote)

    if (output_format == "latex") {
      # note: after this step, the table is not a gt object anymore and thus cannot be further styled
      table_out <- table_out %>%
        gt::as_latex()
    }
  }

  #--------------------
  # jQuery DT output
  else if (tolower(engine) %in% c("dt", "datatables", "datatable")) {
    if (!tolower(output_format) %in% c("html")) {
      warning(paste(
        "DT engine only supports html output and not", output_format,
        "- falling back to html. Please pick a different engine to create other outputs"
      ))
    }

    # WIP: we may want to create a custom container to allow for stratification of more than one value and merge cells in the description
    # sketch = htmltools::withTags(table(
    #   DT::tableHeader(colnames(data)),
    #   DT::tableFooter(paste("Source:", datasource))
    # ))

    caption_datasource <- paste(" var caption = 'Data Source:", datasource, "';")

    source_cap <- c(
      "function(settings){",
      "  var datatable = settings.oInstance.api();",
      "  var table = datatable.table().node();",
      "  var n_captions = $(table).find('caption').length + 1;",
      caption_datasource,
      "  if (n_captions <= 1) { $(table).append('<caption style=\"caption-side: bottom\">' + caption + '</caption>')};",
      "}"
    )

    # may need some adjustment to also allow creation of DT in loops
    table_out <- render_datatable(data, title, download_format, source_cap)
  }

  #--------------------

  return(table_out)
}

#' @title Experimental internal function to help render a data.frame, risktable or tableone object as a datatable
#'
#' @description
#' `r lifecycle::badge("questioning")`
#' Render a previously created datatable to html, rtf or latex
#' @param data Input data.frame or tibble to visualize
#' @param title Specify the title as a text string to be displayed in the rendered table.
#'   Default is no title.
#' @param download_format Options formats generated for downloading the data.
#'   Default is a list "c('copy', 'csv', 'excel')".
#' @param source_cap String automatically compiled of data source and captions.
#' @return A table data structure with possible interactive functionality depending on the choice of the engine.
#' @rdname render_datatable
#' @noRd
render_datatable <- function(data, title, download_format, source_cap) {
  UseMethod("render_datatable")
}

#' @inheritParams render_datatable
#'
#' @method render_datatable tableone
#' @noRd
render_datatable.tableone <- function(data, title, download_format, source_cap) {
  if (is.null(download_format)) {
    table_out <- data %>%
      DT::datatable(
        caption = title,
        filter = "none",
        # container = sketch,
        options = list(
          paging = FALSE,
          ordering = FALSE,
          info = FALSE,
          drawCallback = DT::JS(source_cap)
        )
      )
  } else {
    table_out <- data %>%
      DT::datatable(
        caption = title,
        filter = "none",
        # container = sketch,
        extensions = "Buttons",
        options = list(
          paging = FALSE,
          info = FALSE,
          ordering = FALSE,
          drawCallback = DT::JS(source_cap),
          dom = "Bfrtip",
          buttons = download_format
        )
      )
  }

  return(table_out)
}

#' @inheritParams render_datatable
#'
#' @method render_datatable data.frame
#' @noRd
render_datatable.data.frame <- function(data, title, download_format, source_cap) {
  if (is.null(download_format)) {
    table_out <- data %>%
      DT::datatable(
        caption = title,
        options = list(
          drawCallback = DT::JS(source_cap)
        )
      )
  } else {
    table_out <- data %>%
      DT::datatable(
        caption = title,
        extensions = "Buttons",
        options = list(
          drawCallback = DT::JS(source_cap),
          dom = "Bfrtip",
          buttons = download_format
        )
      )
  }

  return(table_out)
}

#' @title Experimental function to render to a gt table.
#'
#' @description Render a previously created datatable to gt
#' `r lifecycle::badge("questioning")`
#' @param data Input data.frame or tibble to visualize
#' @param title Specify the title as a text string to be displayed in the rendered table.
#'   Default is no title.
#' @param datasource String specifying the data source underlying the data set.
#'   Default is no title.
#' @param footnote String specifying additional information to be displayed as a footnote
#'   alongside the data source and specifications of statistical tests.
#' @return A gt object.
#' @rdname render_gt
#' @noRd
render_gt <- function(data, title, datasource, footnote) {
  # identify numeric columns for special formatting later
  numcols <- data %>%
    dplyr::select_if(is.numeric) %>%
    names()

  # create gt table
  table_out <- data %>%
    get_gt(numcols) %>%
    gt::fmt_number(
      columns = numcols,
      decimals = 2
    ) %>%
    add_metadata_gt(
      title = title,
      datasource = datasource,
      footnote = footnote
    )

  return(table_out)
}

#' @title Internal function Get gt object
#' @description Internal function Get gt object for tableone
#' `r lifecycle::badge("questioning")`
#'
#' @param data input data set
#' @param numcols number of columns
#' @noRd
#' @return gt object
get_gt <- function(data, numcols) {
  UseMethod("get_gt")
}

#' @inheritParams get_gt
#'
#' @method get_gt tableone
#' @noRd
get_gt.tableone <- function(data, numcols) {
  gt <- gt::gt(data,
    groupname_col = "variable",
    rowname_col = "statistic"
  ) %>%
    # no decimal points for sample count
    gt::fmt_number(
      columns = numcols,
      rows = grepl("^N$", statistic),
      decimals = 0
    )

  return(gt)
}

#' @inheritParams get_gt
#'
#' @method get_gt data.frame
#'
get_gt.data.frame <- function(data, numcols) {
  gt <- gt::gt(data)

  return(gt)
}

#' @title Internal function to add metadata to a gt object
#' @description Internal function to add metadata to a gt object
#' `r lifecycle::badge("questioning")`
#'
#' @param gt input gt object
#' @param title Specify the title as a text string to be displayed in the rendered table.
#'   Default is no title.
#' @param datasource String specifying the data source underlying the data set.
#'   Default is no title.
#' @param footnote String specifying additional information to be displayed as a footnote #' alongside the data source and specifications of statistical tests. #' @noRd #' @return gt object add_metadata_gt <- function(gt, title, datasource, footnote) { table_out <- gt %>% gt::tab_header(title = title) table_out <- table_out %>% # add metadata gt::tab_source_note(source_note = paste("Data Source:", datasource)) %>% gt::tab_source_note(source_note = footnote) %>% # add formatting gt::tab_options(data_row.padding = gt::px(4)) return(table_out) } #' @title Internal function to check if the input works #' `r lifecycle::badge("questioning")` #' @param output_format format for output i.e. html #' @param engine engine to render output. #' @noRd #' @return Warning message check_rendering_input <- function(output_format = NULL, engine = NULL) { if (missing(output_format) | is.null(output_format) | missing(engine) | is.null(engine)) { stop("Please provide an output_format and an engine.") } # stop if output format is not supported if (!tolower(output_format) %in% c("html", "latex")) { # "rtf", stop(paste("Currently supported output formats are html and latex.", output_format, "is not yet supported.")) } # stop if engine format is not supported if (!tolower(engine) %in% c("kable", "gt", "dt", "datatables", "datatable")) { stop(paste("Currently implemented output engines are kable, gt and jquery datatables (DT).", engine, "is not yet supported.")) } }
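# Illustrative usage sketch of the `render()` generic above (added for exposition;
# not part of the original visR source). It assumes the packaged `adtte` dataset and
# the exported `get_tableone()` helper. Wrapped in `if (FALSE)` so it never executes
# when the file is sourced.
if (FALSE) {
  tbl <- visR::get_tableone(adtte[, c("AGE", "SEX", "TRTA")], strata = "TRTA")

  # default: gt output
  visR::render(tbl, title = "Demographics", datasource = "ADTTE")

  # jQuery DataTables output with download buttons
  visR::render(tbl,
    title = "Demographics", datasource = "ADTTE",
    engine = "DT", download_format = c("copy", "csv")
  )
}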
/scratch/gouwar.j/cran-all/cranData/visR/R/render.R
#' Step ribbon statistic
#'
#' @description
#' `r lifecycle::badge("experimental")`
#' Provides stair-step values for ribbon plots, often used in
#' conjunction with `ggplot2::geom_step()`.
#' The step ribbon can be added with `stat_stepribbon()` or
#' identically with `ggplot2::geom_ribbon(stat = "stepribbon")`
#'
#' @name stat_stepribbon
#' @inheritParams ggplot2::geom_ribbon
#' @param geom which geom to use; defaults to "`ribbon`"
#' @param direction `hv` for horizontal-vertical steps, `vh` for
#' vertical-horizontal steps
#' @references [https://groups.google.com/forum/?fromgroups=#!topic/ggplot2/9cFWHaH1CPs]()
#' @return a ggplot
#' @examples
#' # using ggplot2::geom_ribbon()
#' survival::survfit(survival::Surv(time, status) ~ 1, data = survival::lung) %>%
#'   survival::survfit0() %>%
#'   broom::tidy() %>%
#'   ggplot2::ggplot(ggplot2::aes(x = time, y = estimate, ymin = conf.low, ymax = conf.high)) +
#'   ggplot2::geom_step() +
#'   ggplot2::geom_ribbon(stat = "stepribbon", alpha = 0.2)
#'
#' # using stat_stepribbon() with the same result
#' survival::survfit(survival::Surv(time, status) ~ 1, data = survival::lung) %>%
#'   survival::survfit0() %>%
#'   broom::tidy() %>%
#'   ggplot2::ggplot(ggplot2::aes(x = time, y = estimate, ymin = conf.low, ymax = conf.high)) +
#'   ggplot2::geom_step() +
#'   visR::stat_stepribbon(alpha = 0.2)
NULL

#' @rdname stat_stepribbon
#' @export
stat_stepribbon <- function(mapping = NULL,
                            data = NULL,
                            geom = "ribbon",
                            position = "identity",
                            na.rm = FALSE,
                            show.legend = NA,
                            inherit.aes = TRUE,
                            direction = "hv",
                            ...) {
  ggplot2::layer(
    data = data,
    mapping = mapping,
    stat = visR::StatStepribbon,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      na.rm = na.rm,
      direction = direction,
      ...
    )
  )
}

#' @rdname stat_stepribbon
#' @export
StatStepribbon <- ggplot2::ggproto(
  "StatStepRibbon",
  ggplot2::Stat,
  required_aes = c("x", "ymin", "ymax"),
  compute_group = function(data, scales, direction = "hv",
                           yvars = c("ymin", "ymax"), ...) {
    stairstepn(data = data, direction = direction, yvars = yvars)
  }
)

stairstepn <- function(data, direction = "hv", yvars = "y") {
  direction <- match.arg(direction, c("hv", "vh"))
  data <- as.data.frame(data)[order(data$x), ]
  n <- nrow(data)

  if (direction == "vh") {
    xs <- rep(1:n, each = 2)[-2 * n]
    ys <- c(1, rep(2:n, each = 2))
  } else {
    ys <- rep(1:n, each = 2)[-2 * n]
    xs <- c(1, rep(2:n, each = 2))
  }

  data.frame(
    x = data$x[xs],
    data[ys, yvars, drop = FALSE],
    data[xs, setdiff(names(data), c("x", yvars)), drop = FALSE]
  )
}
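# Illustrative sketch (added for exposition; not part of the original source):
# the `direction` argument controls whether the ribbon steps drop after ("hv")
# or before ("vh") each x value. Assumes the survival and broom packages, as in
# the roxygen examples above. Wrapped in `if (FALSE)` so it never executes.
if (FALSE) {
  tidy_km <- broom::tidy(
    survival::survfit(survival::Surv(time, status) ~ 1, data = survival::lung)
  )

  ggplot2::ggplot(
    tidy_km,
    ggplot2::aes(x = time, y = estimate, ymin = conf.low, ymax = conf.high)
  ) +
    ggplot2::geom_step(direction = "vh") +
    visR::stat_stepribbon(direction = "vh", alpha = 0.2)
}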
/scratch/gouwar.j/cran-all/cranData/visR/R/stat_stepribbon.R
#' @title Display a summary Table (i.e. table one) #' #' @description #' `r lifecycle::badge("questioning")` #' Wrapper function to produce a summary table (i.e. Table One). #' Create and render a summary table for a dataset. #' A typical example of a summary table are "table one", the first table in an applied medical research manuscript. #' #' Calculate summary statistics and present them in a formatted table #' #' @param data The dataframe or tibble to visualize #' @param title Table title to include in the rendered table. Input is a text string. #' @param footnote Table footnote to include in the rendered table. Input is a text string. #' @param datasource String specifying the datasource underlying the data set #' @param strata Character vector with column names to use for #' stratification in the summary table. Default: NULL , which indicates no stratification. #' @param overall If TRUE, the summary statistics for the overall dataset are also calculated #' @param summary_function A function defining summary statistics for numeric and categorical values #' Pre-implemented functions are summarize_long and summarize_short #' @param ... Pass options to render_table #' #' @section Example Output: #' \if{html}{tableone(engine = "gt")} #' #' \if{html}{\figure{tableone_gt_ex.png}{options: width=65\%}} #' #' \if{html}{tableone(engine = "DT")} #' #' \if{html}{\figure{tableone_DT_ex.png}{options: width=65\%}} #' #' \if{html}{tableone(engine = "kable")} #' #' \if{html}{\figure{tableone_kable_html_ex.png}{options: width=65\%}} #' #' \if{html}{tableone(engine = "kable", output_format = "latex")} #' #' \if{html}{\figure{tableone_kable_latex_ex.png}{options: width=65\%}} #' #' @examples #' #' # metadata for table #' t1_title <- "Cohort Summary" #' t1_ds <- "ADaM Interim Dataset for Time-to-Event Analysis" #' t1_fn <- "My table one footnote" #' #' #' ## table by treatment - without overall and render with GT #' tbl_gt <- #' adtte %>% #' dplyr::filter(SAFFL == "Y") %>% #' dplyr::select(AGE, AGEGR1, SEX, EVNTDESC, TRTA) %>% #' visR::tableone( #' strata = "TRTA", #' overall = FALSE, #' title = t1_title, #' datasource = t1_ds, #' footnote = t1_fn, #' engine = "gt" #' ) #' #' ## table by treatment - without overall and render with DT #' tbl_DT <- #' adtte %>% #' dplyr::filter(SAFFL == "Y") %>% #' dplyr::select(AGE, AGEGR1, SEX, EVNTDESC, TRTA) %>% #' visR::tableone( #' strata = "TRTA", #' overall = FALSE, #' title = t1_title, #' datasource = t1_ds, #' footnote = t1_fn, #' engine = "DT" #' ) #' #' ## table by treatment - without overall and render with kable #' tbl_kable_html <- #' adtte %>% #' dplyr::filter(SAFFL == "Y") %>% #' dplyr::select(AGE, AGEGR1, SEX, EVNTDESC, TRTA) %>% #' visR::tableone( #' strata = "TRTA", #' overall = FALSE, #' title = t1_title, #' datasource = t1_ds, #' footnote = t1_fn, #' engine = "kable" #' ) #' #' ## table by treatment - without overall and render with kable as #' ## a latex table format rather than html #' tbl_kable_latex <- #' adtte %>% #' dplyr::filter(SAFFL == "Y") %>% #' dplyr::select(AGE, AGEGR1, SEX, EVNTDESC, TRTA) %>% #' visR::tableone( #' strata = "TRTA", #' overall = FALSE, #' title = t1_title, #' datasource = t1_ds, #' footnote = t1_fn, #' output_format = "latex", #' engine = "kable" #' ) #' #' @return A table-like data structure, possibly interactive depending on the choice of the engine #' #' @rdname tableone #' #' @export tableone <- function(data, title, datasource, footnote = "", # abbreviations = "", # variable_definitions = "", strata = NULL, overall = 
TRUE, summary_function = summarize_short, ... # engine = "gt" ) { tab1_rendered <- get_tableone(data, strata = strata, summary_function = summary_function, overall = overall ) %>% render( title = title, datasource = datasource, footnote = footnote, ... ) return(tab1_rendered) }
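# Illustrative sketch (added for exposition; not part of the original source):
# the `summary_function` argument switches between the compact and the long
# summaries defined in utils_table.R. Assumes the packaged `adtte` dataset.
# Wrapped in `if (FALSE)` so it never executes when the file is sourced.
if (FALSE) {
  visR::tableone(adtte[, c("AGE", "TRTA")],
    title = "Cohort summary (long format)",
    datasource = "ADTTE",
    strata = "TRTA",
    summary_function = visR::summarize_long
  )
}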
/scratch/gouwar.j/cran-all/cranData/visR/R/tableone.R
#' @title Extended tidy cleaning of selected objects using S3 method
#'
#' @description S3 method for extended tidying of selected model outputs. Note
#'   that the visR method retains the original nomenclature of the objects,
#'   and adds the one of broom::tidy to ensure compatibility with tidy workflows.
#'   The default method relies on \code{broom::tidy} to return a tidied object
#'
#' @seealso \code{\link[broom]{tidy}}
#'
#' @param x An S3 object
#' @param ... other arguments passed on to the method
#'
#' @examples
#'
#' ## Extended tidying for a survfit object
#' surv_object <- visR::estimate_KM(data = adtte, strata = "TRTA")
#' tidied <- visR::tidyme(surv_object)
#'
#' ## Tidyme for non-included classes
#' data <- cars
#' lm_object <- stats::lm(data = cars, speed ~ dist)
#' lm_tidied <- visR::tidyme(lm_object)
#' lm_tidied
#'
#' @return Data frame containing all list elements of the S3 object as columns.
#'   The column 'strata' is a factor to ensure that the strata are sorted
#'   in agreement with the order in the `survfit` object
#'
#' @rdname tidyme
#'
#' @export
tidyme <- function(x, ...) {
  UseMethod("tidyme")
}

#' @rdname tidyme
#' @method tidyme default
#' @export
tidyme.default <- function(x, ...) {
  base::message("tidyme S3 default method (broom::tidy) used.")
  return(as.data.frame(broom::tidy(x)))
}

#' @rdname tidyme
#' @method tidyme survfit
#' @export
tidyme.survfit <- function(x, ...) {
  if (inherits(x, "survfit")) {

    ## keep source
    survfit_object <- x

    ## Change class to perform list manipulations. The survfit class was throwing errors.
    class(x) <- ("list")

    ## Prepare for cleaning
    reps <- as.vector(length(x$time))

    ## Lists to vectors
    cleaner <- function(x) {
      if (length(x) == 1) {
        rep(x, reps)
      } else {
        x
      }
    }

    ## Cleanit: strata will always be filled out based off the estimation function from which it is called
    retme <-
      lapply(
        x[names(x) %in% c("n", "strata", "call", "data_name", "na.action", "strata_lbls") == FALSE],
        cleaner
      ) %>%
      dplyr::bind_rows() %>%
      dplyr::mutate(
        time = time,
        n.risk = as.integer(n.risk),
        n.event = as.integer(n.event),
        n.censor = as.integer(n.censor),
        call = list(x[["call"]]),
        estimate = surv,
        std.error = std.err,
        conf.low = lower,
        conf.high = upper
      )

    if (!is.null(x[["strata"]])) {
      retme[["strata"]] <- rep(names(x[["strata"]]), x[["strata"]])
      retme[["n.strata"]] <- rep(x[["n"]], x[["strata"]])
    }
  }

  attr(retme, "survfit_object") <- survfit_object

  strata <- .extract_strata_varlist(survfit_object)

  # modify strata label, removing ref to raw variable name
  if (!is.null(strata)) {
    for (stratum in strata) {
      retme[["strata"]] <- gsub(
        pattern = paste0(stratum, "="),
        replacement = "",
        x = retme[["strata"]],
        fixed = TRUE
      )
    }
  }

  retme[["strata"]] <- factor(retme[["strata"]], levels = unique(retme[["strata"]]))

  return(as.data.frame(retme))
}
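# Illustrative sketch (added for exposition; not part of the original source):
# the tidied survfit keeps the original survfit nomenclature (surv, lower, upper)
# next to the broom-style columns (estimate, conf.low, conf.high). Assumes the
# packaged `adtte` dataset. Wrapped in `if (FALSE)` so it never executes.
if (FALSE) {
  tidied <- visR::tidyme(visR::estimate_KM(data = adtte, strata = "TRTA"))
  head(tidied[, c("time", "surv", "estimate", "conf.low", "conf.high", "strata")])
}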
/scratch/gouwar.j/cran-all/cranData/visR/R/tidyme.R
#' Internal function to format pvalues.
#' @param x Input p-value. Numeric value.
#' @noRd
.pvalformat <- function(x) {
  if (x < 0.001) {
    "<0.001"
  } else if (x > 0.999) {
    ">0.999"
  } else {
    format(round(x, 3),
      nsmall = 3,
      justify = "right",
      width = 6,
      scientific = FALSE
    )
  }
}
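# Illustrative sketch (added for exposition; not part of the original source):
# expected formatting behaviour of the helper above. Wrapped in `if (FALSE)`
# so it never executes when the file is sourced.
if (FALSE) {
  .pvalformat(0.00004) # "<0.001"
  .pvalformat(0.04321) # " 0.043"
  .pvalformat(0.9999)  # ">0.999"
}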
/scratch/gouwar.j/cran-all/cranData/visR/R/utils_general.R
#' @title Find the "lhs" in the pipeline
#' @description This function finds the left-hand sided symbol in a magrittr pipe and returns it as a character.
#' @return Left-hand sided symbol as string in the magrittr pipe.
#' @references \url{https://github.com/tidyverse/magrittr/issues/115#issuecomment-173894787}
#'
#' @examples
#' blah <- function(x) the_lhs()
#' adtte %>%
#'   blah()
#' @export
#'
the_lhs <- function() {
  parents <- lapply(sys.frames(), parent.env)

  is_magrittr_env <-
    vapply(parents, identical, logical(1), y = environment(`%>%`))

  if (any(is_magrittr_env)) {
    left <- deparse(get("lhs", sys.frames()[[max(which(is_magrittr_env))]]))
  }

  return(as.character(gsub(" %.*$", "", left)))
}

#' @title Find the character that represents the data argument in a call list
#'
#' @description This function returns the character that represents the data argument in a call list.
#' @param call_list A list from a call
#' @return Character representing the data.
#' @noRd
.call_list_to_name <- function(call_list) {
  call_list[["data"]]
  if (length(base::deparse(call_list[["data"]])) == 1 &&
    deparse(call_list[["data"]]) %in% c(".", ".x", "..1")) {
    df <- the_lhs()
    call_list[["data"]] <- as.symbol(df) %>% as.character()
  } else {
    df <- as.character(sub("\\[.*$", "", deparse(call_list[["data"]]))[1])
  }
}

#' @title Is visR survfit?
#'
#' @return logical
#' @noRd
is_visr_survfit <- function(x) {
  # the visR survfit object saves a quosure instead of a call
  inherits(x, "survfit") && rlang::is_quosure(x$call)
}
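# Illustrative sketch (added for exposition; not part of the original source):
# `the_lhs()` recovers the name of the left-hand side of a magrittr pipe, which
# visR uses to keep traceability of the dataset name inside stored calls.
# Mirrors the roxygen example above; wrapped in `if (FALSE)` so it never executes.
if (FALSE) {
  capture_name <- function(x) the_lhs()
  adtte %>% capture_name() # returns "adtte"
}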
/scratch/gouwar.j/cran-all/cranData/visR/R/utils_pipe.R
#' @title Calculate summary statistics from a vector
#'
#' @description
#' `r lifecycle::badge("questioning")`
#' Calculates several summary statistics. The summary statistics
#' depend on the vector class
#'
#' @param x an object
#' @return A summarized version of the input.
#' @rdname summarize_long
#' @export
summarize_long <- function(x) {
  UseMethod("summarize_long")
}

#' @rdname summarize_long
#' @method summarize_long factor
#' @export
summarize_long.factor <- function(x) {
  x1 <- forcats::fct_explicit_na(x, na_level = "Missing")
  dat <- tibble::enframe(x1) %>%
    dplyr::group_by(value) %>%
    dplyr::summarise(N = dplyr::n()) %>%
    dplyr::mutate(`%` = round(100 * N / sum(N), 3)) %>%
    tidyr::pivot_wider(names_from = value, values_from = c("N", "%"), names_sep = " ") %>%
    as.list()
  list(dat)
}

#' @method summarize_long integer
#' @rdname summarize_long
#' @export
summarize_long.integer <- function(x) {
  summarize_long.numeric(x)
}

#' @method summarize_long numeric
#' @rdname summarize_long
#' @export
summarize_long.numeric <- function(x) {
  dat <- list(
    mean = mean(x, na.rm = TRUE),
    min = min(x, na.rm = TRUE),
    Q1 = quantile(x, probs = 0.25, na.rm = TRUE),
    median = median(x, na.rm = TRUE),
    Q3 = quantile(x, probs = 0.75, na.rm = TRUE),
    max = max(x, na.rm = TRUE),
    sd = sd(x, na.rm = TRUE)
  )
  list(dat)
}

#' @method summarize_long default
#' @rdname summarize_long
#' @export
summarize_long.default <- function(x) {
  dat <- list(
    unique_values = length(unique(x)),
    nmiss = sum(is.na(x))
  )
  list(dat)
}

#' @title Create abbreviated variable summary for table1
#'
#' @description
#' `r lifecycle::badge("questioning")`
#' This function combines multiple summary measures into a single
#' formatted string per statistic.
#' For numeric and integer variables it calculates the mean (standard deviation),
#' median (IQR), min-max range and the N/% of missing elements.
#'
#' @param x a vector to be summarized
#' @return A summarized, less detailed version of the input.
#' @rdname summarize_short
#' @export
summarize_short <- function(x) {
  UseMethod("summarize_short")
}

#' @method summarize_short factor
#' @rdname summarize_short
#' @export
summarize_short.factor <- function(x) {
  x1 <- forcats::fct_explicit_na(x, na_level = "Missing")
  dat <- tibble::enframe(x1) %>%
    dplyr::group_by(value) %>%
    dplyr::summarise(N = dplyr::n()) %>%
    dplyr::mutate(`n (%)` = paste0(N, " (", format(100 * N / sum(N), digits = 3, trim = TRUE), "%)")) %>%
    dplyr::select(-N) %>%
    tidyr::pivot_wider(names_from = value, values_from = c("n (%)"), names_sep = " ") %>%
    as.list()
  list(dat)
}

#' @method summarize_short numeric
#' @rdname summarize_short
#' @export
summarize_short.numeric <- function(x) {
  dat <- list(
    `Mean (SD)` = paste0(format(mean(x, na.rm = TRUE), digits = 3), " (", format(sd(x, na.rm = TRUE), digits = 3), ")"),
    `Median (IQR)` = paste0(
      format(median(x, na.rm = TRUE), digits = 3), " (",
      format(quantile(x, probs = 0.25, na.rm = TRUE), digits = 3), "-",
      format(quantile(x, probs = 0.75, na.rm = TRUE), digits = 3), ")"
    ),
    `Min-max` = paste0(format(min(x, na.rm = TRUE), digits = 3), "-", format(max(x, na.rm = TRUE), digits = 3)),
    Missing = paste0(
      format(sum(is.na(x)), digits = 3), " (",
      format(100 * sum(is.na(x)) / length(x), trim = TRUE, digits = 3), "%)"
    )
  )
  list(dat)
}

#' @method summarize_short integer
#' @rdname summarize_short
#' @export
summarize_short.integer <- function(x) {
  summarize_short.numeric(x)
}

#' @method summarize_short default
#' @rdname summarize_short
#' @export
summarize_short.default <- function(x) {
  dat <- list(
    `Unique values` = format(length(unique(x))),
    `Missing (%)` = paste0(format(sum(is.na(x))), " (", format(100 * sum(is.na(x)) / length(x), trim = TRUE), "%)")
  )
  list(dat)
}
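# Illustrative sketch (added for exposition; not part of the original source):
# the short summary returns one formatted string per statistic, while the long
# summary keeps the underlying numeric values. Assumes the packaged `adtte`
# dataset. Wrapped in `if (FALSE)` so it never executes.
if (FALSE) {
  visR::summarize_short(adtte$AGE) # "Mean (SD)", "Median (IQR)", "Min-max", "Missing"
  visR::summarize_long(adtte$AGE)  # numeric mean, min, Q1, median, Q3, max, sd
}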
/scratch/gouwar.j/cran-all/cranData/visR/R/utils_table.R
#' @title Align multiple ggplot graphs, taking into account the legend #' #' @description This function aligns multiple \code{ggplot} graphs by making them the same width by taking into account the legend width. #' #' @param pltlist A list of plots #' #' @return List of \code{ggplot} with equal width. #' #' @references \url{https://stackoverflow.com/questions/26159495} #' #' @examples #' \donttest{ #' #' ## create 2 graphs #' p1 <- ggplot2::ggplot(adtte, ggplot2::aes(x = as.numeric(AGE), fill = "Age")) + #' ggplot2::geom_histogram(bins = 15) #' #' p2 <- ggplot2::ggplot(adtte, ggplot2::aes(x = as.numeric(AGE))) + #' ggplot2::geom_histogram(bins = 15) #' #' ## default alignment does not take into account legend size #' cowplot::plot_grid( #' plotlist = list(p1, p2), #' align = "none", #' nrow = 2 #' ) #' #' ## align_plots() takes into account legend width #' cowplot::plot_grid( #' plotlist = visR::align_plots(pltlist = list(p1, p2)), #' align = "none", #' nrow = 2 #' ) #' } #' #' @export #' align_plots <- function(pltlist) { if (missing(pltlist) | is.null(pltlist)) { base::stop("Please provide a list of valid `ggplot` objects.") } for (plt in pltlist) { if (!inherits(plt, "ggplot")) { base::stop("Not all elements of the provided list are `ggplot` objects.") } } ### turn plots into grobs and determine amount of columns plots_grobs <- lapply(pltlist, ggplot2::ggplotGrob) ncols <- lapply(plots_grobs, function(x) dim(x)[[2]]) maxcols <- max(unlist(ncols)) ### Function to add more columns to compensate for eg missing legend .addcols <- function(x) { diffcols <- maxcols - dim(x)[[2]] if (diffcols > 0) { for (i in seq(1:diffcols)) { x <- gtable::gtable_add_cols(x, widths = grid::unit(1, "null"), pos = 8) } } x } ### TableGrob 1 has 11 columns while the others have only 9 because lacking legend+spacer ## => add two columns and then resize plots_grobs_xcols <- lapply(plots_grobs, .addcols) ### assign max length to ensure alignment max_width <- do.call(grid::unit.pmax, lapply(plots_grobs_xcols, "[[", "widths")) for (i in seq(1, length(plots_grobs_xcols))) { plots_grobs_xcols[[i]]$widths <- max_width } # ## The grob of the graph shrinks, but the Y-label inside a different grob remains at same location => move # grid.draw(plots.grobs[[1]]) # grid.draw(plots.grobs.xcols[[1]]) # # # Grob with graph is shronken and label does not move with it because it is in another grob # # Investigate which is the relative distance the graphgrob moved so we can move the label equally # # Key = spacer that broadened # # TableGrob has 11 columns, each with a width # plots.grobs[[1]] # at row 7 we have 4 grobs before we see grid background # plots.grobs[[1]]$widths xcol_widths <- grid::convertWidth( plots_grobs_xcols[[1]]$widths, unitTo = "cm", valueOnly = FALSE ) grob_widths <- grid::convertWidth( plots_grobs[[1]]$widths, unitTo = "cm", valueOnly = FALSE ) x <- xcol_widths[[4]] - grob_widths[[4]] plots_grobs_xcols[[1]]$grobs[[13]]$children[[1]]$x <- grid::unit(x, "cm") # grid.draw(plots.grobs.xcols[[1]]) return(plots_grobs_xcols) # ## layout without cowplot: rel. 
length is challenging to get right # layout <- cbind(seq(1:length(pltlist))) # # gridExtra::grid.arrange(grobs = plots.grobs.xcols, layout_matrix=layout) # ### old code # .LegendWidth <- function(x) # x$grobs[[8]]$grobs[[1]]$widths[[4]] # # plots.grobs <- lapply(pltlist, ggplot2::ggplotGrob) # max.widths <- # do.call(grid::unit.pmax, lapply(plots.grobs, "[[", "widths")) # legends.widths <- lapply(plots.grobs, .LegendWidth) # # max.legends.width <- # base::suppressWarnings(do.call(max, legends.widths)) # # plots.grobs.eq.widths <- lapply(plots.grobs, function(x) { # x$widths <- max.widths # x # }) # # plots.grobs.eq.widths.aligned <- # lapply(plots.grobs.eq.widths, function(x) { # if (gtable::is.gtable(x$grobs[[8]])) { # x$grobs[[8]] <- # gtable::gtable_add_cols(x$grobs[[8]], unit(abs(diff( # c(LegendWidth(x), max.legends.width) # )), "mm")) # } # x # }) # # plots.grobs.eq.widths.aligned } #' @title Get strata level combinations #' #' @description Extracts the strata level combinations from a survival objects without the specified strata. #' #' @param strata The strata from a survival object #' #' @return The strata level combinations from a survival objects without the specified strata. #' #' @keywords internal .get_strata <- function(strata) { x1 <- base::strsplit(as.character(strata), ",") x2 <- lapply(x1, FUN = function(x) base::sub(".*=", "", x)) x3 <- base::lapply(x2, FUN = function(x) base::paste0(x, collapse = ", ")) x4 <- base::unlist(trimws(x3, which = "both")) return(x4) } #' @title Translates options for legend into a list that can be passed to ggplot2 #' #' @description This function takes the legend position and orientation, defined by the user and puts them into a list for ggplot2. #' #' @param legend_position Default = "right". #' @param legend_orientation Default = NULL. #' #' @return List of legend options for ggplot2. legendopts <- function(legend_position = "right", legend_orientation = NULL) { ## replace default eg "h" if user specified something else .ucoalesce <- function(x, default) { ifelse(is.null(x), default, x) } showlegend <- TRUE if (is.character(legend_position)) { if (legend_position == "bottom") { leg_opts <- list( xanchor = "center", x = 0.5, y = -0.2, orientation = .ucoalesce(legend_orientation, "h") ) } else if (legend_position == "right") { leg_opts <- list( yanchor = "center", x = 1.2, y = 0.5, orientation = .ucoalesce(legend_orientation, "v") ) } else if (legend_position == "top") { leg_opts <- list( xanchor = "center", x = 0.5, y = 1.2, orientation = .ucoalesce(legend_orientation, "h") ) } else if (legend_position == "left") { leg_opts <- list( yanchor = "center", x = -1.0, y = 0.5, orientation = .ucoalesce(legend_orientation, "v") ) } else if (legend_position == "none") { showlegend <- FALSE leg_opts <- NULL } } else { if (length(legend_position) == 2) { leg_opts <- list( x = legend_position[1], y = legend_position[2] ) } else if (length(legend_position) > 2) { warning("The provided vector for the legend position contains more than 2 elements, only using the first two.") leg_opts <- list( x = legend_position[1], y = legend_position[2] ) } else { stop("Invalid argument for 'legend_position'. 
Use 'bottom', 'right', 'top', 'left', 'none' or a vector indicating the desired absolute x/y position, as for examle c(1, 2).") } } return(list(leg_opts = leg_opts, showlegend = showlegend)) } #' @title Create labels for flowchart #' #' @description This function creates lables with a maximal character length per line by combining content of two dataframe columns #' #' @param data A dataframe #' @param description_column_name \code{character} The column name containing description part of the new label #' @param value_column_name \code{character} The column name containing the sample size part of the new label #' @param complement_column_name \code{character} The column name containing a complement description part (will result in a second label) #' @param wrap_width \code{integer} for the maximal character count per line #' #' @return The input dataframe extended by two columns containing the label and complement label #' #' @keywords internal #' @noRd .get_labels <- function(data, description_column_name, value_column_name, complement_column_name = "", wrap_width = 50) { label <- complement_label <- NULL plotting_data <- data %>% dplyr::rowwise() %>% # below needs update to description_column_name instead of Criteria dplyr::mutate(label = paste(strwrap(get(description_column_name), width = wrap_width), collapse = "\n")) %>% # below needs update to value_column_name instead of `Remaining N` dplyr::mutate(label = sprintf("%s\nN = %d", label, get(value_column_name))) if (complement_column_name != "") { plotting_data <- plotting_data %>% # below needs update to complement_column_name instead of Complement dplyr::mutate(complement_label = paste(strwrap(get(complement_column_name), width = wrap_width), collapse = "\n")) %>% dplyr::ungroup() %>% # below needs update to value_column_name instead of `Remaining N` dplyr::mutate(complement_label = sprintf( "%s\nN = %d", complement_label, dplyr::lag(get(value_column_name)) - get(value_column_name) )) } else { plotting_data <- plotting_data %>% dplyr::ungroup() %>% dplyr::mutate(complement_label = sprintf( "%s N = %d", "Excluded", dplyr::lag(get(value_column_name)) - get(value_column_name) )) } return(plotting_data) } #' @title Calculate the size labels on the #' #' @description Calculate the text width and maximal text width of both the label and complement labels #' #' @param data Dataframe with label and complement label strings #' @param label The column containing attrition labels #' @param complement_label The column containing complement description labels #' #' @return The input dataframe extended by several columns containing the label and complement label height and width #' #' @keywords internal #' @noRd #' .get_labelsizes <- function(data, label, complement_label) { labelheight <- labelwidth <- complementheight <- complementwidth <- maxwidth <- maxheight <- NULL plotting_data <- data %>% dplyr::mutate( labelwidth = graphics::strwidth({{ label }}, units = "inch"), complementwidth = graphics::strwidth({{ complement_label }}, units = "inch"), maxwidth = max(labelwidth, complementwidth), labelheight = graphics::strheight({{ label }}, units = "inch"), complementheight = graphics::strheight({{ complement_label }}, units = "inch"), maxheight = max(labelheight, complementheight) ) %>% dplyr::select(labelwidth, complementwidth, maxwidth, labelheight, complementheight, maxheight, dplyr::everything()) return(plotting_data) } #' @title Create coordinates for each row in the attrition table #' #' @description This function creates lables with a 
maximal character length per line by combining content of two dataframe columns #' #' @param data A dataframe containing the attrition data #' @param box_width \code{integer} The width of the boxes in the flow charts (in canvas coordinates) #' @param box_height \code{integer} The height of the boxes in the flow charts (in canvas coordinates) #' @param field_height \code{float} The width of the boxes in the flow charts (in canvas coordinates) #' #' @return The input dataframe extended by columns containing x and y coordinates for included and excluded counts #' #' @keywords internal #' @noRd .get_coordinates <- function(data, box_width, box_height, field_height) { y <- ymin <- ymax <- down_ystart <- down_yend <- x <- side_xend <- side_y <- NULL plotting_data <- data %>% dplyr::ungroup() %>% dplyr::mutate( x = 50, y = 100 - dplyr::row_number() * field_height + box_height / 2 ) %>% # coordinates of text box dplyr::mutate( box_width = box_width, box_height = box_height, ymin = y - (box_height / 2), ymax = y + (box_height / 2) ) %>% # coordinates of down arrow dplyr::mutate( down_ystart = dplyr::lag(ymin), down_yend = ymax ) %>% # coordinates of side arrow dplyr::mutate( side_y = down_ystart - 0.5 * (down_ystart - down_yend), side_xstart = x, side_xend = x + (box_width / 2) + 10 ) %>% # complement coordinates dplyr::mutate( cx = side_xend + (box_width / 2), cy = side_y ) return(plotting_data) } #' @title Extract the numerical alpha representation of #RRGGBBAA colour #' #' @description RGB colours can be encoded as hexadecimal values, as for example internally used by `ggplot2`. #' For this, the numerical RGB values are mapped from their 0-255 value range to two-character hex-values. #' This yields a string in the form of '#RRGGBB'. Additionally, a fourth optional block can be present encoding #' the alpha transparency of the colour. This extends the string to '#RRGGBBAA'. #' This function takes such a string as input for `hex_colour`, extracts the 'AA' part and returns the #' numerical representation if it. #' #' @param hex_colour A string in the format '#RRGGBBAA' #' #' @return The numeric representation of the colors' alpha value, rounded to 2 digits. #' #' @keywords internal .get_alpha_from_hex_colour <- function(hex_colour = NULL) { if (missing(hex_colour) | !is.character(hex_colour)) { stop("Please provide a colour in hex representation as a string for `hex_colour`.") } if (!nchar(hex_colour) == 9) { stop("Please provide a hex colour in the format #RRGGBBAA.") } else { colour_parts <- strsplit(hex_colour, "")[[1]] if (colour_parts[1] != "#") { stop("Please provide a hex colour in the format #RRGGBBAA.") } else { alpha <- grDevices::col2rgb(hex_colour, alpha = TRUE)["alpha", ][[1]] alpha <- round(alpha / 255, 2) return(alpha) } } } #' @title Converts an alpha value between its numeric and its hex-encoded form. #' #' @description The function accepts a numeric (or NULL/NA) or a two-character hex encoded alpha representation and returns a the respective other representation. #' #' @param numeric_alpha A numerical value between 0 and 1. #' @param hex_alpha A two-letter character string. #' #' @return If `numeric_alpha` was specified, its two-letter representation is returned. If `hex_alpha` was specified, its numeric representation is returned. 
#' #' @keywords internal .convert_alpha <- function(numeric_alpha = NULL, hex_alpha = NULL) { if (missing(numeric_alpha) & missing(hex_alpha)) { stop("Either `numeric_alpha` or `hex_alpha` has to be specified.") } else if (!missing(numeric_alpha) & missing(hex_alpha)) { # Two separate ifs so that is.na(NULL) doesn't cause an error if (is.null(numeric_alpha)) { return("00") } if (is.na(numeric_alpha)) { return("00") } if (is.numeric(numeric_alpha)) { if (numeric_alpha > 1 | numeric_alpha < 0) { stop("Please enter a numeric value between 0 and 1 for `numeric_alpha`.") } else { alpha_decimal <- base::round((numeric_alpha * 100) * (255 / 100)) alpha_hex <- base::format(base::as.hexmode(alpha_decimal), width = 2, upper.case = TRUE ) return(alpha_hex) } } else { stop("Please enter a numeric value between 0 and 1 for `numeric_alpha`.") } } else if (missing(numeric_alpha) & !missing(hex_alpha)) { if (is.character(hex_alpha) & nchar(hex_alpha) == 2) { alpha <- grDevices::col2rgb(paste0("#FFFFFF", hex_alpha), alpha = TRUE)["alpha", ][[1]] alpha <- round(alpha / 255, 2) return(alpha) } else { stop("Please specify a two-letter character string for `hex_alpha`.") } } else if (!missing(numeric_alpha) & !missing(hex_alpha)) { stop("Please choose either `numeric_alpha` or `hex_alpha`.") } } #' @title Replaces the AA part of a #RRGGBBAA hex-colour. #' #' @description RGB colours can be encoded as hexadecimal values, as for example internally used by `ggplot2`. For this, the numerical RGB values are mapped from their 0-255 value range to two-character hex-values. This yields a string in the form of '#RRGGBB'. Additionally, a fourth optional block can be present encoding the alpha transparency of the colour. This extends the string to '#RRGGBBAA'. This function takes an '#RRGGBBAA' string as input for `colour` and a two-character hex-representation of an alpha value as an input for `new_alpha`, replaces the 'AA' part of `colour` with the `new_alpha` and returns the new colour. #' #' @param colour A character string of the format #RRGGBBAA. #' @param new_alpha A two-character string with the new alpha value. #' #' @return A hex-encoded RGBA colour. 
#' #' @keywords internal .replace_hex_alpha <- function(colour, new_alpha) { if (missing(colour) | missing(new_alpha)) { stop("Please provide a `colour` and a `new_alpha` in hex representation as strings.") } if (!(is.character(new_alpha) & nchar(new_alpha) == 2)) { stop("Please provide a two-character string for the hex representation of the new alpha.") } if (!(is.character(colour))) { stop("Please provide a hex colour as a string.") } else { if (!nchar(colour) == 9) { stop("Please provide a hex colour in the format #RRGGBBAA.") } else { colour_parts <- strsplit(colour, "")[[1]] if (colour_parts[1] != "#") { stop("Please provide a hex colour in the format #RRGGBBAA.") } else { colour_red <- paste0(colour_parts[2:3], collapse = "") colour_green <- paste0(colour_parts[4:5], collapse = "") colour_blue <- paste0(colour_parts[6:7], collapse = "") new_alpha <- base::toupper(new_alpha) new_colour <- paste0("#", colour_red, colour_green, colour_blue, new_alpha) return(new_colour) } } } } #' Extract estimate object #' #' Function returns the estimate object from `estimate_KM()` or `estimate_cuminc()` #' extracted from the plotted objected #' @param gg a ggplot object created with `visr()` #' #' @return estimate object #' @noRd .extract_estimate_object <- function(gg) { if (inherits(gg, "ggsurvfit")) { call <- as.character(rlang::quo_squash(gg$data$call[[1]])) survfit_object <- rlang::eval_tidy(gg$data$call[[1]]) # Since this call is using survival instead of visR, some characteristics are missing eg strata = "Overall" when no strata present main <- base::trimws(base::sub(".*~", "", call[[2]]), which = "both") if (is.null(survfit_object$strata) && main == "1") { survfit_object$strata <- as.vector(length(survfit_object$time)) attr(survfit_object$strata, "names") <- "Overall" } return(survfit_object) } else if (inherits(gg, "ggtidycuminc")) { return(attr(gg, "tidycuminc")) } } #' Construct strata label for visr legend title #' #' @param x a survfit or tidycuminc object #' #' @return string #' @noRd .construct_strata_label <- function(x, sep = ", ") { tryCatch( { if (inherits(x, "survfit") && is.null(x$strata_lbl)) { strata_label <- "" } else if (inherits(x, "survfit")) { strata_label <- unlist(x$strata_lbl) %>% paste(collapse = ", ") } else if (inherits(x, "tidycuminc")) { strata <- .extract_strata_varlist(x) strata_label <- lapply( as.list(strata), function(variable) attr(x$data[[variable]], "label") %||% x ) %>% unlist() %>% paste(collapse = ", ") } strata_label }, error = function(e) { return("") } ) } #' Extract the strata variable names #' #' @param x a survfit or tidycuminc object #' #' @return vector of variable names #' @noRd .extract_strata_varlist <- function(x) { if (inherits(x, "survfit")) { return(names(x$strata_lbls)) } if (inherits(x, "tidycuminc")) { return(.formula_to_strata_varlist(x$formula, x$data)) } } #' Extract the strata variable names from model formula #' #' @param formula a formula #' @param data a data frame #' #' @return vector of variable names #' @noRd .formula_to_strata_varlist <- function(formula, data) { tryCatch( { strata <- stats::model.frame(formula, data = data)[, -1, drop = FALSE] %>% names() if (rlang::is_empty(strata)) { strata <- NULL } strata }, error = function(e) { return(NULL) } ) }
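# Illustrative sketch (added for exposition; not part of the original source):
# round-tripping an alpha value through the internal colour helpers above.
# Wrapped in `if (FALSE)` so it never executes when the file is sourced.
if (FALSE) {
  hex <- .convert_alpha(numeric_alpha = 0.5)       # "80"
  .convert_alpha(hex_alpha = hex)                  # 0.5
  .replace_hex_alpha("#FF0000FF", new_alpha = hex) # "#FF000080"
}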
/scratch/gouwar.j/cran-all/cranData/visR/R/utils_visr.R
#' @keywords internal
#' @importFrom rlang .data .env %||%
#' @importFrom survival survfit
"_PACKAGE"

## usethis namespace: start
## usethis namespace: end
NULL
/scratch/gouwar.j/cran-all/cranData/visR/R/visR-package.R
#' @title Plot a supported S3 object #' #' @description S3 method for creating plots directly from objects using `ggplot2`, #' similar to the base R `plot()` function. #' #' `r lifecycle::badge("deprecated")` Methods `visr.survfit()` and `visr.tidycuminc()` have been deprecated #' in favor of `ggsurvfit::ggsurvfit()` and `ggsurvfit::ggcuminc()`, respectively. #' #' @seealso \code{\link[ggplot2]{ggplot}} #' #' @param x Object of class `attritiontable` #' @param x_label \code{character} Label for the x-axis. When not specified, #' the function will look for `"PARAM"` or `"PARAMCD"` information in the original data set (CDISC standards). #' If no `"PARAM"`/`"PARAMCD"` information is available, the default x-axis label is `"Time"`. #' @param y_label \code{character} Label for the y-axis. When not specified, #' the default will do a proposal, depending on the `fun` argument. #' @param x_units Unit to be added to the x_label (x_label (x_unit)). #' Default is NULL. #' @param x_ticks Ticks for the x-axis. When not specified, the default will #' do a proposal. #' @param y_ticks Ticks for the y-axis. When not specified, #' the default will do a proposal based on the `fun` argument. #' #' @param fun Function that represents the scale of the estimate. #' The current options are: #' \tabular{ll}{ #' \code{surv} \tab is the survival probability. This is the default \cr #' \code{log} \tab is log of the survival probability \cr #' \code{event} \tab is the failure probability \cr #' \code{cloglog} \tab is log(-log(survival probability)) \cr #' \code{pct} \tab is survival as a percentage \cr #' \code{logpct} \tab is log survival as a percentage \cr #' \code{cumhaz} \tab is the cumulative hazard \cr #' } #' #' @param legend_position Specifies the legend position in the plot. #' Character values allowed are "top" "left" "bottom" "right". #' Numeric coordinates are also allowed. #' Default is "right". #' @param description_column_name \code{character} Name of the column containing #' the inclusion descriptions #' @param value_column_name \code{character} Name of the column containing the #' remaining sample counts #' @param complement_column_name \code{character} Optional: Name of the column #' containing the exclusion descriptions #' @param box_width \code{character} The box width for each box in the flow #' chart #' @param font_size \code{character} The fontsize in pt #' @param fill The color (string or hexcode) to use to fill the boxes in the #' flowchart #' @param border The color (string or hexcode) to use for the borders of the #' boxes in the flowchart #' @param ... other arguments passed on to the method #' #' @return Object of class \code{ggplot} and \code{ggsurvplot} for `survfit` objects. #' #' @rdname visr #' #' @export visr <- function(x, ...) { UseMethod("visr", x) } #' @rdname visr #' @method visr default #' @export visr.default <- function(x, ...) 
{ graphics::plot(x) } #' @examples #' #' # fit KM #' km_fit <- survival::survfit(survival::Surv(AVAL, 1 - CNSR) ~ TRTP, data = adtte) #' #' # plot curves using survival plot function #' plot(km_fit) #' #' # plot same curves using visR::visr plotting function #' visR::visr(km_fit) #' #' # estimate KM using visR wrapper #' survfit_object <- visR::estimate_KM(data = adtte, strata = "TRTP") #' #' # Plot survival probability #' visR::visr(survfit_object, fun = "surv") #' #' # Plot survival percentage #' visR::visr(survfit_object, fun = "pct") #' #' # Plot cumulative hazard #' visR::visr(survfit_object, fun = "cloglog") #' #' @rdname visr #' @method visr survfit #' @export visr.survfit <- function(x = NULL, x_label = NULL, y_label = NULL, x_units = NULL, x_ticks = NULL, y_ticks = NULL, fun = "surv", legend_position = "right", ...) { lifecycle::deprecate_warn( when = "0.4.0", what = "visR::visr.survfit()", details = "Please use `ggsurvfit::ggsurvfit()` instead." ) # Minimal input validation ---------------------------------------------------- if (!(is.null(x_label) | is.character(x_label) | is.expression(x_label))) { stop("Invalid `x_label` argument, must be either `character` or `expression`.") } if (!(is.null(y_label) | is.character(y_label) | is.expression(y_label))) { stop("Invalid `y_label` argument, must be either `character` or `expression`.") } if (!(is.null(x_units) | is.character(x_units))) { stop("Invalid `x_units` argument, must be `character`.") } if (!(is.null(x_ticks) | is.numeric(x_ticks))) { stop("Invalid `x_ticks` argument, must be `numeric`.") } if (!(is.null(y_ticks) | is.numeric(y_ticks))) { stop("Invalid `y_ticks` argument, must be `numeric`.") } if (is.character(legend_position) && !legend_position %in% c("top", "bottom", "right", "left", "none")) { stop( "Invalid legend position given. Must either be [\"top\", \"bottom\", \"right\", \"left\", \"none\"] or a vector with two numbers indicating the position relative to the axis. For example c(0.5, 0.5) to place the legend in the center of the plot." ) } else if (is.numeric(legend_position) && length(legend_position) != 2) { stop( "Invalid legend position given. Must either be [\"top\", \"bottom\", \"right\", \"left\", \"none\"] or a vector with two numbers indicating the position relative to the axis. For example c(0.5, 0.5) to place the legend in the center of the plot." ) } valid_funs <- c("surv", "log", "event", "cloglog", "pct", "logpct", "cumhaz") if (is.character(fun)) { if (!(fun %in% valid_funs)) { stop( "Unrecognized `fun` argument, must be one of [\"surv\", \"log\", \"event\", \"cloglog\", \"pct\", \"logpct\", \"cumhaz\"] or a user-defined function." ) } } # Y-label ---------------------------------------------------------------------- if (is.null(y_label) & is.character(fun)) { y_label <- base::switch(fun, surv = "survival probability", log = "log(survival probability)", event = "failure probability", cloglog = "log(-log(survival probability))", pct = "percentage survival", logpct = "log(percentage survival)", cumhaz = "cumulative hazard", stop("Unrecognized fun argument") ) } else if (is.null(y_label) & is.function(fun)) { stop("No Y label defined. 
No default label is available when `fun` is a function.") } if (is.character(fun)) { .transfun <- base::switch(fun, surv = function(y) y, log = function(y) log(y), event = function(y) 1 - y, cloglog = function(y) log(-log(y)), pct = function(y) y * 100, logpct = function(y) log(y * 100), # survfit object contains an estimate for Cumhaz and SE based on Nelson-Aalen with or without correction for ties # However, no CI is calculated automatically. For plotting, the MLE estimator is used for convenience. cumhaz = function(y) -log(y) ) } else if (is.function(fun)) { .transfun <- function(y) fun(y) } else { stop("fun should be a character or a user-defined function.") } # Extended tidy of survfit class + transformation + remove NA after transfo ---- correctme <- NULL tidy_object <- tidyme(x) if ("estimate" %in% colnames(tidy_object)) { tidy_object[["est"]] <- .transfun(tidy_object[["estimate"]]) correctme <- c(correctme, "est") } if (all(c("conf.high", "conf.low") %in% colnames(tidy_object))) { tidy_object[["est.upper"]] <- .transfun(tidy_object[["conf.high"]]) tidy_object[["est.lower"]] <- .transfun(tidy_object[["conf.low"]]) correctme <- c(correctme, "est.lower", "est.upper") } # Adjust -Inf to minimal value ------------------------------------------------- if (nrow(tidy_object[tidy_object[["est"]] == "-Inf", ]) > 0) { warning("NAs introduced by y-axis transformation.") } tidy_object[, correctme] <- sapply(tidy_object[, correctme], FUN = function(x) { x[which(x == -Inf)] <- min(x[which(x != -Inf)], na.rm = TRUE) return(x) } ) ymin <- min(sapply(tidy_object[, correctme], function(x) min(x[which(x != -Inf)], na.rm = TRUE)), na.rm = TRUE) ymax <- max(sapply(tidy_object[, correctme], function(x) max(x[which(x != -Inf)], na.rm = TRUE)), na.rm = TRUE) # Obtain X-asis label ---------------------------------------------------------- if (is.null(x_label)) { if ("PARAM" %in% names(x)) { if (length(unique(x[["PARAM"]])) == 1) { x_label <- as.character(x[["PARAM"]][[1]]) } else { warning("More than one unique entry in 'PARAM'.") } } else if ("PARAMCD" %in% names(x)) { if (length(unique(x[["PARAMCD"]])) == 1) { x_label <- as.character(x[["PARAMCD"]][[1]]) } else { warning("More than one unique entry in 'PARAMCD'.") } } else { x_label <- "Time" } if (!is.null(x_units)) { x_label <- paste0(x_label, " (", x_units, ")") } } else { if (!is.null(x_units)) { x_label <- paste0(x_label, " (", x_units, ")") } } if (is.null(x_ticks)) x_ticks <- pretty(x$time, 10) # Obtain Y-asis label ---------------------------------------------------------- if (is.null(y_ticks) & is.character(fun)) { y_ticks <- switch(fun, surv = pretty(c(0, 1), 5), log = pretty(c(ymin, ymax), 5), event = pretty(c(0, 1), 5), cloglog = pretty(c(ymin, ymax), 5), pct = pretty(c(0, 100), 5), logpct = pretty(c(0, 5), 5), cumhaz = pretty(c(ymin, ymax), 5), stop("Unrecognized fun argument") ) } else if (is.null(y_ticks) & is.function(fun)) { y_ticks <- pretty(c(ymin, ymax), 5) } # Plotit ----------------------------------------------------- yscaleFUN <- function(x) sprintf("%.2f", x) gg <- ggplot2::ggplot(tidy_object, ggplot2::aes( x = time, group = strata, fill = strata )) + ggplot2::geom_step(ggplot2::aes(y = est, col = strata)) + ggplot2::scale_x_continuous( breaks = x_ticks ) + ggplot2::xlab(x_label) + ggplot2::scale_y_continuous( breaks = y_ticks, labels = yscaleFUN ) + ggplot2::coord_cartesian( xlim = c(min(x_ticks), max(x_ticks)), ylim = c(min(y_ticks), max(y_ticks)) ) + ggplot2::ylab(y_label) + ggplot2::labs(color = .construct_strata_label(x)) + 
ggplot2::theme(legend.position = legend_position) + ggplot2::theme(legend.key = ggplot2::element_blank()) + NULL # Save applied function so that we don't have to guess later on if (is.character(fun)) { attr(gg, "fun") <- .transfun } else { fun_call <- utils::capture.output(fun) if (grepl("rimitive", fun_call[1])) { fun_call_fun <- regmatches(fun_call, regexpr("\".*\"", fun_call)) fun_call_fun <- gsub("\"", "", fun_call_fun) fun_call_fun <- paste0("function(x) ", fun_call_fun, "(x)") attr(gg, "fun") <- eval(parse(text = fun_call_fun)) } else if (is.function(fun)) { attr(gg, "fun") <- fun } } class(gg) <- c("ggsurvfit", class(gg)) return(gg) } #' @description #' `r lifecycle::badge("questioning")` #' `visr.attrition()` function to draw a Consort flow diagram chart is currently being questioned. #' #' @examples #' #' ## Create attrition #' attrition <- visR::get_attrition(adtte, #' criteria_descriptions = c( #' "1. Not in Placebo Group", #' "2. Be 75 years of age or older.", #' "3. White", #' "4. Female" #' ), #' criteria_conditions = c( #' "TRTP != 'Placebo'", #' "AGE >= 75", #' "RACE=='WHITE'", #' "SEX=='F'" #' ), #' subject_column_name = "USUBJID" #' ) #' #' ## Draw a CONSORT attrition chart without specifying extra text for the complement #' attrition %>% #' visr("Criteria", "Remaining N") #' #' ## Add detailed complement descriptions to the "exclusion" part of the CONSORT diagram #' # Step 1. Add new column to attrition dataframe #' attrition$Complement <- c( #' "NA", #' "Placebo Group", #' "Younger than 75 years", #' "Non-White", #' "Male" #' ) #' #' # Step 2. Define the name of the column in the call to the plotting function #' attrition %>% #' visr("Criteria", "Remaining N", "Complement") #' #' ## Styling the CONSORT flowchart #' # Change the fill and outline of the boxes in the flowchart #' attrition %>% #' visr("Criteria", "Remaining N", "Complement", fill = "lightblue", border = "grey") #' #' ## Adjust the font size in the boxes #' attrition %>% #' visr("Criteria", "Remaining N", font_size = 10) #' #' @rdname visr #' @method visr attrition #' @export visr.attrition <- function(x, description_column_name = "Criteria", value_column_name = "Remaining N", complement_column_name = "", box_width = 50, font_size = 12, fill = "white", border = "black", ...) { if (!description_column_name %in% names(x)) { stop(paste0( "Column \"", description_column_name, "\" cannot be found in the input data. ", "Please provide the column name as string in the input ", "data containing the inclusion descriptions." )) } if (!value_column_name %in% names(x)) { stop(paste0( "Column \"", value_column_name, "\" cannot be found in the input data. ", "Please provide the column name as string in the input data containing", "the sample size after applying inclusion criteria." )) } if (complement_column_name != "" & !complement_column_name %in% names(x)) { stop(paste0( "Column \"", complement_column_name, "\" cannot be found in the input data. ", "Please provide a valid column name as string in the input data containing", "complement description or omit this argument for default labels." )) } if (!is.numeric(box_width)) { warning("An invalid input was given for `box_width`, must be `numeric` value. Setting it to 50.") box_width <- 50 } if (!is.numeric(font_size)) { warning("An invalid input was given for `font_size`, must be `numeric` value. Setting it to 12.") font_size <- 12 } if (!is.character(fill)) { warning("An invalid input was given for `fill`, must be `character` string. 
Setting it to \"white\".") fill <- "white" } if (!is.character(border)) { warning("An invalid input was given for `border`, must be `character` string. Setting it to \"black\".") border <- "black" } label <- complement_label <- NULL y <- down_ystart <- down_yend <- side_xstart <- side_xend <- side_y <- NULL cx <- cy <- NULL # split up space into evenly sized chunks field_height <- 100 / nrow(x) # allow for some spacing between boxes by reducing the size of the chunk box_height <- 0.75 * field_height # assign coordinates to each row in the attrition table plotting_data <- x %>% .get_labels(description_column_name, value_column_name, complement_column_name, wrap_width = box_width) %>% .get_labelsizes(label, complement_label) %>% .get_coordinates(box_width, box_height, field_height) # draw plot gg <- plotting_data %>% ggplot2::ggplot() + # boxes ggplot2::geom_tile( data = plotting_data, ggplot2::aes( x = x, y = y, width = box_width, height = box_height ), color = border, fill = fill ) + # text in boxes ggplot2::geom_text( data = plotting_data, ggplot2::aes( x = x, y = y, label = label ), size = font_size / ggplot2::.pt ) + # down arrow ggplot2::geom_segment( data = plotting_data, ggplot2::aes( x = x, xend = x, y = down_ystart, yend = down_yend ), arrow = ggplot2::arrow(length = 0.5 * ggplot2::unit(font_size, "pt")), size = .2, na.rm = TRUE ) + # side arrow ggplot2::geom_segment( data = plotting_data, ggplot2::aes( x = side_xstart, xend = side_xend, y = side_y, yend = side_y ), arrow = ggplot2::arrow(length = 0.5 * ggplot2::unit(font_size, "pt")), size = .2, na.rm = TRUE ) + # complement box ggplot2::geom_tile( data = plotting_data, ggplot2::aes( x = cx, y = cy, width = box_width, height = box_height ), color = border, fill = fill, na.rm = TRUE ) + # text in complement box ggplot2::geom_text( data = plotting_data, ggplot2::aes( x = cx, y = cy, label = complement_label ), size = font_size / ggplot2::.pt, na.rm = TRUE ) + # remove all plot elements ggplot2::theme_void() + ggplot2::theme(legend.position = "none") return(gg) } #' @rdname visr #' @method visr tidycuminc #' @export visr.tidycuminc <- function(x = NULL, x_label = "Time", y_label = "Cumulative Incidence", x_units = NULL, x_ticks = pretty(x$tidy$time, 10), y_ticks = pretty(c(0, 1), 5), legend_position = "right", ...) { lifecycle::deprecate_warn( when = "0.4.0", what = "visR::visr.tidycuminc()", details = "Please use `ggsurvfit::ggcuminc()` instead." ) # check for installation of tidycmprsk package rlang::check_installed("tidycmprsk", version = "0.1.1") if (!is.null(x_units)) { x_label <- paste0(x_label, " (", x_units, ")") } # Plotit ----------------------------------------------------- yscaleFUN <- function(x) sprintf("%.2f", x) gg <- visr_tidy_tidycuminc(x) %>% ggplot2::ggplot(ggplot2::aes( x = time, group = strata, fill = strata )) + ggplot2::geom_step(ggplot2::aes(y = est, col = strata)) + ggplot2::scale_x_continuous( breaks = x_ticks ) + ggplot2::xlab(x_label) + ggplot2::scale_y_continuous( breaks = y_ticks, labels = yscaleFUN ) + ggplot2::coord_cartesian( xlim = c(min(x_ticks), max(x_ticks)), ylim = c(min(y_ticks), max(y_ticks)) ) + ggplot2::ylab(y_label) + ggplot2::labs(color = .construct_strata_label(x)) + ggplot2::theme(legend.position = legend_position) + ggplot2::theme(legend.key = ggplot2::element_blank()) + NULL class(gg) <- c("ggtidycuminc", class(gg)) attr(gg, "tidycuminc") <- x gg }
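# Illustrative sketch (added for exposition; not part of the original source):
# `fun` may also be a user-defined transformation, in which case `y_label` has
# to be supplied explicitly (see the input checks in `visr.survfit()` above).
# Assumes the packaged `adtte` dataset; note that `visr.survfit()` is deprecated
# in favour of `ggsurvfit::ggsurvfit()`. Wrapped in `if (FALSE)` so it never executes.
if (FALSE) {
  survfit_object <- visR::estimate_KM(data = adtte, strata = "TRTP")

  visR::visr(survfit_object,
    fun = function(y) 1 - y^2,
    y_label = "1 - S(t)^2"
  )
}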
/scratch/gouwar.j/cran-all/cranData/visR/R/visr.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----imports, echo=TRUE, warning=FALSE, message=FALSE------------------------- library(ggplot2) library(visR) ## ----globalSetup-------------------------------------------------------------- # Metadata Title DATASET <- paste0("Analyis Data Time-To-Event (ADTTE)") # Save original options() old <- options() # Global formatting options options(digits = 3) # Global ggplot settings theme_set(theme_bw()) # Global table settings options(DT.options = list(pageLength = 10, language = list(search = 'Filter:'), scrollX = TRUE)) # load ADTTE from CDISC pilot data(adtte) # Restore original options() options(old) ## ----table1_get_default------------------------------------------------------- # Display a summary table (e.g. tableone) visR::tableone(adtte[,c("TRTP", "AGE")], title = "Demographic summary" , datasource = DATASET) ## ----km_est------------------------------------------------------------------- # Estimate a survival object survfit_object <- adtte %>% visR::estimate_KM(data = ., strata = "TRTP") survfit_object ## ----km_tab_options_1--------------------------------------------------------- # Display test statistics associated with the survival estimate visR::render(survfit_object %>% get_pvalue(), title = "P-values", datasource = DATASET) ## ----km_plot_1, fig.align='center', fig.width= 6, fig.height=4---------------- # Create and display a Kaplan-Meier from the survival object and add a risktable visr(survfit_object) %>% visR::add_CI() %>% visR::add_risktable()
/scratch/gouwar.j/cran-all/cranData/visR/inst/doc/CDISC_ADaM.R
---
title: "Survival Analysis with visR using CDISC ADaM Time-To-Event Analysis Dataset (ADTTE)"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Survival Analysis with visR using CDISC ADaM Time-To-Event Analysis Dataset (ADTTE)}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

## Introduction

This tutorial illustrates how a standard time-to-event analysis can be done very efficiently when the data set adheres to the [CDISC ADaM standard](https://www.cdisc.org/standards/foundational/adam/adam-basic-data-structure-bds-time-event-tte-analyses-v1-0). A more detailed time-to-event analysis with a broader overview of visR's functionality is presented in another vignette.

```{r imports, echo=TRUE, warning=FALSE, message=FALSE}
library(ggplot2)
library(visR)
```

## Global Document Setup

```{r globalSetup}
# Metadata Title
DATASET <- paste0("Analysis Data Time-To-Event (ADTTE)")

# Save original options()
old <- options()

# Global formatting options
options(digits = 3)

# Global ggplot settings
theme_set(theme_bw())

# Global table settings
options(DT.options = list(pageLength = 10,
                          language = list(search = 'Filter:'),
                          scrollX = TRUE))

# load ADTTE from CDISC pilot
data(adtte)

# Restore original options()
options(old)
```

## Time-to-event analysis

visR includes a wrapper function to easily display summary tables (e.g. `tableone`)

```{r table1_get_default}
# Display a summary table (e.g. tableone)
visR::tableone(adtte[, c("TRTP", "AGE")],
               title = "Demographic summary", datasource = DATASET)
```

The wrapper function to estimate a Kaplan-Meier curve is compatible with `%>%` and purrr::map functions without losing traceability of the dataset name inside the call of the object. If a data set adheres to the CDISC ADaM standards, only a stratifier needs to be specified.

```{r km_est}
# Estimate a survival object
survfit_object <- adtte %>%
  visR::estimate_KM(data = ., strata = "TRTP")
survfit_object
```

Given a survival object, visR includes several functions to quickly extract additional information (e.g. test statistics and p-values) and a general function to display a table (`render`).

```{r km_tab_options_1}
# Display test statistics associated with the survival estimate
visR::render(survfit_object %>% get_pvalue(), title = "P-values", datasource = DATASET)
```

A survival object can be plotted using the visR function `visr`. Additional information like confidence intervals and a risktable can be added to the plot.

```{r km_plot_1, fig.align='center', fig.width= 6, fig.height=4}
# Create and display a Kaplan-Meier from the survival object and add a risktable
visr(survfit_object) %>%
  visR::add_CI() %>%
  visR::add_risktable()
```
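
Censoring marks can be displayed with the same pipe syntax; for example, `add_CNSR()` (shown below with the same arguments used in the other visR vignettes) adds a tick for every censored observation.

```{r km_plot_cnsr, fig.align='center', fig.width= 6, fig.height=4}
# Add censoring ticks in addition to the confidence interval and risktable
visr(survfit_object) %>%
  visR::add_CI() %>%
  visR::add_CNSR(shape = 3, size = 2) %>%
  visR::add_risktable()
```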
/scratch/gouwar.j/cran-all/cranData/visR/inst/doc/CDISC_ADaM.Rmd
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(visR) ## ----data_prep---------------------------------------------------------------- attrition <- visR::get_attrition(adtte, criteria_descriptions = c("1. Not in Placebo Group", "2. Be 75 years of age or older.", "3. White", "4. Female"), criteria_conditions = c("TRTP != 'Placebo'", "AGE >= 75", "RACE=='WHITE'", "SEX=='F'"), subject_column_name = "USUBJID") ## ----render1, fig.align='center', fig.width= 6, fig.height=6------------------ attrition %>% visR::visr("Criteria", "Remaining N") ## ---- data_control------------------------------------------------------------ attrition$Complement <- c("NA", "Placebo Group", "Younger than 75 years", "Non-White", "Male") ## ----render2, fig.align='center', fig.width= 6, fig.height=6------------------ attrition %>% visR::visr("Criteria", "Remaining N", "Complement") ## ----render3, fig.align='center', fig.width= 6, fig.height=6------------------ attrition %>% visR::visr("Criteria", "Remaining N", "Complement", fill = "lightblue", border="grey") ## ----render4, fig.align='center', fig.width= 6, fig.height=6----------------- attrition %>% visR::visr("Criteria", "Remaining N", font_size = 10)
/scratch/gouwar.j/cran-all/cranData/visR/inst/doc/Consort_flow_diagram.R
--- title: "Creating consort flow diagram with visR" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Creating consort flow diagram with visR} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(visR) ``` # Attrition example ## Data preparation Prepare the data using the attrition function. ```{r data_prep} attrition <- visR::get_attrition(adtte, criteria_descriptions = c("1. Not in Placebo Group", "2. Be 75 years of age or older.", "3. White", "4. Female"), criteria_conditions = c("TRTP != 'Placebo'", "AGE >= 75", "RACE=='WHITE'", "SEX=='F'"), subject_column_name = "USUBJID") ``` ## Render chart Draw a CONSORT attrition chart without specifying extra text for the complement ```{r render1, fig.align='center', fig.width= 6, fig.height=6} attrition %>% visR::visr("Criteria", "Remaining N") ``` ## Adding more detail Adding more detailed complement descriptions to the "exclusion" part of the CONSORT diagram ### Add the control group Step 1. Add new column to attrition dataframe ```{r, data_control} attrition$Complement <- c("NA", "Placebo Group", "Younger than 75 years", "Non-White", "Male") ``` ### Define metadata Step 2. Define the name of the column in the call to the plotting function ```{r render2, fig.align='center', fig.width= 6, fig.height=6} attrition %>% visR::visr("Criteria", "Remaining N", "Complement") ``` # Additional features ## Styling the CONSORT flowchart. Change the fill and outline of the boxes in the flowchart ```{r render3, fig.align='center', fig.width= 6, fig.height=6} attrition %>% visR::visr("Criteria", "Remaining N", "Complement", fill = "lightblue", border="grey") ``` ## Adjusting size Adjust the font size in the boxes ```{r render4, fig.align='center', fig.width= 6, fig.height=6} attrition %>% visR::visr("Criteria", "Remaining N", font_size = 10) ```
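
## Adjusting width

The width of the boxes (and the wrapping of the text inside them) can be adjusted as well. The sketch below uses the `box_width` argument of `visr.attrition()` (default 50); the value 40 is just an example.

```{r render5, fig.align='center', fig.width= 6, fig.height=6}
attrition %>%
  visR::visr("Criteria", "Remaining N", box_width = 40, font_size = 10)
```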
/scratch/gouwar.j/cran-all/cranData/visR/inst/doc/Consort_flow_diagram.Rmd
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, fig.width = 7, fig.height = 4, fig.align = "center", comment = "#>" ) ## ----libraries, include = TRUE------------------------------------------------ library(visR) ## ----generate-survival-data--------------------------------------------------- lung_cohort <- survival::lung lung_cohort <- lung_cohort %>% dplyr::mutate(sex = as.factor(ifelse(sex == 1, "Male", "Female"))) %>% dplyr::mutate(status = status - 1) %>% dplyr::rename(Age = "age", Sex = "sex", Status = "status", Days = "time") lung_suvival_object <- lung_cohort %>% visR::estimate_KM(strata = "Sex", CNSR = "Status", AVAL = "Days") ## ----default-ggplot2-plot----------------------------------------------------- p <- lung_suvival_object %>% visR::visr() p ## ----styling-with-ggplot2----------------------------------------------------- p + ggplot2::theme_bw() + ggplot2::theme(legend.position = "top") + ggplot2::scale_color_manual(values = c("red", "blue")) ## ----visr-parameter-styling--------------------------------------------------- lung_suvival_object %>% visR::visr(x_label = "Time", y_label = NULL, # NULL (default) causes the label to be deducted from the used function x_ticks = seq(0, 1200, 200), y_ticks = seq(0, 100, 20), fun = "pct", legend_position = "top") ## ----visr-define_theme-empty-------------------------------------------------- visR::define_theme() ## ----visr-define_theme-nonempty----------------------------------------------- theme <- visR::define_theme( strata = list( "Sex" = list("Female" = "red", "Male" = "blue"), "ph.ecog" = list("0" = "cyan", "1" = "purple", "2" = "brown") ), fontsizes = list( "axis" = 12, "ticks" = 10, "legend_title" = 10, "legend_text" = 8 ), fontfamily = "Helvetica", grid = list("major" = FALSE, "minor" = FALSE), #grid = TRUE/FALSE # <- can also be used instead of the named list above bg = "transparent", legend_position = "top" ) ## ----visr-apply_theme-empty, warning=FALSE------------------------------------ lung_suvival_object %>% visR::visr() %>% visR::apply_theme() ## ----visr-apply_theme-nonempty, warning = FALSE, message=FALSE---------------- lung_suvival_object %>% visR::visr() %>% visR::apply_theme(theme)
/scratch/gouwar.j/cran-all/cranData/visR/inst/doc/Styling_KM_plots.R
---
title: "Styling survival plots"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Styling survival plots}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  fig.width = 7,
  fig.height = 4,
  fig.align = "center",
  comment = "#>"
)
```

# Introduction

```{r libraries, include = TRUE}
library(visR)
```

This tutorial illustrates the usage of the styling functionality that `visR` provides. By default, `visR::visr()` does not apply any form of visual changes to the generated survival plots. Therefore, the default output looks like you would expect from a standard `ggplot2::ggplot()` plot.

While the examples below visualize the results from `estimate_KM()`, these principles apply to competing risks cumulative incidence objects created with `estimate_cuminc()` as well.

## Preparation of the data

In this example, we will work with patient data from the NCCTG Lung Cancer dataset that is part of the `survival` package. This data is also used to demonstrate more functions of `visR` in another vignette. However, in this particular one, it is only used to demonstrate the adjustment of the plot aesthetics.

## Generation of a `survfit` object

```{r generate-survival-data}
lung_cohort <- survival::lung

lung_cohort <- lung_cohort %>%
  dplyr::mutate(sex = as.factor(ifelse(sex == 1, "Male", "Female"))) %>%
  dplyr::mutate(status = status - 1) %>%
  dplyr::rename(Age = "age", Sex = "sex", Status = "status", Days = "time")

lung_suvival_object <- lung_cohort %>%
  visR::estimate_KM(strata = "Sex", CNSR = "Status", AVAL = "Days")
```

# Styling

## Plotting the generated `survfit` object without adjustments

```{r default-ggplot2-plot}
p <- lung_suvival_object %>%
  visR::visr()
p
```

As we can see, the plot shows the default [`ggplot2::theme_grey()`](https://ggplot2.tidyverse.org/reference/ggtheme.html) style with a grey background, a visible grid and the default `ggplot2` colours.

## Using `ggplot2` to style the plot

Since `visR::visr()` also generates a valid `ggplot` object as an output, we can use the conventional styling logic and options that `ggplot2` provides, as shown below.

```{r styling-with-ggplot2}
p +
  ggplot2::theme_bw() +
  ggplot2::theme(legend.position = "top") +
  ggplot2::scale_color_manual(values = c("red", "blue"))
```

However, `visR` also provides functions to adjust common aesthetics more easily and with less code.

## Using `visR` to style the plot

The most direct option to style plots generated through `visR::visr()` is by using the parameters that the function provides. Internally, parameters like the y-axis label are automatically deduced from the function used. The following example demonstrates the options exposed.

```{r visr-parameter-styling}
lung_suvival_object %>%
  visR::visr(x_label = "Time",
             y_label = NULL, # NULL (default) causes the label to be deduced from the function used
             x_ticks = seq(0, 1200, 200),
             y_ticks = seq(0, 100, 20),
             fun = "pct",
             legend_position = "top")
```

However, these rather minimal adjustments usually don't cover all the things a user wants to modify. Therefore, we provide two additional functions to adjust more aesthetics: `visR::define_theme()` and `visR::apply_theme()`. The first one provides an easy wrapper to create a nested list of lists with styling options that is then applied to the plot by the second function.
# New themes

## Defining a `visR_theme` using `visR::define_theme()`

If no further options are provided to `visR::define_theme()`, it nonetheless returns a very minimal list of reasonable defaults.

```{r visr-define_theme-empty}
visR::define_theme()
```

However, this function also takes several other styling options. The currently supported ones are displayed below. One particular use case we had in mind when writing this function was the option to define the colours for the different strata once and then not have to worry about whether all of them are present in a given plot.

```{r visr-define_theme-nonempty}
theme <- visR::define_theme(
  strata = list(
    "Sex" = list("Female" = "red", "Male" = "blue"),
    "ph.ecog" = list("0" = "cyan", "1" = "purple", "2" = "brown")
  ),
  fontsizes = list(
    "axis" = 12,
    "ticks" = 10,
    "legend_title" = 10,
    "legend_text" = 8
  ),
  fontfamily = "Helvetica",
  grid = list("major" = FALSE, "minor" = FALSE),
  #grid = TRUE/FALSE # <- can also be used instead of the named list above
  bg = "transparent",
  legend_position = "top"
)
```

## Apply styling using `visR::apply_theme()`

The `visR::apply_theme()` function offers the user two ways to style their plot. The most direct one is to just apply the function to a plot without specifying any options. This applies several reasonable defaults to the plot.

```{r visr-apply_theme-empty, warning=FALSE}
lung_suvival_object %>%
  visR::visr() %>%
  visR::apply_theme()
```

The second one is to apply a nested list of lists, ideally generated through `visR::define_theme()`, to a plot. This makes it possible to generate a detailed `visR_theme` object once and then apply it to one or several plots with a single line. These lists can also easily be saved and shared. The usage of the theme generated above is shown below.

```{r visr-apply_theme-nonempty, warning = FALSE, message=FALSE}
lung_suvival_object %>%
  visR::visr() %>%
  visR::apply_theme(theme)
```
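
Because the object returned by `visR::define_theme()` is essentially a nested named list, it can also be saved to disk and reused across sessions, for example with base R's `saveRDS()`/`readRDS()`. The chunk below is a minimal sketch (not evaluated; the file name is arbitrary).

```{r visr-save_theme, eval=FALSE}
# Save the theme once ...
saveRDS(theme, file = "visR_theme.rds")

# ... and reuse it in a later script or session
theme <- readRDS("visR_theme.rds")

lung_suvival_object %>%
  visR::visr() %>%
  visR::apply_theme(theme)
```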
/scratch/gouwar.j/cran-all/cranData/visR/inst/doc/Styling_KM_plots.Rmd
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----imports, echo=TRUE, warning=FALSE, message=FALSE------------------------- library(ggplot2) library(visR) ## ----globalSetup-------------------------------------------------------------- # Metadata Title DATASET <- paste0("NCCTG Lung Cancer Dataset (from survival package ", packageVersion("survival"), ")") # Save original options() old <- options() # Global formatting options options(digits = 3) # Global ggplot settings theme_set(theme_bw()) # Global table settings options(DT.options = list(pageLength = 10, language = list(search = 'Filter:'), scrollX = TRUE)) lung_cohort <- survival::lung # Change gender to be a factor and rename some variables to make output look nicer lung_cohort <- lung_cohort %>% dplyr::mutate(sex = as.factor(ifelse(sex == 1, "Male", "Female"))) %>% dplyr::rename(Age = "age", Sex = "sex", Status = "status", Days = "time") # Restore original options() options(old) ## ----table1_get_default------------------------------------------------------- # Select variables of interest and change names to look nicer lung_cohort_tab1 <- lung_cohort %>% dplyr::select(Age, Sex) # Create a table one tab1 <- visR::get_tableone(lung_cohort_tab1) # Render the tableone visR::render(tab1, title = "Overview over Lung Cancer patients", datasource = DATASET) ## ----table1_render_default---------------------------------------------------- # Use wrapper functionality to create and display a tableone visR::tableone(lung_cohort_tab1, title = "Overview over Lung Cancer patients", datasource = DATASET) ## ----table1_get_options------------------------------------------------------- # Create and render a tableone with a stratifier and without displaying the total visR::tableone(lung_cohort_tab1, strata = "Sex", overall = FALSE, title = "Overview over Lung Cancer patients", datasource = DATASET) ## ----table1_render_options_dt------------------------------------------------- # Create and render a tableone with with dt as an engine visR::tableone(lung_cohort_tab1, strata = "Sex", overall = FALSE, title = "Overview over Lung Cancer patients", datasource = DATASET, engine = "dt") ## ----table1_render_options_kable---------------------------------------------- # Create and render a tableone with with kable as an engine and html as output format visR::tableone(lung_cohort_tab1, strata = "Sex", overall = FALSE, title = "Overview over Lung Cancer patients", datasource = DATASET, engine = "kable", output_format="html") ## ----km_est------------------------------------------------------------------- # Select variables of interest and change names to look nicer lung_cohort_survival <- lung_cohort %>% dplyr::select(Age, Sex, Status, Days) # For the survival estimate, the censor must be 0 or 1 lung_cohort_survival$Status <- lung_cohort_survival$Status - 1 # Estimate the survival curve lung_suvival_object <- lung_cohort_survival %>% visR::estimate_KM(strata = "Sex", CNSR = "Status", AVAL = "Days") lung_suvival_object ## ----km_tab------------------------------------------------------------------- # Create a risktable rt <- visR::get_risktable(lung_suvival_object) # Display the risktable visR::render(rt, title = "Overview over survival rates of Lung Cancer patients", datasource = DATASET) ## ----km_tab_options_1--------------------------------------------------------- # Display a summary of the survival estimate visR::render(lung_suvival_object %>% visR::get_summary(), 
title = "Summary", datasource = DATASET) ## ----km_tab_options_2--------------------------------------------------------- # Display test statistics associated with the survival estimate visR::render(lung_suvival_object %>% visR::get_pvalue(), title = "P-values", datasource = DATASET) ## ----km_tab_options_3--------------------------------------------------------- # Display qunatile information of the survival estimate visR::render(lung_suvival_object %>% visR::get_quantile(), title = "Quantile Information", datasource = DATASET) ## ----km_tab_options_4--------------------------------------------------------- # Display a cox model estimate associated with the survival estimate visR::render(lung_suvival_object %>% visR::get_COX_HR(), title = "COX estimate", datasource = DATASET) ## ----km_plot_1---------------------------------------------------------------- # Create and display a Kaplan-Meier from the survival object gg <- visR::visr(lung_suvival_object) gg ## ----km_plot_2---------------------------------------------------------------- # Add a confidence interval to the Kaplan-Meier and display the plot gg %>% visR::add_CI() ## ----km_plot_3---------------------------------------------------------------- # Add a confidence interval and the censor ticks to the Kaplan-Meier and display the plot gg %>% visR::add_CI() %>% visR::add_CNSR(shape = 3, size = 2) ## ----km_add------------------------------------------------------------------- # Add a confidence interval and the censor ticks and a risktable to the Kaplan-Meier and display the plot gg %>% visR::add_CI() %>% visR::add_CNSR(shape = 3, size = 2) %>% visR::add_risktable() ## ----cuminc_1----------------------------------------------------------------- visR::estimate_cuminc( tidycmprsk::trial, strata = "trt", CNSR = "death_cr", AVAL = "ttdeath" ) %>% visR::visr( legend_position = "bottom", x_label = "Months from Treatment", y_label = "Risk of Death" ) %>% visR::add_CI() %>% visR::add_risktable(statlist = c("n.risk", "cum.event"))
/scratch/gouwar.j/cran-all/cranData/visR/inst/doc/Time_to_event_analysis.R
--- title: "Survival Analysis with visR" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Survival Analysis with visR} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction This tutorial illustrates a typical use case in clinical development - the analysis of time to a certain event (e.g., death) in different groups. Typically, data obtained in randomized clinical trials (RCT) can be used to estimate the overall survival of patients in one group (e.g., treated with drug X) vs another group (e.g., treated with drug Y) and thus determine if there is a treatment difference. For a more thorough introduction to Survival Analysis, we recommend the following [tutorial](https://bioconnector.github.io/workshops/r-survival.html). In this example, we will work with patient data from NCCTG Lung Cancer dataset that is part of the `survival` package. Another vignette presents an example using a data set following the [CDISC ADaM standard](https://www.cdisc.org/standards/foundational/adam/adam-basic-data-structure-bds-time-event-tte-analyses-v1-0). ```{r imports, echo=TRUE, warning=FALSE, message=FALSE} library(ggplot2) library(visR) ``` ## Global Document Setup ```{r globalSetup} # Metadata Title DATASET <- paste0("NCCTG Lung Cancer Dataset (from survival package ", packageVersion("survival"), ")") # Save original options() old <- options() # Global formatting options options(digits = 3) # Global ggplot settings theme_set(theme_bw()) # Global table settings options(DT.options = list(pageLength = 10, language = list(search = 'Filter:'), scrollX = TRUE)) lung_cohort <- survival::lung # Change gender to be a factor and rename some variables to make output look nicer lung_cohort <- lung_cohort %>% dplyr::mutate(sex = as.factor(ifelse(sex == 1, "Male", "Female"))) %>% dplyr::rename(Age = "age", Sex = "sex", Status = "status", Days = "time") # Restore original options() options(old) ``` ## Cohort Overview (Table one) Visualizing tables, like the table one or risk tables, is a two-step process in visR . First, a data.frame (or tibble) is created by a `get_XXX()` function (e.g. `get_tableone()`). Secondly, the data.frame can be displayed by calling the function `render()`. The advantage of this process is that data summaries can be created, used and adjusted throughout an analysis, while at every step data summaries can be displayed or even be downloaded. Populations are usually displayed as a so-called table one. Function `get_tableone` creates a tibble that includes populations summaries. ```{r table1_get_default} # Select variables of interest and change names to look nicer lung_cohort_tab1 <- lung_cohort %>% dplyr::select(Age, Sex) # Create a table one tab1 <- visR::get_tableone(lung_cohort_tab1) # Render the tableone visR::render(tab1, title = "Overview over Lung Cancer patients", datasource = DATASET) ``` Function `render` nicely displays the tableone. Additionally, visR includes a wrapper function to create and display a `tableone` in only one function call. ```{r table1_render_default} # Use wrapper functionality to create and display a tableone visR::tableone(lung_cohort_tab1, title = "Overview over Lung Cancer patients", datasource = DATASET) ``` Creating and visualizing a tableone with default settings is very simple and can be done with one line of code. However, there are further customization options. 
In both the get and the wrapper functions, a stratifier can be defined and the column displaying total information can be removed.

```{r table1_get_options}
# Create and render a tableone with a stratifier and without displaying the total
visR::tableone(lung_cohort_tab1,
               strata = "Sex",
               overall = FALSE,
               title = "Overview over Lung Cancer patients",
               datasource = DATASET)
```

visR's `render` supports three different rendering engines to be as flexible as possible. By default, `render` uses `gt`. Additional engines are `datatable` (`dt`) to include easy downloading options...

```{r table1_render_options_dt}
# Create and render a tableone with dt as an engine
visR::tableone(lung_cohort_tab1,
               strata = "Sex",
               overall = FALSE,
               title = "Overview over Lung Cancer patients",
               datasource = DATASET,
               engine = "dt")
```

...and `kable` for flexible displaying in various output formats (`html` by default, `latex` supported).

```{r table1_render_options_kable}
# Create and render a tableone with kable as an engine and html as output format
visR::tableone(lung_cohort_tab1,
               strata = "Sex",
               overall = FALSE,
               title = "Overview over Lung Cancer patients",
               datasource = DATASET,
               engine = "kable",
               output_format = "html")
```

Called with `html` as an output format, an `html` view is displayed; called with `latex`, a string containing LaTeX code is printed.

## Time-to-event analysis

### Survival estimation

visR provides a wrapper function to estimate a Kaplan-Meier curve and several functions to visualize the results. This wrapper function is compatible with `%>%` and purrr::map functions without losing traceability of the dataset name.

```{r km_est}
# Select variables of interest and change names to look nicer
lung_cohort_survival <- lung_cohort %>%
  dplyr::select(Age, Sex, Status, Days)

# For the survival estimate, the censor must be 0 or 1
lung_cohort_survival$Status <- lung_cohort_survival$Status - 1

# Estimate the survival curve
lung_suvival_object <- lung_cohort_survival %>%
  visR::estimate_KM(strata = "Sex", CNSR = "Status", AVAL = "Days")
lung_suvival_object
```

### Survival visualization

There are two frequently used ways to present time-to-event data: as a risk table and as a Kaplan-Meier curve. In principle, visR allows you to either visualize a risk table and a Kaplan-Meier curve separately, or to combine both in one plot.

#### Displaying the risktable

Creating and visualizing a risk table separately works in the exact same way as for the tableone (above): First, `get_risktable()` creates a tibble with risk information that can still be changed. Secondly, the risk table can be rendered to be displayed.

```{r km_tab}
# Create a risktable
rt <- visR::get_risktable(lung_suvival_object)

# Display the risktable
visR::render(rt,
             title = "Overview over survival rates of Lung Cancer patients",
             datasource = DATASET)
```

The risktable is only one piece of information that can be extracted from a survival object with a `get_XXX()` function and then rendered.
```{r km_tab_options_1}
# Display a summary of the survival estimate
visR::render(lung_suvival_object %>% visR::get_summary(),
             title = "Summary",
             datasource = DATASET)
```

```{r km_tab_options_2}
# Display test statistics associated with the survival estimate
visR::render(lung_suvival_object %>% visR::get_pvalue(),
             title = "P-values",
             datasource = DATASET)
```

```{r km_tab_options_3}
# Display quantile information of the survival estimate
visR::render(lung_suvival_object %>% visR::get_quantile(),
             title = "Quantile Information",
             datasource = DATASET)
```

```{r km_tab_options_4}
# Display a Cox model estimate associated with the survival estimate
visR::render(lung_suvival_object %>% visR::get_COX_HR(),
             title = "Cox model estimate",
             datasource = DATASET)
```

#### Plotting the Kaplan-Meier

Alternatively, the survival data can be plotted as a Kaplan-Meier curve. In `visR`, a plot is in most cases a ggplot object and adapting the plot follows the general principle of creating a plot and then adding visual content step by step.

```{r km_plot_1}
# Create and display a Kaplan-Meier from the survival object
gg <- visR::visr(lung_suvival_object)
gg
```

```{r km_plot_2}
# Add a confidence interval to the Kaplan-Meier and display the plot
gg %>%
  visR::add_CI()
```

```{r km_plot_3}
# Add a confidence interval and the censor ticks to the Kaplan-Meier and display the plot
gg %>%
  visR::add_CI() %>%
  visR::add_CNSR(shape = 3, size = 2)
```

visR includes a wrapper function to create a risktable and then add it directly to a Kaplan-Meier plot.

```{r km_add}
# Add a confidence interval, the censor ticks and a risktable to the Kaplan-Meier and display the plot
gg %>%
  visR::add_CI() %>%
  visR::add_CNSR(shape = 3, size = 2) %>%
  visR::add_risktable()
```

## Competing Risks

In addition to classic right-censored data, the {visR} package supports the estimation of time-to-event outcomes in the presence of competing events. The package wraps the [{tidycmprsk}](https://mskcc-epi-bio.github.io/tidycmprsk/) package, and exports functions for cumulative incidence estimation and visualization.

The function `estimate_cuminc()` estimates the cumulative incidence of the competing event or outcome of interest. The syntax is nearly identical to `estimate_KM()`; however, the outcome status variable (passed to the `CNSR=` argument) must be a factor where the first level indicates censoring, the second level the competing event of interest, and subsequent levels are the other competing events. Visualization functions, `visr()`, `add_CI()`, `add_CNSR()`, and `add_risktable()` share the same syntax as the Kaplan-Meier variants.

```{r cuminc_1}
visR::estimate_cuminc(
  tidycmprsk::trial,
  strata = "trt",
  CNSR = "death_cr",
  AVAL = "ttdeath"
) %>%
  visR::visr(
    legend_position = "bottom",
    x_label = "Months from Treatment",
    y_label = "Risk of Death"
  ) %>%
  visR::add_CI() %>%
  visR::add_risktable(statlist = c("n.risk", "cum.event"))
```
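
As noted above, `add_CNSR()` shares the same syntax for cumulative incidence plots. The sketch below simply extends the previous pipeline with censoring ticks, using the same `shape` and `size` arguments as in the Kaplan-Meier examples.

```{r cuminc_2}
visR::estimate_cuminc(
  tidycmprsk::trial,
  strata = "trt",
  CNSR = "death_cr",
  AVAL = "ttdeath"
) %>%
  visR::visr(
    legend_position = "bottom",
    x_label = "Months from Treatment",
    y_label = "Risk of Death"
  ) %>%
  visR::add_CI() %>%
  visR::add_CNSR(shape = 3, size = 2) %>%
  visR::add_risktable(statlist = c("n.risk", "cum.event"))
```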
/scratch/gouwar.j/cran-all/cranData/visR/inst/doc/Time_to_event_analysis.Rmd
/scratch/gouwar.j/cran-all/cranData/visR/vignettes/CDISC_ADaM.Rmd
/scratch/gouwar.j/cran-all/cranData/visR/vignettes/Consort_flow_diagram.Rmd
/scratch/gouwar.j/cran-all/cranData/visR/vignettes/Styling_KM_plots.Rmd
/scratch/gouwar.j/cran-all/cranData/visR/vignettes/Time_to_event_analysis.Rmd
#' Convert a data frame of counts to a data frame of cases
#'
#' The data frame must contain a column with frequencies (counts) as generated
#' by \code{as.data.frame()} from a contingency table.
#'
#' @param x a \code{data.frame} of counts generated from a contingency table.
#' @param countcol character string, name of the column of x containing the counts. Default name of the column is "Freq".
#' @return data frame of cases with one row per observation (total number of rows equals the sum of "Freq" in x) and one column per remaining (non-count) variable.
#' @examples
#' counts_to_cases(as.data.frame(HairEyeColor[,,1]),countcol="Freq")
#' @export counts_to_cases
#
counts_to_cases <- function(x, countcol = "Freq") {
  # Get the row indices to pull from x
  idx <- rep.int(seq_len(nrow(x)), x[[countcol]])

  # Drop count column
  x[[countcol]] <- NULL

  # Get the rows from x
  x[idx, ]
}
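# Added usage sketch (illustrative only): `HairEyeColor` ships with base R,
# so the conversion can be checked directly in an interactive session:
#
#   counts <- as.data.frame(HairEyeColor[,,1])   # columns Hair, Eye, Freq
#   cases  <- counts_to_cases(counts)            # one row per observation
#   nrow(cases) == sum(counts$Freq)              # TRUE
#   table(cases$Hair, cases$Eye)                 # reproduces the original table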
/scratch/gouwar.j/cran-all/cranData/visStatistics/R/counts_to_cases.R
# Internal helper: detach (and unload) a package from the search path.
# `pkg` can be passed unquoted (default) or as a character string when
# `character.only = TRUE`; the while-loop detaches the package even if it
# appears more than once on the search path.
detach_package <- function(pkg, character.only = FALSE) {
  if (!character.only) {
    pkg <- deparse(substitute(pkg))
  }
  search_item <- paste("package", pkg, sep = ":")
  while (search_item %in% search()) {
    detach(search_item, unload = TRUE, character.only = TRUE)
  }
}
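# Added usage sketch (illustrative only, assuming the package is currently attached):
#
#   library(Cairo)
#   detach_package(Cairo)                           # unquoted package name
#   detach_package("Cairo", character.only = TRUE)  # quoted package name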
/scratch/gouwar.j/cran-all/cranData/visStatistics/R/detach_package.R
#Helper function in visstat---- #' Selects columns defined by characters varsample and varfactor from a data.frame #' #'Selects columns defined by characters \code{varsample} and \code{varfactor} from \code{dataframe}, returns selected columns with their names. #' @param dataframe \code{data.frame} or \code{list} containing at least two columns with column headings of data type \code{character}. Data must be column wise ordered. #' @param varsample column name of dependent variable in dataframe, datatype \code{character} #' @param varfactor column name of independent variable in dataframe, datatype \code{character} #' @return selected columns, \code{sample}, \code{factor}, \code{name_of_sample} (character string equaling varsample), \code{name_of_factor} (character string equaling varsample) #' #' @examples #' get_samples_fact_inputfile(trees,"Girth","Height") #' @export get_samples_fact_inputfile = function(dataframe, varsample, varfactor) { # json input------ if (is.null(dim(dataframe))) #FALSE for csv { fulldata = dataframe data = dataframe$data data = as.data.frame(data) if ("matching" %in% names(dataframe) & varfactor == "match") { matched_selected_group0 = which(data$group0 == 1 & data$match == 1) matched_selected_group1 = which(data$group1 == 1 & data$match == 1) fact = c(rep(fulldata$group0name, length(matched_selected_group0)), rep(fulldata$group1name, length(matched_selected_group1))) fact = as.factor(fact) fullsample = data[, varsample] samples = fullsample[c(matched_selected_group0, matched_selected_group1)] name_of_factor = paste(fulldata$group0name, "and", fulldata$group1name) #name_of_factor="groups" name_of_sample = varsample # does not work on multiple matching criterias: # matchingCriteria=paste(tolower(paste(as.character(dataframe$matching),collapse =" ")),sep="") matchingCriteria = tolower(paste( apply(dataframe$matching, 1, function(x) paste(x, collapse = " ")), collapse = ', ' )) name_of_sample = paste(name_of_sample, "with match:", matchingCriteria) # json file with no matching criterion } else if ("matching" %in% names(dataframe) & varfactor != "match") { samples = data[, varsample] fact = data[, varfactor] name_of_sample = varsample name_of_factor = varfactor matchingCriteria = "" } else{ stop("code runs only on json files generated by Web server") } # csv input------ } else{ #Select samples and fact from data.frame dataframe----- samples = dataframe[, varsample] fact = dataframe[, varfactor] name_of_sample = varsample name_of_factor = varfactor matchingCriteria = "" } # samples are the two groups selected by the user in groups at the web surface mylist = list( "samples" = samples, "fact" = fact, "name_of_sample" = name_of_sample, "name_of_factor" = name_of_factor, "matchingCriteria" = matchingCriteria ) return(mylist) }
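# Added usage sketch (illustrative only), accessing the returned list for the
# `trees` example documented above:
#
#   res <- get_samples_fact_inputfile(trees, "Girth", "Height")
#   res$samples          # values of the dependent variable (Girth)
#   res$fact             # values of the independent variable (Height)
#   res$name_of_sample   # "Girth"
#   res$name_of_factor   # "Height"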
/scratch/gouwar.j/cran-all/cranData/visStatistics/R/get_samples_fact_inputfile.R
#' Cairo wrapper function
#'
#' Cairo wrapper function returning NULL if no \code{type} is specified
#'
#' \code{openGraphCairo()} is a wrapper around \code{Cairo()}. Differences to \code{Cairo()}:
#' a) it returns NULL without opening a device if no output \code{type} out of "png", "jpeg", "pdf", "svg", "ps" or "tiff"
#' is provided;
#' b) the \code{file} argument of the underlying Cairo function is generated by \code{file.path(fileDirectory, paste(fileName, ".", type, sep = ""))}.
#' @param width see \code{Cairo()}
#' @param height see \code{Cairo()}
#' @param fileName name of the file to be created, without the file extension ".\code{type}" and without the \code{fileDirectory}. Default file name "visstat_plot".
#' @param type Supported output types are "png", "jpeg", "pdf", "svg", "ps" and "tiff". See \code{Cairo()}
#' @param fileDirectory path of the directory where the plot is stored. Defaults to the current working directory.
#' @param pointsize see \code{Cairo()}
#' @param bg see \code{Cairo()}
#' @param canvas see \code{Cairo()}
#' @param units see \code{Cairo()}
#' @param dpi DPI used for the conversion of units to pixels. Default value 150.
#' @name openGraphCairo
#' @return NULL, if no \code{type} is specified. Otherwise see \code{Cairo()}
#' @examples
#'
#' ## adapted from example in \code{Cairo()}
#' openGraphCairo(fileName = "normal_dist", type = "pdf", fileDirectory = tempdir())
#' plot(rnorm(4000), rnorm(4000), col = "#ff000018", pch = 19, cex = 2)
#' dev.off() # creates the file "normal_dist.pdf" in the directory given by fileDirectory
#'
#' ## remove the plot from fileDirectory
#' file.remove(file.path(tempdir(), "normal_dist.pdf"))
#' @import Cairo
#'
#' @export openGraphCairo
openGraphCairo = function(width = 640,
                          height = 480,
                          fileName = NULL,
                          type = NULL,
                          fileDirectory = getwd(),
                          pointsize = 12,
                          bg = "transparent",
                          canvas = "white",
                          units = "px",
                          dpi = 150) {
  oldparCairo <- par(no.readonly = TRUE)
  oldparCairo$new = FALSE
  on.exit(par(oldparCairo))

  if (is.null(type)) {
    return()
  } else {
    # set default fileName to "visstat_plot"
    if (is.null(fileName)) {
      fileName = "visstat_plot"
    }
    fullfilename = paste(fileName, ".", type, sep = "")
    Cairofilename = file.path(fileDirectory, fullfilename)
    if (type == "png") {
      CairoPNG(filename = Cairofilename)
    } else if (type == "pdf") {
      CairoPDF(file = Cairofilename)
    } else if (type == "jpeg") {
      CairoJPEG(filename = Cairofilename)
    } else if (type == "tiff") {
      CairoTIFF(filename = Cairofilename)
    } else if (type == "svg") {
      CairoSVG(file = Cairofilename)
    } else if (type == "ps") {
      CairoPS(file = Cairofilename, family = "Helvetica")
    } else {
      warning("Chosen output type not supported. No graphics saved.")
      return()
    }
  }
}
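# Added usage sketch (illustrative only): if `fileName` is NULL, the default
# name "visstat_plot" is used; saveGraphVisstat() looks for exactly this file
# when its `oldfile` argument is left at NULL.
#
#   openGraphCairo(type = "png", fileDirectory = tempdir())
#   plot(1:10)
#   dev.off()
#   file.exists(file.path(tempdir(), "visstat_plot.png"))  # TRUE
#   file.remove(file.path(tempdir(), "visstat_plot.png"))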
/scratch/gouwar.j/cran-all/cranData/visStatistics/R/openGraphCairo.R
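# A minimal sketch of openGraphCairo() with a type other than "pdf" (assuming the
# visStatistics and Cairo packages are installed). With type = NULL the wrapper returns
# NULL and no device is opened; with a supported type it opens a Cairo device writing to
# file.path(fileDirectory, paste0(fileName, ".", type)).
library(visStatistics)
openGraphCairo(fileName = "hist_example", type = "svg", fileDirectory = tempdir())
hist(rnorm(100))
dev.off()
file.exists(file.path(tempdir(), "hist_example.svg"))  # TRUE if the device was opened
file.remove(file.path(tempdir(), "hist_example.svg"))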
#' Saves Graphical Output
#'
#' Closes all graphical devices with \code{dev.off()} and saves the output only if both \code{fileName} and \code{type} are provided.
#'
#' @param fileName name of the file to be created in directory \code{fileDirectory}, without the file extension ".\code{type}".
#' @param type see \code{Cairo()}.
#' @param fileDirectory path of the directory where the graphic is stored. Default setting: current working directory.
#' @param oldfile old file of the same name to be overwritten.
#'
#' @return NULL if no \code{type} or \code{fileName} is provided; TRUE if the graph is created.
#' @examples
#' # very simple KDE (adapted from example in \code{Cairo()})
#' openGraphCairo(type = "png", fileDirectory = tempdir())
#' plot(rnorm(4000), rnorm(4000), col = "#ff000018", pch = 19, cex = 2)
#' # save file "norm.png" in directory specified in fileDirectory
#' saveGraphVisstat("norm", type = "png", fileDirectory = tempdir())
#' file.remove(file.path(tempdir(), "norm.png")) # remove file "norm.png" from fileDirectory
#'
#' @export saveGraphVisstat
#'
saveGraphVisstat = function(fileName = NULL,
                            type = NULL,
                            fileDirectory = getwd(),
                            oldfile = NULL) {
  # return if no fileName is provided
  if (is.null(fileName)) {
    # message("saveGraphVisstat() returns NULL if file=NULL")
    return()
  } else if (is.null(type)) {
    # message("saveGraphVisstat() returns NULL if type=NULL")
    return()
  } else if (is.null(oldfile)) {
    dummy_name = "visstat_plot"
    oldPlotName = paste(dummy_name, ".", type, sep = "")
    oldfile = file.path(fileDirectory, oldPlotName)
  }
  while (!is.null(dev.list())) dev.off() # closes all devices
  # overwrite existing files
  file2 = gsub("[^[:alnum:]]", "_", fileName) # replaces non-alphanumeric characters with underscore
  file3 = gsub("_{2,}", "_", file2)
  newFileName = paste0(file3, ".", type)
  Cairofile = file.path(fileDirectory, newFileName)
  file.copy(oldfile, Cairofile, overwrite = T)
  if (file.exists(oldfile)) {
    file.remove(oldfile)
  }
}
/scratch/gouwar.j/cran-all/cranData/visStatistics/R/saveGraphVisstat.R
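# A minimal sketch of the intended open-plot-save workflow (assumptions as in the examples
# above): open a Cairo device with the default file name "visstat_plot", draw, then rename
# the output and close all devices via saveGraphVisstat().
library(visStatistics)
openGraphCairo(type = "png", fileDirectory = tempdir())
boxplot(count ~ spray, data = InsectSprays)
saveGraphVisstat("insect_boxplot", type = "png", fileDirectory = tempdir())
file.exists(file.path(tempdir(), "insect_boxplot.png"))  # TRUE if the copy succeeded
file.remove(file.path(tempdir(), "insect_boxplot.png"))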
# MIT License----- #Copyright (c) 2021 Sabine Schilling # Plotting functions---- # Testing for normality and visualization ---- test_norm_vis = function(x, y_axis_hist = c(0, 0.04)) { #store default graphical parameters------ oldparnormvis <- par(no.readonly = TRUE) on.exit(par(oldparnormvis)) par(mfrow = c(1, 2), oma = c(0, 0, 3, 0)) #Remove NA from x x <- x[!is.na(x)] n = length(x) norm_dens = function(z) { dnorm(z, mean(x), sd(x)) } ymax = max(norm_dens(x)) #Plot histogramm of raw data otto = hist( x, freq = FALSE, col = "grey", breaks = "Sturges", xlim = c( mean(x, na.rm = T) - 5 * sd(x, na.rm = T), mean(x, na.rm = T) + 5 * sd(x, na.rm = T) ), ylim = c(0, 1.2 * ymax) ) maxhist = max(otto$density) #normal distribution with mean and sd of given distribution curve(norm_dens, col = "red", add = TRUE, lwd = 2) #par(new = TRUE) #the next high-level plotting command does not clean the frame before drawing #as if it were on a new device. lines(density(x),col = "blue") legend( "topright", c("fitted", "estimated"), lty = 1, lwd = 2, col = c("red", "blue"), bty = "n" ) box() #frame around current plot qqnorm(x) qqline(x, col = "red", lwd = 2) KS = ad.test(x) p_KS = signif(KS$p.value, 2) SH = shapiro.test(x) p_SH = signif(SH$p.value, 2) mtext( paste( "Shapiro-Wilk: p = ", p_SH, "\n Anderson-Darling: p = ", p_KS, "\n Nullhypothesis: Data is normally distributed" ), outer = TRUE ) my_list = list("Anderson-Darling" = KS, "Shapiro" = SH) return(my_list) } ###### Two-Sample t-Test ############################### two_sample_tTest = function(samples, fact, alternative = c("two.sided", "less", "greater"), mu = 0, paired = FALSE, var.equal = FALSE, conf.level = 0.95, samplename = "", factorname = "") { oldpar <- par(no.readonly = TRUE) on.exit(par(oldpar)) alternative <- match.arg(alternative) if (!missing(mu) && (length(mu) != 1 || is.na(mu))) return(warning("'mu' must be a single number")) if (!missing(conf.level) && (length(conf.level) != 1 || !is.finite(conf.level) || conf.level < 0 || conf.level > 1)) return(warning("'conf.level' must be a single number between 0 and 1")) alpha = 1 - conf.level levels = unique(sort(fact)) twosamples = create_two_samples_vector(samples, fact) x = twosamples$sample1and2 x1 = twosamples$sample1 x2 = twosamples$sample2 #Check normality of both samples----- p1 = test_norm(twosamples$sample1) p2 = test_norm(twosamples$sample2) #margins of y -axis lower = 0.05 upper = 0.1 margins = calc_min_max_of_y_axis(x, lower, upper) mi = margins[[1]] ma = margins[[2]] x = cbind(x, factor(c(rep(1, length( x1 )), rep(2, length( x2 ))))) par(oma = c(0, 0, 3, 0)) b = boxplot( samples ~ fact, lwd = 0.5, xlab = factorname, ylab = samplename, ylim = c(mi, ma), varwidth = T, col = colorscheme(1) ) stripchart( samples ~ fact, vertical = TRUE, xlim = c(0, 3), ylim = c(mi, ma), #col = c("grey70", "grey80"), col = colorscheme(2), axes = FALSE, method = "jitter", add = TRUE ) axis(side = 2) axis(side = 1, at = c(1, 2), labels = levels) box() points(1, mean(x1), col = 2, pch = 1, lwd = 3) points(2, mean(x2), col = 2, pch = 1, lwd = 3) alpha_c = 1 - sqrt(1 - alpha) #two tests alpha<-0.025, corrects for pairwise testing by increasing the confidence interval from e.g. 
95 % to 97.5 % #corected confidence intervals taking intou co correction1 = qt(1 - 0.5 * alpha_c, length(x1) - 1) * sd(x1) / sqrt(length(x1)) correction2 = qt(1 - 0.5 * alpha_c, length(x2) - 1) * sd(x2) / sqrt(length(x2)) arrows( 1, mean(x1, na.rm = T) + correction1, 1, mean(x1, na.rm = T) - correction1, angle = 90, code = 3, #halbes Konfidenzintervall col = 2, lty = 1, lwd = 2, length = 0.1 ) arrows( 2, mean(x2) + correction2, 2, mean(x2) - correction2, angle = 90, code = 3, col = 2, lty = 1, lwd = 2, length = 0.1 ) abline( h = mean(x1, na.rm = T) + correction1, col = "grey30", lty = 2, lwd = 1 ) abline( h = mean(x1, na.rm = T) - correction1, col = "grey30", lty = 2, lwd = 1 ) text(1:length(b$n), c(ma, ma), paste("N=", b$n)) t = t.test( x1, x2, alternative = alternative, conf.level = conf.level, paired = FALSE, var.equal = FALSE, na.action = na.omit ) p_value = t$p.value p_value = signif(p_value, 3) if (alternative == "two.sided") { ah = "equals" } else{ ah = alternative } compare = side_of_nh(alternative) mtext( paste( t$method, "p value = ", p_value, "null hypothesis:", "\n mean", samplename, "of", factorname, unique(fact)[1], compare, "mean", samplename, "of", factorname, unique(fact)[2] ) ) my_list <- list( "dependent variable (response)" = samplename, "indepedent variables (parameters)" = unique(fact), "t-test-statistics" = t, "Shapiro-Wilk-test_sample1" = p1, "Shapiro-Wilk-test_sample2" = p2 ) return(my_list) } # Two-Sample Wilcoxon-Test ############################### #One function with flags for greater, less, two sided and notch two_sample_WilcoxonTest = function(samples, fact, alternative = c("two.sided", "less", "greater"), conf.level = 0.95, notchf = F, samplename = "", factorname = "", cex = 1) { oldparwilcox <- par(no.readonly = TRUE) #make a copy of current values on.exit(par(oldparwilcox)) alternative <- match.arg(alternative) #Error handling ---- if (!((length(conf.level) == 1L) && is.finite(conf.level) && (conf.level > 0) && (conf.level < 1))) return(warning("'conf.level' must be a single number between 0 and 1")) if (!is.numeric(samples)) return(warning("'samples' must be numeric")) if (!is.null(fact)) { if (!is.factor(fact)) return(warning("'fact' must be factorial")) } #Store default graphical parameter alpha = 1 - conf.level #Define color palette colortuple2 = colorscheme(2) # Create to numeric vectors twosamples = create_two_samples_vector(samples, fact) x = twosamples$sample1and2 x1 = twosamples$sample1 x2 = twosamples$sample2 upper = 0.2 lower = 0.05 res = calc_min_max_of_y_axis(x, upper, lower) mi = res[[1]] ma = res[[2]] x = cbind(x, factor(c(rep(1, length( x1 )), rep(2, length( x2 ))))) b <- boxplot(samples ~ fact, plot = 0) #holds the counts par(oma = c(0, 0, 3, 0)) #links unten,... 
stripchart( samples ~ fact, vertical = TRUE, xlim = c(0, 3), #ylim = c(mi, ma), method = "jitter", col = colorscheme(2), ylim = c(0, ma), ylab = samplename, xlab = factorname ) boxplot( samples ~ fact, notch = notchf, varwidth = T, col = colorscheme(1), ylim = c(0, ma), add = T ) #text(1:length(b$n), b$stats[5,]+1, paste("n=", b$n)) text(1:length(b$n), c(ma, ma), paste("N =", b$n)) t = wilcox.test(samples ~ fact, alternative = alternative, na.action = na.omit) p_value = t$p.value #p_value = signif(p_value,5) p_value = formatC(signif(p_value, digits = 2)) compare = side_of_nh(alternative) if (factorname == "match") { prefix = "of matched" } else{ prefix = character() } mtext( paste( t$method, "p=value = ", p_value, " null hypothesis: \n median", samplename, "of", prefix, unique(fact)[1], compare, "median", samplename, prefix, unique(fact)[2] ) , cex = cex, outer = TRUE ) my_list <- list( "dependent variable (response)" = samplename, "indepedent variables (parameters)" = unique(fact), "statsWilcoxon" = t, "statsBoxplot" = b ) return(my_list) } # Two-Sample F-Test ############################### #subtract means; two lines according to variances. two_sample_FTest = function(samples, fact, conf.int = 0.95, alternative = "two.sided") { # if (missing(conf.int)) conf.int = 0.95 # if (missing(alternative)) alternative = "two.sided" #Store default graphical parameter oldparftest <- par(no.readonly = TRUE) on.exit(par(oldparftest)) alpha = 1 - confint levels = unique(sort(fact)) x1 = samples[fact == levels[1]] x2 = samples[fact == levels[2]] x1 = x1 - mean(x1, na.rm = T) x2 = x2 - mean(x2) x = c(x1, x2) spread = max(x) - min(x) spread = max(spread, var(x1), var(x2)) mi = min(x) - 0.3 * spread ma = max(x) + 0.3 * spread x = cbind(x, factor(c(rep(1, length( x1 )), rep(2, length( x2 ))))) par(oma = c(0, 0, 3, 0)) stripchart( x[, 1] ~ x[, 2], vertical = TRUE, xlim = c(0.5, 3), ylim = c(mi, ma), col = c("grey70", "grey80"), ylab = "centered samples", xlab = "", axes = FALSE ) axis(side = 2) axis(side = 1, at = c(1, 2), labels = levels) box() lines( x = c(1.1, 1.1), y = c(-0.5 * var(x1), 0.5 * var(x1)), col = "blue", lwd = 5 ) lines( x = c(1.9, 1.9), y = c(-0.5 * var(x2), 0.5 * var(x2)), col = "blue", lwd = 5 ) legend( "topright", inset = 0.05, c("variances"), col = c("blue"), lwd = 2 ) t = var.test(x1, x2, alternative = alternative) p_value = t$p.value p_value = signif(p_value, 3) mtext( paste( "Two Sample F-Test (", alternative, "): P = ", p_value, "\n Confidence Level = ", 1 - alpha ), outer = TRUE ) } #chi squared Test ---- # vis_chi_squared_test: implemented in vis_samples_fact ----- vis_chi_squared_test = function(samples, fact, samplename, factorname, cex = 1) { oldparchi <- par(no.readonly = TRUE) on.exit(par(oldparchi)) colortuple = colorscheme(1) ColorPalette = colorscheme(3) if (missing(samplename)) samplename = character() if (missing(factorname)) factorname = character() counts = makeTable(samples, fact, samplename, factorname) check_assumptions_chi = check_assumptions_count_data(samples, fact) if (check_assumptions_chi == FALSE) { fisher_chi = counts return(fisher_chi) } else{ row_sum = rowSums(counts) col_sum = colSums(counts) count_labels = dimnames(counts)[2] count_labels = as.character(unlist(count_labels)) category_names = dimnames(counts)[1] category_names = as.character(unlist(category_names)) norm_counts = (counts / row_sum) * 100 #100 %percentage in each group max_val_y = max(norm_counts, na.rm = T) #col_vec_browser=c(colortuple,rainbow(nrow(counts)-2, s = 0.5)) if (nrow(counts) < 
(length(ColorPalette) + 2)) { col_vec_browser = c(colortuple, head(ColorPalette, n = nrow(counts) - 2)) } else{ col_vec_browser = c(colortuple, rainbow(nrow(counts) - 2, s = 0.4,alpha=1)) } # x_val = seq(-0.5, ncol(counts) + 0.5, 1) # y_val = c(0, norm_counts[1, ], 0) #creates new plot for barplot par(mfrow = c(1, 1),oma = c(0, 0, 3, 0)) maxlabels = length(levels(samples)) if (maxlabels > 7 | grepl("basis", samplename) | grepl("source", samplename) | grepl("basis", factorname) | grepl("source", factorname) | grepl("genotyped", samplename) | grepl("genotyped", factorname)) { labelsize = 0.3 * cex } else if (maxlabels > 5) { labelsize = 0.7 * cex } else{ labelsize = cex } fisher_chi = fisher_chi(counts) #checks if Cochran requirements for chi2 are met, if not: only fisher exact test allowed titletext = paste(fisher_chi$method, ": p-value =", signif(fisher_chi$p.value, 3), sep = "") if (nrow(counts) > 3) { ma = max(1.3 * max_val_y) legendsize = 0.7 * cex } else{ ma = ma = max(1.1 * max_val_y) legendsize = cex } barplot( norm_counts, names.arg = count_labels, xlim = c(-0.5, ncol(counts) + 1), ylim = c(0, ma), width = 1 / (nrow(counts) + 1), space = c(0, 1), col = col_vec_browser, ylab = "%", xlab = samplename, beside = TRUE, cex.axis = 1, cex.names = labelsize #size of labels of barplot ) box() mtext(titletext) category_names = as.character(category_names) legend( "topright", inset = 0.05, category_names, col = col_vec_browser, bty = 'n', lwd = 2, cex = legendsize ) return(fisher_chi) } } ###### Visualize ANOVA ############################### ## performs ANOVA, oneway test and post-hoc t.test vis_anova = function(samples, fact, conf.level = 0.95, samplename = "", factorname = "", cex = 1) { oldparanova <- par(no.readonly = TRUE) on.exit(par(oldparanova)) alpha = 1 - conf.level samples3 = na.omit(samples) fact <- subset(fact,!is.na(samples)) samples = samples3 n_classes = length(unique(fact)) sdna = function(x) { sd(x, na.rm = T) } meanna = function(x) { mean(x, na.rm = T) } s = tapply(samples, fact, sdna) m = tapply(samples, fact, meanna) samples_per_class = integer(n_classes) for (i in 1:n_classes) { samples_per_class[i] = sum(fact == unique(fact)[i]) } an = aov(samples ~ fact) summaryAnova = summary(an) oneway = oneway.test(samples ~ fact) maximum = max(samples, na.rm = T) minimum = min(samples, na.rm = T) spread = maximum - minimum mi = minimum - 0.1 * spread ma = maximum + 0.4 * spread par(mfrow = c(1, 1), oma = c(0, 0, 3, 0)) stripchart( samples ~ fact, vertical = TRUE, xlim = c(0, n_classes + 1), ylim = c(mi, ma), col = rep("grey30", n_classes), ylab = samplename, xlab = factorname, las = 2 ) # sd: for (i in 1:n_classes) { lines( x = c(i - 0.2, i - 0.2), y = c(m[[i]] - s[[i]], m[[i]] + s[[i]]), col = colors()[131], lwd = 5 ) } for (i in 1:n_classes) { lines( x = c(i - 0.1, i + 0.1), y = c(m[[i]], m[[i]]), col = colors()[552], lwd = 3 ) arrows( i, m[[i]] + qt(1 - 0.025, samples_per_class[i] - 1) * s[[i]] / sqrt(samples_per_class[i]), i, m[[i]] - qt(1 - 0.025, samples_per_class[i] - 1) * s[[i]] / sqrt(samples_per_class[i]), angle = 90, code = 3, col = colors()[552], lty = 1, lwd = 2, length = 0.1 ) } tuk = TukeyHSD(an) s = multcompLetters(tuk[[1]][, 4], threshold = alpha) ord = c() v = attributes(s$Letters)$names f_levels = sort(unique(fact)) for (i in 1:n_classes) { ord[i] = which(v == f_levels[i]) } text(seq(1:n_classes + 1), mi, s$Letters[ord], col = colors()[81], lwd = 2) mtext(paste( "ANOVA: P = ", signif(summaryAnova[[1]][["Pr(>F)"]][[1]], 3), "\n", "OneWay: P = ", 
signif(oneway$p.value, 3) ), outer = TRUE) legend( "top", inset = 0.05, horiz = F, c("mean +- sd ", "mean with 95% conf. intervall"), col = c(colors()[131], colors()[552]), bty = 'n', lwd = 3 ) my_list <- list( "ANOVA" = summaryAnova, "oneway_test" = oneway, "adjusted_p_values_t_test" = tuk, "conf.level" = conf.level ) return(my_list) } ## Visualize ANOVA assumptions---- ### Header vis_anova_asumptions ----- #' Testing ANOVA assumptions #' #' \code{vis_anova_assumptions} checks for normality of the standardised residuals of the anova both graphically by qq-plots as well as performing #' the Shapiro-Wilk-test \code{shapiro.test} and the Anderson-Darling-Test \code{ad.test}. #' \code{aov} further tests the homoscedacity of each factor level in \code{fact} with the \code{bartlett.test}. #' #' @param samples vector containing dependent variable, datatype numeric #' @param fact vector containing independent variable, datatype factor #' @param conf.level confidence level, 0.95=default #' @param samplename name of sample used in graphical output, dataype character , ""=default #' @param factorname name of sample used in graphical output, dataype character, ""=default #' @param cex number indicating the amount by which plotting text and symbols should be scaled relative to the default. 1=default, 1.5 is 50\% larger, 0.5 is 50\% smaller, etc. #' #' @return my_list: list containing the test statistics of the anova #' \code{aov(samples~fact)},\code{bartlett.test(samples~fact)} and the tests of normality of the standardized residuals of aov, \code{ks_test} and \code{shapiro_test} #' @examples #'ToothGrowth$dose=as.factor(ToothGrowth$dose) #'vis_anova_assumptions(ToothGrowth$len, ToothGrowth$dose) #' #'vis_anova_assumptions(ToothGrowth$len, ToothGrowth$supp) #'vis_anova_assumptions(iris$Petal.Width,iris$Species) #' @export vis_anova_assumptions vis_anova_assumptions = function(samples, fact, conf.level = 0.95, samplename = "", factorname = "", cex = 1) { oldparanovassum <- par(no.readonly = TRUE) on.exit(par(oldparanovassum)) samples3 = na.omit(samples) fact <- subset(fact,!is.na(samples)) samples = samples3 anova = aov(samples ~ fact) summary_anova = summary(anova) par(mfrow = c(1, 2), oma = c(0, 0, 3, 0)) plot(anova$fitted, rstandard(anova), main = "std. Residuals vs. Fitted") abline(h = 0, col = 1, lwd = 2) qqnorm(rstandard(anova)) qqline(rstandard(anova), col = "red", lwd = 2) par(mfrow = c(1, 1)) #check for normality of standardized residuals if (length(anova)>7){ ad_test = ad.test(rstandard(anova)) p_AD = signif(ad_test$p.value, 3)} else{ ad_test="Anderson-Darling test requires sample size of at lest 7." 
p_AD=NA } shapiro_test = shapiro.test(rstandard(anova)) p_SH = shapiro_test$p.value bartlett_test = bartlett.test(samples ~ fact) p_bart = bartlett_test$p.value mtext( paste( "Check for homogeneity of variances:Bartlett Test, p = ", signif(p_bart, 2), "\n Check for normality of standardized residuals:\n Shapiro-Wilk: p = ", signif(p_SH, 2), "\n Anderson-Darling: p = ", signif(p_AD, 2) ), outer = TRUE ) my_list <- list( "shapiro_test" = shapiro_test, "ad_test" = ad_test, "summary_anova" = summary_anova, "bartlett_test"=bartlett_test ) return(my_list) } ###### Visualize Kruskal_Wallis ############################### ## performs Kruskal Wallis and post-hoc Wilcoxon: vis_Kruskal_Wallis_clusters = function(samples, fact, conf.level = 0.95, samplename = "", factorname = "", cex = 1, notch = F) { oldparkruskal <- par(no.readonly = TRUE) on.exit(par(oldparkruskal)) alpha = 1 - conf.level #remove rows with NAs in samples samples3 = na.omit(samples) fact <- subset(fact,!is.na(samples)) samples = samples3 n_classes = length(unique(fact)) #define color scheme dependent on number of classes mc = rainbow(n_classes,alpha = 1) #mc=ColorPalette(n_classes) s = tapply(samples, fact, sd) m = tapply(samples, fact, mean) samples_per_class = c() for (i in 1:n_classes) { samples_per_class[i] = sum(fact == unique(fact)[i]) } kk = kruskal.test(samples ~ fact) extramargin = 0.1 margins = calc_min_max_of_y_axis(samples, extramargin, extramargin) mi = margins[[1]] ma = margins[[2]] par(mfrow = c(1, 1), oma = c(1, 0, 1, 0)) #oma: outer margin sout, west, north, east if (notch == TRUE) { b = boxplot( samples ~ fact, notch = TRUE, col = mc, las = 1, xlim = c(0, n_classes + 1), ylim = c(mi, ma), xlab = factorname, ylab = samplename, #changes group names size cex.lab = cex, cex.axis = 0.8 * cex, cex.main = cex, cex.sub = cex, boxwex = 0.5 ) } else { b = boxplot( samples ~ fact, notch = FALSE, col = mc, las = 1, xlim = c(0, n_classes + 1), ylim = c(mi, ma), xlab = factorname, ylab = samplename, boxwex = 0.5 ) } stripchart( samples ~ fact, vertical = TRUE, #method="jitter", col = rep("grey50", n_classes), # ylab = ylab, #xlab = xlab, las = 1, #horizontal legend, add = TRUE ) mtext(c("N = ", b$n), at = c(0.7, seq(1, n_classes)), las = 1) #nmber of cases in each group tuk = sig_diffs_nongauss(samples, fact) s = multcompLetters(tuk[[1]][, 4], threshold = alpha) ord = c() v = attributes(s$Letters)$names f_levels = sort(unique(fact)) for (i in 1:n_classes) { ord[i] = which(v == f_levels[i]) } (ma) text( seq(1:n_classes + 1), mi, s$Letters[ord], col = "darkgreen", cex = cex, lwd = 2 ) title(paste(kk$method, "p =", signif(kk$p.value, digits = 3)), outer = TRUE) my_list <- list("kruskal_wallis" = kk, "adjusted_p_values_wilcoxon" = tuk) return(my_list) } ##### Visualize Regression und trumpet curves ############################### vis_regr_trumpets = function(x, y, P) { oldparreg <- par(no.readonly = TRUE) on.exit(par(oldparreg)) reg = lm(y ~ x) summary(reg) ## error bands: y_conf_low = conf_band(x, reg, P, -1) y_conf_up = conf_band(x, reg, P, 1) ma = max(y, reg$fitted) mi = min(y, reg$fitted) spread = ma - mi lower = 0.1 upper = 0.4 margins = calc_min_max_of_y_axis(y, lower, upper) mi = margins[[1]] ma = margins[[2]] par(oma = c(0, 0, 5, 0)) plot(x, y, ylim = c(mi, ma)) points(x, reg$fitted, type = "l", col = 2, lwd = 2) points( x, y_conf_low, type = "l", lwd = 2, lty = 2, col = colors()[84] ) points( x, y_conf_up, type = "l", lwd = 2, lty = 2, col = colors()[84] ) legend( "bottomright", c("regr. 
line", paste("trumpet curves for gamma=", P)), lwd = 2, col = c(2, colors()[84], colors()[85]), lty = c(1, 2, 3), bty = "n" ) s = summary(reg) mtext( paste("Regression: ax + b. trumpet curves for gamma = ", P, "\n \n"), outer = TRUE, cex = 1.5 ) mtext( paste( "\n \n a = ", signif(reg$coefficients[2], 2), ", p = ", signif(s$coefficients[2, 4], 2), "\n b = ", signif(reg$coefficients[1], 2), ", p = ", signif(s$coefficients[1, 4], 2), "\n R^2 = ", signif(summary(reg)$r.squared, 4) ), outer = TRUE ) par(mfrow = c(1, 2), oma = c(0, 0, 3, 0)) plot( reg$fitted, residuals(reg), main = "Residuals vs. Fitted", xlab = "Fitted Values", ylab = "Residuals" ) abline(h = 0, col = 1, lwd = 2) qqnorm(residuals(reg), ylab = "Sample Quantiles of Residuals") qqline(residuals(reg), col = "red", lwd = 2) KS = ad.test(residuals(reg)) p_KS = signif(KS$p.value, 2) SH = shapiro.test(residuals(reg)) p_SH = signif(SH$p.value, 2) mtext( paste( "Residual Analysis\n Shapiro-Wilk: p = ", p_SH, "\n Anderson-Darling: P = ", p_KS ), outer = TRUE ) } ###### Visualize Residuals ############################### vis_resid = function(resid, fitted) { oldparresid <- par(no.readonly = TRUE) on.exit(par(oldparresid)) par(mfrow = c(1, 2), oma = c(0, 0, 3, 0)) plot(fitted, resid, main = "Residuals vs. Fitted") abline(h = 0, col = 1, lwd = 2) qqnorm(resid) qqline(resid, col = "red", lwd = 2) KS = ad.test(resid) p_KS = signif(KS$p.value, 2) SH = shapiro.test(resid) p_SH = signif(SH$p.value, 2) mtext( paste( "Residual Analysis\n Shapiro-Wilk: p = ", p_SH, "\n Anderson-Darling: p = ", p_KS ), outer = TRUE ) } ###### Visualize Regression ############################### vis_regression_assumptions = function(x, y, conf.level = 0.95) { oldparreg <- par(no.readonly = TRUE) on.exit(par(oldparreg)) alpha = 1 - conf.level # P = alpha #remove all NAs from both vectors xna <- x[!is.na(y) & !is.na(x)] yna <- y[!is.na(y) & !is.na(x)] x <- xna y <- yna ord = order(x) x = sort(x) y = y[ord] reg = lm(y ~ x) resreg = summary(reg) par(mfrow = c(1, 2), oma = c(0, 0, 4, 0)) plot( reg$fitted, rstandard(reg), main = "std. Residuals vs. Fitted", xlab = "Fitted Values", ylab = "Standardized Residuals" ) abline(h = 0, col = 1, lwd = 2) qqnorm(rstandard(reg), ylab = "Sample Quantiles of Std. 
Residuals") qqline(rstandard(reg), col = "red", lwd = 2) KS = ad.test(rstandard(lm(y ~ x))) p_KS = signif(KS$p.value, 2) SH = shapiro.test(rstandard(lm(y ~ x))) p_SH = signif(SH$p.value, 2) if (p_KS < alpha & p_SH < alpha) { mtext( paste( "Residual Analysis\n Shapiro-Wilk: p = ", p_SH, "\n Anderson-Darling: p = ", p_KS, "\n Requirements regression not met" ), outer = TRUE ) } else{ mtext( paste( "Residual Analysis\n Shapiro-Wilk: p = ", p_SH, "\n Anderson-Darling: p = ", p_KS ), outer = TRUE ) } my_list = list( "summary_regression" = resreg, "shapiro_test_residuals" = SH, "ad_test_residuals" = KS ) return(my_list) } vis_regression = function(x, y, conf.level = 0.05, name_of_factor = character(), name_of_sample = character()) { oldparregr <- par(no.readonly = TRUE) on.exit(par(oldparregr)) alpha = 1 - conf.level P = alpha #remove all NAs from both vectors xna <- x[!is.na(y) & !is.na(x)] yna <- y[!is.na(y) & !is.na(x)] x <- xna y <- yna ord = order(x) x = sort(x) y = y[ord] ylim = 1.1 * max(y, na.rm <- T) reg = lm(y ~ x) resreg = summary(reg) ## error bands: y_conf_low = conf_band(x, reg, P, -1) y_conf_up = conf_band(x, reg, P, 1) y_progn_low = progn_band(x, reg, P, -1) y_progn_up = progn_band(x, reg, P, 1) ma = max(y, reg$fitted, y_progn_up, na.rm <- T) mi = min(y, reg$fitted, y_progn_low, na.rm <- T) spread = ma - mi par(mfrow = c(1, 1), oma = c(0, 0, 5, 0)) plot( x, y, ylim = c(mi - 0.1 * spread, ma + 0.4 * spread), xlab = name_of_factor, ylab = name_of_sample ) points(x, reg$fitted, type = "l", col = colorscheme(2)[1], #dark green lwd = 2) #plot confidence band, lower boundary points( x, y_conf_low, type = "l", lwd = 2, lty = 2, col = colorscheme(1)[1] ) #plot confidence band, upper boundary points( x, y_conf_up, type = "l", lwd = 2, lty = 2, col = colorscheme(1)[1] ) #plot prognosis band, lower boundary points( x, y_progn_low, type = "l", lwd = 2, lty = 3, col = colorscheme(1)[2] ) #plot prognosis band, upper boundary points( x, y_progn_up, type = "l", lwd = 2, lty = 3, col = colorscheme(1)[2] ) legend( "topleft", horiz=TRUE, text.width = 0.75, c("regr. 
line", "confidence band", "prognosis band"), lwd = 2, #line width col = c(colorscheme(2)[1], colorscheme(1)[1],colorscheme(1)[2]), lty = c(1, 2, 3), #line types of legend bty = 'n', #no box around legend cex=0.75 #reduces the legend size ) s = summary(reg) b = confint(reg) KS = ad.test(rstandard(lm(y ~ x))) SH = shapiro.test(rstandard(lm(y ~ x))) mtext( paste( " regression: y = ax + b \n Confidence = ", alpha, ", a = ", signif(reg$coefficients[2], 2), ", interval [", signif(b[2, 1], 2), ",", signif(b[2, 2], 2), "]", ", p = ", signif(s$coefficients[2, 4], 2), "\n b = ", signif(reg$coefficients[1], 2), ", interval [", signif(b[1, 1], 2), ",", signif(b[1, 2], 2), "]", ", p = ", signif(s$coefficients[1, 4], 2), "\n adjusted R^2 = ", signif(s$adj.r.squared, 2) ), outer = TRUE ) my_list = list( "independent variable x"=name_of_factor, "dependent variable y"=name_of_sample, "summary_regression" = resreg, "shapiro_test_residuals" = SH, "ad_test_residuals" = KS ) return(my_list) } #Mosaic plots----- vis_mosaic = function(samples, fact, name_of_sample = character(), name_of_factor= character(), minperc = 0.05, numbers = TRUE) { oldparmosaic <- par(no.readonly = TRUE) oldparmosaic$new=FALSE on.exit(par(oldparmosaic)) if (missing(minperc)) { #minperc is the minimum percehntage a column has to contribute to be displayed minperc = 0.05 } if (missing(numbers)) { #numbers are shown in rectangle of category numbers = TRUE } counts = makeTable(samples, fact,name_of_sample, name_of_factor) check_assumptions = check_assumptions_count_data(samples, fact) if (check_assumptions == FALSE) { my_list =counts return(my_list) } else{ ##Mosaic plot ##The height of the box is the same for all boxes in the same row and #is equal to the total count in that row. # #The width of the box is the proportion of individuals in the row which fall into that cell. 
# #Full mosaic plot with all data only if unique number of samples and fact below threshold maxfactors = max(length(unique(samples)), length(unique(fact))) threshold = 6 if (length(unique(samples)) < threshold & length(unique(fact)) < threshold) { res = mosaic( counts, shade = TRUE, legend = TRUE, #shows pearsons residual pop = F #,main = titletext ) tab <- as.table(ifelse(counts < 0.005 * sum(counts), NA, counts)) #puts numbers on count if (numbers == TRUE) { labeling_cells(text = tab, margin = 0)(counts) } } else{ # ##Elimintate rows and columns distributing less than minperc total number of counts rowSum = rowSums(counts) colSum = colSums(counts) total = sum(counts) countscolumn_row_reduced = as.table(counts[which(rowSum > minperc * total), which(colSum > minperc * total)]) #check dimensions after reduction: must be a contingency table test = dim(as.table(countscolumn_row_reduced)) if (is.na(test[2])) { countsreduced = counts } else{ countsreduced = countscolumn_row_reduced } res = mosaic( countsreduced, shade = TRUE, legend = TRUE, cex.axis = 50 / maxfactors, labeling_args = list(gp_labels = (gpar( fontsize = 70 / maxfactors ))), # main = titletext, pop = F ) if (numbers == TRUE) { labeling_cells(text = countsreduced, margin = 0)(countsreduced) } } my_list <- list( "mosaic_stats" =res ) return(my_list) } } #Helper functions-------------------------------------- #Check for type of samples and fact type_sample_fact = function(samples, fact) { typesample = class(samples) typefactor = class(fact) listsf = list("typesample" = typesample, "typefactor" = typefactor) return(listsf) } #helper function odds ratio #calculation of odds ratio odds_ratio = function(a, b, c, d, alpha, zerocorrect) { attr(odds_ratio, "help") <- "odds_ratio calculates odds ratio OR=(a/b)/(c/d) and corresponding upper and lower confidence intervalls\n INPUT: a = group 1 positive, c = group 2 positive, b=group 1 non positive, d = group 2 non positive, 1-alpha: confidence level, default alpha=0.05" # "odds_ratio calculates odds ratio OR=(a/b)/(c/d) and corresponding upper and lower confidence intervalls\n # INPUT: a=number of positives in group 1, c=group 2 positive, b=group 1 non positive, d =group 2 non positive,default alpha=0.05, OR=(a/b)/(c/d)"\n # a,b,c,d can be vectors, elementwise calculation # if (missing(alpha)) { alpha = 0.05 } if (missing(zerocorrect)) { zerocorrect = TRUE } #odds ratio:=OR=a/b/(c/d) #eliminate columns with zeros #a=c=0 or b=d 0: no positive or no negative cases in both groups # Higgins and Green 2011: if (zerocorrect == TRUE) { #eliminate columns with zeros, if #a=c=0 or b=d=0: no positive or no control cases in BOTH groups # Higgins and Green 2011: doublezero = which(a == 0 & c == 0 | b == 0 & d == 0, arr.ind = T) a[doublezero] = NaN b[doublezero] = NaN c[doublezero] = NaN d[doublezero] = NaN #Where zeros cause problems with computation of effects or standard errors, 0.5 is added to all cells (a, b, c, d) singlezero = which(a == 0 | b == 0 | c == 0 | d == 0, arr.ind = T) a[singlezero] = a[singlezero] + 0.5 b[singlezero] = b[singlezero] + 0.5 c[singlezero] = c[singlezero] + 0.5 d[singlezero] = d[singlezero] + 0.5 } oddA = a / b oddB = c / d OR = oddA / oddB #confidence intervall #SE of ln(OR) SE = sqrt(1 / a + 1 / b + 1 / c + 1 / d) alpha = 0.05 zalph <- qnorm(1 - alpha / 2) logLOW = log(OR) - zalph * SE logUP = log(OR) + zalph * SE lowconf = exp(logLOW) #lower confidence upconf = exp(logUP) output = rbind(OR, lowconf, upconf, SE) my_list = ("odds_ratio_statistics" = output) return(my_list) } 
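# Usage sketch for the odds_ratio() helper defined above. It is not exported, so the
# ::: accessor is used here purely for illustration (assuming visStatistics is installed).
# The returned matrix has the rows OR, lower confidence limit, upper confidence limit and
# the standard error of log(OR), e.g. for 10/20 events in group 1 versus 5/40 in group 2:
visStatistics:::odds_ratio(a = 10, b = 20, c = 5, d = 40)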
#create sorted table makeTable = function(samples, fact, samplename, factorname) { counts = data.frame(fact, samples) colnames(counts) = c(factorname, samplename) counts2 = table(counts) #sort by column sums counts3 = counts2[, order(colSums(counts2), decreasing = T)] #sort by row sums counts4 = counts3[order(rowSums(counts3), decreasing = T),] #remove columnns with all entries zero counts4 = counts4[, colSums(counts4 != 0) > 0] return(counts4) } fisher_chi = function(counts) { #if Cochran requirements for chi2 not given: fisher test is performed # if more than 20% of cells have count smaller 5 # # if (any(counts == 0) #at least one cell with zero enry | sum(counts < 5) / length(counts) > 0.2# more than 20% of cells have count smaller 5 & #Fisher Tests breaks down for too large tables dim(counts)[2] < 7) { #fisher.test testFisherChi = fisher.test( counts, workspace = 1e9, simulate.p.value = T, hybrid = F, B = 1e5 ) } else{ testFisherChi = chisq.test(counts) } return(testFisherChi) } side_of_nh = function(alternative) { if (alternative == "less") { compare = c(">=") } else if (alternative == "greater") { compare = c("<=") } else compare = c("equals") return(compare) } create_two_samples_vector = function(samples, fact) { #Creates column vector built out of two samples #samples all in one column levels = unique(sort(fact)) #two levels if (length(levels) > 2) { return(warning( "warning: create_two_samples_vector: only two level input allowed" )) } else{ samples1 = samples[fact == levels[1]] samples1 <- samples1[!is.na(samples1)] if (length(samples1) == 0) { return(warning("each group needs at least one entry")) } else{ samples2 = samples[fact == levels[2]] samples2 <- samples2[!is.na(samples2)] if (length(samples2) == 0) { return(warning("each group needs at least one entry")) } else { x = c(samples1, samples2) my_list = list( "sample1" = samples1, "sample2" = samples2, "sample1and2" = x ) return(my_list) } } } } calc_min_max_of_y_axis = function(samples, lowerExtramargin, upperExtramargin) { maximum = max(samples, na.rm = T) minimum = min(samples, na.rm = T) spread = maximum - minimum min_y_axis = minimum - lowerExtramargin * spread max_y_axis = maximum + upperExtramargin * spread return(list(min_y_axis, max_y_axis)) } check_assumptions_shapiro = function(x) { x <- sort(x[complete.cases(x)]) n <- length(x) rng <- x[n] - x[1L]#1L is integer checkSize = !(is.na(n) || n < 3L || n > 5000L) #FALSE or TRUE if (checkSize == FALSE) { warning("sample size must be between 3 and 5000") return(FALSE) } if (rng == 0) { warning("all 'x' values are identical") return(FALSE) } return(TRUE) } check_assumption_shapiro_size_range_two_samples = function(x1, x2) { boolean1 = check_assumptions_shapiro(x1) boolean2 = check_assumptions_shapiro(x2) if (boolean1 == TRUE & boolean2 == TRUE) { return(TRUE) } else{ return(FALSE) } } check_assumptions_count_data = function(samples, fact) { counts = table(samples, fact) sr <- rowSums(counts) sc <- colSums(counts) counts <- counts[sr > 0, sc > 0, drop = FALSE] nr <- as.integer(nrow(counts)) nc <- as.integer(ncol(counts)) if (is.null(dim(counts))) { warning("no entries in count table ") return(FALSE) } else if (is.na(nr) || is.na(nc) || is.na(nr * nc)) { warning("invalid nrow or ncol in count data ", domain = NA) return(FALSE) } else if (nr <= 1L) { warning("need 2 or more non-zero row marginals") return(FALSE) } else if (nc <= 1L) { warning("need 2 or more non-zero column marginals") return(FALSE) } else{ return(TRUE) } } sig_diffs_nongauss <- function(samples, fact) { # 
function to produce a table similar to that produced for TukeyHSD, # but for non-normally distributed data # calculate p values for each data classification based on pairwise.wilcox.test ufactor = levels(fact) pwt = pairwise.wilcox.test(samples, fact) factormeans = matrix(0, length(ufactor), 1) for (ii in 1:length(ufactor)) { pos = which(fact == ufactor[ii]) factormeans[ii] = mean(samples[pos]) } # make a matrix with a row for every possible combination of # 2 data classifications and populate it with the calculated # p values xcomb = combn(length(ufactor), 2) tukeylike = matrix(0, ncol(xcomb), 4) colnames(tukeylike) <- c("diff", "lwr", "upr", "p adj") tukeynames = vector("list", ncol(xcomb)) for (ii in 1:ncol(xcomb)) { tukeynames[ii] = paste(ufactor[xcomb[2, ii]], "-", ufactor[xcomb[1, ii]], sep = "") p_value = pwt$p.value[xcomb[2, ii] - 1, xcomb[1, ii]] if (is.na(p_value)) { p_value = 1 } tukeylike[ii, 4] = p_value tukeylike[ii, 1] = 0 tukeylike[ii, 2] = 0 tukeylike[ii, 3] = 0 } rownames(tukeylike) = tukeynames # re-format the table slightly so it is the same as that produced # by TukeyHSD and output tukeylike2 = list(tukeylike) #print(tukeylike2) return(tukeylike2) } conf_band = function(x, reg, P, up) { #reg: result of linear regression lm #up: fact plus or minus if (missing(P)) { P = 0.05 } if (missing(up)) { up = 1 } a = reg$coefficients[2] b = reg$coefficients[1] md = x - mean(x) #residual result = x for (i in 1:length(x)) { result[i] = a * x[i] + b + up * qt(P, length(x) - 2) * sqrt(sum(reg$resid * reg$resid) / (length(x) - 2)) * sqrt(1 / (length(x) - 2) + md[i] ^ 2 / sum(md * md)) } return(result) } progn_band = function(x, reg, P, up) { if (missing(P)) { P = 0.05 } if (missing(up)) { up = 1 } a = reg$coefficients[2] b = reg$coefficients[1] md = x - mean(x) result = x for (i in 1:length(x)) { result[i] = a * x[i] + b + up * qt(P, length(x) - 2) * sqrt(sum(reg$resid * reg$resid) / (length(x) - 2)) * sqrt(1 + 1 / (length(x) - 2) + md[i] ^ 2 / sum(md * md)) } return(result) } # Check for normality with Shapiro-Wilk-test without visualization---- test_norm = function(x) { #Remove NA from x x <- x[!is.na(x)] # KS = ks.test(x, pnorm, mean(x), sd(x)) shapiro_wilk_test = shapiro.test(x) # my_list = list("Kolmogorov-Smirnoff" = KS, "Shapiro" =SH) return(shapiro_wilk_test) } #Check length of distributions for t-test---- check_assumption_sample_size_t_test = function(x1, x2, minimum_size) { #x1 sample 1 #x2 sample 2 #minimum_size:return TRUE if length> minimum_size if (length(x1) > minimum_size & length(x2) > minimum_size) { return(TRUE) } else{ return(FALSE) } } #Define color scheme----- #'\code{colorscheme(x)} selects color scheme of graphical output. Function parameter NULL lists all available color schemes, 1 a color tuple of green and blue #'2 a color tuple of dark green and turquoi, 3 a colorplaette as defined by RcolorBrewer #' #' @param colorcode selects color scheme. 
parameters NULL: list of all available color schemes, 1: colortuple, 2, colortuple2, 3, ColorPalette #' @return selected color scheme, colors are given with their Hex Code #RRGGBB names colorscheme = function(colorcode = NULL) { browserLightGreen = "#B8E0B8" #matched part group0 browserLightBlue = "#B3D1EF"#matched part group1 browserLightTurquois = "#B3E1EF"#light turquois browserDarkGreen = "#5CB85C" #dark green colortuple = c(browserLightGreen, browserLightBlue) colortuple2 = c(browserDarkGreen, browserLightTurquois) #from package RColorBrewer Set 3 ColorPalette = c( "#8DD3C7" , "#FFFFB3" , "#BEBADA" , "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5", "#D9D9D9", "#BC80BD" , "#CCEBC5" , "#FFED6F" ) my_list = list( "colortuple" = colortuple, "colortuple2" = colortuple2, "ColorPalette" = ColorPalette ) if (is.null(colorcode)) { return(my_list) } else if (colorcode == 1) { return(colortuple) } else if (colorcode == 2) { return(colortuple2) } else if (colorcode == 3) { return(ColorPalette) } else{ message("Choose valid parameter: NULL, 1,2 or 3") } } resetPar <- function() { dev.new while (!is.null(dev.list())) dev.off() #restores to default values oldpar <- par(no.readonly = TRUE) return(oldpar) }
/scratch/gouwar.j/cran-all/cranData/visStatistics/R/test_and_visuals.R
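# A small sketch exercising a few of the unexported helpers defined in this file
# (hedged illustration; the ::: accessor assumes the visStatistics package is installed).
set.seed(1)
x <- rnorm(50, mean = 10, sd = 2)
# Shapiro-Wilk check used by the decision logic; NAs are stripped inside test_norm().
visStatistics:::test_norm(x)$p.value
# Axis limits padded by 5% below and 10% above the data range.
visStatistics:::calc_min_max_of_y_axis(x, 0.05, 0.10)
# Colour tuple used for the box and strip charts (hex codes).
visStatistics:::colorscheme(1)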
#MIT License---- #Copyright (c) 2020 Sabine Schilling #Feedback highly welcome: [email protected] # Header visstat ----- #' Visualization of statistical hypothesis testing based on decision tree #' #' \code{visstat()} \strong{vis}ualizes the \strong{stat}istical hypothesis testing between #' the dependent variable (or response) #' \code{varsample} and the independent variable \code{varfactor}. \code{varfactor} can have more than two features. #' \code{visstat()} runs a decision tree selecting the statistical hypothesis test with the highest statistical power #' fulfilling the assumptions of the underlying test. For each test #' \code{visstat()} returns a graph displaying the data with the main test statistics #' in the title and a list with the complete test statistics including eventual post-hoc analysis. #' The automated workflow is especially suited for browser based interfaces to #' server-based deployments of R. #' Implemented tests: \code{lm()},\code{t.test()}, \code{wilcox.test()}, #' \code{aov()}, \code{kruskal.test()}, \code{fisher.test()}, \code{chisqu.test()}. #' Implemented tests for normal distribution of standardized residuals: \code{shapiro.test()} and \code{ad.test()}. #' Implemented post-hoc tests: \code{TukeyHSD()} for aov() and \code{pairwise.wilcox.test()} for \code{kruskal.test()}. #' #' For the comparison of averages, the following algorithm is implemented: #' If the p-values of the standardized residuals of \code{shapiro.test()} or \code{ks.test()} are smaller #' than 1-conf.level, \code{kruskal.test()} resp. \code{wilcox.test()} are performed, otherwise the \code{oneway.test()} #' and \code{aov()} resp. \code{t.test()} are performed and displayed. #' Exception: If the sample size is bigger than 100, \code{wilcox.test()} is never executed,instead always the \code{t.test()} is performed #' (Lumley et al. (2002) <doi:10.1146/annurev.publheath.23.100901.140546>). #' For the test of independence of count data, Cochran's rule (Cochran (1954) <doi:10.2307/3001666>) is implemented: #' If more than 20 percent of all cells have a count smaller than 5, \code{fisher.test()}is performed and displayed, otherwise \code{chisqu.test()}. #' In both cases case an additional mosaic plot showing Pearson's residuals is generated. #' @param dataframe \code{data.frame} containing at least two columns. Data must be column wise ordered. #' Contingency tables can be transformed to column wise structure with helper function \code{counts_to_cases(as.data.frame())}. #' @param varsample column name of dependent variable in \code{dataframe}, datatype \code{character}. #' @param varfactor column name of independent variable in \code{dataframe}, datatype \code{character}. #' @param conf.level confidence level of the interval. #' @param numbers a logical indicating whether to show numbers in mosaic count plots. #' @param minpercent number between 0 and 1 indicating minimal fraction of total count data of a category to be displayed in mosaic count plots. #' @param graphicsoutput saves plot(s) of type "png", "jpg", "tiff" or "bmp" in directory specified in \code{plotDirectory}. #' If graphicsoutput=NULL, no plots are saved. #' @param plotName graphical output is stored following the naming convention "plotName.graphicsoutput" in \code{plotDirectory}. #' Without specifying this parameter, plotName is automatically generated following the convention "statisticalTestName_varsample_varfactor". #' @param plotDirectory specifies directory, where generated plots are stored. 
Default is current working directory. #' @return \code{list} containing statistics of test with highest statistical power meeting assumptions. All values are returned as invisibly copies. Values can be accessed by assigning a return value to \code{visstat}. #' @examples #' #' ## Kruskal-Wallis rank sum test (calling kruskal.test()) #' visstat(iris,"Petal.Width", "Species") #' visstat(InsectSprays,"count","spray") #' #' ## ANOVA (calling aov()) and One-way analysis of means (oneway.test()) #' anova_npk=visstat(npk,"yield","block") #' anova_npk #prints summary of tests #' #' ## Welch Two Sample t-test (calling t.test()) #' visstat(mtcars,"mpg","am") #' #' ## Wilcoxon rank sum test (calling wilcox.test()) #' grades_gender <- data.frame( #' Sex = as.factor(c(rep("Girl", 20), rep("Boy", 20))), #' Grade = c(19.25, 18.1, 15.2, 18.34, 7.99, 6.23, 19.44, #' 20.33, 9.33, 11.3, 18.2,17.5,10.22,20.33,13.3,17.2,15.1,16.2,17.3, #' 16.5, 5.1, 15.25, 17.41, 14.5, 15, 14.3, 7.53, 15.23, 6,17.33, #' 7.25, 14,13.5,8,19.5,13.4,17.5,17.4,16.5,15.6)) #' visstat(grades_gender,"Grade", "Sex") #' #' ## Pearson's Chi-squared test and mosaic plot with Pearson residuals #' visstat(counts_to_cases(as.data.frame(HairEyeColor[,,1])),"Hair","Eye") #' ##2x2 contingency tables with Fisher's exact test and mosaic plot with Pearson residuals #' HairEyeColorMaleFisher = HairEyeColor[,,1] #' ##slicing out a 2 x2 contingency table #' blackBrownHazelGreen = HairEyeColorMaleFisher[1:2,3:4] #' blackBrownHazelGreen = counts_to_cases(as.data.frame(blackBrownHazelGreen)); #' fisher_stats=visstat(blackBrownHazelGreen,"Hair","Eye") #' fisher_stats #print out summary statistics #' #' ## Linear regression #' visstat(trees,"Girth","Height") #' #'## Saving the graphical output in directory plotDirectory #' ## A) saving graphical output of type "png" in temporary directory tempdir() #' ## with default naming convention: #' visstat(blackBrownHazelGreen,"Hair","Eye",graphicsoutput = "png",plotDirectory=tempdir()) #' ##remove graphical output from plotDirectory #' file.remove(file.path(tempdir(),"chi_squared_or_fisher_Hair_Eye.png")) #' file.remove(file.path(tempdir(),"mosaic_complete_Hair_Eye.png")) #' ## B) Specifying pdf as output type: #' visstat(iris,"Petal.Width", "Species",graphicsoutput = "pdf",plotDirectory=tempdir()) #' ##remove graphical output from plotDirectory #' file.remove(file.path(tempdir(),"kruskal_Petal_Width_Species.pdf")) #' ## C) Specifiying plotName overwrites default naming convention #' visstat(iris,"Petal.Width","Species",graphicsoutput = "pdf", #' plotName="kruskal_iris",plotDirectory=tempdir()) #' ##remove graphical output from plotDirectory #' file.remove(file.path(tempdir(),"kruskal_iris.pdf")) #' @import vcd #' @import Cairo #' @import graphics #' @import grDevices #' @import grid #' @import multcompView #' @import stats #' @import utils #' @importFrom nortest ad.test #' #' @export visstat visstat = function(dataframe, varsample, varfactor, conf.level = 0.95, numbers = TRUE, minpercent = 0.05, graphicsoutput = NULL, plotName=NULL, plotDirectory = getwd()) { # The function vistat() visualizes the statistical hypothesis testing between the dependent variable (response) varsample and the independent variable (feature) varfactor. # The statistical hypothesis test (including the eventual corresponding post-hoc analysis) with the highest statistical power fulfilling # the assumptions of the corresponding test is performed. 
# A graph displaying the raw data accordingly to the chosen test as well as the test statistics is generated and returned. # Implemented tests: lm(), t.test(), wilcox.test(), aov(), kruskal.test(), fisher.test(),chisqu.test(). # Three variables must be provided: # - dataframe of type data.frame or list (generated from json file) # with headers which are either the dependent variable (varsamples) # or the independent variable (varfact) # - varsample: dependent variable chosen by user out of columns of dataframe, varsample is the name given in the header # - varfactor: independent variables chosen by user out of columns of dataframe, varfactor is the name given in the header # Optional parameters with set default values: # numbers: Boolean deciding if in mosaic plots counts of each category should be shown # minpercent: number between 0 and 1 indicating the minimal fraction of total count which has to be in each category of count data in order to be displayed in mosaic plot # graphicsoutput: character string indicating if a plot of type "png", "jpeg", "jpg", "tiff", "bmp" should be saved to the director specified i plotDirectory following the # Note that the parameter graphicsoutput must be specified to save plots. The default "NULL" does not save the current plot(s). # plotName: Graphical output is stored following the naming convention "plotName.graphicsoutput" in plotDirectory. # The default plotName=NULL generates automatically a plotName following the pattern "statisticalTestName_varsample_varfactor". # plotDirectory: specifies directory to save plots. Default directory is the current working directory defined by getwd() stopifnot(is.data.frame(dataframe)) stopifnot(varsample %in% names(dataframe)) stopifnot(varfactor %in% names(dataframe)) #store default graphical parameters------ oldparvisstat <- par(no.readonly = TRUE) oldparvisstat$new=FALSE #reset the default value on.exit(par(oldparvisstat)) #Set default values--------------------------- alpha = 1 - conf.level ##Get input variables--------------------------------- input = get_samples_fact_inputfile(dataframe, varsample, varfactor) #out of function get_groups_inputfile samples = input$samples fact = input$fact name_of_sample = input$name_of_sample name_of_factor = input$name_of_factor matchingCriteria = input$matchingCriteria #dependent on samples, fact, name_of_sample, name_of_factor, conf.level, #paired=F, typesample = class(samples) typefactor = class(fact) #type of independent variable returned as a character vector #transform independent variable "fact" of class "character" to factor if (typefactor=="character"){ fact=as.factor(fact) #transform independent variable "fact" of class "character" to factor typefactor = class(fact) #store the newly generate class of type "factor" of the independent variable } maxlabels = length(levels(samples)) ## Comparison of all possible combinations of input variables ------------------ ##A) median or mean----- #requirement: only two levels of factors #if the chosen "sample" is numeric or integer, we can perform parametric tests like the t-test #(if the assumption of normal distribution is met ) #otherwise Wilcoxon test if ((#Wilcoxon or t-test ----- typesample == "integer" | typesample == "numeric") && (typefactor == "factor") && nlevels(fact) == 2) { #check if there is at least one entry in each group, if not return empty twosamples = create_two_samples_vector(samples, fact) #returns list with three entries if (length(twosamples) < 3) { vis_sample_fact = warning("In each group must be at 
least one member ") } else{ # t-Test ----- # rest = two_sample_tTest(samples, fact, alpha, side = "two.sided", samplename=varsample,factorname=matchingCriteria) x = twosamples$sample1and2 x1 = twosamples$sample1 x2 = twosamples$sample2 #the two-sample t-test is robust to non-normality due to the central limit theorem #checking for normality of samples not necessary if sample size roughly >100 #citation: THE IMPORTANCE OF THE NORMALITY ASSUMPTION IN LARGE PUBLIC HEALTH DATA SETS # DOI: 10.1146/annurev.publhealth.23.100901.140546 # #Check normality of both samples with Shapiro -Test----- #Check assumptions of Shapiro-Test:length between 3 and 5000, at least one level #returns TRUE if size between 3 and 50000 # # There are two different ways to justify the use of the t-test" # 1.Your data is normally distributed and you have at least two samples per group # 2. You have large (N>100)sample sizes in each group shapiro_assumptions1 = check_assumptions_shapiro(x1) shapiro_assumptions2 = check_assumptions_shapiro(x2) if (shapiro_assumptions1 == TRUE) p1 = test_norm(twosamples$sample1) if (shapiro_assumptions2 == TRUE) p2 = test_norm(twosamples$sample2) # Check if normal distributions are given in both samples by Shapiro and KS-Test -- # Assume normal distributions if the p-value of at least one of the tests is greater alpha #Perform always t-test if both samples are >100 if (length(twosamples$sample1) > 100 & length(twosamples$sample2) > 100) { openGraphCairo(type = graphicsoutput, fileDirectory = plotDirectory) vis_sample_fact = two_sample_tTest( samples, fact, conf.level = conf.level, alternative = 'two.sided', var.equal = F, paired = F, samplename = varsample, factorname = matchingCriteria ) if (is.null(plotName)) {filename=paste("ttest_", name_of_sample, "_", name_of_factor, sep = "") }else{ filename=plotName } saveGraphVisstat( filename, type = graphicsoutput, fileDirectory = plotDirectory ) } #2. 
If assumptions of t-test are not met: Wilcoxon, else t-test else if (!exists("p1") | (if (exists("p1")) { p1$p.value < alpha } else{ FALSE }) | !exists("p2") | (if (exists("p2")) { (p2$p.value < alpha) } else{ FALSE })) { #case 1: Wilcoxon-Test: #normal distribution not given for n<limit openGraphCairo(type = graphicsoutput, fileDirectory = plotDirectory) vis_sample_fact = two_sample_WilcoxonTest( samples, fact, alternative = "two.sided", conf.level = conf.level, notchf = F, samplename = varsample, factorname = matchingCriteria ) if (is.null(plotName)) {filename=paste("wilcoxon-test_", name_of_sample, "_", name_of_factor, sep = "") }else{ filename=plotName } saveGraphVisstat( fileName=filename, type = graphicsoutput, fileDirectory = plotDirectory ) } else{ openGraphCairo(type = graphicsoutput, fileDirectory = plotDirectory) vis_sample_fact = two_sample_tTest( samples, fact, conf.level = conf.level, alternative = 'two.sided', var.equal = F, paired = F, samplename = varsample, factorname = matchingCriteria ) if (is.null(plotName)) {filename=paste("ttest_", name_of_sample, "_", name_of_factor, sep = "") }else{ filename=plotName } saveGraphVisstat( fileName=filename, type = graphicsoutput, fileDirectory = plotDirectory ) } return(invisible(vis_sample_fact)) } } ## B) Chi2 and Mosaic----- if (typefactor == "factor" && typesample == "factor") { if (check_assumptions_count_data(samples, fact) == FALSE) { vis_sample_fact = makeTable(samples, fact, name_of_sample, name_of_factor) } else{ #Chi^2 Test----- openGraphCairo(type = graphicsoutput, fileDirectory = plotDirectory) vis_chi = vis_chi_squared_test(samples, fact, name_of_sample, "groups") if (is.null(plotName)) {filename=paste( "chi_squared_or_fisher_", name_of_sample, "_", name_of_factor, sep = "" ) }else{ filename=paste(plotName,"_","chi_squared_or_fisher",sep="") } saveGraphVisstat( fileName=filename, type = graphicsoutput, fileDirectory = plotDirectory ) #Mosaic plots ----- #a) complete labeled mosaic graph if (maxlabels > 7) { numberflag = F } else{ numberflag = T } openGraphCairo(type = graphicsoutput, fileDirectory = plotDirectory) vis_mosaic_res = vis_mosaic( samples, fact, name_of_sample = name_of_sample, name_of_factor = name_of_factor, minperc = 0, numbers = numberflag ) if (is.null(plotName)) {filename=paste( "mosaic_complete_", name_of_sample, "_", name_of_factor, sep = "" ) }else{ filename=paste(plotName,"_","mosaic_complete",sep="") } saveGraphVisstat( filename , type = graphicsoutput, fileDirectory = plotDirectory ) #b) reduced plots if number of of levels>7 #Display only categories with at least minpercent of entries if (maxlabels > 7) { openGraphCairo(type = graphicsoutput, fileDirectory = plotDirectory) vis_mosaic_res = vis_mosaic( samples, fact, name_of_sample = name_of_sample, name_of_factor = "groups", minperc = minpercent, numbers = T ) saveGraphVisstat( paste( "mosaic_reduced_", name_of_sample, "_", name_of_factor, sep = "" ), type = graphicsoutput, fileDirectory = plotDirectory ) } vis_sample_fact = c(vis_chi, vis_mosaic_res) } } #C) both types numeric----- #Both samples and fact of type integer or numeric #Regression # # if ((typefactor == "integer" | typefactor == "numeric") && (typesample == "integer" | typesample == "numeric")) { openGraphCairo(type = graphicsoutput, fileDirectory = plotDirectory) vis_sample_fact = vis_regression(fact, samples, name_of_factor = name_of_factor, name_of_sample = name_of_sample) if (is.null(plotName)) {filename=paste("regression_", name_of_sample, "_", name_of_factor, sep = "") }else{ 
filename=paste(plotName) } saveGraphVisstat( fileName=filename, type = graphicsoutput, fileDirectory = plotDirectory ) } #D) more than two comparisons---- #A) sample is numeric or integer: ANOVA or Kruskal/Wallis if (typefactor == "factor" && (typesample == "integer" | typesample == "numeric") && nlevels(fact) > 2) { visanova = vis_anova_assumptions( samples, fact, conf.level = 0.95, samplename = varsample, factorname = varfactor ) if (visanova$shapiro_test$p.value > alpha | visanova$ad_test$p.value > alpha) { openGraphCairo(type = graphicsoutput, fileDirectory = plotDirectory) vis_sample_fact = vis_anova(samples, fact, samplename = varsample, factorname = varfactor) if (is.null(plotName)) { filename=paste("anova_", name_of_sample, "_", name_of_factor, sep = "") }else{ filename=paste(plotName) } saveGraphVisstat( fileName=filename, type = graphicsoutput, fileDirectory = plotDirectory ) #if p -values of both Shapiro-Wilk and Kruskall-Wallis-Test are smaller than 0.05, Kruskall-Wallis-Test } else{ openGraphCairo(type = graphicsoutput, fileDirectory = plotDirectory) vis_sample_fact = vis_Kruskal_Wallis_clusters( samples, fact, conf.level = conf.level, samplename = varsample, factorname = varfactor, cex = 1, notch = F ) if (is.null(plotName)) { filename=paste("kruskal_", name_of_sample, "_", name_of_factor, sep = "") }else{ filename=paste(plotName) } saveGraphVisstat( fileName=filename, type = graphicsoutput, fileDirectory = plotDirectory ) } } return(invisible(vis_sample_fact)) } #End of vis_sample_fact function -------
/scratch/gouwar.j/cran-all/cranData/visStatistics/R/visstat.R
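# A minimal sketch of working with the value returned by visstat() (assuming the
# visStatistics package is attached). The list is returned invisibly, so assign it to
# inspect which test the decision tree selected and its full statistics.
library(visStatistics)
kruskal_insects <- visstat(InsectSprays, "count", "spray")
names(kruskal_insects)          # component names depend on the branch chosen by the decision tree
kruskal_insects$kruskal_wallis  # present when the Kruskal-Wallis branch was selected, as in the examples above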
blsdata <- NULL
#' Box Lunch Study - Baseline dataset
#'
#' The variables are as follows:
#'
#' \itemize{
#' \item trt. Treatment
#' \item sex. Sex
#' \item bmi0. BMI
#' \item snackkcal0. Snacking kilo calories
#' \item srvgfv0. Serving size of fruits and vegetables
#' \item srvgssb0. Serving size of beverages
#' \item kcal24h0.
#' \item edeq01.
#' \item edeq02.
#' \item edeq13.
#' \item edeq14.
#' \item edeq15.
#' \item edeq22.
#' \item edeq23.
#' \item edeq25.
#' \item edeq26.
#' \item cdrsbody0. Body image
#' \item weighfreq0. Weighing frequency
#' \item freqff0. Fast food frequency
#' \item age. Age
#' \item tfactor1.
#' \item tfactor2.
#' \item tfactor3.
#' \item mlhfbias0.
#' \item fwahfbias0.
#' \item rrvfood. Relative reinforcement of food
#' }
#'
#' @docType data
#' @keywords datasets
#' @name blsdata
#' @usage data(blsdata)
#' @format A data frame with 226 rows and 26 variables
#' @examples
#' data(blsdata)
#'
"blsdata"
/scratch/gouwar.j/cran-all/cranData/visTree/R/blsdata.R
#' Function for determining a pathway #' #' Decision tree structure #' #' @param newtree Decision tree generated as a party object #' @param node_id Node ID #' @param start_criteria Character vector #' @keywords pathway decision tree #' @export #' l_node <- function(newtree, node_id = 1, start_criteria = character(0)) { tree <- node_party(newtree) node <- as.list(node_party(newtree)) if (!length(nodeapply(tree, ids = nodeids(tree))[[node_id]])) { prediction <- predict_party(newtree, node_id)[[1]] ypred <- paste(start_criteria, ",y =", prediction) print(ypred) } left_node_id <- ptree_left(newtree, node_id) right_node_id <- ptree_right(newtree, node_id) if (is.null(left_node_id) != is.null(right_node_id)) { print("left node ID != right node id") } ypred <- character(0) if (!is.null(left_node_id)) { new_criteria <- paste(start_criteria, ptree_criteria(newtree, node_id, TRUE), sep = ",") if (1 == node_id) { new_criteria <- ptree_criteria(newtree, node_id, TRUE) } ypred <- l_node(newtree, left_node_id, new_criteria) } if (!is.null(right_node_id)) { new_criteria <- paste(start_criteria, ptree_criteria(newtree, node_id, FALSE), sep = ",") if (1 == node_id) { new_criteria <- ptree_criteria(newtree, node_id, F) } ypred <- paste(ypred, l_node(newtree, right_node_id, new_criteria)) } if (!is.character(ypred)) { return(ypred) } }
/scratch/gouwar.j/cran-all/cranData/visTree/R/l_node.R
#' Color Scheme
#'
#' Helper to adjust the transparency of the colors that make up the color scheme used
#' within the visualization.
#'
#' @param colortype A vector of colors (e.g., a color palette) to be made transparent
#' @param alpha Transparency value between 0 and 1 (0 = fully transparent, 1 = fully opaque)
#' @keywords pathway decision tree
#' @export
#'
makeTransparent <- function(colortype, alpha) {
  ## Helper function to make colors transparent
  if (alpha < 0 | alpha > 1) stop("alpha must be between 0 and 1")
  # convert the 0-1 transparency value to the 0-255 scale used by rgb()
  alpha <- floor(255 * alpha)
  newColor <- col2rgb(col = unlist(list(colortype)), alpha = FALSE)
  .makeTransparent <- function(col, alpha) {
    rgb(red = col[1], green = col[2], blue = col[3], alpha = alpha, maxColorValue = 255)
  }
  # apply the alpha channel to each color in the palette
  newColor <- apply(newColor, 2, .makeTransparent, alpha = alpha)
  return(newColor)
}
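# Usage sketch (not run): makeTransparent() returns hex colour strings with the
# requested alpha channel appended, e.g. a three-colour palette at 50% opacity.
if (FALSE) {
  pal <- colorspace::rainbow_hcl(3)
  makeTransparent(pal, alpha = 0.5) # hex strings such as "#RRGGBB7F"
}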
/scratch/gouwar.j/cran-all/cranData/visTree/R/makeTransparent.R
#' Minmax matrix #' #' Identifies splits and relevant criteria #' @param Y Response variable in the dataset #' @param str Structure of pathway from the root node in the decision tree to each terminal node #' @param varnms Names of covariates #' @param interval logical. Continuous response (interval = FALSE) and Categorical response (interval = TRUE). #' @keywords visualization pathway decision tree #' @export #' minmax_mat <- function(str, varnms, Y, interval) { comps <- strsplit(str, ",") MMM <- matrix(data = rep(c(-Inf, Inf, " "), length(varnms)), nrow = length(varnms), ncol = 3, byrow = TRUE) ### min-max matrix rownames(MMM) <- varnms # nodeids(potentialtree) Mlist <- list() # Y<-fitted(cond.tree)[[3]] if (is.numeric(Y)) { length.mat <- length(comps[[1]]) - 2 } if (is.factor(Y)) { if (interval == TRUE) { length.mat <- length(comps[[1]]) - 3 } else { length.mat <- length(comps[[1]]) - 2 } } for (i in 1:length.mat) { nodestr <- strsplit(trim(comps[[1]][i]), " ") node.varnm <- trim(nodestr[[1]][1]) node.dir <- trim(nodestr[[1]][2]) node.split <- trim(nodestr[[1]][3]) var.row <- which(varnms == node.varnm) if (node.dir == "<=") { MMM[var.row, 2] <- as.numeric(node.split) MMM[var.row, 3] <- "<=" } else { MMM[var.row, 1] <- as.numeric(node.split) MMM[var.row, 3] <- ">" } Mlist[[i]] <- c(node.varnm, MMM[var.row, ]) output <- matrix(unlist(Mlist), ncol = 4, byrow = TRUE) } if (is.factor(Y)) { y <- paste0(comps[[1]][length(comps[[1]]) - 2], ",", comps[[1]][length(comps[[1]]) - 1]) } if (is.numeric(Y)) { y <- comps[[1]][length(comps[[1]]) - 1] } return(list(M = MMM, y = y)) }
/scratch/gouwar.j/cran-all/cranData/visTree/R/minmax_mat.R
#' Function for determining a pathway #' #' Generates the pathway from the root node to individual terminal nodes of a decision tree generated as a party object using the partykit package. #' #' @param newtree Decision tree generated as a party object #' @param idnumber Terminal ID number #' @keywords pathway decision tree #' @export #' @import partykit #' @importFrom utils capture.output tail #' path_node <- function(newtree, idnumber = 0) { pathway <- capture.output(l_node(newtree)) gerid <- gsub("*\\[1](*)\\'*", "\\1", pathway) gerid <- gsub("\"", "", gerid) terminal.id <- nodeids(newtree, terminal = TRUE) if (idnumber > 0) { index <- which(terminal.id == idnumber) if (length(index) > 0) { return(gerid[index]) } else { stop("This ID does not correspond to a Terminal Node") } } else { return(paste(gerid, collapse = " ;")) } }
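# Usage sketch (not run): list all root-to-terminal pathways of a small
# conditional inference tree, then extract the pathway of a single terminal node.
if (FALSE) {
  tr <- partykit::ctree(dist ~ speed, data = cars)
  path_node(tr)                          # all pathways, separated by ";"
  term.ids <- partykit::nodeids(tr, terminal = TRUE)
  path_node(tr, idnumber = term.ids[1])  # pathway of the first terminal node
}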
/scratch/gouwar.j/cran-all/cranData/visTree/R/path_node.R
#' Generate individual subplots within the graphical visualization #' #' This function is utilized to generate a series of sub-plots, where each subplot corresponds to individual terminal nodes within the decision tree structure. Each subplot is composed of a histogram (or a barchart) that displays the distribution for the relevant subgroup and colored horizontal bars that summarize the set of covariate splits. #' #' @param My A matrix to define the split points within the decision tree structure #' @param X Covariates #' @param Y Response variable #' @param str Structure of pathway from the root node in the decision tree to each terminal node #' @param color.type Color palettes. (rainbow_hcl = 1; heat_hcl = 2; terrain_hcl = 3; sequential_hcl = 4; diverge_hcl = 5) #' @param alpha Transparency of individual horizontal bars. Choose values between 0 to 1. #' @param add.p.axis logical. Add axis for the percentiles (add.p.axis = TRUE), remove axis for the percentiles (add.p.axis = FALSE). #' @param add.h.axis logical. Add axis for the outcome (add.h.axis = TRUE), remove axis for the outcome (add.h.axis = FALSE). #' @param cond.tree Tree as a party object #' @param text.main Change the size of the main titles #' @param text.bar Change the size of the text in the horizontal bar and below the bar plot #' @param text.round Round the threshold displayed on the bar #' @param text.percentile Change the size of the percentile title #' @param density.line Draw a density line #' @param text.title Change the size of the text in the title #' @param text.axis Change the size of the text of axis labels #' @param text.label Change the size of the axis annotation #' @keywords matrix pathway decision tree #' @export #' @importFrom graphics barplot hist par plot polygon segments text title lines layout axis abline #' @importFrom stats aggregate ecdf fitted smooth.spline density #' @importFrom grDevices cm.colors col2rgb gray rgb #' #' plot_minmax <- function(My, X, Y, str, color.type, alpha, add.p.axis, add.h.axis, cond.tree, text.main, text.bar, text.round, text.percentile, density.line, text.title, text.axis, text.label) { ## Main function which plots the bars for each variable along with a histogram of the outcome comps <- strsplit(str, ",") mymat <- matrix(as.numeric(My$M[, -3]), ncol = 2) my.y <- My$y mydir <- My$M[, 3] if (!is.factor(Y)) { my.y.val <- strsplit(trim(my.y), " ")[[1]][3] my.y.pct <- ecdf(Y)(my.y.val) } if (is.factor(Y)) { my.y.val <- strsplit(trim(my.y), " ")[[1]][3] # my.y.pct <- ecdf(Y)(my.y.val) } var.nms <- rownames(My$M) act.vars <- apply(mymat, 1, function(x) { !all(abs(x) == Inf) }) max.y <- sum(act.vars) + 1 # rbw <- cm.colors(n=nrow(mymat)) colors <- list(colorspace::rainbow_hcl(n = nrow(mymat)), colorspace::heat_hcl(n = nrow(mymat)), colorspace::terrain_hcl(n = nrow(mymat)), colorspace::sequential_hcl(n = nrow(mymat)), colorspace::diverge_hcl(n = nrow(mymat))) rbw <- colors[[color.type]] ## Find the y's which "belong" in this node node.index <- 1:length(Y) for (i in 1:nrow(mymat)) { digit <- 8 node.index <- intersect(node.index, c(which(round(X[, i], digit) > round(mymat[i, 1], digit) & round(X[, i], digit) <= round(mymat[i, 2], digit)))) } if (is.factor(Y)) { wdth <- 1 / length(levels(Y)) H <- hist(as.integer(Y[node.index]) / length(levels(Y)), breaks = seq(0, 1, length.out = length(levels(Y)) + 1), plot = FALSE) ## Scale the histogram so it fits vertically on the plot. 
scale.factor <- max.y / max(H$density) ## Set up an empty plot of the correct size plot(NA, xlim = c(0, 1), ylim = c(0, max.y), ylab = "", font = 2, main = paste0("Node ID = ", tail(comps[[1]], 1), "(", "n = ", length(node.index), ")"), bty = "n", yaxt = "n", xaxt = "n", cex.axis = text.percentile, cex.main = text.title) if (add.p.axis == TRUE) { axis(side = 3, at = c(0.0, 0.2, 0.4, 0.6, 0.8, 1.0), labels = rep("", 6), tck = 0.05) title(main = "Percentile", line = -1.05, cex.main = text.percentile) axis(side = 3, at = c(0.0, 0.2, 0.4, 0.6, 0.8, 1.0), labels = c(0, 20, 40, 60, 80, 100), line = -2.5, lwd = 0, cex.axis = text.label, font = 2) } } else { H <- hist(ecdf(Y)(Y[node.index]), breaks = seq(0, 1, by = 0.1), plot = FALSE) ## Scale the histogram so it fits vertically on the plot. scale.factor <- max.y / max(H$density) ## Set up an empty plot of the correct size # plot(NA,xlim=c(0,1),ylim=c(0,max.y),ylab="", font = 2,main=paste0("Node ID = ", tail(comps[[1]], 1), " (Mean = ",round(my.y.val, 2),", n = ",length(node.index),")"),bty="n", yaxt = "n", xaxt = "n",cex.axis = text.label, cex.main =text.title) plot(NA, xlim = c(0, 1), ylim = c(0, max.y), ylab = "", font = 2, main = paste0("Node ID = ", tail(comps[[1]], 1), " (n = ", length(node.index), ")"), bty = "n", yaxt = "n", xaxt = "n", cex.axis = text.label, cex.main = text.title) if (add.p.axis == TRUE) { axis(side = 3, at = c(0.0, 0.2, 0.4, 0.6, 0.8, 1.0), labels = rep("", 6), tck = 0.05) title(main = "Percentile", line = -1.05, cex.main = text.percentile) axis(side = 3, at = c(0.0, 0.2, 0.4, 0.6, 0.8, 1.0), labels = c(0, 20, 40, 60, 80, 100), line = -2.5, lwd = 0, cex.axis = text.label, font = 2) ## Plot the background histogram } } ## Now plot the horizontal bars corresponding to each variable. 
j <- 1 for (i in which(act.vars)) { F.x <- ecdf(X[, var.nms[i]]) lo <- ifelse(mymat[i, 1] == -Inf, 0, F.x(mymat[i, 1])) hi <- ifelse(mymat[i, 2] == Inf, 1, F.x(mymat[i, 2])) polygon(c(lo, lo, hi, hi), c(j - 0.5, j + 0.5, j + 0.5, j - 0.5), col = makeTransparent(rbw[i], alpha = alpha), border = NA) # rect(xleft = lo, xright = lo, ytop = hi, ybottom = hi, density = c(j - 0.5, j + 0.5, j + 0.5, j - 0.5), col = makeTransparent(rbw[i], alpha = alpha), border = NA) if (mymat[i, 2] == Inf) { idx <- 1 } else { idx <- 2 } if (inherits(cond.tree, "constparty")) { if (My$M[i, 1] != -Inf && My$M[i, 2] == Inf) { text(mean(c(lo, hi)), j, paste0(rownames(My$M)[i], ">", round(as.numeric(My$M[i, 1]), text.round)), font = 2, cex = text.bar) } if (My$M[i, 1] != -Inf && My$M[i, 2] != Inf) { text(mean(c(lo, hi)), j, paste0(rownames(My$M)[i], "<=", round(as.numeric(My$M[i, 2]), text.round), "\n", rownames(My$M)[i], ">", round(as.numeric(My$M[i, 1]), text.round)), font = 2, cex = text.bar) } if (My$M[i, 1] == -Inf && My$M[i, 2] != Inf) { text(mean(c(lo, hi)), j, paste0(rownames(My$M)[i], "<=", round(as.numeric(My$M[i, 2]), text.round)), font = 2, cex = text.bar) } } else { if (My$M[i, 1] != -Inf && My$M[i, 2] == Inf) { text(mean(c(lo, hi)), j, paste0(rownames(My$M)[i], ">=", round(as.numeric(My$M[i, 1]), text.round)), font = 2, cex = text.bar) } if (My$M[i, 1] != -Inf && My$M[i, 2] != Inf) { text(mean(c(lo, hi)), j, paste0(rownames(My$M)[i], "<", round(as.numeric(My$M[i, 2]), text.round), "\n", rownames(My$M)[i], ">=", round(as.numeric(My$M[i, 1]), text.round)), font = 2, cex = text.bar) } if (My$M[i, 1] == -Inf && My$M[i, 2] != Inf) { text(mean(c(lo, hi)), j, paste0(rownames(My$M)[i], "<", round(as.numeric(My$M[i, 2]), text.round)), font = 2, cex = text.bar) } } ## Label the variables j <- j + 1 } if (is.factor(Y)) { if (add.h.axis == TRUE) { bp <- barplot(scale.factor * H$density, width = wdth, yaxt = "n", col = rgb(0, 0, 0, 0.15), border = rgb(0, 0, 0, 0.1), add = FALSE, space = 0) } else { bp <- barplot(scale.factor * H$density, width = wdth, yaxt = "n", col = rgb(0, 0, 0, 0.15), xaxt = "n", border = rgb(0, 0, 0, 0.1), add = FALSE, space = 0) } ## Add the category labels text(seq(wdth / 2, 1 - wdth / 2, by = wdth), rep(0, length(levels(Y))), levels(Y), pos = 3, adj = 0.5, cex = text.bar, font = 2) # text(seq(wdth/2,1-wdth/2,by=wdth),rep(quantile(scale.factor*H$density, 0.97),length(levels(Y))),levels(Y),pos=3,adj=0.5,cex=1.5,col=gray(0.5)) if (inherits(cond.tree, "constparty")) { title(main = paste0(names(cond.tree$data)[1], " (Mean = ", my.y.val, ")"), cex.main = text.main) } else { title(main = paste0(names(as.party(cond.tree)$data)[1], " (Mean = ", my.y.val, ")"), cex.main = text.main) } } else { max.density <- max(hist(Y, plot = FALSE)$density) yaxis.limits <- c(range(density(Y[node.index])$y)) xaxis.limits <- c(range(Y)) if (add.h.axis == TRUE) { H <- hist(Y[node.index], plot = TRUE, prob = TRUE, xlim = xaxis.limits, ylim = yaxis.limits, yaxt = "n", main = " ", font = 2, cex.axis = text.axis, border = rgb(0, 0, 0, 0.1), col = rgb(0, 0, 0, 0.15)) } else { H <- hist(Y[node.index], plot = TRUE, prob = TRUE, xlim = xaxis.limits, ylim = yaxis.limits, yaxt = "n", main = " ", xaxt = "n", font = 2, cex.axis = text.axis, border = rgb(0, 0, 0, 0.1), col = rgb(0, 0, 0, 0.15)) } if (density.line) { lines(density(Y[node.index]), lty = 2, lwd = 1.5) } if (inherits(cond.tree, "constparty")) { title(main = paste0(names(cond.tree$data)[1], " (Mean = ", round(as.numeric(my.y.val), 0), ")"), cex.main = text.main) } else { 
title(main = paste0(names(as.party(cond.tree)$data)[1], " (Mean = ", round(as.numeric(my.y.val), 0), ")"), cex.main = text.main) } ## Draw in a line for the mean mu.Y <- mean(Y[node.index]) abline(v = mu.Y, col = rgb(0, 0, 0, 0.5), lwd = 2) } }
/scratch/gouwar.j/cran-all/cranData/visTree/R/plot_minmax.R
#' Splitting Criteria #' #' Identifies the splitting criteria for the relevant node leading to lower level inner nodes or a terminal node. #' #' @param newtree Decision tree #' @param node_id Node id #' @param left Splits to the left #' @keywords pathway decision tree #' @export ptree_criteria <- function(newtree, node_id, left) { tree <- node_party(newtree) node <- as.list(node_party(newtree)) if (length(nodeapply(tree, ids = nodeids(tree))[[node_id]]) == "0") # Check if this is a terminal node { return("(error: terminal node)") } if (node[[node_id]]$split$breaks) { sp <- node[[node_id]]$split$breaks if (inherits(newtree, "constparty")) { split_id <- node[[node_id]]$split$varid vn <- names(data_party(newtree))[split_id] } else { vn <- names(node[[node_id]]$info$p.value) } # Left being true then the left string of variables with split points are returned if (left) { op <- "<=" } else { op <- ">" } return(paste(vn, op, sp)) } else { if (left) { l <- is.null(node[[node_id]]$split$breaks) } else { l <- is.null(!node[[node_id]]$split$breaks) } r <- paste(attr(node[[node_id]]$split$breaks, "levels")[l], sep = "", collapse = "','") return(paste(names(node[[node_id]]$info$p.value), " in ('", r, "')", sep = "")) } }
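# Usage sketch (not run): return the split criterion of the root node, formatted
# for the left ("<=") and right (">") branches respectively.
if (FALSE) {
  tr <- partykit::ctree(dist ~ speed, data = cars)
  ptree_criteria(tr, node_id = 1, left = TRUE)   # e.g. "speed <= <breakpoint>"
  ptree_criteria(tr, node_id = 1, left = FALSE)  # e.g. "speed > <breakpoint>"
}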
/scratch/gouwar.j/cran-all/cranData/visTree/R/ptree_criteria.R
#' Left split
#'
#' Identifies the node that corresponds to the left branch of a split, i.e. returns the
#' node ID of the left child of the given node (or NULL if the node is terminal).
#'
#' @param newtree Decision tree generated as a party object
#' @param start_id Node ID of the node whose left child is requested
#' @keywords pathway decision tree
#' @export
ptree_left <- function(newtree, start_id) {
  node <- as.list(node_party(newtree))
  if (!is.null(node[[start_id]]$kids)) {
    node[[start_id]]$kids[1]
  }
}
/scratch/gouwar.j/cran-all/cranData/visTree/R/ptree_left.R
#' Right Split
#'
#' Identifies the node that corresponds to the right branch of a split, i.e. returns the
#' node ID of the right child of the given node (or NULL if the node is terminal).
#'
#' @param newtree Decision tree generated as a party object
#' @param start_id Node ID of the node whose right child is requested
#' @keywords pathway decision tree
#' @export
ptree_right <- function(newtree, start_id) {
  node <- as.list(node_party(newtree))
  if (!is.null(node[[start_id]]$kids)) {
    node[[start_id]]$kids[2]
  }
}
/scratch/gouwar.j/cran-all/cranData/visTree/R/ptree_right.R
#' Function for determining a pathway #' #' Identifies the predicted outcome value for the relevant node. #' #' @param newtree Decision tree generated as a party object #' @param node_id Node ID #' @keywords pathway decision tree #' @export #' ptree_y <- function(newtree, node_id) { # Picks out the prediction from the ctree structure p <- predict_party(newtree, node_id)[[1]] return(p) }
/scratch/gouwar.j/cran-all/cranData/visTree/R/ptree_y.R
#' Trim whitespace
#'
#' Helper used when parsing pathway strings: removes leading and trailing whitespace
#' from a character string.
#'
#' @param x A character string (or vector of strings) to be trimmed
#' @keywords pathway decision tree
#' @export
trim <- function(x) {
  gsub("^\\s+|\\s+$", "", x)
}
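# Usage sketch (not run): strip the surrounding whitespace left over when a
# pathway string is split on commas.
if (FALSE) {
  trim("  speed <= 17  ")  # "speed <= 17"
}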
/scratch/gouwar.j/cran-all/cranData/visTree/R/trim.R
#' Visualization of subgroups for decision trees #' #' This visualization characterizes subgroups defined by a decision tree structure and identifies the range of covariate values associated with outcome values in each subgroup. #' #' @param cond.tree Decision tree generated as a party object. #' @param rng Restrict plotting to a particular set of nodes. Default value is set as NULL. #' @param interval logical. Continuous outcome (interval = FALSE) and Categorical outcome (interval = TRUE). #' @param color.type Color palettes (rainbow_hcl = 1; heat_hcl = 2; terrain_hcl = 3; sequential_hcl = 4 ; diverge_hcl = 5) #' @param alpha Transparency for horizontal colored bars in each subplot. Values between 0 to 1. #' @param add.h.axis logical. Add axis for the outcome distribution (add.h.axis = TRUE), remove axis for the outcome (add.h.axis = FALSE). #' @param add.p.axis logical. Add axis for the percentiles (add.p.axis = TRUE) computed over covariate values, remove axis for the percentiles (add.p.axis = FALSE). #' @param text.main Change the size of the main titles #' @param text.axis Change the size of the text of axis labels #' @param text.title Change the size of the text in the title #' @param text.bar Change the size of the text in the horizontal bar #' @param text.round Round the threshold displayed on the horizontal bar #' @param text.label Change the size of the axis annotation #' @param text.percentile Change the size of the percentile title #' @param density.line logical. Draw a density line. (density.line = TRUE). #' @keywords visualization pathway decision tree #' @author Ashwini Venkatasubramaniam and Julian Wolfson #' @export #' @importFrom utils capture.output tail #' @examples #' data(blsdata) #' newblsdata<-blsdata[,c(7,21, 22,23, 24, 25, 26)] #' ## Continuous response #' ptree1<-partykit::ctree(kcal24h0~., data = newblsdata) #' visTree(ptree1, text.axis = 1.3, text.label = 1.2, text.bar = 1.2, alpha = 0.5) #' #' ## Repeated covariates in the splits of the decision tree #' ptree2<-partykit::ctree(kcal24h0~skcal+rrvfood+resteating+age, data = blsdata) #' visTree(ptree2, text.axis = 1.3, text.label = 1.2, text.bar = 1.2, alpha = 0.5) #' #' ## Categorical response #' blsdataedit<-blsdata[,-7] #' blsdataedit$bin<-0 #' blsdataedit$bin<-cut(blsdata$kcal24h0, unique(quantile(blsdata$kcal24h0)), #' include.lowest = TRUE, dig.lab = 4) #' names(blsdataedit)[26]<-"kcal24h0" #' ptree3<-partykit::ctree(kcal24h0~hunger+rrvfood+resteating+liking, data = blsdataedit) #' visTree(ptree3, interval = TRUE, color.type = 1, alpha = 0.6, #' text.percentile = 1.2, text.bar = 1.8) #' #' ## Other decision trees (e.g., rpart) #' ptree4<-rpart::rpart(kcal24h0~wanting+liking+rrvfood, data = newblsdata, #' control = rpart::rpart.control(cp = 0.029)) #' visTree(ptree4, text.bar = 1.8, text.label = 1.4, text.round = 1, #' density.line = TRUE, text.percentile = 1.3) #' #' ## Change the color scheme and transparency of the horizontal bars #' ptree1<-partykit::ctree(kcal24h0~., data = newblsdata) #' visTree(ptree1, text.axis = 1.3, text.label = 1.2, text.bar = 1.2, alpha = 0.65, #' color.type = 3) #' #' ## Remove the axes corresponding to the percentiles and the response values. 
#' ptree1<-partykit::ctree(kcal24h0~., data = newblsdata) #' visTree(ptree1, text.axis = 1.3, text.label = 1.2, text.bar = 1.2, alpha = 0.65, #' color.type = 3, add.p.axis = FALSE, add.h.axis = FALSE) #' #' # Remove the density line over the histograms #' ptree1<-partykit::ctree(kcal24h0~., data = newblsdata) #' visTree(ptree1, text.axis = 1.3, text.label = 1.2, text.bar = 1.2, alpha = 0.65, #' color.type = 3, density.line = FALSE) visTree <- function(cond.tree, rng = NULL, interval = FALSE, color.type = 1, alpha = 0.5, add.h.axis = TRUE, add.p.axis = TRUE, text.round = 1, text.main = 1.5, text.bar = 1.5, text.title = 1.5, text.label = 1.5, text.axis = 1.5, text.percentile = 0.7, density.line = TRUE) { ## Wrapper function to produce plots from a conditional inference tree ## 'range' parameter can restrict plotting to a particular set of nodes if (inherits(cond.tree, "constparty")) { splittree <- path_node(cond.tree) } else { splittree <- path_node(as.party(cond.tree)) } structure <- strsplit(splittree, split = ";") if (inherits(cond.tree, "constparty")) { terminal.id <- nodeids(cond.tree, terminal = TRUE) } else { terminal.id <- nodeids(as.party(cond.tree), terminal = TRUE) } if (length(structure[[1]]) == length(terminal.id)) { structure[[1]] <- sapply(1:length(structure[[1]]), function(i) { paste0(structure[[1]][i], ",", terminal.id[i], " ") }) } if (inherits(cond.tree, "constparty")) { input.info <- data_party(cond.tree) # X <- input.info[,2:(length(input.info)-3)] X <- input.info[, 2:(length(input.info) - 3)] Y <- fitted(cond.tree)[[3]] } else { input.info <- data_party(as.party(cond.tree)) # X <- input.info[,2:(length(input.info)-3)] X <- input.info[, 2:(length(input.info) - 2)] Y <- fitted(as.party(cond.tree))[[2]] } if (is.factor(Y)) { n.terminals <- length(structure[[1]]) # prob.mat <- matrix(data=unlist(lapply(structure[[1]],function(S) { # unlist(lapply(strsplit(S,","),function(split.S) { # seg <- unlist(split.S[length(split.S)]) # as.numeric(trim(strsplit(seg,"=")[[1]][2])) # })) # })), nrow=n.terminals) y.list <- sapply(1:length(structure[[1]]), function(j) { seg <- strsplit(structure[[1]], ",") if (interval == TRUE) { paste0(seg[[j]][c((length(seg[[j]]) - 2):(length(seg[[j]]) - 1))], collapse = ",") } else { paste0(seg[[j]][length(seg[[j]]) - 2], collapse = ",") } }) x.list <- sapply(1:length(structure[[1]]), function(j) { seg <- strsplit(structure[[1]], ",") x.l <- sapply(1:length(seg), function(i) { if (interval == TRUE) { x.length <- length(seg[[i]]) - 3 } else { x.length <- length(seg[[i]]) - 2 } }) if (interval == TRUE) { paste0(seg[[j]][1:(length(seg[[j]]) - 3)], collapse = ",") } else { paste0(seg[[j]][1:(length(seg[[j]]) - 2)], collapse = ",") } }) term.node <- sapply(1:length(structure[[1]]), function(j) { seg <- strsplit(structure[[1]], ",") if (interval == TRUE) { paste0(tail(seg[[j]], 1), collapse = ",") } else { paste0(tail(seg[[j]], 1), collapse = ",") } }) structure <- lapply(1:length(y.list), function(i) { paste0(x.list[[i]], ", ", y.list[[i]], ", ", term.node[[i]]) }) } if (length(unlist(structure)) == 1) { stop("Tree has only a single node; nothing to visualize.") } # terminal.id<-nodeids(cond.tree, terminal = TRUE) n.terminals <- ifelse(is.null(rng), length(unlist(structure)), length(rng)) if (is.null(rng)) { index <- 1:n.terminals } else { index <- min(rng):min(max(rng), length(unlist(structure))) } ## Should probably do some range checking if (length(index) > 10) stop("Number of subgroups exceeds ten") par(mfrow = c(4, ceiling(length(index) / 2)), mar = 
c(2, 1, 3, 1)) number <- length(index) * 2 + (length(index) * 2) %% 4 layout(matrix(1:number, 4, ceiling(length(index) / 2))) invisible( sapply(unlist(structure)[index], function(S) { plot_minmax(minmax_mat(S, colnames(X), Y, interval), X, Y, S, color.type, alpha, add.p.axis, add.h.axis, cond.tree, text.main, text.bar, text.round, text.percentile, density.line, text.title, text.axis, text.label) }) ) }
/scratch/gouwar.j/cran-all/cranData/visTree/R/visTree.R
## ----setup, include = FALSE---------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ---- fig.show='hold', warnings = FALSE---------------------------------- #install_github("AshwiniKV/visTree") library(visTree) ## ---- fig.show='hold', warnings = FALSE---------------------------------- data("blsdata") library(partykit) library(rpart) library(colorspace) ## ---- fig.show='hold', echo = FALSE, warnings = FALSE-------------------- #names(blsdata) names(blsdata)[c(4, 6, 12, 19, 21, 22, 23, 24, 25)]<-c("skcal", "srvgssb", "edeq15", "freqff","resteating", "disinhibition", "hunger", "liking", "wanting") newblsdata<-blsdata[,c(7,21, 22,23, 24, 25, 26)] ## ---- fig.show='hold'---------------------------------------------------- data("blsdata") ## ---- fig.show='hide', warnings = FALSE, out.width = '90%',fig.align = 'center', fig.height = 9, ,fig.width = 12---- potentialtree<-ctree(kcal24h0~., data = newblsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, color.type = 1, alpha = 0.5) ## ---- fig.show='hold', include = FALSE, warnings = FALSE----------------- blsdataedit<-blsdata[,-7] blsdataedit$bin<-0 blsdataedit$bin<-cut(blsdata$kcal24h0, unique(quantile(blsdata$kcal24h0)), include.lowest = TRUE, dig.lab = 4) ## ---- fig.show='hide', out.width = '90%',fig.align = 'center', fig.height = 9, ,fig.width = 12,warnings = FALSE---- potentialtree<-ctree(bin~hunger+rrvfood+resteating+liking+wanting+disinhibition, data = blsdataedit, control = ctree_control(mincriterion = 0.85)) visTree(potentialtree, interval = T) ## ---- fig.show='hide', out.width = '95%',fig.align = 'center', fig.height = 9, ,fig.width = 14, warnings = FALSE---- potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree) ## ---- fig.show='hide', out.width = '98%',fig.align = 'center', fig.height = 9, ,fig.width = 16, warnings = FALSE---- potentialtree<-rpart(kcal24h0~., data = newblsdata, control = rpart.control(cp = 0.015)) visTree(potentialtree) ## ---- fig.show='hide', out.width = '98%',fig.align = 'center', fig.height = 9, ,fig.width = 16, warnings = FALSE---- potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, text.label = 1.5, text.title = 1.5, text.bar = 1.5, text.axis = 1.5, text.main = 1.5) ## ---- fig.show='hide', out.width = '98%',fig.align = 'center', fig.height = 9, ,fig.width = 16, warnings = FALSE---- potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, add.h.axis = FALSE) visTree(potentialtree, add.p.axis = FALSE) ## ---- fig.show='hide', out.width = '98%', fig.align = 'center', fig.height = 9, ,fig.width = 16, warnings = FALSE---- potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, text.round= 3, text.bar = 1.1) ## ---- fig.show='hide', out.width = '98%',fig.align = 'center', fig.height = 9, ,fig.width = 16, warnings = FALSE---- potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, alpha = 0.8) visTree(potentialtree, alpha = 0.3) ## ---- fig.show='hide', out.width = '98%',fig.align = 
'center', fig.height = 9, ,fig.width = 16, warnings = FALSE---- potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, density.line = FALSE)
/scratch/gouwar.j/cran-all/cranData/visTree/inst/doc/visTree.R
--- title: "visTree: Visualization of Subgroups for Decision Trees" author: "Ashwini Venkatasubramaniam and Julian Wolfson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true toc_depth: 3 vignette: > %\VignetteIndexEntry{visTree: Visualization of Subgroups for Decision Trees} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` visTree provides a visualization to characterize subgroups generated by a decision tree. Each individual terminal node identified by a decision tree corresponds to a subplot in the visualization. ### Installation The GitHub version: ```{r, fig.show='hold', warnings = FALSE} #install_github("AshwiniKV/visTree") library(visTree) ``` Load the BLSdata for the given examples and other relevant packages used for drawing trees. For this visTree package, the relevant packages are partykit and rpart. ```{r, fig.show='hold', warnings = FALSE} data("blsdata") library(partykit) library(rpart) library(colorspace) ``` ```{r, fig.show='hold', echo = FALSE, warnings = FALSE} #names(blsdata) names(blsdata)[c(4, 6, 12, 19, 21, 22, 23, 24, 25)]<-c("skcal", "srvgssb", "edeq15", "freqff","resteating", "disinhibition", "hunger", "liking", "wanting") newblsdata<-blsdata[,c(7,21, 22,23, 24, 25, 26)] ``` This document introduces you to the set of tools provided by the visTree package and provides examples of different scenarios that the package is able to developed to accommodate. ## Example dataset The example scenarios are illustrated by applications to the box lunch study dataset. This dataset is available within the package. ```{r, fig.show='hold'} data("blsdata") ``` ## Outcome type The visTree package is able to accommodate both continuous and categorical outcomes. An option interval = TRUE is utilized within the visTree function to display the relevant graphical output for a continuous outcome rather than a categorical outcome. ### Continuous outcome ```{r, fig.show='hide', warnings = FALSE, out.width = '90%',fig.align = 'center', fig.height = 9, ,fig.width = 12} potentialtree<-ctree(kcal24h0~., data = newblsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, color.type = 1, alpha = 0.5) ``` ### Categorical outcome ```{r, fig.show='hold', include = FALSE, warnings = FALSE} blsdataedit<-blsdata[,-7] blsdataedit$bin<-0 blsdataedit$bin<-cut(blsdata$kcal24h0, unique(quantile(blsdata$kcal24h0)), include.lowest = TRUE, dig.lab = 4) ``` ```{r, fig.show='hide', out.width = '90%',fig.align = 'center', fig.height = 9, ,fig.width = 12,warnings = FALSE} potentialtree<-ctree(bin~hunger+rrvfood+resteating+liking+wanting+disinhibition, data = blsdataedit, control = ctree_control(mincriterion = 0.85)) visTree(potentialtree, interval = T) ``` ### Repeated Splits This series of plots describe the splits leading to each subgroup and the splits need not necessarily be composed of different variables. The splits over multiple levels can be performed on the same variable and these are are summarized such that the resulting intervals are readily interpretable. The horizontal bars display these splits and the relevant criterions. 
```{r, fig.show='hide', out.width = '95%',fig.align = 'center', fig.height = 9, ,fig.width = 14, warnings = FALSE} potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree) ``` ### Other Trees, (e.g., rpart) The examples in this document have so far focused on scenarios described over conditional inference trees. The conditional inference tree is implemented as an object of class party using the partykit package. However, the visTree package is also able to accommodate other types of decision tree structures such as CART (implemented by the rpart package); CART is generated as an object of class rpart by the rpart package. ```{r, fig.show='hide', out.width = '98%',fig.align = 'center', fig.height = 9, ,fig.width = 16, warnings = FALSE} potentialtree<-rpart(kcal24h0~., data = newblsdata, control = rpart.control(cp = 0.015)) visTree(potentialtree) ``` ## Display controls ### Text The controls within the visTree function can be utilized to specify different text sizes for the title of the subplots (text.title), title of the histogram (text.main), axis labels placed at the tickmarks (text.axis), title labels for the axis (text.labels) and the splits placed on the horizontal bars (text.bar). ```{r, fig.show='hide', out.width = '98%',fig.align = 'center', fig.height = 9, ,fig.width = 16, warnings = FALSE} potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, text.label = 1.5, text.title = 1.5, text.bar = 1.5, text.axis = 1.5, text.main = 1.5) ``` ### Axis The axis for each subplot within the visualization is placed above the horizontal colored bars for the percentiles of relevant covariates and below the histogram/bar chart for the outcome values. Both these axes can be removed or placed as necessary using the options add.h.axis (associated with the colored bars) and add.p.axis (associated with the percentiles) within the visTree function. ```{r, fig.show='hide', out.width = '98%',fig.align = 'center', fig.height = 9, ,fig.width = 16, warnings = FALSE} potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, add.h.axis = FALSE) visTree(potentialtree, add.p.axis = FALSE) ``` ### Rounding the displayed split criterion In addition to changing the size of the text placed on the bars, the number of decimal places can also be specified for the splitting criterions that are displayed on the horizontal bars. This is implemented using the option text.round in the visTree function. ```{r, fig.show='hide', out.width = '98%', fig.align = 'center', fig.height = 9, ,fig.width = 16, warnings = FALSE} potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, text.round= 3, text.bar = 1.1) ``` ### Transparency The transparency of the horizontal bars in each of the subplots can also be modified by specifying a value between 0 and 1 for alpha in the visTree function. As values get closer to 1, the opaqueness of the horizontal colored bars increases. 
```{r, fig.show='hide', out.width = '98%',fig.align = 'center', fig.height = 9, ,fig.width = 16, warnings = FALSE} potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, alpha = 0.8) visTree(potentialtree, alpha = 0.3) ``` ### Density curve The visualization tool accommodates continuous and categorical data. For continuous data, a density curve over the histogram can also be placed or removed from the lower part of the sub-plot. ```{r, fig.show='hide', out.width = '98%',fig.align = 'center', fig.height = 9, ,fig.width = 16, warnings = FALSE} potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, density.line = FALSE) ```
/scratch/gouwar.j/cran-all/cranData/visTree/inst/doc/visTree.Rmd
--- title: "visTree: Visualization of Subgroups for Decision Trees" author: "Ashwini Venkatasubramaniam and Julian Wolfson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true toc_depth: 3 vignette: > %\VignetteIndexEntry{visTree: Visualization of Subgroups for Decision Trees} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` visTree provides a visualization to characterize subgroups generated by a decision tree. Each individual terminal node identified by a decision tree corresponds to a subplot in the visualization. ### Installation The GitHub version: ```{r, fig.show='hold', warnings = FALSE} #install_github("AshwiniKV/visTree") library(visTree) ``` Load the BLSdata for the given examples and other relevant packages used for drawing trees. For this visTree package, the relevant packages are partykit and rpart. ```{r, fig.show='hold', warnings = FALSE} data("blsdata") library(partykit) library(rpart) library(colorspace) ``` ```{r, fig.show='hold', echo = FALSE, warnings = FALSE} #names(blsdata) names(blsdata)[c(4, 6, 12, 19, 21, 22, 23, 24, 25)]<-c("skcal", "srvgssb", "edeq15", "freqff","resteating", "disinhibition", "hunger", "liking", "wanting") newblsdata<-blsdata[,c(7,21, 22,23, 24, 25, 26)] ``` This document introduces you to the set of tools provided by the visTree package and provides examples of different scenarios that the package is able to developed to accommodate. ## Example dataset The example scenarios are illustrated by applications to the box lunch study dataset. This dataset is available within the package. ```{r, fig.show='hold'} data("blsdata") ``` ## Outcome type The visTree package is able to accommodate both continuous and categorical outcomes. An option interval = TRUE is utilized within the visTree function to display the relevant graphical output for a continuous outcome rather than a categorical outcome. ### Continuous outcome ```{r, fig.show='hide', warnings = FALSE, out.width = '90%',fig.align = 'center', fig.height = 9, ,fig.width = 12} potentialtree<-ctree(kcal24h0~., data = newblsdata, control = ctree_control(mincriterion = 0.95)) visTree(potentialtree, color.type = 1, alpha = 0.5) ``` ### Categorical outcome ```{r, fig.show='hold', include = FALSE, warnings = FALSE} blsdataedit<-blsdata[,-7] blsdataedit$bin<-0 blsdataedit$bin<-cut(blsdata$kcal24h0, unique(quantile(blsdata$kcal24h0)), include.lowest = TRUE, dig.lab = 4) ``` ```{r, fig.show='hide', out.width = '90%',fig.align = 'center', fig.height = 9, ,fig.width = 12,warnings = FALSE} potentialtree<-ctree(bin~hunger+rrvfood+resteating+liking+wanting+disinhibition, data = blsdataedit, control = ctree_control(mincriterion = 0.85)) visTree(potentialtree, interval = T) ``` ### Repeated Splits This series of plots describe the splits leading to each subgroup and the splits need not necessarily be composed of different variables. The splits over multiple levels can be performed on the same variable and these are are summarized such that the resulting intervals are readily interpretable. The horizontal bars display these splits and the relevant criterions. 
```{r, fig.show='hide', out.width = '95%', fig.align = 'center', fig.height = 9, fig.width = 14, warning = FALSE}
potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95))
visTree(potentialtree)
```

### Other Trees (e.g., rpart)

The examples in this document have so far focused on scenarios based on conditional inference trees. The conditional inference tree is implemented as an object of class party using the partykit package. However, the visTree package can also accommodate other types of decision tree structures such as CART (implemented by the rpart package); CART is generated as an object of class rpart by the rpart package.

```{r, fig.show='hide', out.width = '98%', fig.align = 'center', fig.height = 9, fig.width = 16, warning = FALSE}
potentialtree<-rpart(kcal24h0~., data = newblsdata, control = rpart.control(cp = 0.015))
visTree(potentialtree)
```

## Display controls

### Text

The controls within the visTree function can be used to specify different text sizes for the title of the subplots (text.title), the title of the histogram (text.main), the axis labels placed at the tick marks (text.axis), the title labels for the axis (text.label) and the splits placed on the horizontal bars (text.bar).

```{r, fig.show='hide', out.width = '98%', fig.align = 'center', fig.height = 9, fig.width = 16, warning = FALSE}
potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95))
visTree(potentialtree, text.label = 1.5, text.title = 1.5, text.bar = 1.5, text.axis = 1.5, text.main = 1.5)
```

### Axis

The axes for each subplot are placed above the horizontal colored bars (showing the percentiles of the relevant covariates) and below the histogram/bar chart (showing the outcome values). Both of these axes can be removed or added as necessary using the options add.h.axis (associated with the outcome histogram or bar chart) and add.p.axis (associated with the percentile scale above the colored bars) within the visTree function.

```{r, fig.show='hide', out.width = '98%', fig.align = 'center', fig.height = 9, fig.width = 16, warning = FALSE}
potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95))
visTree(potentialtree, add.h.axis = FALSE)
visTree(potentialtree, add.p.axis = FALSE)
```

### Rounding the displayed split criterion

In addition to changing the size of the text placed on the bars, the number of decimal places can also be specified for the splitting criteria that are displayed on the horizontal bars. This is implemented using the option text.round in the visTree function.

```{r, fig.show='hide', out.width = '98%', fig.align = 'center', fig.height = 9, fig.width = 16, warning = FALSE}
potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95))
visTree(potentialtree, text.round = 3, text.bar = 1.1)
```

### Transparency

The transparency of the horizontal bars in each of the subplots can be modified by specifying a value between 0 and 1 for alpha in the visTree function. As the value gets closer to 1, the opacity of the horizontal colored bars increases.
```{r, fig.show='hide', out.width = '98%', fig.align = 'center', fig.height = 9, fig.width = 16, warning = FALSE}
potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95))
visTree(potentialtree, alpha = 0.8)
visTree(potentialtree, alpha = 0.3)
```

### Density curve

The visualization tool accommodates continuous and categorical data. For continuous data, a density curve can be drawn over (or removed from) the histogram in the lower part of each subplot.

```{r, fig.show='hide', out.width = '98%', fig.align = 'center', fig.height = 9, fig.width = 16, warning = FALSE}
potentialtree<-ctree(kcal24h0~skcal+hunger+rrvfood+resteating+liking+wanting+age, data = blsdata, control = ctree_control(mincriterion = 0.95))
visTree(potentialtree, density.line = FALSE)
```
/scratch/gouwar.j/cran-all/cranData/visTree/vignettes/visTree.Rmd
#' @include visa.R #' #' Class 'Spectra'/'SpectraDatabase' #' #' An S4 Class `Spectra`, with five slots of data. #' It has `SpectraDatabase` as the alias. #' #' @name Spectra-class #' @rdname Spectra-class #' @aliases Spectra,Spectra-class #' @docType class #' @slot spectra A matrix #' @slot wavelength A numeric vector #' @slot w.unit A character string #' @slot data A data.frame #' @importFrom methods new #' @exportClass Spectra Spectra <- setClass("Spectra", slots = c(spectra = "matrix", wavelength = "numeric", w.unit = "character", data = "data.frame")) setValidity("Spectra", function(object){ w <- length(object) if (length(w) != 0L && any(w != w[[1]])) return("object width is not constant") TRUE } ) setMethod("initialize", "Spectra", function(.Object, spectra = matrix(0), wavelength = numeric(0), w.unit = character(0), data = data.frame(), ...){ .Object <- methods::callNextMethod() if(length(.Object@data) == 0 && ncol(.Object@spectra) != length(.Object@wavelength)){ stop("specified spectra and wavelength of different dimensions") } else if (length(.Object@data) >= 1 && nrow(.Object@spectra) != nrow(.Object@data)){ stop("specified spectra and data of different lengths") } else .Object } ) #' Class 'SpectraDatabase' #' #' SpectraDatabase is an extended 'Spectra' class, with associated vegetation data ('data') #' in a \link{data.frame}. #' #' @name SpectraDatabase-class #' @rdname SpectraDatabase-class #' @aliases SpectraDatabase-class,spectra.database #' @docType class #' @slot spectra A matrix #' @slot wavelength A numeric vector #' @slot w.unit A character string #' @slot data A data.frame of vegetation data corresponding to the spectra #' @export #' @exportClass SpectraDatabase setClass("SpectraDatabase", contains = "Spectra", slots = c(data="data.frame"), validity = function(object){ w <- length(object) if (length(w) != 0L && any(w != w[[1]])) return("object width is not constant") TRUE } ) setMethod("initialize", "SpectraDatabase", function(.Object, spectra = matrix(0), wavelength = numeric(0), w.unit = character(0), data = data.frame(0), ...){ .Object <- methods::callNextMethod() if(nrow(.Object@spectra) != nrow(.Object@data) && length(.Object@wavelength) >1) stop("specified 'spectra' and 'data' of different lengths") .Object } ) #' Create a Spectra or SpectraDatabase #' #' Constructor \code{as.spectra} creates a Spectra object. #' #' @name as.spectra #' @rdname Spectra-class #' @param spectra A matrix #' @param wavelength A numeric vector #' @param w.unit A character string #' @param data A data.frame #' @param ... Other parameters #' @examples #' s <- as.spectra(matrix(1:100, 4), 1:25, "nm", data.frame(x = letters[1:4])) #' str(s) #' #' @export as.spectra <- function(spectra = matrix(0), wavelength = numeric(0), w.unit = "nm", data = data.frame(), ...){ return(methods::new("Spectra", spectra, wavelength, w.unit, data, ...)) } #' Create a Spectra or SpectraDatabase #' #' Constructor \code{as.spectra.database} creates a SpectraDatabase object. #' #' @rdname Spectra-class #' @examples #' s <- as.spectra.database(matrix(1:100, 4), 1:25, "nm", data.frame(x = letters[1:4])) #' str(s) #' @export as.spectra.database <- function(spectra = matrix(0), wavelength = numeric(0), w.unit = "nm", data = data.frame(), ...){ return(methods::new("Spectra", spectra, wavelength, w.unit, data, ...)) } #' Class 'SpectraMatrix' #' #' SpectraMatrix is a extended 'Spectra' class. 
#' #' @name SpectraMatrix-class #' @rdname SpectraMaxtrix-class #' @aliases SpectraMaxtrix-class,spectra.maxtrix #' @docType class #' @export #' @exportClass SpectraMatrix setClass("SpectraMatrix", contains = "Spectra", validity = function(object){ w <- length(object) if (length(w) != 0L && any(w != w[[1]])) return("object width is not constant") TRUE } ) setMethod("initialize", "SpectraMatrix", function(.Object, spectra = matrix(0), wavelength = numeric(0), w.unit = character(0), ...){ .Object <- methods::callNextMethod() if(ncol(.Object@spectra) != length(.Object@wavelength)) stop("specified spectra and wavelength are of different lengths") .Object } ) #' Create a SpectraMatrix #' #' Constructor \code{as.spectra.matrix} creates a SpectraMatrix object. #' #' @name as.spectra.matrix #' @rdname SpectraMaxtrix-class #' @param spectra A matrix #' @param wavelength A numeric vector #' @param w.unit A character string #' #' @return #' \item{sdf}{Returns a SpectraDataFrame.} #' #' @examples #' smatrix <- as.spectra.matrix(matrix(1:10, 1), 1:10, "nm") #' str(smatrix) #' @export as.spectra.matrix <- function(spectra = matrix(0), wavelength = numeric(0), w.unit = character(0)){ sls <- methods::new("SpectraMatrix", spectra, wavelength, w.unit) smat <- sls@spectra colnames(smat) <- paste(wavelength, w.unit) # rownames(smat) <- [email protected] smat } #' Class 'SpectraDataFrame' #' #' SpectraDataFrame is an extended 'Spectra' class, with associated vegetation data ('data') #' in a \link{data.frame}. #' #' @name SpectraDataFrame-class #' @rdname SpectraDataFrame-class #' @aliases SpectraDataFrame,spectra.data.frame #' @docType class #' @slot spectra A matrix #' @slot wavelength A numeric vector #' @slot w.unit A character string #' @slot data A data.frame of vegetation data corresponding to the spectra #' @export #' @exportClass SpectraDataFrame setClass("SpectraDataFrame", contains = "Spectra", slots = c(data="data.frame"), validity = function(object){ w <- length(object) if (length(w) != 0L && any(w != w[[1]])) return("object width is not constant") TRUE } ) setMethod("initialize", "SpectraDataFrame", function(.Object, spectra = matrix, wavelength = numeric, w.unit = character, data = data.frame, ...){ .Object <- methods::callNextMethod() if(nrow(.Object@spectra) != nrow(.Object@data) && length(.Object@wavelength) >1) stop("specified 'spectra' and 'data' of different lengths") .Object } ) #' Create a SpectraDataFrame #' #' Constructor \code{as.spectra.data.frame} function creates a SpectraDataFrame object, which is equivalent to the use of \link{as.specdf}. #' #' @name as.spectra.data.frame #' @rdname SpectraDataFrame #' @aliases as.specdf #' @param data A data.frame #' @param spectra A matrix #' @param wavelength A numeric vector #' @param w.unit A character string #' @param ... Other options for similar format of variables #' #' @return #' \item{sdf}{Returns a SpectraDataFrame.} #' @examples #' sdf <- as.spectra.data.frame(matrix(1:10, 1), 1:10, "nm", data.frame(a = 1, b =2)) #' str(sdf) #' @export as.spectra.data.frame <- function(spectra = matrix(0), wavelength = numeric(0), w.unit = character(0), data = data.frame(0), ...){ sls <- methods::new("SpectraDataFrame", spectra, wavelength, w.unit, data) spectra <- sls@spectra colnames(spectra) <- paste(wavelength, w.unit) sdf <- sls@data sdf$spectra <- spectra sdf }
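# Usage sketch (not run): build a small Spectra object and inspect its slots.
# The matrix, wavelengths and data.frame below are made-up toy values.
if (FALSE) {
  s <- as.spectra(spectra = matrix(runif(40), nrow = 4),
                  wavelength = seq(400, 490, by = 10),
                  w.unit = "nm",
                  data = data.frame(N = c(1.2, 2.3, 3.1, 2.8)))
  s@spectra     # 4 x 10 matrix of spectral values
  s@wavelength  # 400, 410, ..., 490
  s@data$N      # associated vegetation variable
}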
/scratch/gouwar.j/cran-all/cranData/visa/R/Spectra-class.R
#' Selecting the best 2-Band combinations for Normalized Simple Ratio (NSR)
#'
#' This function implements an optimization algorithm based on correlation analysis between the spectral matrix 'spectra' and the
#' vegetation variable of interest x. It determines the best two-band combinations of the full spectrum that are most predictive for 'x'.
#'
#' @param S A matrix of spectral data; a row is a spectrum across all spectral bands.
#' @param x A numeric vector of the vegetation variable of interest (e.g., nitrogen content).
#' @param w A numeric vector of wavelengths; by default derived from 'S'.
#' @param w.unit A character string giving the wavelength unit; default NULL.
#' @param cm.plot A logical value indicating whether to plot the coefficient matrix; default FALSE.
#' @return
#' \item{cm}{Returns a matrix of squared correlation coefficients (R^2), where the row and column positions correspond to the two bands (i, j).}
#'
#' @details
#' This function exhaustively evaluates \deqn{ NSR = (R_j - R_i)/(R_j + R_i) } for all possible pairs/combinations of any two bands (i, j)
#' within the full spectrum range, where \eqn{R_i} and \eqn{R_j} are the reflectances at bands i and j. A correlation analysis is then
#' performed between x and all possible NSR values, and the squared Pearson correlation coefficients (R^2) are computed to indicate the
#' predictive performance of each two-band combination. The function prints the wavelengths (nm) of the best two bands, i.e. those that
#' produce the highest R^2, and returns the full R^2 matrix.
#' @seealso \code{\link{cor}}
#' @examples
#' library(visa)
#' data(NSpec.DF)
#' x <- NSpec.DF$N # nitrogen
#' S <- NSpec.DF$spectra[, seq(1, ncol(NSpec.DF$spectra), 5)] # resampled to 5 nm steps
#' cm <- cm.nsr(S, x, cm.plot = TRUE)
#'
#' @import ggplot2 Matrix reshape2 grDevices
#' @export
cm.nsr <- function(S, x, w = wavelength(S), w.unit = NULL, cm.plot = FALSE){

  # determine the format of spectra
  # if (is(spectra, "Spectra")) w <- wavelength(spectra)
  # if (is(spectra, "data.frame")) w <- wavelength(spectra) # should be numeric
  # if (is(spectra, "matrix")) w <- wavelength(spectra) # should be numeric
  spectra <- spectra(S)
  if (is.matrix(spectra) && is.null(colnames(spectra)) && length(w) == 0)
    stop("Wavelength for the spectra matrix is not correctly defined")

  n <- dim(spectra)[2] # number of wavebands, should equal length(w)

  ## NSR = (Rj - Ri)/(Rj + Ri)
  R2 <- Matrix::Matrix(0, n, n, sparse = TRUE)  # zero sparse matrix
  Rj <- spectra
  ones <- matrix(1, 1, n)

  for (cI in 1:n){
    Ri <- spectra[, cI]
    Ri <- Ri %*% ones # to matrix

    # VI formula
    V <- (Rj - Ri)/(Rj + Ri)

    # squared values (R2) of the Pearson correlation coefficients
    Rcorr <- (stats::cor(x, V))^2

    # store the values of R2: row cI indexes band i, columns index band j
    spR <- Matrix::sparseMatrix(i = rep(cI, n), j = c(1:n), x = as.numeric(Rcorr), dims = c(n, n))
    R2 <- R2 + spR
  }

  cm <- as.matrix(R2)
  colnames(cm) <- paste(w, "nm")

  R2max <- max(cm, na.rm = TRUE)
  print(paste('The max value of R^2 is', as.character(round(R2max, 4))))

  ind_max <- which(cm == R2max, arr.ind = TRUE)
  bestBands <- w[ind_max[1, ]]
  print(paste(c("i", "j"), as.vector(bestBands), sep = "_"))

  # plot of the coefficient matrix
  cm_plot <- ggplot.cm(cm, show.stat = FALSE)
  if (isTRUE(cm.plot)) print(cm_plot)

  invisible(cm)
}
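# Usage sketch (not run): locate the best band pair in the returned R^2 matrix and
# rebuild the corresponding NSR index by hand (rows of cm index band i, columns band j).
if (FALSE) {
  data(NSpec.DF)
  x <- NSpec.DF$N
  S <- NSpec.DF$spectra[, seq(1, ncol(NSpec.DF$spectra), 5)]
  cm <- cm.nsr(S, x)
  best <- which(cm == max(cm, na.rm = TRUE), arr.ind = TRUE)[1, ]
  Ri <- S[, best["row"]]  # band i
  Rj <- S[, best["col"]]  # band j
  nsr <- (Rj - Ri) / (Rj + Ri)
  ggplot.lmfit(nsr, x)    # linear fit of the best NSR against N
}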
/scratch/gouwar.j/cran-all/cranData/visa/R/cm.nsr.R
#' Selecting the best 2-Band combinations for Simple Ratio (SR)
#'
#' @name cm.sr
#' @description This function implements an optimization algorithm based on correlation analysis between the spectral matrix 'spectra' and the
#' vegetation variable of interest x. It determines the best two-band combinations of the full spectrum that are most predictive for 'x'.
#'
#' @details This function exhaustively evaluates \deqn{ SR = R_i / R_j } for all possible pairs/combinations of any two bands (i, j)
#' within the full spectrum range, where \eqn{R_i} and \eqn{R_j} are the reflectances at bands i and j. A correlation analysis is then
#' performed between x and all possible SR values, and the squared Pearson correlation coefficients (R^2) are computed to indicate the
#' predictive performance of each two-band combination. The function prints the wavelengths (nm) of the best two bands, i.e. those that
#' produce the highest R^2, and returns the full R^2 matrix.
#'
#' @inheritParams cm.nsr
#' @return
#' \item{cm}{Returns a matrix of squared correlation coefficients (R^2), where the row and column positions correspond to the two bands (i, j).}
#' @seealso \code{\link{cm.nsr}}
#' @examples
#' library(visa)
#' data(NSpec.DF)
#' x <- NSpec.DF$N # nitrogen
#' S <- NSpec.DF$spectra[, seq(1, ncol(NSpec.DF$spectra), 10)] # resampled to 10 nm steps
#' cm <- cm.sr(S, x, cm.plot = FALSE)
#'
#' @import ggplot2 Matrix reshape2 grDevices
#' @export
cm.sr <- function(S, x, w = wavelength(S), w.unit = NULL, cm.plot = FALSE){

  # account for the format of spectra
  spectra <- spectra(S)
  if (is.matrix(spectra) && is.null(colnames(spectra)) && length(w) == 0)
    stop("Wavelength for the spectra matrix is not correctly defined")

  n <- dim(spectra)[2] # number of wavebands, should equal length(w)
  dn <- list(paste0("i_", w), paste0("j_", w))

  ## SR = Ri/Rj
  R2 <- Matrix::Matrix(0, n, n, dimnames = dn, sparse = TRUE)  # zero sparse matrix
  Rj <- spectra
  ones <- matrix(1, 1, n)

  for (cI in 1:n){
    Ri <- spectra[, cI]
    Ri <- Ri %*% ones # to matrix

    # VI formula
    V <- Ri/Rj

    # squared values (R2) of the Pearson correlation coefficients
    Rcorr <- (stats::cor(x, V))^2

    # store the values of R2: row cI indexes band i, columns index band j
    spR <- Matrix::sparseMatrix(i = rep(cI, n), j = c(1:n), x = as.numeric(Rcorr), dims = c(n, n), dimnames = dn)
    R2 <- R2 + spR
  }

  cm <- Matrix::as.matrix(R2)

  R2max <- max(cm, na.rm = TRUE)
  print(paste('The max value of R^2 is', as.character(round(R2max, 4))))

  ind_max <- which(cm == R2max, arr.ind = TRUE)
  bestBands <- w[ind_max[1, ]]
  print(paste(c("i", "j"), as.vector(bestBands), sep = "_"))

  colnames(cm) <- paste(w, "nm")

  # plot of the coefficient matrix
  cm_plot <- ggplot.cm(cm, show.stat = FALSE)
  if (isTRUE(cm.plot)) print(cm_plot)

  invisible(cm)
}
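# Usage sketch (not run): run the simple-ratio band search and redraw the
# coefficient matrix with the in-package plotting helper (the same call that is
# used internally when cm.plot = TRUE).
if (FALSE) {
  data(NSpec.DF)
  x <- NSpec.DF$N
  S <- NSpec.DF$spectra[, seq(1, ncol(NSpec.DF$spectra), 10)]
  cm <- cm.sr(S, x)
  ggplot.cm(cm, show.stat = FALSE)
}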
/scratch/gouwar.j/cran-all/cranData/visa/R/cm.sr.R
#' Example data in the Spectra/SpectraDatabase format.
#'
#' An S4 data structure containing plant spectra and the corresponding nitrogen (N) content. The spectra are organized as a matrix and stored in the slot
#' named 'spectra'. The corresponding N content is stored in the slot 'data', a data.frame intended for storing vegetation traits,
#' such as, here, the plant N content.
#'
#' @aliases Data-SpectraDatabase,Data-Spectra
#' @format A Spectra object with 19 rows and 4 slots (spectra, wavelength, w.unit, data).
#'
#' \describe{
#'   \item{spectra}{A matrix of plant spectral data}
#'   \item{wavelength}{A vector of wavelengths corresponding to the columns of 'spectra'}
#'   \item{w.unit}{A character string giving the wavelength unit (default "nm")}
#'   \item{data}{A data.frame of vegetation traits, here the plant nitrogen content}
#'   ...{currently not used}
#' }
#' @examples
#' library(visa)
#' data(NSpec.DB)
#' str(NSpec.DB)
#'
"NSpec.DB"
/scratch/gouwar.j/cran-all/cranData/visa/R/data-specdb.R
#' Example data in the SpectraDataFrame format
#'
#' A dataset containing the plant nitrogen content and spectra.
#' The spectra matrix is stored as a single variable (a matrix column) of a data.frame.
#'
#' @aliases Data-SpectraDataFrame
#' @format A data frame with 19 rows and 2 variables:
#'
#' \describe{
#'   \item{N}{Plant nitrogen content}
#'   \item{spectra}{A matrix of plant spectra stored as a single (matrix) column}
#'   ...
#' }
#' @examples
#' library(visa)
#' data(NSpec.DF)
#' str(NSpec.DF)
#' @seealso \link{data.frame} and \code{\link{NSpec.DB}}
#'
"NSpec.DF"
/scratch/gouwar.j/cran-all/cranData/visa/R/data-specdf.R
#' Plot functions #' #' @name ggplot-method NULL
/scratch/gouwar.j/cran-all/cranData/visa/R/ggplot-method.R
#' Create a ggplot plot for linear fit with Equation and R^2.
#'
#' This function plots the model fit using ggplot.
#'
#' Visualization of a linear fit (y = ax + b) as a scatter plot with the regression line, annotated with the
#' regression equation and R^2.
#'
#' @rdname ggplot-method
#'
#' @param x,y Two vectors
#' @param ... Other arguments passed on to methods. Not currently used.
#' @param environment If a variable defined in the aesthetic mapping is not
#'   found in the data, ggplot will look for it in this environment. It defaults
#'   to using the environment in which \code{ggplot()} is called.
#' @return
#' \item{p}{Returns a ggplot object.}
#'
#' @examples
#' library(visa)
#' x <- 1:10
#' y <- 2:11 + 0.5
#' ggplot.lmfit(x, y)
#'
#' @import ggplot2
#' @importFrom ggpmisc stat_poly_eq
#' @export ggplot.lmfit
ggplot.lmfit <- function(x, y, ..., environment = parent.frame()){
  ..eq.label.. <- ..rr.label.. <- NULL # To pass R CMD check: Undefined global functions or variables
  df <- data.frame(x,y)
  my.formula <- y ~ x
  p <- ggplot2::ggplot(data = df, aes(x, y), ... = ..., environment = environment) +
    geom_point() +
    geom_smooth(method = "lm", se = FALSE, color = "blue", formula = my.formula) +
    ggpmisc::stat_poly_eq(aes(label = paste(..eq.label.., ..rr.label.., sep = "*plain(\",\")~")),
                          formula = my.formula, rr.digits = 4, parse = TRUE, col = "blue", size = 4, ...)
  # yrange <- ggplot_build(p)$panel$ranges[[1]]$y.range
  # xrange <- ggplot_build(p)$panel$ranges[[1]]$x.range
  # p <- p + stat_poly_eq(data = df, formula = my.formula, eq.with.lhs = "italic(y)~`=`~",
  #                       aes(label = paste(stat(eq.label), stat(rr.label), sep = "~~~")),
  #                       parse = TRUE, col = "blue", label.x = xrange[2]*0.5,
  #                       label.y = yrange[2]*0.95, size = 4)
  p
}
/scratch/gouwar.j/cran-all/cranData/visa/R/ggplot.lmfit.R
#' Create a new ggplot plot with a geom_line() layer from spectra data
#'
#' \code{ggplot()} initializes a ggplot object. It can be used to
#' declare the input spectra object for a graphic and to optionally specify the
#' set of plot aesthetics intended to be common throughout all
#' subsequent layers unless specifically overridden.
#'
#' \code{ggplot()} is typically used to construct a plot
#' incrementally, using the + operator to add layers to the
#' existing ggplot object. This is advantageous in that the
#' code is explicit about which layers are added and the order
#' in which they are added. For complex graphics with multiple
#' layers, initialization with \code{ggplot} is recommended.
#'
#'
#' @param data Default spectra database to use for plot. If not a spectra database, the
#'   methods used will be those defined in package \code{ggplot2}. See \code{\link[ggplot2]{ggplot}}.
#'   If not specified, must be supplied in each layer added to the plot.
#' @param mapping Default list of aesthetic mappings to use for plot.
#'   If not specified, in the case of spectra objects, a default mapping will
#'   be used.
#' @param wl numeric The wavelength vector.
#' @param w.unit character The wavelength unit of the spectra.
#' @param ... Other arguments passed on to methods. Not currently used.
#' @param environment If a variable defined in the aesthetic mapping is not
#'   found in the data, ggplot will look for it in this environment. It defaults
#'   to using the environment in which \code{ggplot()} is called.
#'
#' @seealso \code{?ggpmisc::ggplot()}
#' @examples
#' library(visa)
#' library(ggplot2)
#' ggplot.spectra(NSpec.DF)
#'
#' @note Current implementation does not merge default mapping with user
#'   supplied mapping. If user supplies a mapping, it is used as is.
#'   To add to the default mapping, aes() can be used by itself to compose
#'   the ggplot.
#'
#' @import reshape2 ggplot2
#' @name ggplot
#' @export ggplot.spectra
ggplot.spectra <- function(data, mapping = NULL, ..., wl = NULL, w.unit = "nm",
                           environment = parent.frame()) {
  spec <- spectra(data)
  # wl <- wavelength(data)
  specdf <- melt(spec) # "Var1" is the rownames
  specdf$Var1 <- as.factor(specdf$Var1)
  specdf$Var2 <- as.numeric(gsub("\\D", "", specdf$Var2))
  names(specdf)[1:2] <- c("spectrum_id", "band")
  if (is.null(mapping)) {
    mapping <- aes_string(x = "band", y = "value", group = "spectrum_id", color = "spectrum_id")
  }
  ggplot2::ggplot(data = specdf, mapping = mapping, ... = ..., environment = environment) +
    geom_line(show.legend = FALSE)
}

#' Create a new ggplot plot from the correlation matrix derived from the
#' cm.nsr and cm.sr output.
#'
#' @rdname ggplot
#'
#' @param show.stat A logical value indicating whether to show the best R^2 and the corresponding bands.
#' @return
#' \item{cm_plot}{Returns a ggplot object of correlation-matrix.}
#'
#' @examples
#' library(visa)
#' data(NSpec.DF)
#' x <- NSpec.DF$N # nitrogen
#' S <- NSpec.DF$spectra[, seq(1, ncol(NSpec.DF$spectra), 5)] # resampled to 5 nm steps
#' cm <- cm.sr(S, x, cm.plot = FALSE)
#' ggplot.cm(cm)
#' @import ggplot2 reshape2 grDevices
#' @export ggplot.cm
ggplot.cm <- function(data, mapping = NULL, ..., show.stat = TRUE,
                      environment = parent.frame()){
  # Identify the max R2 and its corresponding bands in a correlation matrix
  w <- as.numeric(gsub("\\D", "", colnames(data)))
  R2max <- max(data, na.rm = TRUE)
  if (show.stat) print(paste('The max value of R^2 is', as.character(round(R2max,4))))
  ind_max <- which(data == R2max, arr.ind = TRUE)
  # ind_max
  bestBands = w[ind_max[1,]]
  if (show.stat) print(paste(c("i", "j"), as.vector(bestBands), sep = "_"))
  # plot correlation matrix
  cmDF <- reshape2::melt(data)
  w1_index <- cmDF$Var1
  w2_index <- cmDF$Var2
  cmDF$Var1 <- w[w1_index]
  cmDF$Var2 <- w[w2_index]
  myPalette <- colorRampPalette(rev(RColorBrewer::brewer.pal(11, "Spectral")), space="Lab")
  if (is.null(mapping)) {
    mapping <- aes_string("Var1", "Var2", fill = "value")
  }
  cmp <- ggplot2::ggplot(cmDF, mapping = mapping, ... = ..., environment = environment)+
    geom_tile()+
    scale_fill_gradientn(colours = myPalette(100))+
    coord_equal()+
    theme_bw()
  cm_plot <- cmp + xlab("Wavelength i") + ylab("Wavelength j")
  cm_plot
  # cm_plot <- cm_plot + scale_x_discrete(expand = c(0, 0))+
  #   scale_y_discrete(expand = c(0, 0))
  # print(cm_plot)
}
/scratch/gouwar.j/cran-all/cranData/visa/R/ggspectra.R
#' Calculate and plot a 2-band NDVI.
#'
#' This function calculates a 2-band NDVI using the \code{\link{nsr}} function.
#'
#' @details
#' Calculate a NDVI with two specific bands of choice. The index is computed internally with \code{\link{nsr}},
#' which corresponds to \deqn{NDVI = (\lambda_j - \lambda_i)/(\lambda_j + \lambda_i)}.
#' Bands i and j correspond to the b1 and b2 input arguments, respectively. Wavelength
#' indexes are determined based on the first argument 's'.
#'
#' @rdname ndvi2
#' @inheritParams nsr
#' @return
#' \item{ndvi}{The returned values are the new NDVI.}
#' @examples
#' library(visa)
#' s <- NSpec.DF$spectra
#' ndvi2(s, 780, 680)
#'
#' @import ggplot2
#' @export
ndvi2 <- function(s, b1, b2){
  if (is.null(s)) stop("input s is not valid spectra")
  ndvi <- nsr(s, b1, b2)
}
/scratch/gouwar.j/cran-all/cranData/visa/R/ndvi2.R
#' Access the spectra data of 'SpectraDatabase'. #' #' Functions to access slot data of the Class Spectra. #' #' Construct generic functions for the Spectra object, spectra.data.frame, #' and spectra.matrix. #' #' @include Spectra-class.R #' @docType methods #' @rdname spectra-methods #' @name spectra #' @aliases spectra #' @param object A Spectra object, spectra.data.frame, or spectra.matrix. #' @param ... Other options. #' @examples #' # For the S4 class 'Spectra' #' library(visa) #' data(NSpec.DB) #' spectra_matrix <- spectra(NSpec.DB) #' # For the spectra data.frame #' data(NSpec.DF) #' spectra_matrix <- spectra(NSpec.DF) #' #' @export spectra setGeneric("spectra", function(object, ...) standardGeneric("spectra")) #' @rdname spectra-methods #' @aliases spectra,Spectra,ANY-method setMethod("spectra", signature(object = "Spectra"), function(object, ...){ mat <- object@spectra colnames(mat) <- object@wavelength mat } ) #' @rdname spectra-methods #' @aliases spectra,data.frame,ANY-method setMethod("spectra", signature(object = "data.frame"), function(object, ...){ mat <- object$spectra mat } ) #' @rdname spectra-methods #' @aliases spectra,matrix,ANY-method setMethod("spectra", signature(object = "matrix"), function(object, ...){ mat <- object mat } )
/scratch/gouwar.j/cran-all/cranData/visa/R/spectra.R
#' Calculate Simple Ratio (SR).
#'
#' Simple Ratio is the ratio of the spectra (mostly reflectance) between two bands
#' in the format of \deqn{SR = \lambda_i/\lambda_j}
#'
#' Simple ratio and NDVI-like indices are two of the most widely used groups of spectral indices in remote sensing.
#'
#' @param s Spectral data in the format of visa's Spectra object, spectra.data.frame or spectra.matrix.
#' @param b1 An integer number which defines the wavelength of the 1st spectral band.
#' @param b2 An integer number which defines the wavelength of the 2nd spectral band.
#'
#' @return
#' \item{sr}{Returns a simple ratio index.}
#'
#' @examples
#' library(visa)
#' s <- NSpec.DF$spectra
#' sr1 <- sr(s, 480, 550)
#'
#' @import ggplot2
#'
#' @export
sr <- function(s, b1, b2){
  spec <- spectra(s)
  wl <- wavelength(s)
  idx1 <- b1 == wl
  idx2 <- b2 == wl
  s1 <- spec[, idx1]
  s2 <- spec[, idx2]
  sr <- s1/s2
}

#' Calculate Normalized Simple Ratio (NSR) index.
#'
#' It is a normalization of SR by doing NSR = (1-SR)/(1+SR), with the same two spectral bands.
#'
#' As its name indicates, it is a normalization of SR; for positive reflectance values it ranges in (-1, 1).
#'
#' @rdname sr
#' @inheritParams sr
#'
#' @return
#' \item{nsr}{Returns a NSR index.}
#' @examples
#' s <- NSpec.DF$spectra
#' nsr1 <- nsr(s, 480, 550)
#'
#' @export
nsr <- function(s, b1, b2){
  sr <- sr(s, b1, b2)
  nsr <- (1 - sr)/(1 + sr)
}

#' Fit linear model for the Simple Ratio (SR) and another variable.
#'
#' @rdname sr
#' @inheritParams sr
#' @param y A numeric variable to correlate with SR
#'
#' @return
#' \item{p}{Returns a ggplot object.}
#' @examples
#' s <- NSpec.DF
#' y <- NSpec.DF$N
#' lm.sr(s,600,500,y)
#'
#' @export
lm.sr <- function(s,b1,b2,y){
  x <- sr(s,b1,b2)
  bstr <- paste("SR = R", b1, "/R", b2, sep = "")
  p <- ggplot.lmfit(x,y) + labs(x = bstr)
  p
}

#' Fit linear model for the Normalized Simple Ratio (NSR) and another variable.
#'
#' @rdname sr
#' @inheritParams lm.sr
#'
#' @return
#' \item{p}{Returns a ggplot object.}
#' @examples
#' s <- NSpec.DF
#' y <- NSpec.DF$N
#' lm.nsr(s,600,500,y)
#'
#' @export
lm.nsr <- function(s,b1,b2,y){
  x <- nsr(s,b1,b2)
  bstr <- paste("NSR = (R", b2, " - R", b1, ")/(R", b2, " + R", b1, ")", sep = "")
  p <- ggplot.lmfit(x,y) + labs(x = bstr)
  p
}
/scratch/gouwar.j/cran-all/cranData/visa/R/sr.R
#' @details
#' The visa package provides a set of functions for hyperspectral data analysis, with the main purpose
#' of simplifying the analysis and interpretation of vegetation spectral data.
#'
#' It implements two categories of functions, including its own functions and also
#' some built on other R packages.
#'
#' \itemize{
#'   \item vegetation indices, and
#'   \item multivariate analyses....
#' }
#'
#' To know more about visa, check the vignettes:
#' `browseVignettes("visa")`
#'
#' @section Vegetation Indices:
#' Supports the calculation of vegetation indices from the literature, as well as the creation of new indices.
#'
#' @section Multivariate analysis:
#' ... not implemented yet
#' The multivariate analysis uses the spectra.data.frame format.
#'
#'
#' @docType package
#' @name visa
#' @author Kang Yu
#' @keywords internal
#'
#' @import ggplot2
"_PACKAGE"
/scratch/gouwar.j/cran-all/cranData/visa/R/visa.R
#' Access the wavelength of Spectra
#'
#' Construct generic functions for the Spectra object, spectra.data.frame, and spectra.matrix.
#'
#' A call to \code{new} returns a newly allocated object from the class identified by the first argument.
#' This call in turn calls the method for the generic function `initialize`.
#' A Spectra object can be constructed by using the \code{new} function.
#'
#' @docType methods
#' @name wavelength
#' @rdname wavelength-methods
#' @aliases waveband
#' @param object An object of Spectra
#' @param ... Other options (... T/F with unit)
#' @examples
#' library(visa)
#' # For S4 class Spectra
#' wavelength(NSpec.DB)
#' # For spectra data.frame format
#' wavelength(NSpec.DF)
#'
#' @export wavelength
# setMethod("as.spectra",
#           signature(spectra = "matrix", wavelength = "numeric"),
#           function(spectra, wavelength, ...){
#             return(as.spectra(spectra, wavelength, ...))
#           }
# )
setGeneric("wavelength", function(object, ...) standardGeneric("wavelength"))

#' @rdname wavelength-methods
#' @aliases wavelength,Spectra,ANY-method
setMethod("wavelength", signature(object = "Spectra"),
          function(object, ...){
            w <- object@wavelength
            w
          }
)

#' @rdname wavelength-methods
#' @aliases wavelength,data.frame,ANY-method
setMethod("wavelength", signature(object = "data.frame"),
          function(object, ...){
            as.numeric(gsub("\\D", "", colnames(object$spectra)))
          }
)

#' @rdname wavelength-methods
#' @aliases wavelength,matrix,ANY-method
setMethod("wavelength", signature(object = "matrix"),
          function(object, ...){
            as.numeric(gsub("\\D", "", colnames(object)))
          }
)
/scratch/gouwar.j/cran-all/cranData/visa/R/wavelength.R
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ---- echo=TRUE, warning=FALSE------------------------------------------------ library(visa) data(NSpec.DF) x <- NSpec.DF$N # nitrogen S <- NSpec.DF$spectra[, seq(1, ncol(NSpec.DF$spectra), 10)] # resampled to 10 nm steps cm <- cm.nsr(S, x, cm.plot = TRUE) ## ---- fig.show='hold', fig.cap = "Plot of correlation matrix"----------------- # use the output from last example # cm <- cm.nsr(S, x) # Plotting the correlation matrix ggplot.cm(cm) ## ---- echo=TRUE, results='asis'----------------------------------------------- library(visa) # check the data type class(NSpec.DB) # data structure # str(NSpec.DB) # print the first 10 columns knitr::kable(head(NSpec.DB@spectra[,1:10])) ## ---- echo=TRUE, results='asis'----------------------------------------------- # check the data type class(NSpec.DF) # check whether it contains the same data as 'NSpec.DB' knitr::kable(head(NSpec.DF$spectra[,1:10]))
/scratch/gouwar.j/cran-all/cranData/visa/inst/doc/visa.R
---
title: "Introduction to visa"
author: "Kang Yu"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Introduction to visa}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
fig_caption: yes
---

```{r setup, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

Imaging Spectroscopy (also known as Hyperspectral Remote Sensing, HRS) technology and data are increasingly used in environmental sciences, and nowadays much more beyond that, thus requiring accessible data and analytical tools (especially open source) for students and scientists with diverse backgrounds. Therefore, I came up with this idea when I was a PhD student at the University of Cologne, inspired by the growing community of R and R users. I have been mainly working with spectral data of plants, and that is the reason I use the name VISA for this package, with the aim of facilitating the use of imaging spectroscopy techniques and data for the extraction of vegetation signatures for plant stresses and biodiversity.

Future development of this tool has a long-term goal to include: i) implement the state-of-the-art applications of vegetation spectral indicators, ii) provide a platform to share vegetation spectral data to address certain questions of interest or applications in a broad context, and iii) make it compatible with more data formats and tools, such as the R package hsdar^[Lukas W. Lehnert, Hanna Meyer, Joerg Bendix (2018). hsdar: Manage, analyse and simulate hyperspectral data in R].

Currently, `visa` can be installed via my [GitHub repository visa](https://github.com/kang-yu/visa), by `devtools::install_github("kang-yu/visa")`, and its submission to CRAN is in progress.

This vignette introduces the design and features of `visa` from the following aspects:

- Data
- Functions
- Compatibility

## Data

The `visa` package intends to simplify the use of data and reduce the limitations of data format and structure. `visa` uses two data formats: a hacking use of R's `data.frame`, and an S4^[The S4 object system http://adv-r.had.co.nz/S4.html] class specifically for `visa`.

### Built on `data.frame`

Why I call it a hacking use is because a data.frame is a table organized by variables, and it seems ideal to store every spectral band as a variable. Imagine that you have thousands of columns and you have to refer to thousands of bands when using a data.frame. Then, why not just store all the spectral bands, i.e. the spectral matrix, in a single variable, like the example data `NSpec.DF`? This will ease your coding for analysis, and you can write your argument as '*y ~ spectra*' instead of '*y ~ band1 + band2 + band3 + ... *'

```
# check the data type of `NSpec.DF`
class(NSpec.DF)
class(NSpec.DF$spectra)
```

![Storing spectra matrix as a variable in data.frame](visa-df.PNG)

### S4 class `Spectra` and `SpectraDatabase`

There are already a lot of R packages for spectral data analysis, and some of them use the S4 class, e.g. the `hsdar` package. `visa` also supports the S4 format but in a simplified version, using only five slots currently.

```
# check the data type of `NSpec.DB`
class(NSpec.DB)
class(NSpec.DB@spectra)
```

![Storing spectra matrix as a slot of the S4 class](visa-db.PNG)

Notice the small difference in accessing the two types of data, i.e., using `$` and `@`, respectively.
## Functions

### Computing correlation matrix

The first idea behind this package was to compute the correlation matrix for a thorough analysis of the correlations between, on the one hand, the combinations of spectral bands, and on the other hand, the vegetation variables of interest.

Here is an example using the `cm.nsr` function, which can be used for non-spectra data as well.

```{r, echo=TRUE, warning=FALSE}
library(visa)
data(NSpec.DF)
x <- NSpec.DF$N # nitrogen
S <- NSpec.DF$spectra[, seq(1, ncol(NSpec.DF$spectra), 10)] # resampled to 10 nm steps
cm <- cm.nsr(S, x, cm.plot = TRUE)
```

### Plotting correlation matrix

The correlation matrix plot displays the correlation coefficients (r/R^2) for all band combinations along the x- and y-axes.

```{r, fig.show='hold', fig.cap = "Plot of correlation matrix"}
# use the output from last example
# cm <- cm.nsr(S, x)
# Plotting the correlation matrix
ggplot.cm(cm)
```

#### More Examples and Details

The computation of SR and NSR follows the equations:

$SR = \lambda_i / \lambda_j$

$NSR = (\lambda_i - \lambda_j)/(\lambda_i + \lambda_j)$

To know more about the NDVI, please also check Wikipedia^[Normalized difference vegetation index (https://en.wikipedia.org/wiki/Normalized_difference_vegetation_index)].

#### Example data `NSpec.DB`

The first type is `NSpec.DB`, in the default S4 class 'Spectra'.

```{r, echo=TRUE, results='asis'}
library(visa)
# check the data type
class(NSpec.DB)
# data structure
# str(NSpec.DB)
# print the first 10 columns
knitr::kable(head(NSpec.DB@spectra[,1:10]))
```

#### Example data `NSpec.DF`

The second type is a data.frame format, i.e., `NSpec.DF`.

```{r, echo=TRUE, results='asis'}
# check the data type
class(NSpec.DF)
# check whether it contains the same data as 'NSpec.DB'
knitr::kable(head(NSpec.DF$spectra[,1:10]))
```

#### Accessing data

`spectra`
`wavelength`

A short worked example combining these accessors with the index functions is given in the appendix at the end of this vignette.

## Compatibility

### Data format conversion

`as.spectra`
`as.spectra.data.frame`

### Future development

Regarding compatibility, future development will place special focus on:

- spatial data integration
- image analysis
- deep learning

*`visa` believes:*

> "Software probably makes knowledge gaps, but should not be due to the access to software."
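
## Appendix: a worked example

The chunk below is a minimal sketch that ties together the accessor functions `spectra()` and `wavelength()` with the index functions `sr()`, `nsr()` and `lm.sr()`; the band positions used (480/550 nm and 600/500 nm) simply mirror those in the package examples and are only illustrative.

```{r, echo=TRUE, eval=FALSE}
library(visa)
data(NSpec.DF)
# extract the spectra matrix and its wavelengths from the data.frame format
S <- spectra(NSpec.DF)
w <- wavelength(NSpec.DF)
range(w)
# a simple ratio and its normalized form for two illustrative bands
sr1 <- sr(NSpec.DF$spectra, 480, 550)
nsr1 <- nsr(NSpec.DF$spectra, 480, 550)
# relate a band-ratio index to the plant N content via a linear fit
lm.sr(NSpec.DF, 600, 500, NSpec.DF$N)
```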
/scratch/gouwar.j/cran-all/cranData/visa/inst/doc/visa.Rmd
---
title: "Introduction to visa"
author: "Kang Yu"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Introduction to visa}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
fig_caption: yes
---

```{r setup, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

Imaging Spectroscopy (also known as Hyperspectral Remote Sensing, HRS) technology and data are increasingly used in environmental sciences, and nowadays much more beyond that, thus requiring accessible data and analytical tools (especially open source) for students and scientists with diverse backgrounds. Therefore, I came up with this idea when I was a PhD student at the University of Cologne, inspired by the growing community of R and R users. I have been mainly working with spectral data of plants, and that is the reason I use the name VISA for this package, with the aim of facilitating the use of imaging spectroscopy techniques and data for the extraction of vegetation signatures for plant stresses and biodiversity.

Future development of this tool has a long-term goal to include: i) implement the state-of-the-art applications of vegetation spectral indicators, ii) provide a platform to share vegetation spectral data to address certain questions of interest or applications in a broad context, and iii) make it compatible with more data formats and tools, such as the R package hsdar^[Lukas W. Lehnert, Hanna Meyer, Joerg Bendix (2018). hsdar: Manage, analyse and simulate hyperspectral data in R].

Currently, `visa` can be installed via my [GitHub repository visa](https://github.com/kang-yu/visa), by `devtools::install_github("kang-yu/visa")`, and its submission to CRAN is in progress.

This vignette introduces the design and features of `visa` from the following aspects:

- Data
- Functions
- Compatibility

## Data

The `visa` package intends to simplify the use of data and reduce the limitations of data format and structure. `visa` uses two data formats: a hacking use of R's `data.frame`, and an S4^[The S4 object system http://adv-r.had.co.nz/S4.html] class specifically for `visa`.

### Built on `data.frame`

Why I call it a hacking use is because a data.frame is a table organized by variables, and it seems ideal to store every spectral band as a variable. Imagine that you have thousands of columns and you have to refer to thousands of bands when using a data.frame. Then, why not just store all the spectral bands, i.e. the spectral matrix, in a single variable, like the example data `NSpec.DF`? This will ease your coding for analysis, and you can write your argument as '*y ~ spectra*' instead of '*y ~ band1 + band2 + band3 + ... *'

```
# check the data type of `NSpec.DF`
class(NSpec.DF)
class(NSpec.DF$spectra)
```

![Storing spectra matrix as a variable in data.frame](visa-df.PNG)

### S4 class `Spectra` and `SpectraDatabase`

There are already a lot of R packages for spectral data analysis, and some of them use the S4 class, e.g. the `hsdar` package. `visa` also supports the S4 format but in a simplified version, using only five slots currently.

```
# check the data type of `NSpec.DB`
class(NSpec.DB)
class(NSpec.DB@spectra)
```

![Storing spectra matrix as a slot of the S4 class](visa-db.PNG)

Notice the small difference in accessing the two types of data, i.e., using `$` and `@`, respectively.
## Functions

### Computing correlation matrix

The first idea behind this package was to compute the correlation matrix for a thorough analysis of the correlations between, on the one hand, the combinations of spectral bands, and on the other hand, the vegetation variables of interest.

Here is an example using the `cm.nsr` function, which can be used for non-spectra data as well.

```{r, echo=TRUE, warning=FALSE}
library(visa)
data(NSpec.DF)
x <- NSpec.DF$N # nitrogen
S <- NSpec.DF$spectra[, seq(1, ncol(NSpec.DF$spectra), 10)] # resampled to 10 nm steps
cm <- cm.nsr(S, x, cm.plot = TRUE)
```

### Plotting correlation matrix

The correlation matrix plot displays the correlation coefficients (r/R^2) for all band combinations along the x- and y-axes.

```{r, fig.show='hold', fig.cap = "Plot of correlation matrix"}
# use the output from last example
# cm <- cm.nsr(S, x)
# Plotting the correlation matrix
ggplot.cm(cm)
```

#### More Examples and Details

The computation of SR and NSR follows the equations:

$SR = \lambda_i / \lambda_j$

$NSR = (\lambda_i - \lambda_j)/(\lambda_i + \lambda_j)$

To know more about the NDVI, please also check Wikipedia^[Normalized difference vegetation index (https://en.wikipedia.org/wiki/Normalized_difference_vegetation_index)].

#### Example data `NSpec.DB`

The first type is `NSpec.DB`, in the default S4 class 'Spectra'.

```{r, echo=TRUE, results='asis'}
library(visa)
# check the data type
class(NSpec.DB)
# data structure
# str(NSpec.DB)
# print the first 10 columns
knitr::kable(head(NSpec.DB@spectra[,1:10]))
```

#### Example data `NSpec.DF`

The second type is a data.frame format, i.e., `NSpec.DF`.

```{r, echo=TRUE, results='asis'}
# check the data type
class(NSpec.DF)
# check whether it contains the same data as 'NSpec.DB'
knitr::kable(head(NSpec.DF$spectra[,1:10]))
```

#### Accessing data

`spectra`
`wavelength`

A short worked example combining these accessors with the index functions is given in the appendix at the end of this vignette.

## Compatibility

### Data format conversion

`as.spectra`
`as.spectra.data.frame`

### Future development

Regarding compatibility, future development will place special focus on:

- spatial data integration
- image analysis
- deep learning

*`visa` believes:*

> "Software probably makes knowledge gaps, but should not be due to the access to software."
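
## Appendix: a worked example

The chunk below is a minimal sketch that ties together the accessor functions `spectra()` and `wavelength()` with the index functions `sr()`, `nsr()` and `lm.sr()`; the band positions used (480/550 nm and 600/500 nm) simply mirror those in the package examples and are only illustrative.

```{r, echo=TRUE, eval=FALSE}
library(visa)
data(NSpec.DF)
# extract the spectra matrix and its wavelengths from the data.frame format
S <- spectra(NSpec.DF)
w <- wavelength(NSpec.DF)
range(w)
# a simple ratio and its normalized form for two illustrative bands
sr1 <- sr(NSpec.DF$spectra, 480, 550)
nsr1 <- nsr(NSpec.DF$spectra, 480, 550)
# relate a band-ratio index to the plant N content via a linear fit
lm.sr(NSpec.DF, 600, 500, NSpec.DF$N)
```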
/scratch/gouwar.j/cran-all/cranData/visa/vignettes/visa.Rmd
## All the functions in the following are from the Rpackage ITRSelect ## estimated concordance function visa.Cest <- function(beta, X, A, Y, pi.est, h.est){ concor <- as.vector((A-pi.est)*(Y-h.est)/(pi.est*(1-pi.est))) Api <- A/pi.est concorApi <- as.matrix(outer(concor, Api, "*")) concorApi <- concorApi-t(concorApi) Xbeta <- as.vector(X%*%beta) IXbXb <- (outer(Xbeta, Xbeta, "-")>0) return (mean(concorApi*IXbXb)) } ## CIC criteria visa.CIC <- function(beta, X, A, Y, pi.est, h.est, kap=1){ n <- length(Y) p <- dim(X)[2] d <- sum(abs(beta)>1e-15) return (n*visa.Cest(beta, X, A, Y, pi.est, h.est)-d*log(p)*log(n, 10)*log(log(n, 10))/kap) } ## VIC criteria visa.VIC <- function(beta, X, A, Y, pi.est, h.est, kap=4){ n <- length(Y) p <- dim(X)[2] gA <- A*((cbind(1,X)%*%beta)>0)+(1-A)*((cbind(1,X)%*%beta)<=0) Api <- A*pi.est+(1-A)*(1-pi.est) gApi <- gA/Api d <- sum(abs(beta[2:(p+1)])>1e-15) return (n*mean(gApi*Y-(gApi-1)*(h.est+pmax(cbind(1,X)%*%beta,0)))-d*n^(1/3)*(log(p))^(2/3)*log(log(n))/kap) } ## BIC criteria visa.BIC <- function(beta, X, A, Y, pi.est, h.est, kap=1){ d <- sum(abs(beta)>1e-15) n <- length(Y) p <- dim(X)[2] X0 <- cbind(1, X) BIC0 <- mean(((A-pi.est)*(Y-h.est-A*(X0%*%beta)))^2) BIC0 <- -n*log(BIC0) - d*(log(n)+log(p+1))/kap return (BIC0) } ## the Dantzig selector visa.IC.Dantzig <- function(X, Y, A, pi.est, h.est, lambda.list, IC="CIC", kap, refit=TRUE){ p <- dim(as.matrix(X))[2] lambdan <- length(lambda.list) # all the coefficients beta.all <- matrix(0, lambdan, p+1) # all the criteria obj <- rep(0, lambdan) ## standardization center <- colMeans(X) std <- sqrt(apply(X, 2, var)) ## estimation X0 <- cbind(1, t((t(X)-center)/std)) n <- length(A) #parameters flp <- c(1,rep(0,2*(p+1))) #model parameters: t,beta+,beta- #condition on estimating equation Ynew <- colSums(X0*as.vector((A-pi.est)*(Y-h.est))) Xnew <- crossprod(X0*as.vector(A-pi.est),(X0*as.vector(A))) #constraint matrix Alp <- Matrix(rbind(cbind(-1,-Xnew,Xnew),cbind(-1,Xnew,-Xnew),c(0,rep(1,p+1),rep(1,p+1)))) #cat("\n") for (i in 1:lambdan){ #constraint upper bound blp <- c(-Ynew,Ynew,lambda.list[i]) #lower bound on variables dir <- rep("<=", 2*(p+1)+1) #lower bound on variables bounds <- list(lower = list(ind = 2:(2*(p+1)+1), val = rep(0, 2*(p+1)))) result <- Rglpk_solve_LP(obj=flp, mat=Alp, dir=dir, rhs=blp, bounds=bounds, max=FALSE) beta.est <- result$solution[2:(p+2)]-result$solution[(p+3):(2*p+3)] beta.est[2:(p+1)] <- beta.est[2:(p+1)]/std beta.est[1] <- beta.est[1]-crossprod(center, beta.est[2:(p+1)]) ########################################### #Double with estimating equation ########################################### if (refit&&sum(beta.est[2:(p+1)]!=0)>0&&sum(beta.est!=0)<(n/log(n))){ index <- (1:p)[beta.est[2:(p+1)]!=0] X.refit <- as.matrix(cbind(1,X[, index])) Ynew0 <- colSums(X.refit*as.vector((A-pi.est)*(Y-h.est))) Xnew0 <- crossprod((X.refit*as.vector(A-pi.est)),(X.refit*as.vector(A))) beta.refit <- solve(Xnew0, Ynew0) beta.est[c(1, index+1)] <- beta.refit } beta.all[i, ] <- beta.est if (IC=="CIC") obj[i] <- visa.CIC(beta.est[2:(p+1)], X, A, Y, pi.est, h.est, kap=kap) else if (IC=="BIC") obj[i] <- visa.BIC(beta.est, X, A, Y, pi.est, h.est, kap=kap) else obj[i] <- visa.VIC(beta.est, X, A, Y, pi.est, h.est, kap=kap) } return (as.vector(beta.all[which.max(obj), ])) }
/scratch/gouwar.j/cran-all/cranData/visaOTR/R/tools.R
#' @title Valid Improved Sparsity A-Learning for Optimal Treatment Decision #' @param y Vector of response (the larger the better) #' @param x Matrix of model covariates. #' @param a Vector of treatment received. It is a 0/1 index vector representing the subject is in control/treatment group. For details see Example section. #' @param IC Information criterion used in determining the regularization parameter. Users can choose among \code{BIC}, \code{CIC} and \code{VIC}. #' @param kap The model complexity penalty used in the information criteria. By default, kappa = 1 if BIC or CIC is used and kap = 4 if VIC is used. #' @param lambda.list A list of regularization parameter values. Default is exp(seq(-3.5, 2, 0.1)) #' @param refit logical. If \code{TRUE}, the coefficients should be refitted using A-learning estimating equation. Default is \code{TRUE}. #' @details See the paper provided in Reference section. #' #' @return an object of class "visa" is a list containing at least the following components: #' \item{beta.est}{A vector of coefficients of optimal treatment regime.} #' \item{pi.est}{A vector of estimated propensity score.} #' \item{h.est}{A vector of estimated baseline function.} #' #' @references #' {Shi, C., Fan, A., Song, R. and Lu, W. (2018) High-Dimensional A-Learing for Optimal Dynamic Treatment Regimes. \emph{Annals of Statistics,} \bold{ 46:} 925-957. DOI:10.1214/17-AOS1570} #' #' {Shi, C.,Song, R. and Lu, W. (2018) Concordance and Value Information Criteria for Optimal Treatment Decision. \emph{Annals of Statistics,} \bold{ 49:} 49-75. DOI:10.1214/19-AOS1908} #' #' {Zhan, Z. and Zhang, J. (2022+) Valid Improved Sparsity A-learning for Optimal Treatment Decision. Under review.} #' #' @export #' @import stats Rglpk #' @importFrom kernlab ksvm predict #' @importFrom e1071 svm #' @importFrom xgboost xgboost xgb.DMatrix #' @importFrom randomForest randomForest #' @importFrom mboost glmboost Binomial #' @importFrom Matrix Matrix #' @examples #' data(visa_SimuData) #' y = visa_SimuData$y #' a = visa_SimuData$a #' x = visa_SimuData$x #' # estimation #' result <- visa.est(y, x, a, IC = "BIC", lambda.list = c(0.1, 0.5)) #' result$beta.est #' result$pi.est #' result$h.est visa.est <- function(y, x, a, IC = c("BIC", "CIC", "VIC"), kap = NULL, lambda.list = exp(seq(-3.5, 2, 0.1)), refit = TRUE) { if (missing(IC)) IC <- "CIC" if (is.null(kap)) { if (IC == "CIC") { kap <- 1 } else if (IC == "BIC") { kap <- 1 } else { kap <- 4 } } n <- length(y) if (!all(a %in% c(0, 1))) stop("Treatment must be binary variable!") if (length(y) < 1) stop("empty model") ## Compute the propensity score pi.weight <- visa.weight(x, a, family = 'binomial')$weight pi.est <- as.vector(visa.mix(x, a, family='binomial', weight = pi.weight)) ## Compute the baseline h.weight <- visa.weight(x, y, subset = which(a==1), family = 'gaussian')$weight h.est <- as.vector(visa.mix(x, y, family='gaussian', weight = h.weight)) beta.est <- visa.IC.Dantzig(x, y, a, pi.est = pi.est, h.est = h.est, lambda.list = lambda.list, IC = IC, kap = kap, refit = refit) object <- list(beta.est = beta.est, pi.est = pi.est, h.est = h.est) return(object) }
/scratch/gouwar.j/cran-all/cranData/visaOTR/R/visa.est.R
visa.mix <- function(x, y, factorID = NULL, weight, family = c('gaussian', 'binomial')) { family <- match.arg(family) # Check the data if (family == "binomial") { if (!all(y %in% c(0, 1))) stop("There can only be 0 or 1 in y when using binomial family") } if(all(is.na(x))) { stop("Missing data (NA's) detected. Take actions (e.g., removing cases, removing features, imputation) to eliminate missing data before passing X and y to calculate.") } else if(is.numeric(x)==TRUE & is.null(factorID)==TRUE){ n <- length(y) p <- ncol(x) ########################### family = binomial if (family == 'binomial'){ ##### svm <- e1071::svm(y = as.factor(y), x = x, fitted = FALSE, probability = TRUE) pi_svm <- attr(predict(svm, newdata = x, probability = TRUE), "prob")[, "1"] ##### sGboost <- mboost::glmboost(y = as.factor(y), x = x, center = F, family = Binomial(link = "logit")) pi_Gboost <- predict(sGboost, newdata = x, type = 'response') ##### data_rf <- data.frame(y=as.factor(y),x) srandomf <- randomForest::randomForest(y ~ ., data=data_rf) pi_rf <- predict(srandomf,newdata=data.frame(x),type='prob')[,2] ##### sksvm <- kernlab::ksvm(x, as.factor(y), scaled = FALSE, kernel = 'rbfdot', prob.model=TRUE) pi_ksvm <- predict(sksvm,newdata=data.frame(x),type='probabilities')[,2] ##### xgmat <- xgboost::xgb.DMatrix(data = x, label = y) skxgb <- xgboost::xgboost(data = xgmat, objective="binary:logistic", nrounds = 10, verbose = 0, eval_metric = "logloss") pi_xgb <- predict(skxgb, xgmat) est_mix <- cbind(pi_svm, pi_Gboost, pi_rf, pi_ksvm, pi_xgb) colnames(est_mix) <- c("SVM", "L2B", "RF", "kSVM", "Xgboost") pi_mix <- est_mix %*% weight } ########################### family = gaussian if (family == 'gaussian'){ svm <- e1071::svm(y = y, x = x, fitted = FALSE) hat_svm <- predict(svm, newdata = x) ##### sGboost <- mboost::glmboost(y=as.numeric(y), x = x, center=F) hat_Gboost <- predict(sGboost, newdata = x) ##### data_rf <- data.frame(y,x) srandomf <- randomForest::randomForest(y ~ ., data=data_rf) hat_rf <- predict(srandomf,newdata=data.frame(x)) ##### sksvm <- kernlab::ksvm(x, y, scaled = FALSE, kernel = 'rbfdot') hat_ksvm <- predict(sksvm,newdata=data.frame(x)) ##### xgmat <-xgboost::xgb.DMatrix(data = x, label = y) skxgb <- xgboost::xgboost(data = xgmat, nrounds = 10, verbose = 0) hat_xgb <- predict(skxgb, xgmat) est_mix <- cbind(hat_svm, hat_Gboost, hat_rf, hat_ksvm, hat_xgb) colnames(est_mix) <- c("SVM", "L2B", "RF", "kSVM", "Xgboost") h_mix <- est_mix %*% weight } } if(family == 'gaussian') object <- h_mix if(family == 'binomial') object <- pi_mix return(object) }
/scratch/gouwar.j/cran-all/cranData/visaOTR/R/visa.mix.R
###weight provide the calculation of model averging methods for propensity score and baseline functions. visa.weight <- function(x, y, factorID = NULL, subset = NULL, family = c('gaussian', 'binomial'), n_train = ceiling(n/2), no_rep = 20, p0 = 0.5, psi = 1, prior = TRUE) { family <- match.arg(family) # Check the data if (family == "binomial") { if (!all(y %in% c(0, 1))) stop("There can only be 0 or 1 in y when using binomial family") } if(is.null(subset)==FALSE) { y <- y[subset] x <- x[subset, ] } if(all(is.na(x))) { stop("Missing data (NA's) detected. Take actions (e.g., removing cases, removing features, imputation) to eliminate missing data before passing X and y to calculate.") } else if(is.numeric(x)==TRUE & is.null(factorID)==TRUE){ n <- length(y) p <- ncol(x) ########################### family = binomial if (family == 'binomial'){ wt_calc=function(rep_id) { lw_num <- matrix(0,nrow=1, 5) tindex <- sample(n,n_train, replace = FALSE) x1 <- x[tindex,];x2=x[-tindex,] y1 <- y[tindex];y2=y[-tindex] svm <- e1071::svm(y = as.factor(y1), x = x1, fitted = FALSE, probability = TRUE) pi0 <- attr(predict(svm, newdata = x1, probability = TRUE), "prob")[, "1"] spre_svm0 <- log(pi0/(1-pi0)) pi1 <- attr(predict(svm, newdata = x2, probability = TRUE), "prob")[, "1"] spre_svm1 <- log(pi1/(1-pi1)) dk <- as.vector(spre_svm1) fk <- ifelse(dk < 0, log(1 + exp(dk)), dk + log(1 + exp(-dk))) lw_num[1] <- sum(y2 * dk) - sum(fk) ##### sGboost <- mboost::glmboost(y=as.factor(y1),x=x1,center=F,family=Binomial(link = "logit")) spre_Gboost0 <- predict(sGboost,newdata=x1)*2 spre_Gboost1 <- predict(sGboost,newdata=x2)*2 dk <- as.vector(spre_Gboost1) fk <- ifelse(dk < 0, log(1 + exp(dk)), dk + log(1 + exp(-dk))) lw_num[2] <- sum(y2 * dk) - sum(fk) ##### data_rf <- data.frame(y1=as.factor(y1),x1) srandomf <- randomForest::randomForest(y1 ~ ., data=data_rf) pi0 <- predict(srandomf,newdata=data.frame(x1),type='prob')[,2] spre_rf0 <- log(pi0/(1-pi0)) pi1 <- predict(srandomf,newdata=data.frame(x2),type='prob')[,2] spre_rf1 <- log(pi1/(1-pi1)) dk <- as.vector(spre_rf1) fk <- ifelse(dk < 0, log(1 + exp(dk)), dk + log(1 + exp(-dk))) lw_num[3] <- sum(y2 * dk) - sum(fk) ##### sksvm <- kernlab::ksvm(x1, as.factor(y1), scaled = FALSE, kernel = 'rbfdot', prob.model=TRUE) pi0=predict(sksvm,newdata=data.frame(x1),type='probabilities')[,2] spre_ksvm0 <- log(pi0/(1-pi0)) pi1=predict(sksvm,newdata=data.frame(x2),type='probabilities')[,2] spre_ksvm1 <- log(pi1/(1-pi1)) dk <- as.vector(spre_ksvm1) fk <- ifelse(dk < 0, log(1 + exp(dk)), dk + log(1 + exp(-dk))) lw_num[4] <- sum(y2 * dk) - sum(fk) ##### xgmat1 <- xgboost::xgb.DMatrix(data = x1, label = y1) xgmat2 <- xgboost::xgb.DMatrix(data = x2) skxgb <- xgboost::xgboost(data = xgmat1, objective="binary:logistic", nrounds = 10, verbose = 0, eval_metric = "logloss") pi0 <- predict(skxgb, xgmat1) spre_xgb0 <- log(pi0/(1-pi0)) pi1 <- predict(skxgb, xgmat2) spre_xgb1 <- log(pi1/(1-pi1)) dk <- as.vector(spre_xgb1) fk <- ifelse(dk < 0, log(1 + exp(dk)), dk + log(1 + exp(-dk))) lw_num[5] <- sum(y2 * dk) - sum(fk) return(lw_num) } lw_num=matrix(unlist(parallel::mclapply(seq(no_rep), wt_calc)), nrow = no_rep, ncol = 5, byrow = TRUE) if (prior == TRUE) { ck0=-log(1-p0)+log(5) #ck1=-log(p0)+ck_compute(n_mo, sk, p) ck=c(rep(ck0,5)) lw_num=sweep(lw_num, MARGIN = 2, psi*ck, "-") } lw_num=sweep(lw_num, MARGIN = 1, apply(lw_num, 1, max), "-") w_num=exp(lw_num) weight=colMeans(w_num/rowSums(w_num)) weight_se=apply(w_num,2,sd)/sqrt(no_rep) } ########################### family = gaussian if (family == 
'gaussian'){ wt_calc=function(rep_id) { lw_num <- matrix(0, nrow=1, 5) tindex <- sample(n, n_train, replace = FALSE) x1=x[tindex,];x2=x[-tindex,] y1=y[tindex];y2=y[-tindex] svm <- e1071::svm(y = y1, x = x1, fitted = FALSE) spre_svm0 <- predict(svm, newdata = x1) spre_svm1 <- predict(svm, newdata = x2) sigmak <- sqrt(sum((y1-spre_svm0)^2)/n_train) dk <- sum((y2-spre_svm1)^2) lw_num[1] <- (-(n-n_train))*log(sigmak)-((sigmak)^(-2))*dk/2 ##### sGboost <- mboost::glmboost(y=as.numeric(y1),x=x1,center=F) spre_Gboost0 <- predict(sGboost,newdata=x1) spre_Gboost1 <- predict(sGboost,newdata=x2) sigmak <- sqrt(sum((y1-spre_Gboost0)^2)/n_train) dk <- sum((y2-spre_Gboost1)^2) lw_num[2] <- (-(n-n_train))*log(sigmak) - ((sigmak)^(-2))*dk/2 ##### data_rf <- data.frame(y1,x1) srandomf <- randomForest::randomForest(y1 ~ ., data=data_rf) spre_rf0 <- predict(srandomf,newdata=data.frame(x1)) spre_rf1 <- predict(srandomf,newdata=data.frame(x2)) sigmak <- sqrt(sum((y1-spre_rf0)^2)/n_train) dk <- sum((y2-spre_rf1)^2) lw_num[3] <- (-(n-n_train))*log(sigmak)-((sigmak)^(-2))*dk/2 ##### sksvm <- kernlab::ksvm(x1, y1, scaled = FALSE, kernel = 'rbfdot') spre_ksvm0 <- predict(sksvm,newdata=data.frame(x1)) spre_ksvm1 <- predict(sksvm,newdata=data.frame(x2)) sigmak <- sqrt(sum((y1-spre_ksvm0)^2)/n_train) dk <- sum((y2-spre_ksvm1)^2) lw_num[4] <- (-(n-n_train))*log(sigmak)-((sigmak)^(-2))*dk/2 ##### xgmat1 <- xgboost::xgb.DMatrix(data = x1, label = y1) xgmat2 <- xgboost::xgb.DMatrix(data = x2) skxgb <- xgboost::xgboost(data = xgmat1, nrounds = 10, verbose = 0) spre_xgb0 <- predict(skxgb, xgmat1) spre_xgb1 <- predict(skxgb, xgmat2) sigmak <- sqrt(sum((y1-spre_xgb0)^2)/n_train) dk <- sum((y2-spre_xgb1)^2) lw_num[5] <- (-(n-n_train))*log(sigmak)-((sigmak)^(-2))*dk/2 return(lw_num) } lw_num <- matrix(unlist(lapply(seq(no_rep), wt_calc)), nrow = no_rep, ncol = 5, byrow = TRUE) if (prior == TRUE) { ck0 <- -log(1-p0)+log(5) #ck1=-log(p0)+ck_compute(n_mo, sk, p) ck <- c(rep(ck0,5)) lw_num <- sweep(lw_num, MARGIN = 2, psi*ck, "-") } lw_num <- sweep(lw_num, MARGIN = 1, apply(lw_num, 1, max), "-") w_num <- exp(lw_num) weight <- colMeans(w_num/rowSums(w_num)) weight_se <- apply(w_num,2,sd)/sqrt(no_rep) } } names(weight) <- c("SVM", "L2B", "RF", "kSVM", "Xgboost") object <- list(weight = weight, weight_se = weight_se) return(object) }
/scratch/gouwar.j/cran-all/cranData/visaOTR/R/visa.weight.R
#' visa_SimuData #' #' An Example of Simulated Data for visa #' #' @format The dataset visa_SimuData contains n = 50 samples with p = 10 covariates and treatment variable #' \describe{ #' \item{y}{the response} #' \item{x}{the covariates} #' \item{a}{the treatment received} #' } "visa_SimuData"
/scratch/gouwar.j/cran-all/cranData/visaOTR/R/visa_SimuData.R
# /** # * Copyright (c) 2021, 2023 Visa, Inc. # * # * This source code is licensed under the MIT license # * https://github.com/visa/visa-chart-components/blob/master/LICENSE # * # **/ #' alluvial_diagram #' @name alluvial_diagram #' @description R wrapper for \href{https://github.com/visa/visa-chart-components/tree/master/packages/alluvial-diagram}{@visa/alluvial-diagram} via \href{https://www.htmlwidgets.org/}{htmlwidgets}. #' #' Here is an example of alluvial-diagram in action: #' \if{html}{\figure{alluvial-diagram-1.png}{options: width=400 alt="example alluvial diagram"}} #' #' @param linkData required to be a valid, R data frame. Data used to create links in diagram, an array of objects which includes keys that map to chart accessors. See \href{https://github.com/d3/d3-sankey}{d3-sankey} for additional detail on data requirements. #' @param nodeData required to be a valid, R data frame. Optional. Data used to create nodes in diagram, an array of objects which includes key that map to chart accessors. See \href{https://github.com/d3/d3-sankey}{d3-sankey} for additional detail on data requirements. #' @param sourceAccessor String. Key used to determine link's source, must be a node. #' @param targetAccessor String. Key used to determine link's target, must be a node. #' @param valueAccessor String. Key used to determine link (and ultimately node size). #' @param nodeIDAccessor String. Key used to determine unique node identifiers. Requires nodeData to be populated. #' @param groupAccessor String. Key used to determine link's group or category. #' @param mainTitle String. The dynamic tag of title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param subTitle String. The dynamic tag for a sub title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param accessibility List(). Manages messages and settings for chart accessibility, see \href{https://github.com/visa/visa-chart-components/tree/master/packages/alluvial-diagram#accessibility-props}{object definition} #' @param props List(). A valid R list with additional property configurations, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/alluvial-diagram}{@visa/alluvial-diagram} #' @param ... All other props passed into the function will be passed through to the chart, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/alluvial-diagram}{@visa/alluvial-diagram}. #' @details To see all available options for the chart properties/API see \href{https://github.com/visa/visa-chart-components/tree/master/packages/alluvial-diagram}{@visa/alluvial-diagram}. #' @return a visaNodeLinkChart htmlwidget object for plotting an alluvial diagram #' @export #' @examples #' library(dplyr) #' data.frame(HairEyeColor) %>% #' filter(Sex=="Female") %>% #' mutate(newHair = paste(Hair,"-Hair")) %>% #' mutate(newEye = paste(Eye,"-Eye")) %>% #' alluvial_diagram(sourceAccessor = "newHair", targetAccessor = "newEye", valueAccessor = "Freq") alluvial_diagram = function(linkData, nodeData = NULL, sourceAccessor, targetAccessor, valueAccessor, nodeIDAccessor = "", groupAccessor = "", mainTitle = "", subTitle = "", accessibility = list(), props = list(), ...) { # now we are going to append all of these inputted props into the # expected prop list and also the "..." 
operator propList = c( linkData = linkData, nodeData = nodeData, sourceAccessor = sourceAccessor, targetAccessor = targetAccessor, valueAccessor = valueAccessor, nodeIDAccessor = nodeIDAccessor, groupAccessor = groupAccessor, mainTitle = mainTitle, subTitle = subTitle, list(accessibility = accessibility), props ) # now we just pass this through to visaNodeLinkChart for render visaNodeLinkChart("alluvial-diagram", linkData = linkData, nodeData = nodeData, propList = propList, ...) }
/scratch/gouwar.j/cran-all/cranData/visachartR/R/alluvial_diagram.R
# /** # * Copyright (c) 2021, 2023 Visa, Inc. # * # * This source code is licensed under the MIT license # * https://github.com/visa/visa-chart-components/blob/master/LICENSE # * # **/ #' bar_chart #' @name bar_chart #' @description R wrapper for \href{https://github.com/visa/visa-chart-components/tree/master/packages/bar-chart}{@visa/bar-chart} via \href{https://www.htmlwidgets.org/}{htmlwidgets}. #' #' Here is an example of bar-chart in action: #' #' \if{html}{\figure{bar-chart-1.png}{options: width=400 alt="example bar chart"}} #' #' @param data required to be a valid, R data frame. Data used to create chart, an array of objects which includes keys that map to chart accessors. #' @param ordinalAccessor String. Key used to determine bar's categorical property. (similar to x in ggplot) #' @param valueAccessor String. Key used to determine bar's numeric property. (similar to y in ggplot) #' @param groupAccessor String. Key used to determine bar group encoding (e.g., color/texture). #' @param mainTitle String. The dynamic tag of title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param subTitle String. The dynamic tag for a sub title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param accessibility List(). Manages messages and settings for chart accessibility, see \href{https://github.com/visa/visa-chart-components/tree/master/packages/bar-chart#accessibility-props}{object definition} #' @param props List(). A valid R list with additional property configurations, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/bar-chart}{@visa/bar-chart} #' @param ... All other props passed into the function will be passed through to the chart, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/bar-chart}{@visa/bar-chart}. #' @details To see all available options for the chart properties/API see \href{https://github.com/visa/visa-chart-components/tree/master/packages/bar-chart}{@visa/bar-chart}. #' @return a visaChart htmlwidget object for plotting a bar chart #' @export #' @examples #' library(dplyr) #' bar_chart(BOD, "Time", "demand") #' mtcars %>% #' sample_n(5) %>% #' tibble::rownames_to_column() %>% #' bar_chart("rowname", "mpg") bar_chart = function(data, ordinalAccessor, valueAccessor, groupAccessor = "", mainTitle = "", subTitle = "", accessibility = list(), props = list(), ...) { # now we are going to append all of these inputted props into the # expected prop list and also the "..." operator propList = c( ordinalAccessor = ordinalAccessor, valueAccessor = valueAccessor, groupAccessor = groupAccessor, mainTitle = mainTitle, subTitle = subTitle, list(accessibility = accessibility), props ) # now we just pass this through to visaChart for render visaChart("bar-chart", data = data, propList = propList, ...) }
/scratch/gouwar.j/cran-all/cranData/visachartR/R/bar_chart.R
# /** # * Copyright (c) 2021, 2023 Visa, Inc. # * # * This source code is licensed under the MIT license # * https://github.com/visa/visa-chart-components/blob/master/LICENSE # * # **/ #' circle_packing #' @name circle_packing #' @description R wrapper for \href{https://github.com/visa/visa-chart-components/tree/master/packages/circle-packing}{@visa/circle-packing} via \href{https://www.htmlwidgets.org/}{htmlwidgets}. #' #' Here is an example of circle-packing in action: #' \if{html}{\figure{circle-packing-1.png}{options: width=400 alt="example circle pack"}} #' #' @param data required to be a valid, R data frame. Data used to create chart, an array of objects which includes keys that map to chart accessors. See \href{https://github.com/d3/d3-hierarchy#stratify}{d3-hierarchy.stratify()} for additional detail on data requirements. #' @param nodeAccessor String. Key used to determine circle's child, must be a unique child. #' @param parentAccessor String. Key used to determine circle's parent. #' @param sizeAccessor String. Key used to determine circle size. #' @param mainTitle String. The dynamic tag of title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param subTitle String. The dynamic tag for a sub title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param accessibility List(). Manages messages and settings for chart accessibility, see \href{https://github.com/visa/visa-chart-components/tree/master/packages/circle-packing#accessibility-props}{object definition} #' @param props List(). A valid R list with additional property configurations, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/circle-packing}{@visa/circle-packing} #' @param ... All other props passed into the function will be passed through to the chart, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/circle-packing}{@visa/circle-packing}. #' @details To see all available options for the chart properties/API see \href{https://github.com/visa/visa-chart-components/tree/master/packages/circle-packing}{@visa/circle-packing}. #' @return a visaChart htmlwidget object for plotting a circle packing plot #' @export #' @examples #' library(dplyr) #' data.frame(parent = c(NA, "A", "A", "C", "C"), #' node = c("A", "B", "C", "D", "E"), #' size = c(NA, 8L, 7L, 6L, 5L)) %>% #' circle_packing("node", "parent", "size", #' accessibility = list(hideTextures = TRUE, #' hideDataTableButton = TRUE)) #' library(dplyr) #' data.frame(Orange) %>% #' mutate(age = as.character(age)) %>% #' bind_rows(data.frame(Tree = c(rep("Trees", 5), NA), #' age = c(1:5, "Trees"))) %>% #' circle_packing("age", "Tree", "circumference", #' accessibility=list(hideTextures = TRUE, #' includeDataKeyNames = TRUE, #' hideDataTableButton = TRUE)) circle_packing = function(data, nodeAccessor, parentAccessor, sizeAccessor, mainTitle = "", subTitle = "", accessibility = list(), props = list(), ...) { # now we are going to append all of these inputted props into the # expected prop list and also the "..." operator propList = c( nodeAccessor = nodeAccessor, parentAccessor = parentAccessor, sizeAccessor = sizeAccessor, mainTitle = mainTitle, subTitle = subTitle, list(accessibility = accessibility), props ) # now we just pass this through to visaChart for render visaChart("circle-packing", data = data, propList = propList, ...) }
/scratch/gouwar.j/cran-all/cranData/visachartR/R/circle_packing.R
# /** # * Copyright (c) 2021, 2023 Visa, Inc. # * # * This source code is licensed under the MIT license # * https://github.com/visa/visa-chart-components/blob/master/LICENSE # * # **/ #' clustered_bar_chart #' @name clustered_bar_chart #' @description R wrapper for \href{https://github.com/visa/visa-chart-components/tree/master/packages/clustered-bar-chart}{@visa/clustered-bar-chart} via \href{https://www.htmlwidgets.org/}{htmlwidgets}. #' #' Here is an example of clustered-bar-chart in action: #' \if{html}{\figure{figures/clustered-bar-chart-1.png}{options: width=400 alt="example clustered bar chart"}} #' #' @param data required to be a valid, R data frame. Data used to create chart, an array of objects which includes keys that map to chart accessors. #' @param ordinalAccessor String. Key used to determine bar's categorical property, within groups. (similar to x in ggplot) #' @param valueAccessor String. Key used to determine bar's numeric property. (similar to y in ggplot) #' @param groupAccessor String. Key used to determine bar clusters. #' @param mainTitle String. The dynamic tag of title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param subTitle String. The dynamic tag for a sub title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param accessibility List(). Manages messages and settings for chart accessibility, see \href{https://github.com/visa/visa-chart-components/tree/master/packages/clustered-bar-chart#accessibility-props}{object definition} #' @param props List(). A valid R list with additional property configurations, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/clustered-bar-chart}{@visa/clustered-bar-chart} #' @param ... All other props passed into the function will be passed through to the chart, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/clustered-bar-chart}{@visa/clustered-bar-chart}. #' @details To see all available options for the chart properties/API see \href{https://github.com/visa/visa-chart-components/tree/master/packages/clustered-bar-chart}{@visa/clustered-bar-chart}. #' @return a visaChart htmlwidget object for plotting a clustered bar chart #' @export #' @examples #' library(dplyr) #' data.frame(UCBAdmissions) %>% #' filter(Admit == "Rejected") %>% #' clustered_bar_chart("Gender","Freq","Dept") clustered_bar_chart = function(data, ordinalAccessor, valueAccessor, groupAccessor, mainTitle = "", subTitle = "", accessibility = list(), props = list(), ...) { # now we are going to append all of these inputted props into the # expected prop list and also the "..." operator propList = c( ordinalAccessor = ordinalAccessor, valueAccessor = valueAccessor, groupAccessor = groupAccessor, mainTitle = mainTitle, subTitle = subTitle, list(accessibility = accessibility), props ) # now we just pass this through to visaChart for render visaChart("clustered-bar-chart", data = data, propList = propList, ...) }
/scratch/gouwar.j/cran-all/cranData/visachartR/R/clustered_bar_chart.R
# /** # * Copyright (c) 2021, 2023 Visa, Inc. # * # * This source code is licensed under the MIT license # * https://github.com/visa/visa-chart-components/blob/master/LICENSE # * # **/ #' dumbbell_plot #' @name dumbbell_plot #' @description R wrapper for \href{https://github.com/visa/visa-chart-components/tree/master/packages/dumbbell-plot}{@visa/dumbbell-plot} via \href{https://www.htmlwidgets.org/}{htmlwidgets}. #' #' Here is an example of dumbbell-plot in action: #' #' \if{html}{\figure{dumbbell-plot-1.png}{options: width=400 alt="example dumbbell plot"}} #' #' @param data required to be a valid, R data frame. Data used to create chart, an array of objects which includes keys that map to chart accessors. #' @param ordinalAccessor String. Key used to determine dumbbell's categorical property. (similar to x in ggplot) #' @param valueAccessor String. Key used to determine dumbbell's numeric property. (similar to y in ggplot) #' @param seriesAccessor String. Key used to determine dumbbell's series. #' @param mainTitle String. The dynamic tag of title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param subTitle String. The dynamic tag for a sub title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param accessibility List(). Manages messages and settings for chart accessibility, see \href{https://github.com/visa/visa-chart-components/tree/master/packages/dumbbell-plot#accessibility-props}{object definition} #' @param props List(). A valid R list with additional property configurations, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/dumbbell-plot}{@visa/dumbbell-plot} #' @param ... All other props passed into the function will be passed through to the chart, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/dumbbell-plot}{@visa/dumbbell-plot}. #' @details To see all available options for the chart properties/API see \href{https://github.com/visa/visa-chart-components/tree/master/packages/dumbbell-plot}{@visa/dumbbell-plot}. #' @return a visaChart htmlwidget object for plotting a dumbbell plot #' @export #' @examples #' library(dplyr) #' data.frame(UCBAdmissions) %>% #' filter(Admit == "Rejected") %>% #' dumbbell_plot("Dept","Freq","Gender") dumbbell_plot = function(data, ordinalAccessor, valueAccessor, seriesAccessor, mainTitle = "", subTitle = "", accessibility = list(), props = list(), ...) { # now we are going to append all of these inputted props into the # expected prop list and also the "..." operator propList = c( ordinalAccessor = ordinalAccessor, valueAccessor = valueAccessor, seriesAccessor = seriesAccessor, mainTitle = mainTitle, subTitle = subTitle, list(accessibility = accessibility), props ) # now we just pass this through to visaChart for render visaChart("dumbbell-plot", data = data, propList = propList, ...) }
/scratch/gouwar.j/cran-all/cranData/visachartR/R/dumbbell_plot.R
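# Illustrative usage sketch for dumbbell_plot() (assumes visachartR is installed).
# The data preparation is my own: a dumbbell connects the two series values within
# each ordinal category, so the input is summarised to one row per (Dept, Gender).
library(dplyr)
library(visachartR)

data.frame(UCBAdmissions) %>%
  group_by(Dept, Gender) %>%
  summarise(Applicants = sum(Freq), .groups = "drop") %>%
  dumbbell_plot(
    ordinalAccessor = "Dept",
    valueAccessor   = "Applicants",
    seriesAccessor  = "Gender",
    mainTitle       = "Applicants by department and gender"
  )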
# /** # * Copyright (c) 2021, 2023 Visa, Inc. # * # * This source code is licensed under the MIT license # * https://github.com/visa/visa-chart-components/blob/master/LICENSE # * # **/ #' heat_map #' @name heat_map #' @description R wrapper for \href{https://github.com/visa/visa-chart-components/tree/master/packages/heat-map}{@visa/heat-map} via \href{https://www.htmlwidgets.org/}{htmlwidgets}. #' #' Here is an example of heat-map in action: #' #' \if{html}{\figure{heat-map-1.png}{options: width=400 alt="example heat map"}} #' #' @param data required to be a valid, R data frame. Data used to create chart, an array of objects which includes keys that map to chart accessors. #' @param xAccessor String. Key used to determine the x-axis categorical value. (similar to x in ggplot) #' @param yAccessor String. Key used to determine the y-axis categorical value. (similar to y in ggplot) #' @param valueAccessor String. Key used to determine heatmap's numeric property, for assigning color. #' @param mainTitle String. The dynamic tag of title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param subTitle String. The dynamic tag for a sub title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param accessibility List(). Manages messages and settings for chart accessibility, see \href{https://github.com/visa/visa-chart-components/tree/master/packages/heat-map#accessibility-props}{object definition} #' @param props List(). A valid R list with additional property configurations, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/heat-map}{@visa/heat-map} #' @param ... All other props passed into the function will be passed through to the chart, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/heat-map}{@visa/heat-map}. #' @details To see all available options for the chart properties/API see \href{https://github.com/visa/visa-chart-components/tree/master/packages/heat-map}{@visa/heat-map}. #' @return a visaChart htmlwidget object for plotting a heat map #' @export #' @examples #' library(dplyr) #' data.frame(UCBAdmissions) %>% #' filter(Admit == "Rejected") %>% #' heat_map("Dept","Gender", "Freq") heat_map = function(data, xAccessor, yAccessor, valueAccessor, mainTitle = "", subTitle = "", accessibility = list(), props = list(), ...) { # now we are going to append all of these inputted props into the # expected prop list and also the "..." operator propList = c( xAccessor = xAccessor, yAccessor = yAccessor, valueAccessor = valueAccessor, mainTitle = mainTitle, subTitle = subTitle, list(accessibility = accessibility), props ) # now we just pass this through to visaChart for render visaChart("heat-map", data = data, propList = propList, ...) }
/scratch/gouwar.j/cran-all/cranData/visachartR/R/heat_map.R
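# Illustrative usage sketch for heat_map() (assumes visachartR is installed). The
# aggregation below is my own: UCBAdmissions is summed over admission outcome so
# each (Dept, Gender) cell maps to a single numeric value for the colour scale.
library(dplyr)
library(visachartR)

data.frame(UCBAdmissions) %>%
  group_by(Dept, Gender) %>%
  summarise(Freq = sum(Freq), .groups = "drop") %>%
  heat_map(
    xAccessor     = "Dept",
    yAccessor     = "Gender",
    valueAccessor = "Freq",
    mainTitle     = "Total applicants per department and gender"
  )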
# /** # * Copyright (c) 2021 Visa, Inc. # * # * This source code is licensed under the MIT license # * https://github.com/visa/visa-chart-components/blob/master/LICENSE # * # **/ #' line_chart #' @name line_chart #' @description R wrapper for \href{https://github.com/visa/visa-chart-components/tree/master/packages/line-chart}{@visa/line-chart} via \href{https://www.htmlwidgets.org/}{htmlwidgets}. #' #' Here is an example of line-chart in action: #' #' \if{html}{\figure{line-chart-1.png}{options: width=400 alt="example line chart"}} #' #' @param data required to be a valid, R data frame. Data used to create chart, an array of objects which includes keys that map to chart accessors. #' @param ordinalAccessor String. Key used to determine line's categorical property. (similar to x in ggplot) #' @param valueAccessor String. Key used to determine line's numeric property. (similar to y in ggplot) #' @param seriesAccessor String. Key used to determine series (e.g., color/texture). #' @param mainTitle String. The dynamic tag of title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param subTitle String. The dynamic tag for a sub title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param accessibility List(). Manages messages and settings for chart accessibility, see \href{https://github.com/visa/visa-chart-components/tree/master/packages/line-chart#accessibility-props}{object definition} #' @param props List(). A valid R list with additional property configurations, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/line-chart}{@visa/line-chart} #' @param ... All other props passed into the function will be passed through to the chart, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/line-chart}{@visa/line-chart}. #' @details To see all available options for the chart properties/API see \href{https://github.com/visa/visa-chart-components/tree/master/packages/line-chart}{@visa/line-chart}. #' @return a visaChart htmlwidget object for plotting a line chart #' @export #' @examples #' library(dplyr) #' ChickWeight %>% #' filter(Chick==1 | Chick == 4) %>% #' line_chart("Time", "weight", "Chick", #' showBaselineX=FALSE, #' xAxis=list(label="Time",format="0a", visible=TRUE), #' yAxis=list(label="Weight", visible=TRUE, gridVisible=TRUE), #' mainTitle = "Selected chick weight over time") line_chart = function(data, ordinalAccessor, valueAccessor, seriesAccessor, mainTitle = "", subTitle = "", accessibility = list(), props = list(), ...) { # now we are going to append all of these inputted props into the # expected prop list and also the "..." operator propList = c( ordinalAccessor = ordinalAccessor, valueAccessor = valueAccessor, seriesAccessor = seriesAccessor, mainTitle = mainTitle, subTitle = subTitle, list(accessibility = accessibility), props ) # now we just pass this through to visaChart for render visaChart("line-chart", data = data, propList = propList, ...) }
/scratch/gouwar.j/cran-all/cranData/visachartR/R/line_chart.R
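# Because line_chart() returns an htmlwidget, the result can be written to a
# standalone HTML file with htmlwidgets::saveWidget(). A sketch, assuming visachartR
# is installed; the output file name is arbitrary.
library(dplyr)
library(visachartR)

w <- ChickWeight %>%
  filter(Chick == 1 | Chick == 4) %>%
  line_chart("Time", "weight", "Chick",
             mainTitle = "Selected chick weight over time")

htmlwidgets::saveWidget(w, file = "chick_weight_line_chart.html", selfcontained = TRUE)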
# /** # * Copyright (c) 2021 Visa, Inc. # * # * This source code is licensed under the MIT license # * https://github.com/visa/visa-chart-components/blob/master/LICENSE # * # **/ #' parallel_plot #' @name parallel_plot #' @description R wrapper for \href{https://github.com/visa/visa-chart-components/tree/master/packages/parallel-plot}{@visa/parallel-plot} via \href{https://www.htmlwidgets.org/}{htmlwidgets}. #' #' Here is an example of parallel-plot in action: #' #' \if{html}{\figure{parallel-plot-1.png}{options: width=400 alt="example parallel plot"}} #' #' @param data required to be a valid, R data frame. Data used to create chart, an array of objects which includes keys that map to chart accessors. #' @param ordinalAccessor String. Key used to determine line's categorical property. (similar to x in ggplot) #' @param valueAccessor String. Key used to determine line's numeric property. (similar to y in ggplot) #' @param seriesAccessor String. Key used to determine series (e.g., color/texture). #' @param mainTitle String. The dynamic tag of title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param subTitle String. The dynamic tag for a sub title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param accessibility List(). Manages messages and settings for chart accessibility, see \href{https://github.com/visa/visa-chart-components/tree/master/packages/parallel-plot#accessibility-props}{object definition} #' @param props List(). A valid R list with additional property configurations, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/parallel-plot}{@visa/parallel-plot} #' @param ... All other props passed into the function will be passed through to the chart, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/parallel-plot}{@visa/parallel-plot}. #' @details To see all available options for the chart properties/API see \href{https://github.com/visa/visa-chart-components/tree/master/packages/parallel-plot}{@visa/parallel-plot}. #' @return a visaChart htmlwidget object for plotting a parallel plot #' @export #' @examples #' library(dplyr) #' ChickWeight %>% #' filter(Chick==1 | Chick == 4) %>% #' parallel_plot("Time", "weight", "Chick", #' showBaselineX=FALSE, #' xAxis=list(label="Time",format="0a", visible=TRUE), #' yAxis=list(label="Weight", visible=FALSE, gridVisible=FALSE), #' mainTitle = "Selected chick weight over time", #' dataLabel=list(visible = TRUE, #' labelAccessor = "weight", #' placement = "bottom-right", #' format = "0a")) parallel_plot = function(data, ordinalAccessor, valueAccessor, seriesAccessor, mainTitle = "", subTitle = "", accessibility = list(), props = list(), ...) { # now we are going to append all of these inputted props into the # expected prop list and also the "..." operator propList = c( ordinalAccessor = ordinalAccessor, valueAccessor = valueAccessor, seriesAccessor = seriesAccessor, mainTitle = mainTitle, subTitle = subTitle, list(accessibility = accessibility), props ) # now we just pass this through to visaChart for render visaChart("parallel-plot", data = data, propList = propList, ...) }
/scratch/gouwar.j/cran-all/cranData/visachartR/R/parallel_plot.R
# /** # * Copyright (c) 2021, 2023 Visa, Inc. # * # * This source code is licensed under the MIT license # * https://github.com/visa/visa-chart-components/blob/master/LICENSE # * # **/ #' pie_chart #' @name pie_chart #' @description R wrapper for \href{https://github.com/visa/visa-chart-components/tree/master/packages/pie-chart}{@visa/pie-chart} via \href{https://www.htmlwidgets.org/}{htmlwidgets}. #' #' Here is an example of pie-chart in action: #' #' \if{html}{\figure{pie-chart-1.png}{options: width=400 alt="example pie chart"}} #' #' @param data required to be a valid, R data frame. Data used to create chart, an array of objects which includes keys that map to chart accessors. #' @param ordinalAccessor String. Key used to determine chart's categorical property. #' @param valueAccessor String. Key used to determine chart's numeric property. #' @param mainTitle String. The dynamic tag of title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param subTitle String. The dynamic tag for a sub title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param accessibility List(). Manages messages and settings for chart accessibility, see \href{https://github.com/visa/visa-chart-components/tree/master/packages/pie-chart#accessibility-props}{object definition} #' @param props List(). A valid R list with additional property configurations, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/pie-chart}{@visa/pie-chart} #' @param ... All other props passed into the function will be passed through to the chart, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/pie-chart}{@visa/pie-chart}. #' @details To see all available options for the chart properties/API see \href{https://github.com/visa/visa-chart-components/tree/master/packages/pie-chart}{@visa/pie-chart}. #' @return a visaChart htmlwidget object for plotting a pie chart #' @export #' @examples #' library(dplyr) #' data.frame (HairEyeColor) %>% #' filter(Hair=="Blond", Sex=="Male") %>% #' mutate(blueEyes = if_else(Eye=="Blue", "Blue","Other")) %>% #' group_by(blueEyes, Hair, Sex) %>% #' summarise(FreqSum=sum(Freq), n=n()) %>% #' pie_chart( #' "blueEyes", #' "FreqSum", #' mainTitle="How many males with Blonde hair have Blue eyes?", #' sortOrder="desc" #' ) pie_chart = function(data, ordinalAccessor, valueAccessor, mainTitle = "", subTitle = "", accessibility = list(), props = list(), ...) { # now we are going to append all of these inputted props into the # expected prop list and also the "..." operator propList = c( ordinalAccessor = ordinalAccessor, valueAccessor = valueAccessor, mainTitle = mainTitle, subTitle = subTitle, list(accessibility = accessibility), props ) # now we just pass this through to visaChart for render visaChart("pie-chart", data = data, propList = propList, ...) }
/scratch/gouwar.j/cran-all/cranData/visachartR/R/pie_chart.R
# /** # * Copyright (c) 2021, 2023 Visa, Inc. # * # * This source code is licensed under the MIT license # * https://github.com/visa/visa-chart-components/blob/master/LICENSE # * # **/ #' scatter_plot #' @name scatter_plot #' @description R wrapper for \href{https://github.com/visa/visa-chart-components/tree/master/packages/scatter-plot}{@visa/scatter-plot} via \href{https://www.htmlwidgets.org/}{htmlwidgets}. #' #' Here is an example of scatter-plot in action: #' #' \if{html}{\figure{scatter-plot-1.png}{options: width=400 alt="example scatter plot"}} #' #' @inheritParams bar_chart #' @param xAccessor String. Key used to determine each point's position along the x-axis. #' @param yAccessor String. Key used to determine each point's position along the y-axis. #' @param accessibility List(). Manages messages and settings for chart accessibility, see \href{https://github.com/visa/visa-chart-components/tree/master/packages/scatter-plot#accessibility-props}{object definition} #' @param props List(). A valid R list with additional property configurations, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/scatter-plot}{@visa/scatter-plot} #' @param ... All other props passed into the function will be passed through to the chart, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/scatter-plot}{@visa/scatter-plot}. #' @details To see all available options for the chart properties/API see \href{https://github.com/visa/visa-chart-components/tree/master/packages/scatter-plot}{@visa/scatter-plot}. #' @return a visaChart htmlwidget object for plotting a scatter plot #' @export #' @examples #' library(dplyr) #' scatter_plot(mtcars[order(mtcars$cyl),], "wt", "mpg", "cyl") scatter_plot = function(data, xAccessor, yAccessor, groupAccessor = "", mainTitle = "", subTitle = "", accessibility = list(), props = list(), ...) { # now we are going to append all of these inputted props into the # expected prop list and also the "..." operator propList = c( xAccessor = xAccessor, yAccessor = yAccessor, groupAccessor = groupAccessor, mainTitle = mainTitle, subTitle = subTitle, list(accessibility = accessibility), props ) # now we just pass this through to visaChart for render visaChart("scatter-plot", data = data, propList = propList, ...) }
/scratch/gouwar.j/cran-all/cranData/visachartR/R/scatter_plot.R
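# Illustrative usage sketch for scatter_plot() (assumes visachartR is installed).
# groupAccessor defaults to "", so an ungrouped scatter plot only needs the x and y
# accessors plus any optional titles.
library(visachartR)

scatter_plot(mtcars,
             xAccessor = "wt",
             yAccessor = "mpg",
             mainTitle = "Weight vs fuel efficiency")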
# /** # * Copyright (c) 2021 Visa, Inc. # * # * This source code is licensed under the MIT license # * https://github.com/visa/visa-chart-components/blob/master/LICENSE # * # **/ #' stacked_bar_chart #' @name stacked_bar_chart #' @description R wrapper for \href{https://github.com/visa/visa-chart-components/tree/master/packages/stacked-bar-chart}{@visa/stacked-bar-chart} via \href{https://www.htmlwidgets.org/}{htmlwidgets}. #' #' Here is an example of stacked-bar-chart in action: #' \if{html}{\figure{figures/stacked-bar-chart-1.png}{options: width=400 alt="example stacked bar chart"}} #' #' @param data required to be a valid, R data frame. Data used to create chart, an array of objects which includes keys that map to chart accessors. #' @param ordinalAccessor String. Key used to determine bar's categorical property, within groups. (similar to x in ggplot) #' @param valueAccessor String. Key used to determine bar's numeric property. (similar to y in ggplot) #' @param groupAccessor String. Key used to determine bar clusters. #' @param mainTitle String. The dynamic tag of title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param subTitle String. The dynamic tag for a sub title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned. #' @param accessibility List(). Manages messages and settings for chart accessibility, see \href{https://github.com/visa/visa-chart-components/tree/master/packages/stacked-bar-chart#accessibility-props}{object definition} #' @param props List(). A valid R list with additional property configurations, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/stacked-bar-chart}{@visa/stacked-bar-chart} #' @param ... All other props passed into the function will be passed through to the chart, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/stacked-bar-chart}{@visa/stacked-bar-chart}. #' @details To see all available options for the chart properties/API see \href{https://github.com/visa/visa-chart-components/tree/master/packages/stacked-bar-chart}{@visa/stacked-bar-chart}. #' @return a visaChart htmlwidget object for plotting a stacked bar chart #' @export #' @examples #' library(dplyr) #' data.frame(UCBAdmissions) %>% #' filter(Admit == "Rejected") %>% #' stacked_bar_chart("Gender", "Freq", "Dept") stacked_bar_chart = function(data, ordinalAccessor, valueAccessor, groupAccessor, mainTitle = "", subTitle = "", accessibility = list(), props = list(), ...) { # now we are going to append all of these inputted props into the # expected prop list and also the "..." operator propList = c( ordinalAccessor = ordinalAccessor, valueAccessor = valueAccessor, groupAccessor = groupAccessor, mainTitle = mainTitle, subTitle = subTitle, list(accessibility = accessibility), props ) # now we just pass this through to visaChart for render visaChart("stacked-bar-chart", data = data, propList = propList, ...) }
/scratch/gouwar.j/cran-all/cranData/visachartR/R/stacked_bar_chart.R
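# stacked_bar_chart() and clustered_bar_chart() share the same signature, so the same
# prepared data can be rendered either stacked or clustered simply by swapping the
# wrapper. A sketch, assuming visachartR is installed.
library(dplyr)
library(visachartR)

rejected <- data.frame(UCBAdmissions) %>% filter(Admit == "Rejected")

stacked_bar_chart(rejected, "Gender", "Freq", "Dept",
                  mainTitle = "Rejected applicants (stacked)")
clustered_bar_chart(rejected, "Gender", "Freq", "Dept",
                    mainTitle = "Rejected applicants (clustered)")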
# /**
# * Copyright (c) 2021 Visa, Inc.
# *
# * This source code is licensed under the MIT license
# * https://github.com/visa/visa-chart-components/blob/master/LICENSE
# *
# **/
#' visa charts 5.0.5
#'
#' Visa Chart Components wrapped in r htmlwidgets package
#'
#' @param tagName String. The custom web component HTML tag for the Visa Chart Component. Set by respective chart functions.
#' @param data a valid R data frame. See more details in respective component functions.
#' @param propList a list of props, created by each component function, see \href{https://github.com/visa/visa-chart-components}{Visa Chart Components}.
#' @param height Number. Height of chart container.
#' @param width Number. Width of chart container.
#' @param ... All other props passed into the function will be passed through to the chart.
#' @return a visaChart htmlwidget object for creating a variety of plot types
#' @import htmlwidgets
#'
#' @export
visaChart <- function(tagName, data, propList,
                      width = NULL, height = NULL, ...) {

  # create a list that contains the props
  x <- list(
    tagName = tagName,
    data = data,
    propList = propList,
    height = height,
    width = width,
    ...
  )

  # create widget
  htmlwidgets::createWidget(
    name = 'visaChart',
    x,
    width = width,
    height = height,
    package = 'visachartR'
  )
}

#' Shiny bindings for visaChart
#'
#' Output and render functions for using visaChart within Shiny
#' applications and interactive Rmd documents.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#'   \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#'   string and have \code{'px'} appended.
#' @param expr An expression that generates a visaChart
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#'   is useful if you want to save an expression in a variable.
#' @return a Shiny output or render function for visaChart htmlwidgets
#'
#' @name visaChart-shiny
#'
#' @export
visaChartOutput <- function(outputId, width = '100%', height = '400px'){
  htmlwidgets::shinyWidgetOutput(outputId, 'visaChart', width, height, package = 'visachartR')
}

#' @rdname visaChart-shiny
#' @export
renderVisaChart <- function(expr, env = parent.frame(), quoted = FALSE) {
  if (!quoted) { expr <- substitute(expr) } # force quoted
  htmlwidgets::shinyRenderWidget(expr, visaChartOutput, env, quoted = TRUE)
}
/scratch/gouwar.j/cran-all/cranData/visachartR/R/visaChart.R
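# Minimal Shiny app sketch wiring visaChartOutput() to renderVisaChart(), assuming the
# shiny and visachartR packages are installed. Any wrapper that returns a visaChart
# htmlwidget (scatter_plot() here) can be rendered this way.
library(shiny)
library(visachartR)

ui <- fluidPage(
  visaChartOutput("scatter", height = "500px")
)

server <- function(input, output, session) {
  output$scatter <- renderVisaChart(
    scatter_plot(mtcars[order(mtcars$cyl), ], "wt", "mpg", "cyl",
                 mainTitle = "Weight vs MPG by cylinder count")
  )
}

if (interactive()) shinyApp(ui, server)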
# /** # * Copyright (c) 2021 Visa, Inc. # * # * This source code is licensed under the MIT license # * https://github.com/visa/visa-chart-components/blob/master/LICENSE # * # **/ #' visa charts 5.0.5 #' #' Visa Chart Components wrapped in r htmlwidgets package #' #' @param tagName String. The custom web component HTML tag for the Visa Chart Component. Set by respective chart functions. #' @param linkData a valid R data frame. See more details in respective component functions. #' @param nodeData a valid R data frame. See more details in respective component functions. #' @param propList a list of props, created by each component function, see \href{https://github.com/visa/visa-chart-components}{Visa Chart Components}. #' @param height Number. Height of chart container. #' @param width Number. Width of chart container. #' @param ... All other props passed into the function will be passed through to the chart. #' @return a visaNodeLinkChart htmlwidget object for creating a variety of plot types #' @import htmlwidgets #' #' @export visaNodeLinkChart <- function(tagName, linkData, nodeData, propList, width = NULL, height = NULL, ...) { # create a list that contains the props x <- list( tagName = tagName, linkData = linkData, nodeData = nodeData, propList = propList, height = height, width = width, ... ) # create widget htmlwidgets::createWidget( name = 'visaNodeLinkChart', x, width = width, height = height, package = 'visachartR' ) } #' Shiny bindings for visaNodeLinkChart #' #' Output and render functions for using visaNodeLinkChart within Shiny #' applications and interactive Rmd documents. #' #' @param outputId output variable to read from #' @param width,height Must be a valid CSS unit (like \code{'100\%'}, #' \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a #' string and have \code{'px'} appended. #' @param expr An expression that generates a visaNodeLinkChart #' @param env The environment in which to evaluate \code{expr}. #' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This #' is useful if you want to save an expression in a variable. #' @return a Shiny output or render function for visaNodeLinkChart htmlwidgets #' #' @name visaNodeLinkChart-shiny #' #' @export visaNodeLinkChartOutput <- function(outputId, width = '100%', height = '400px'){ htmlwidgets::shinyWidgetOutput(outputId, 'visaNodeLinkChart', width, height, package = 'visachartR') } #' @rdname visaNodeLinkChart-shiny #' @export rendervisaNodeLinkChart <- function(expr, env = parent.frame(), quoted = FALSE) { if (!quoted) { expr <- substitute(expr) } # force quoted htmlwidgets::shinyRenderWidget(expr, visaNodeLinkChartOutput, env, quoted = TRUE) }
/scratch/gouwar.j/cran-all/cranData/visachartR/R/visaNodeLinkChart.R
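# Sketch of calling the generic visaNodeLinkChart() constructor directly. The tag name
# ("alluvial-diagram") and the accessor names in propList are assumptions based on the
# @visa/visa-chart-components repository, not confirmed by this file; check the
# component documentation before relying on them.
library(visachartR)

links <- data.frame(source = c("A", "A", "B"),
                    target = c("X", "Y", "X"),
                    value  = c(10, 5, 8))
nodes <- data.frame(id = c("A", "B", "X", "Y"))

visaNodeLinkChart(
  tagName  = "alluvial-diagram",              # assumed component tag
  linkData = links,
  nodeData = nodes,
  propList = list(sourceAccessor = "source",  # assumed prop names
                  targetAccessor = "target",
                  valueAccessor  = "value",
                  nodeIDAccessor = "id")
)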
# /**
# * Copyright (c) 2021, 2023 Visa, Inc.
# *
# * This source code is licensed under the MIT license
# * https://github.com/visa/visa-chart-components/blob/master/LICENSE
# *
# **/
#' world_map
#' @name world_map
#' @description R wrapper for \href{https://github.com/visa/visa-chart-components/tree/master/packages/world-map}{@visa/world-map} via \href{https://www.htmlwidgets.org/}{htmlwidgets}.
#'
#' Here is an example of world-map in action:
#' \if{html}{\figure{world-map-1.png}{options: width=400 alt="example world map"}}
#'
#' @param data required to be a valid, R data frame. Data used to create chart, an array of objects which includes keys that map to chart accessors.
#' @param joinAccessor String. Key used to determine country's key property (ISO 3-Digit Code).
#' @param joinNameAccessor String. Key used to determine country's name property.
#' @param markerAccessor String. Key used to determine marker's key property.
#' @param markerNameAccessor String. Key used to determine marker's name property.
#' @param valueAccessor String. Key used to determine the country/marker's numeric property.
#' @param groupAccessor String. Key used to determine country/marker color.
#' @param latitudeAccessor String. Key used to determine marker's latitude property.
#' @param longitudeAccessor String. Key used to determine marker's longitude property.
#' @param mainTitle String. The dynamic tag of title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned.
#' @param subTitle String. The dynamic tag for a sub title for the map (or you can create your own separately). See \code{highestHeadingLevel} prop for how tags get assigned.
#' @param accessibility List(). Manages messages and settings for chart accessibility, see \href{https://github.com/visa/visa-chart-components/tree/master/packages/world-map#accessibility-props}{object definition}
#' @param props List(). A valid R list with additional property configurations, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/world-map}{@visa/world-map}
#' @param ... All other props passed into the function will be passed through to the chart, see all props for \href{https://github.com/visa/visa-chart-components/tree/master/packages/world-map}{@visa/world-map}.
#' @details To see all available options for the chart properties/API see \href{https://github.com/visa/visa-chart-components/tree/master/packages/world-map}{@visa/world-map}.
#' @return a visaChart htmlwidget object for plotting a world map
#' @export
#' @examples
#' library(dplyr)
#' quakes %>%
#'   sample_n(100) %>%
#'   tibble::rowid_to_column() %>%
#'   world_map(
#'     markerAccessor = "rowid",
#'     latitudeAccessor = "long",
#'     longitudeAccessor = "lat",
#'     valueAccessor = "stations",
#'     markerStyle = list(
#'       visible = TRUE,
#'       fill = TRUE,
#'       opacity = .5,
#'       radiusRange = c(5, 15)
#'     )
#'   )
world_map = function(data,
                     joinAccessor = "",
                     joinNameAccessor = "",
                     markerAccessor = "",
                     markerNameAccessor = "",
                     latitudeAccessor = "",
                     longitudeAccessor = "",
                     valueAccessor,
                     groupAccessor = "",
                     mainTitle = "",
                     subTitle = "",
                     accessibility = list(),
                     props = list(),
                     ...) {
  # now we are going to append all of these inputted props into the
  # expected prop list and also the "..." operator
  propList = c(
    joinAccessor = joinAccessor,
    joinNameAccessor = joinNameAccessor,
    markerAccessor = markerAccessor,
    markerNameAccessor = markerNameAccessor,
    valueAccessor = valueAccessor,
    groupAccessor = groupAccessor,
    latitudeAccessor = latitudeAccessor,
    longitudeAccessor = longitudeAccessor,
    mainTitle = mainTitle,
    subTitle = subTitle,
    list(accessibility = accessibility),
    props
  )
  # now we just pass this through to visaChart for render
  visaChart("world-map",
            data = data,
            propList = propList,
            ...)
}
/scratch/gouwar.j/cran-all/cranData/visachartR/R/world_map.R
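# Illustrative choropleth-style sketch for world_map(), complementing the marker-based
# example in the roxygen above. joinAccessor points at an ISO 3-digit country code
# column as described in the parameter documentation; the countries and values below
# are made up for illustration (assumes visachartR is installed).
library(visachartR)

country_data <- data.frame(
  iso3  = c("USA", "BRA", "IND", "DEU", "ZAF"),
  name  = c("United States", "Brazil", "India", "Germany", "South Africa"),
  value = c(120, 80, 95, 60, 40)
)

world_map(
  country_data,
  joinAccessor     = "iso3",
  joinNameAccessor = "name",
  valueAccessor    = "value",
  mainTitle        = "Illustrative values by country"
)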
#'Correspondence Analysis of Adverse Events #'@param data data.frame or tibble object. #'@param id unquoted expression indicating the #'variable name in \code{data} that corresponds to the id variable. #'@param group unquoted expression indicating the #'variable name in \code{data} that corresponds to the group variable. #'@param ae_class unquoted expression indicating the #'variable name in \code{data} that corresponds to AE class. #'@param label character value indicating the #'column name of AE class in resulting tables. #'@param contr_indicator logical value indicating the #'use of color intensity to represent the maximum contribution of each \code{ae_class}. #'@param mass_indicator logical value indicating the #'use of dot size to represent the overall relative frequency of each \code{ae_class}. #'@param contr_threshold numerical value between 0 an 1 filtering #'\code{ae_class} with contribution greater than \code{contr_threshold}. #'@param mass_threshold numerical value between 0 an 1 filtering #'\code{ae_class} with mass greater than \code{mass_threshold}. #' #'@return a list of #'\item{tab_abs}{a tibble showing absolute frequency of \code{ae_class} by \code{group};} #'\item{tab_rel}{a tibble showing percent of \code{ae_class} by \code{group};} #'\item{total_inertia}{a numerical value indicating the total inertia;} #'\item{tab_inertia}{a tibble showing inertia broken down by dimension and the percent relative to the total inertia;} #'\item{asymmetric_plot}{a contribution biplot.} #' #'@references Levine RA, Sampson E, Lee TC. Journal of Computational and Graphical Statistics. Wiley Interdisciplinary Reviews: Computational Statistics. 2014 Jul;6(4):233-9. #' #'@examples #'library(magrittr) #'library(dplyr) #' #'id <- rep(1:50, each = 2) #'group <- c(rep("A", 50), rep("B", 50)) #'ae_grade <- sample(1:5, size = 100, replace = TRUE) #'ae_domain <- sample(c("D", "E"), size = 100, replace = TRUE) #'ae_term <- sample(c("F", "G", "H", "I"), size = 100, replace = TRUE) #'df <- tibble(id = id, trt = group, #' ae_g = ae_grade, ae_d = ae_domain, ae_t = ae_term) #'test <- df %>% ca_ae(., id = id, group = trt, ae = ae_g, label = "AE", #' contr_indicator = TRUE, mass_indicator = TRUE, #' contr_threshold = 0.01, mass_threshold = 0.01) #' #'@import magrittr #'@import ggplot2 #'@import dplyr #'@importFrom rlang .data enquos := #'@importFrom tidyr pivot_wider separate #'@importFrom ca ca #'@importFrom stats addmargins #'@importFrom ggrepel geom_label_repel #' #'@export ca_ae <- function(data, id, group, ae_class, label = "AE", contr_indicator = TRUE, mass_indicator = TRUE, contr_threshold = NULL, mass_threshold = NULL) { temp <- enquos(group = group, ae = ae_class, id = id, .ignore_empty = "all") aux <- data %>% select(!!!temp) %>% na.exclude() %>% distinct(id, .data$ae, .keep_all = TRUE) total <- data %>% select(!!!temp) %>% distinct(id, .keep_all = TRUE) %>% count(group) tab <- table(aux$ae, aux$group) p <- t(t(tab)/as.numeric(total$n)) q <- 1 - p rownames(q) <- paste0(rownames(q), "_C") tab.ca <- rbind(p, q) res.ca <- ca(tab.ca) names(dimnames(p)) <- c("ae", "group") average <- round(100*rowMeans(p), 3) tab_rel <- round(100*p, 3) %>% as_tibble() %>% pivot_wider(names_from = .data$group, values_from = .data$n) %>% mutate(Average = average) if (is.null(contr_threshold)) contr_threshold <- 1/nrow(tab) if (is.null(mass_threshold)) mass_threshold <- 1/nrow(tab) expected_threshold <- 1/nrow(tab) names(dimnames(tab)) <- c("ae", "group") tab_abs <- tab %>% as_tibble() %>% pivot_wider(names_from = 
.data$group, values_from = .data$n) inertia <- res.ca$sv^2 total_inertia <- sum(inertia) explained_var <- 100*inertia/total_inertia tab_inertia = tibble(Dimension = 1:length(inertia), Inertia = inertia, 'Explained Variance' = explained_var) if (ncol(tab_abs) < 4){ aux <- res.ca$rowcoord*sqrt(res.ca$rowmass) contr <- round(100*(res.ca$rowcoord*sqrt(res.ca$rowmass))^2, 2) tab_contr <- as_tibble(contr, rownames = "labels") %>% separate(labels, into = c("ae", "delete"), sep = "_", fill = "right") %>% group_by(.data$ae) %>% summarise(across(starts_with("Dim"), sum, .names = "{col}"), .groups = "drop_last") colnames(tab_contr)[-1] <- paste0("Dim ", 1:ncol(aux)) standard.coordinates.row <- as_tibble(aux, rownames = "labels") %>% separate(labels, into = c("labels", "delete"), sep = "_", fill = "right") %>% filter(is.na(.data$delete)) %>% select(-.data$delete) %>% mutate(type = "row", contr = tab_contr[[2]]/100, mass = average/100) %>% filter(.data$contr > contr_threshold & .data$mass > mass_threshold) colnames(standard.coordinates.row)[2] <- "dim_1" group_mass <- ifelse(is.finite(min(standard.coordinates.row$mass, na.rm = TRUE)) & is.finite(max(standard.coordinates.row$mass, na.rm = TRUE)), (min(standard.coordinates.row$mass, na.rm = TRUE) + max(standard.coordinates.row$mass, na.rm = TRUE))/2, ifelse(is.finite(max(standard.coordinates.row$mass, na.rm = TRUE)), 0.5*max(standard.coordinates.row$mass, na.rm = TRUE), ifelse(is.finite(min(standard.coordinates.row$mass, na.rm = TRUE)), 1.5*min(standard.coordinates.row$mass, na.rm = TRUE), 0.5))) principal.coordinates.col <- tibble(dim_1 = as.numeric(res.ca$colcoord*res.ca$sv)) %>% # mutate(labels = rownames(res.ca$colcoord), type = "col", contr = 1, mass = group_mass) selected_classes <- as.character(standard.coordinates.row$labels) if (nrow(standard.coordinates.row) > 0) standard.coordinates.row <- standard.coordinates.row %>% mutate(contr = .data$contr/max(.data$contr)) dp <- bind_rows(principal.coordinates.col, standard.coordinates.row) if (mass_indicator & contr_indicator){ asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1, y = NA, color = .data$type, alpha = .data$contr, size = .data$mass)) } else if (mass_indicator & !contr_indicator){ asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1, y = NA, color = .data$type, size = .data$mass)) } else if (!mass_indicator & contr_indicator) { asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1, y = NA, color = .data$type, alpha = .data$contr)) } else { asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1, y = NA, color = .data$type)) } asymmetric_plot <- asymmetric_plot + geom_vline(xintercept = 0, linetype = 2) + geom_point() + geom_label_repel(aes(label = .data$labels), xlim = c(-Inf, Inf), ylim = c(-Inf, Inf), min.segment.length = 0) + scale_colour_manual(values = c("red", "blue")) + labs(x = paste0("Dim 1 ", "(", round(explained_var[1], 2), "%)")) + theme_minimal() + theme( legend.position = "none", axis.text.y = element_blank(), axis.ticks.y = element_blank(), axis.title.y = element_blank(), text = element_text(size = 20)) + scale_size_continuous(range = c(3, 6)) + scale_alpha_continuous(range = c(0.3, 1)) temp <- round(100*(res.ca$rowcoord*sqrt(res.ca$rowmass))^2, 2) tab_contr <- as_tibble(temp, rownames = "ae") %>% separate(.data$ae, into = c("ae", "delete"), sep = "_", fill = "right") %>% group_by(.data$ae) %>% summarize(across(starts_with("Dim"), sum, .names = "{col}")) colnames(tab_contr)[-1] <- "Dim 1" } else { aux <- res.ca$rowcoord*sqrt(res.ca$rowmass) colnames(aux) <- paste0("dim_", 1:ncol(aux)) contr 
<- round(100*(res.ca$rowcoord*sqrt(res.ca$rowmass))^2, 2) tab_contr <- as_tibble(contr, rownames = "labels") %>% separate(labels, into = c("ae", "delete"), sep = "_", fill = "right") %>% group_by(.data$ae) %>% summarise(across(starts_with("Dim"), sum, .names = "{col}"), .groups = "drop_last") colnames(tab_contr)[-1] <- paste0("Dim ", 1:ncol(aux)) standard.coordinates.row <- as_tibble(aux, rownames = "labels") %>% separate(labels, into = c("labels", "delete"), sep = "_", fill = "right") %>% filter(is.na(.data$delete)) %>% select(-.data$delete) %>% mutate(type = "row", contr = pmax(tab_contr[[2]]/100, tab_contr[[3]]/100), mass = average/100) %>% filter(.data$contr > contr_threshold & .data$mass > mass_threshold) selected_classes <- as.character(standard.coordinates.row$labels) group_mass <- ifelse(is.finite(min(standard.coordinates.row$mass, na.rm = TRUE)) & is.finite(max(standard.coordinates.row$mass, na.rm = TRUE)), (min(standard.coordinates.row$mass, na.rm = TRUE) + max(standard.coordinates.row$mass, na.rm = TRUE))/2, ifelse(is.finite(max(standard.coordinates.row$mass, na.rm = TRUE)), 0.5*max(standard.coordinates.row$mass, na.rm = TRUE), ifelse(is.finite(min(standard.coordinates.row$mass, na.rm = TRUE)), 1.5*min(standard.coordinates.row$mass, na.rm = TRUE), 0.5))) aux <- res.ca$colcoord%*%diag(res.ca$sv) colnames(aux) <- paste0("dim_", 1:ncol(aux)) principal.coordinates.col <- as_tibble(aux) %>% mutate(labels = rownames(res.ca$colcoord), type = "col", contr = 1, mass = group_mass) if (nrow(standard.coordinates.row) > 0) standard.coordinates.row <- standard.coordinates.row %>% mutate(contr = .data$contr/max(.data$contr)) dp <- bind_rows(principal.coordinates.col, standard.coordinates.row) if (mass_indicator & contr_indicator){ asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1, y = .data$dim_2, color = .data$type, alpha = .data$contr, size = .data$mass)) } else if (mass_indicator & !contr_indicator){ asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1, y = .data$dim_2, color = .data$type, size = .data$mass)) } else if (!mass_indicator & contr_indicator){ asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1, y = .data$dim_2, color = .data$type, alpha = .data$contr)) } else { asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1, y = .data$dim_2, color = .data$type)) } asymmetric_plot <- asymmetric_plot + geom_hline(yintercept = 0, linetype = 2) + geom_vline(xintercept = 0, linetype = 2) + geom_point() + geom_label_repel(aes(label = .data$labels), xlim = c(-Inf, Inf), ylim = c(-Inf, Inf), min.segment.length = 0) + scale_colour_manual(values = c("red", "blue")) + labs(x = paste0("Dim 1 ", "(", round(explained_var[1], 2), "%)"), y = paste0("Dim 2 ", "(", round(explained_var[2], 2), "%)")) + theme_minimal() + theme(legend.position = "none", text = element_text(size = 20))+ scale_size_continuous(range = c(3, 6)) + scale_alpha_continuous(range = c(0.3, 1)) } tab_rel <- tab_rel %>% filter(.data$ae %in% selected_classes) %>% rename(!!label := .data$ae) %>% mutate(across(where(is.numeric), ~ format(.x, digits = 2, nsmall = 2))) colnames(tab_rel)[-c(1, ncol(tab_rel))] <- paste0(colnames(tab_rel)[-c(1, ncol(tab_rel))], "<br> (n = ", total$n, ")") tab_contr <- tab_contr %>% filter(.data$ae %in% selected_classes) %>% rename(!!label := .data$ae) out <- list(tab_abs = tab_abs, tab_rel = tab_rel, total_inertia = total_inertia, tab_inertia = tab_inertia, tab_contr = tab_contr, asymmetric_plot = asymmetric_plot) return(out) }
/scratch/gouwar.j/cran-all/cranData/visae/R/ca.R
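# Sketch of inspecting the list returned by ca_ae(), following the simulated data in
# the roxygen example above but running the analysis on the AE domain. Per the code
# above, contributions in tab_contr are 100 * rowmass * rowcoord^2 summed over each
# class and its complement row, so each dimension's column adds up to roughly 100.
library(dplyr)
library(visae)

set.seed(123)
df <- tibble(
  id   = rep(1:50, each = 2),
  trt  = c(rep("A", 50), rep("B", 50)),
  ae_d = sample(c("D", "E", "F"), size = 100, replace = TRUE)
)

res <- ca_ae(df, id = id, group = trt, ae_class = ae_d, label = "Domain")

res$tab_inertia      # inertia and explained variance per dimension
res$tab_contr        # contribution of each domain to each dimension
res$asymmetric_plot  # contribution biplot (a ggplot object)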
utils::globalVariables("where")
/scratch/gouwar.j/cran-all/cranData/visae/R/globals.R
#'Shiny App for Correspondence Analysis of Adverse Events #' #'@param data data.frame or tibble object. #'@param id unquoted expression indicating the #'variable name in \code{data} that corresponds to the id variable. #'@param group unquoted expression indicating the #'variable name in \code{data} that corresponds to the group variable. #'@param ae_grade unquoted expression indicating the #'variable name in \code{data} that corresponds to AE grade class. #'@param ae_domain unquoted expression indicating the #'variable name in \code{data} that corresponds to AE domain class. #'@param ae_term unquoted expression indicating the #'variable name in \code{data} that corresponds to AE term class. #'@param ae_cycle unquoted expression indicating the #'variable name in \code{data} that corresponds to AE cycle. #' #'@return an interactive web application to perform correspondence analysis #'for adverse event data. #' #' #'@examples #'\dontrun{ #'library(magrittr) #'library(dplyr) #'patient_id <- 1:100 #'group <- c(rep("A", 50), rep("B", 50)) #'ae_grade <- sample(1:5, size = 100, replace = TRUE) #'ae_domain <- sample(c("C", "D"), size = 100, replace = TRUE) #'ae_term <- sample(c("E", "F", "G", "H"), size = 100, replace = TRUE) #'dt <- tibble(patient_id = patient_id, trt = group, #' ae_g = ae_grade, ae_d = ae_domain, ae_t = ae_term) #'dt %>% run_ca(., group = trt, #' id = patient_id, #' ae_grade = ae_g, #' ae_domain = ae_d, #' ae_term = ae_t) #' } #' #'@rawNamespace import(shiny, except = c(dataTableOutput, renderDataTable)) #'@import magrittr #'@import dplyr #'@importFrom shinyjs js useShinyjs extendShinyjs #'@importFrom DT renderDataTable dataTableOutput #'@importFrom rlang enquos enquo quo_is_null #'@importFrom stats na.exclude #'@export run_ca <- function(data, id, group, ae_grade = NULL, ae_domain = NULL, ae_term = NULL, ae_cycle = NULL) { id <- enquo(id) group <- enquo(group) ae_grade <- enquo(ae_grade) ae_domain <- enquo(ae_domain) ae_term <- enquo(ae_term) ae_cycle <- enquo(ae_cycle) if (quo_is_null(ae_grade) & quo_is_null(ae_domain) & quo_is_null(ae_term)) stop("There is no toxicity data available. Please input either ae_grade, ae_domain or ae_term.") aux <- enquos(group = group, id = id, ae_grade = ae_grade, ae_domain = ae_domain, ae_term = ae_term, ae_cycle = ae_cycle, .ignore_empty = "all") cond <- lapply(aux, function(x) !quo_is_null(x)) aux <- aux[unlist(cond)] data <- data %>% select(!!!aux) #https://stackoverflow.com/questions/49470474/saving-r-shiny-app-as-a-function-with-arguments-passed-to-the-shiny-app shinyOptions(data = data) source(system.file("ca_shiny.R", package = "visae"))$value }
/scratch/gouwar.j/cran-all/cranData/visae/R/run.R
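# Sketch of launching the Shiny report with run_ca(), additionally passing the optional
# ae_cycle column so the cycle filters in the app are populated. Wrapped in
# interactive() because the call starts a Shiny session; the simulated data are
# illustrative only.
library(dplyr)
library(visae)

if (interactive()) {
  dt <- tibble(
    patient_id = rep(1:50, each = 2),
    trt        = rep(c("A", "B"), each = 50),
    cycle      = sample(1:3, 100, replace = TRUE),
    ae_g       = sample(1:5, 100, replace = TRUE),
    ae_d       = sample(c("C", "D"), 100, replace = TRUE),
    ae_t       = sample(c("E", "F", "G", "H"), 100, replace = TRUE)
  )

  run_ca(dt,
         id        = patient_id,
         group     = trt,
         ae_grade  = ae_g,
         ae_domain = ae_d,
         ae_term   = ae_t,
         ae_cycle  = cycle)
}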
appDir <- getwd() data <- getShinyOption("data") #https://stackoverflow.com/questions/44319664/r-shiny-condition-a-tab-in-the-navbar-based-on-previous-tabs-condition jscode <- " shinyjs.disabletabD =function(name){ $('ul li:has(a[data-value= \"Domain\"])').addClass('disabled'); $('ul li:has(a[data-value= \"DomainGrade\"])').addClass('disabled'); } shinyjs.enabletabD = function(name){ $('ul li:has(a[data-value= \"Domain\"])').removeClass('disabled'); $('ul li:has(a[data-value= \"DomainGrade\"])').removeClass('disabled'); } shinyjs.disabletabT =function(name){ $('ul li:has(a[data-value= \"TermGrade\"])').addClass('disabled'); $('ul li:has(a[data-value= \"Term\"])').addClass('disabled'); } shinyjs.enabletabT = function(name){ $('ul li:has(a[data-value= \"TermGrade\"])').removeClass('disabled'); $('ul li:has(a[data-value= \"Term\"])').removeClass('disabled'); } " ui = function(data){ if("ae_cycle" %in% colnames(data)){ cycle_options <- levels(as.factor(data$ae_cycle)) } else { cycle_options <- NULL } if("ae_grade" %in% colnames(data)){ grade_options <- levels(as.factor(data$ae_grade)) } else { grade_options <- NULL } if("ae_domain" %in% colnames(data)){ domain_options <- levels(as.factor(data$ae_domain)) } else { domain_options <- NULL } if("ae_domain" %in% colnames(data) & "ae_grade" %in% colnames(data)){ domain_grade_options <- levels(as.factor(paste0(data$ae_domain, ":", data$ae_grade))) } else { domain_grade_options <- NULL } if("ae_term" %in% colnames(data)){ term_options <- levels(as.factor(data$ae_term)) } else { term_options <- NULL } if("ae_term" %in% colnames(data) & "ae_grade" %in% colnames(data)){ term_grade_options <- levels(as.factor(paste0(data$ae_term, ":", data$ae_grade))) } else { term_grade_options <- NULL } fluidPage( navbarPage("AE Report", tabPanel("Grade", value = "Grade", sidebarLayout( sidebarPanel(h4("Plot"), numericInput( inputId = 'contr_threshold_grade', label = h6('Contribution Threshold'), min = 0, max = 100, step = round(100/length(grade_options), 2), value = round(100/length(grade_options), 2) ), numericInput( inputId = 'mass_threshold_grade', label = h6('Mass Threshold'), min = 0, max = 100, step = round(100/length(grade_options), 2), value = round(100/length(grade_options), 2) ), checkboxInput( inputId = 'contr_grade', label = h6('Contribution Color Intensity'), value = TRUE ), checkboxInput( inputId = 'mass_grade', label = h6('Mass Dot Size'), value = TRUE ), downloadButton('downloadplot_grade', 'Download Asymmetric Plot'), downloadButton('downloadfreq_grade', 'Download Frequency Table'), h4("Trial"), selectInput( inputId = 'selected_cycle_grade', label = h6('Selected Cycles'), multiple = TRUE, selectize = TRUE, choices = cycle_options, selected = cycle_options ) ), mainPanel( tabsetPanel( tabPanel("Asymmetric Plot", plotOutput("ae_grade_biplot", height = "600px", width = "600px")), tabPanel("Frequency Table", DT::dataTableOutput("ae_grade_table")), tabPanel("Contribution Table", DT::dataTableOutput("ae_grade_contr_table")) ) ) ) ), tabPanel("Domain", value = "Domain", sidebarLayout( sidebarPanel(h4("Plot"), numericInput( inputId = 'contr_threshold_domain', label = h6('Contribution Threshold'), min = 0, max = 100, step = round(100/length(domain_options), 2), value = round(100/length(domain_options), 2) ), numericInput( inputId = 'mass_threshold_domain', label = h6('Mass Threshold'), min = 0, max = 100, step = round(100/length(domain_options), 2), value = round(100/length(domain_options), 2) ), checkboxInput( inputId = 'contr_domain', label = h6('Contribution 
Color Intensity'), value = TRUE ), checkboxInput( inputId = 'mass_domain', label = h6('Mass Dot Size'), value = TRUE ), downloadButton('downloadplot_domain', 'Download Asymmetric Plot'), downloadButton('downloadfreq_domain', 'Download Frequency Table'), h4("Trial"), selectInput( inputId = 'selected_cycle_domain', label = h6('Selected Cycles'), multiple = TRUE, selectize = TRUE, choices = cycle_options, selected = cycle_options ), selectInput( inputId = 'selected_grade_domain', label = h6('Selected Grades'), multiple = TRUE, selectize = TRUE, choices = grade_options, selected = grade_options ) ), mainPanel( tabsetPanel(tabPanel("Asymmetric Plot", plotOutput("ae_domain_biplot", height = "600px", width = "600px")), tabPanel("Frequency Table", DT::dataTableOutput("ae_domain_table")), tabPanel("Contribution Table", DT::dataTableOutput("ae_domain_contr_table")) ) ) ) ), tabPanel("Domain and Grade", value = "DomainGrade", sidebarLayout( sidebarPanel(h4("Plot"), numericInput( inputId = 'contr_threshold_domain_grade', label = h6('Contribution Threshold'), min = 0, max = 100, step = round(100/length(domain_grade_options), 2), value = round(100/length(domain_grade_options), 2) ), numericInput( inputId = 'mass_threshold_domain_grade', label = h6('Mass Threshold'), min = 0, max = 100, step = round(100/length(domain_grade_options), 2), value = round(100/length(domain_grade_options), 2) ), checkboxInput( inputId = 'contr_domain_grade', label = h6('Contribution Color Intensity'), value = TRUE ), checkboxInput( inputId = 'mass_domain_grade', label = h6('Mass Dot Size'), value = TRUE ), downloadButton('downloadplot_domain_grade', 'Download Asymmetric Plot'), downloadButton('downloadfreq_domain_grade', 'Download Frequency Table'), h4("Trial"), selectInput( inputId = 'selected_cycle_domain_grade', label = h6('Selected Cycles'), multiple = TRUE, selectize = TRUE, choices = cycle_options, selected = cycle_options ) ), mainPanel(tabsetPanel( tabPanel("Asymmetric Plot", plotOutput("ae_domain_grade_biplot", height = "600px", width = "600px")), tabPanel("Frequency Table", DT::dataTableOutput("ae_domain_grade_table")), tabPanel("Contribution Table", DT::dataTableOutput("ae_domain_grade_contr_table")) ) ) ) ), tabPanel("Term", value = "Term", sidebarLayout( sidebarPanel(h4("Plot"), numericInput( inputId = 'contr_threshold_term', label = h6('Contribution Threshold'), min = 0, max = 100, step = round(100/length(term_options), 2), value = round(100/length(term_options), 2) ), numericInput( inputId = 'mass_threshold_term', label = h6('Mass Threshold'), min = 0, max = 100, step = round(100/length(term_options), 2), value = round(100/length(term_options), 2) ), checkboxInput( inputId = 'contr_term', label = h6('Contribution Color Intensity'), value = TRUE ), checkboxInput( inputId = 'mass_term', label = h6('Mass Dot Size'), value = TRUE ), downloadButton('downloadplot_term', 'Download Asymmetric Plot'), downloadButton('downloadfreq_term', 'Download Frequency Table'), h4("Trial"), selectInput( inputId = 'selected_cycle_term', label = h6('Selected Cycles'), multiple = TRUE, selectize = TRUE, choices = cycle_options, selected = cycle_options ), selectInput( inputId = 'selected_domain_term', label = h6('Selected Domains'), multiple = TRUE, selectize = TRUE, choices = domain_options, selected = domain_options ), selectInput( inputId = 'selected_grade_term', label = h6('Selected Grades'), multiple = TRUE, selectize = TRUE, choices = grade_options, selected = grade_options ) ), mainPanel(tabsetPanel( tabPanel("Asymmetric Plot", 
plotOutput("ae_term_biplot", height = "600px", width = "600px")), tabPanel("Frequency Table", DT::dataTableOutput("ae_term_table")), tabPanel("Contribution Table", DT::dataTableOutput("ae_term_contr_table")) ) ) ) ), tabPanel("Term and Grade", value = "TermGrade", sidebarLayout( sidebarPanel(h4("Plot"), numericInput( inputId = 'contr_threshold_term_grade', label = h6('Contribution Threshold'), min = 0, max = 100, step = round(100/length(term_grade_options), 2), value = round(100/length(term_grade_options), 2) ), numericInput( inputId = 'mass_threshold_term_grade', label = h6('Mass Threshold'), min = 0, max = 100, step = round(100/length(term_grade_options), 2), value = round(100/length(term_grade_options), 2) ), checkboxInput( inputId = 'contr_term_grade', label = h6('Contribution Color Intensity'), value = TRUE ), checkboxInput( inputId = 'mass_term_grade', label = h6('Mass Dot Size'), value = TRUE ), downloadButton('downloadplot_term_grade', 'Download Asymmetric Plot'), downloadButton('downloadfreq_term_grade', 'Download Frequency Table'), h4("Trial"), selectInput( inputId = 'selected_cycle_term_grade', label = h6('Selected Cycles'), multiple = TRUE, selectize = TRUE, choices = cycle_options, selected = cycle_options ), selectInput( inputId = 'selected_domain_term_grade', label = h6('Selected Domains'), multiple = TRUE, selectize = TRUE, choices = domain_options, selected = domain_options ) ), mainPanel(tabsetPanel( tabPanel("Asymmetric Plot", plotOutput("ae_term_grade_biplot", height = "600px", width = "600px")), tabPanel("Frequency Table", DT::dataTableOutput("ae_term_grade_table")), tabPanel("Contribution Table", DT::dataTableOutput("ae_term_grade_contr_table")) ) ) ) ), tabPanel("About", mainPanel(tabsetPanel( tabPanel("Authors", column(width = 8, offset = 1, p(h4("Contact")), p("Marcio Augusto Diniz", "<[email protected]>"), p("Michael Luu", "<[email protected]>"), p("Gillian Gresham", "<[email protected]>"), p("Andre Rogatko", "<[email protected]>"), p(strong("Cedars Sinai Medical Center")), p(strong("Samuel Oschin Comprehensive Cancer Institute, Biostatistics Core")) ), column(width = 8, offset = 1, p(h4("Funding")), p(a("Moonshot Initiative", href="https://www.cancer.gov/research/key-initiatives/moonshot-cancer-initiative"), " - Grant Number: 1U01CA232859-01") ) ) ) ) ), shinyjs::useShinyjs(), shinyjs::extendShinyjs(text = jscode, functions = c("disabletabD", "enabletabD", "disabletabT", "enabletabT")) ) ) } server = function(input, output, session) { shiny_grade <- function(data, selected_cycle, contr_indicator, mass_indicator, contr_threshold, mass_threshold){ if ("ae_cycle" %in% colnames(data)) data <- data %>% filter(.data$ae_cycle %in% selected_cycle) out <- visae::ca_ae(data, id = .data$id, group = .data$group, ae_class = .data$ae_grade, label = "Grade", contr_indicator = contr_indicator, mass_indicator = mass_indicator, contr_threshold = contr_threshold, mass_threshold = mass_threshold) return(out) } shiny_domain <- function(data, selected_cycle, selected_grade, contr_indicator, mass_indicator, contr_threshold, mass_threshold){ if ("ae_cycle" %in% colnames(data)) data <- data %>% filter(.data$ae_cycle %in% selected_cycle) if ("ae_grade" %in% colnames(data)) data <- data %>% filter(.data$ae_grade %in% c(selected_grade, NA)) out <- visae::ca_ae(data, id = .data$id, group = .data$group, ae_class = .data$ae_domain, label = "Domain", contr_indicator = contr_indicator, mass_indicator = mass_indicator, contr_threshold = contr_threshold, mass_threshold = mass_threshold) 
  return(out)
}

# Helper: stacked CA on Domain:Grade combinations
shiny_domain_grade <- function(data, selected_cycle, contr_indicator, mass_indicator,
                               contr_threshold, mass_threshold) {
  if ("ae_cycle" %in% colnames(data))
    data <- data %>% filter(.data$ae_cycle %in% selected_cycle)
  data <- data %>%
    mutate(ae_domain_grade = ifelse(!is.na(.data$ae_domain) & !is.na(.data$ae_grade),
                                    paste0(.data$ae_domain, ": ", .data$ae_grade), NA))
  out <- visae::ca_ae(data,
                      id = .data$id, group = .data$group,
                      ae_class = .data$ae_domain_grade, label = "Domain:Grade",
                      contr_indicator = contr_indicator, mass_indicator = mass_indicator,
                      contr_threshold = contr_threshold, mass_threshold = mass_threshold)
  return(out)
}

# Helper: stacked CA on AE terms
shiny_term <- function(data, selected_cycle, selected_domain, selected_grade,
                       contr_indicator, mass_indicator, contr_threshold, mass_threshold) {
  if ("ae_cycle" %in% colnames(data))
    data <- data %>% filter(.data$ae_cycle %in% selected_cycle)
  if ("ae_domain" %in% colnames(data))
    data <- data %>% filter(.data$ae_domain %in% c(selected_domain, NA))
  if ("ae_grade" %in% colnames(data))
    data <- data %>% filter(.data$ae_grade %in% c(selected_grade, NA))
  out <- visae::ca_ae(data,
                      id = .data$id, group = .data$group,
                      ae_class = .data$ae_term, label = "Term",
                      contr_indicator = contr_indicator, mass_indicator = mass_indicator,
                      contr_threshold = contr_threshold, mass_threshold = mass_threshold)
  return(out)
}

# Helper: stacked CA on Term:Grade combinations
shiny_term_grade <- function(data, selected_cycle, selected_domain,
                             contr_indicator, mass_indicator, contr_threshold, mass_threshold) {
  if ("ae_cycle" %in% colnames(data))
    data <- data %>% filter(.data$ae_cycle %in% selected_cycle)
  if ("ae_domain" %in% colnames(data))
    data <- data %>% filter(.data$ae_domain %in% c(selected_domain, NA))
  data <- data %>%
    mutate(ae_term_grade = ifelse(!is.na(.data$ae_term) & !is.na(.data$ae_grade),
                                  paste0(.data$ae_term, ": ", .data$ae_grade), NA))
  out <- visae::ca_ae(data,
                      id = .data$id, group = .data$group,
                      ae_class = .data$ae_term_grade, label = "Term:Grade",
                      contr_indicator = contr_indicator, mass_indicator = mass_indicator,
                      contr_threshold = contr_threshold, mass_threshold = mass_threshold)
  return(out)
}

# Grade tab: relative frequency table, asymmetric biplot, contribution table and downloads
if ('ae_grade' %in% colnames(data)) {
  tabInput_grade <- reactive({
    tab <- shiny_grade(data,
                       selected_cycle = input$selected_cycle_grade,
                       contr_indicator = input$contr_grade,
                       mass_indicator = input$mass_grade,
                       contr_threshold = input$contr_threshold_grade/100,
                       mass_threshold = input$mass_threshold_grade/100)$tab_rel
  })
  output$ae_grade_table <- DT::renderDT({tabInput_grade()}, escape = FALSE)
  plotInput_grade <- reactive({
    gp <- shiny_grade(data,
                      selected_cycle = input$selected_cycle_grade,
                      contr_indicator = input$contr_grade,
                      mass_indicator = input$mass_grade,
                      contr_threshold = input$contr_threshold_grade/100,
                      mass_threshold = input$mass_threshold_grade/100)$asymmetric_plot
  })
  output$ae_grade_biplot <- renderPlot({print(plotInput_grade())})
  output$ae_grade_contr_table <- DT::renderDataTable(
    shiny_grade(data,
                selected_cycle = input$selected_cycle_grade,
                contr_indicator = input$contr_grade,
                mass_indicator = input$mass_grade,
                contr_threshold = input$contr_threshold_grade/100,
                mass_threshold = input$mass_threshold_grade/100)$tab_contr
  )
  output$downloadplot_grade <- downloadHandler(
    filename = function() { paste('ca_grade.pdf', sep = '') },
    content = function(file) {
      ggsave(file, plot = plotInput_grade(), device = "pdf", height = 7, width = 7)
    }
  )
  output$downloadfreq_grade <- downloadHandler(
    filename = function() { paste('ca_grade.csv', sep = '') },
    content = function(file) { write.csv(x = tabInput_grade(), file = file) }
  )
}

# Domain tab: enabled only when ae_domain is available
if ('ae_domain' %in% colnames(data)) {
  shinyjs::js$enabletabD("Domain")
  tabInput_domain <- reactive({
    tab <- shiny_domain(data,
                        selected_cycle = input$selected_cycle_domain,
                        selected_grade = input$selected_grade_domain,
                        contr_indicator = input$contr_domain,
                        mass_indicator = input$mass_domain,
                        contr_threshold = input$contr_threshold_domain/100,
                        mass_threshold = input$mass_threshold_domain/100)$tab_rel
  })
  output$ae_domain_table <- DT::renderDT({tabInput_domain()}, escape = FALSE)
  plotInput_domain <- reactive({
    gp <- shiny_domain(data,
                       selected_cycle = input$selected_cycle_domain,
                       selected_grade = input$selected_grade_domain,
                       contr_indicator = input$contr_domain,
                       mass_indicator = input$mass_domain,
                       contr_threshold = input$contr_threshold_domain/100,
                       mass_threshold = input$mass_threshold_domain/100)$asymmetric_plot
  })
  output$ae_domain_biplot <- renderPlot({print(plotInput_domain())})
  output$ae_domain_contr_table <- DT::renderDataTable(
    shiny_domain(data,
                 selected_cycle = input$selected_cycle_domain,
                 selected_grade = input$selected_grade_domain,
                 contr_indicator = input$contr_domain,
                 mass_indicator = input$mass_domain,
                 contr_threshold = input$contr_threshold_domain/100,
                 mass_threshold = input$mass_threshold_domain/100)$tab_contr
  )
  output$downloadplot_domain <- downloadHandler(
    filename = function() { paste('ca_domain.pdf', sep = '') },
    content = function(file) {
      ggsave(file, plot = plotInput_domain(), device = "pdf", height = 7, width = 7)
    }
  )
  output$downloadfreq_domain <- downloadHandler(
    filename = function() { paste('ca_domain.csv', sep = '') },
    content = function(file) { write.csv(tabInput_domain(), file = file) }
  )
} else {
  shinyjs::js$disabletabD("Domain")
}

# Domain:Grade tab: requires both ae_domain and ae_grade
if ('ae_domain' %in% colnames(data) & 'ae_grade' %in% colnames(data)) {
  shinyjs::js$enabletabD("DomainGrade")
  tabInput_domain_grade <- reactive({
    tab <- shiny_domain_grade(data,
                              selected_cycle = input$selected_cycle_domain_grade,
                              contr_indicator = input$contr_domain_grade,
                              mass_indicator = input$mass_domain_grade,
                              contr_threshold = input$contr_threshold_domain_grade/100,
                              mass_threshold = input$mass_threshold_domain_grade/100)$tab_rel
  })
  output$ae_domain_grade_table <- DT::renderDT({tabInput_domain_grade()}, escape = FALSE)
  plotInput_domain_grade <- reactive({
    gp <- shiny_domain_grade(data,
                             selected_cycle = input$selected_cycle_domain_grade,
                             contr_indicator = input$contr_domain_grade,
                             mass_indicator = input$mass_domain_grade,
                             contr_threshold = input$contr_threshold_domain_grade/100,
                             mass_threshold = input$mass_threshold_domain_grade/100)$asymmetric_plot
  })
  output$ae_domain_grade_biplot <- renderPlot({print(plotInput_domain_grade())})
  output$ae_domain_grade_contr_table <- DT::renderDataTable(
    shiny_domain_grade(data,
                       selected_cycle = input$selected_cycle_domain_grade,
                       contr_indicator = input$contr_domain_grade,
                       mass_indicator = input$mass_domain_grade,
                       contr_threshold = input$contr_threshold_domain_grade/100,
                       mass_threshold = input$mass_threshold_domain_grade/100)$tab_contr
  )
  output$downloadplot_domain_grade <- downloadHandler(
    filename = function() { paste('ca_domain_grade.pdf', sep = '') },
    content = function(file) {
      ggsave(file, plot = plotInput_domain_grade(), device = "pdf", height = 7, width = 7)
    }
  )
  output$downloadfreq_domain_grade <- downloadHandler(
    filename = function() { paste('ca_domain_grade.csv', sep = '') },
    content = function(file) { write.csv(tabInput_domain_grade(), file = file) }
  )
} else {
  shinyjs::js$disabletabD("DomainGrade")
}

# Term tab: enabled only when ae_term is available
if ('ae_term' %in% colnames(data)) { #If true enable, else disable
  shinyjs::js$enabletabT("Term")
  tabInput_term <- reactive({
    tab <- shiny_term(data,
                      selected_cycle = input$selected_cycle_term,
                      selected_domain = input$selected_domain_term,
                      selected_grade = input$selected_grade_term,
                      contr_indicator = input$contr_term,
                      mass_indicator = input$mass_term,
                      contr_threshold = input$contr_threshold_term/100,
                      mass_threshold = input$mass_threshold_term/100)$tab_rel
  })
  output$ae_term_table <- DT::renderDT({tabInput_term()}, escape = FALSE)
  plotInput_term <- reactive({
    gp <- shiny_term(data,
                     selected_cycle = input$selected_cycle_term,
                     selected_domain = input$selected_domain_term,
                     selected_grade = input$selected_grade_term,
                     contr_indicator = input$contr_term,
                     mass_indicator = input$mass_term,
                     contr_threshold = input$contr_threshold_term/100,
                     mass_threshold = input$mass_threshold_term/100)$asymmetric_plot
  })
  output$ae_term_biplot <- renderPlot({print(plotInput_term())})
  output$ae_term_contr_table <- DT::renderDataTable(
    shiny_term(data,
               selected_cycle = input$selected_cycle_term,
               selected_domain = input$selected_domain_term,
               selected_grade = input$selected_grade_term,
               contr_indicator = input$contr_term,
               mass_indicator = input$mass_term,
               contr_threshold = input$contr_threshold_term/100,
               mass_threshold = input$mass_threshold_term/100)$tab_contr
  )
  output$downloadplot_term <- downloadHandler(
    filename = function() { paste('ca_term.pdf', sep = '') },
    content = function(file) {
      ggsave(file, plot = plotInput_term(), device = "pdf", height = 7, width = 7)
    }
  )
  output$downloadfreq_term <- downloadHandler(
    filename = function() { paste('ca_term.csv', sep = '') },
    content = function(file) { write.csv(tabInput_term(), file = file) }
  )
} else {
  shinyjs::js$disabletabT("Term")
}

# Term:Grade tab: requires both ae_term and ae_grade
if ('ae_term' %in% colnames(data) & 'ae_grade' %in% colnames(data)) { #If true enable, else disable
  shinyjs::js$enabletabT("TermGrade")
  tabInput_term_grade <- reactive({
    shiny_term_grade(data,
                     selected_cycle = input$selected_cycle_term_grade,
                     selected_domain = input$selected_domain_term_grade,
                     contr_indicator = input$contr_term_grade,
                     mass_indicator = input$mass_term_grade,
                     contr_threshold = input$contr_threshold_term_grade/100,
                     mass_threshold = input$mass_threshold_term_grade/100)$tab_rel
  })
  output$ae_term_grade_table <- DT::renderDT({tabInput_term_grade()}, escape = FALSE)
  plotInput_term_grade <- reactive({
    gp <- shiny_term_grade(data,
                           selected_cycle = input$selected_cycle_term_grade,
                           selected_domain = input$selected_domain_term_grade,
                           contr_indicator = input$contr_term_grade,
                           mass_indicator = input$mass_term_grade,
                           contr_threshold = input$contr_threshold_term_grade/100,
                           mass_threshold = input$mass_threshold_term_grade/100)$asymmetric_plot
  })
  output$ae_term_grade_biplot <- renderPlot({print(plotInput_term_grade())})
  output$ae_term_grade_contr_table <- DT::renderDataTable(
    shiny_term_grade(data,
                     selected_cycle = input$selected_cycle_term_grade,
                     selected_domain = input$selected_domain_term_grade,
                     contr_indicator = input$contr_term_grade,
                     mass_indicator = input$mass_term_grade,
                     contr_threshold = input$contr_threshold_term_grade/100,
                     mass_threshold = input$mass_threshold_term_grade/100)$tab_contr
  )
  output$downloadplot_term_grade <- downloadHandler(
    filename = function() { paste('ca_term_grade.pdf', sep = '') },
    content = function(file) {
      ggsave(file, plot = plotInput_term_grade(), device = "pdf", height = 7, width = 7)
    }
  )
  output$downloadfreq_term_grade <- downloadHandler(
    filename = function() { paste('ca_term_grade.csv', sep = '') },
    content = function(file) { write.csv(tabInput_term_grade(), file = file) }
  )
} else {
  shinyjs::js$disabletabT("TermGrade")
}
}

shinyApp(ui = ui(data), server = server)
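# Note (hedged sketch, not part of the original source): only `ui(data)` and
# `server` are visible in this file, so the exact launch mechanism is an
# assumption. Based on the package vignette, this app is typically started via
# run_ca(), which prepares `data` with id, group and AE columns, e.g.:
#
#   library(visae)
#   library(dplyr)
#   dt %>% run_ca(., group = trt, id = patient_id, ae_grade = ae_g)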
/scratch/gouwar.j/cran-all/cranData/visae/inst/ca_shiny.R
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ---- echo=FALSE, out.width="100%", out.height="100%"-------------------------
knitr::include_graphics("figures/fig2.png")

## ---- echo=FALSE, out.width="100%", out.height="100%"-------------------------
knitr::include_graphics("figures/fig3.png")

## ---- echo=FALSE, out.width="100%", out.height="100%"-------------------------
knitr::include_graphics("figures/fig4.png")
/scratch/gouwar.j/cran-all/cranData/visae/inst/doc/ca_biplots.R
---
title: "Interpreting CA biplots"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Interpreting CA biplots}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

The R package [`visae`](https://CRAN.R-project.org/package=visae) implements 'shiny' apps to visualize adverse events (AE) based on the Common Terminology Criteria for Adverse Events (CTCAE).

## Installation

``` r
install.packages("visae")
```

The latest version can be installed from GitHub as follows:

``` r
devtools::install_github("dnzmarcio/visae")
```

## Stacked Correspondence Analysis

### Generating a minimal dataset

```r
library(dplyr) # tibble() is re-exported by dplyr

patient_id <- 1:4000
group <- c(rep("A", 1000), rep("B", 1000), rep("C", 1000), rep("D", 1000))
ae_grade <- c(rep("AE class 01", 600), rep("AE class 02", 300),
              rep("AE class 03", 100), rep("AE class 04", 0),
              rep("AE class 01", 100), rep("AE class 02", 400),
              rep("AE class 03", 400), rep("AE class 04", 100),
              rep("AE class 01", 233), rep("AE class 02", 267),
              rep("AE class 03", 267), rep("AE class 04", 233),
              rep("AE class 01", 0),   rep("AE class 02", 100),
              rep("AE class 03", 300), rep("AE class 04", 600))
dt <- tibble(patient_id = patient_id, trt = group, ae_g = ae_grade)
```

### Investigating different CA configurations using the Shiny application

``` r
library(visae)
library(magrittr)
library(dplyr)

dt %>% run_ca(., group = trt, id = patient_id, ae_grade = ae_g)
```

### Plotting the CA biplot as a ggplot object

```r
ca <- dt %>% ca_ae(.,
                   group = trt,
                   id = patient_id,
                   ae_class = ae_g,
                   contr_indicator = FALSE,
                   mass_indicator = TRUE,
                   contr_threshold = 0,
                   mass_threshold = 0)
ca$asymmetric_plot
```

### Interpreting biplots for Correspondence Analysis

Investigators often interpret CA biplots erroneously, assuming that the distance between AE class dots and treatment dots is indicative of association. A step-by-step guide to interpreting biplots correctly is given below:

#### 1. Minimal example dataset

![](figures/fig1.png)

#### 2. Interpreting the percentage of explained variability by dimension, the average treatment profile at the center, and AE dot sizes

```{r, echo=FALSE, out.width="100%", out.height="100%"}
knitr::include_graphics("figures/fig2.png")
```

#### 3. Interpreting dimensions and associations between treatments and AEs

```{r, echo=FALSE, out.width="100%", out.height="100%"}
knitr::include_graphics("figures/fig3.png")
```

#### 4. Comparing treatments and avoiding misleading interpretations

```{r, echo=FALSE, out.width="100%", out.height="100%"}
knitr::include_graphics("figures/fig4.png")
```

## References

- [Diniz, M.A., Gresham, G., Kim, S. et al. Visualizing adverse events in clinical trials using correspondence analysis with R-package visae. BMC Med Res Methodol 21, 244 (2021). https://doi.org/10.1186/s12874-021-01368-w](https://bmcmedresmethodol.biomedcentral.com/articles/10.1186/s12874-021-01368-w).
/scratch/gouwar.j/cran-all/cranData/visae/inst/doc/ca_biplots.Rmd
---
title: "Interpreting CA biplots"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Interpreting CA biplots}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

The R package [`visae`](https://CRAN.R-project.org/package=visae) implements 'shiny' apps to visualize adverse events (AE) based on the Common Terminology Criteria for Adverse Events (CTCAE).

## Installation

``` r
install.packages("visae")
```

The latest version can be installed from GitHub as follows:

``` r
devtools::install_github("dnzmarcio/visae")
```

## Stacked Correspondence Analysis

### Generating a minimal dataset

```r
library(dplyr) # tibble() is re-exported by dplyr

patient_id <- 1:4000
group <- c(rep("A", 1000), rep("B", 1000), rep("C", 1000), rep("D", 1000))
ae_grade <- c(rep("AE class 01", 600), rep("AE class 02", 300),
              rep("AE class 03", 100), rep("AE class 04", 0),
              rep("AE class 01", 100), rep("AE class 02", 400),
              rep("AE class 03", 400), rep("AE class 04", 100),
              rep("AE class 01", 233), rep("AE class 02", 267),
              rep("AE class 03", 267), rep("AE class 04", 233),
              rep("AE class 01", 0),   rep("AE class 02", 100),
              rep("AE class 03", 300), rep("AE class 04", 600))
dt <- tibble(patient_id = patient_id, trt = group, ae_g = ae_grade)
```

### Investigating different CA configurations using the Shiny application

``` r
library(visae)
library(magrittr)
library(dplyr)

dt %>% run_ca(., group = trt, id = patient_id, ae_grade = ae_g)
```

### Plotting the CA biplot as a ggplot object

```r
ca <- dt %>% ca_ae(.,
                   group = trt,
                   id = patient_id,
                   ae_class = ae_g,
                   contr_indicator = FALSE,
                   mass_indicator = TRUE,
                   contr_threshold = 0,
                   mass_threshold = 0)
ca$asymmetric_plot
```

### Interpreting biplots for Correspondence Analysis

Investigators often interpret CA biplots erroneously, assuming that the distance between AE class dots and treatment dots is indicative of association. A step-by-step guide to interpreting biplots correctly is given below:

#### 1. Minimal example dataset

![](figures/fig1.png)

#### 2. Interpreting the percentage of explained variability by dimension, the average treatment profile at the center, and AE dot sizes

```{r, echo=FALSE, out.width="100%", out.height="100%"}
knitr::include_graphics("figures/fig2.png")
```

#### 3. Interpreting dimensions and associations between treatments and AEs

```{r, echo=FALSE, out.width="100%", out.height="100%"}
knitr::include_graphics("figures/fig3.png")
```

#### 4. Comparing treatments and avoiding misleading interpretations

```{r, echo=FALSE, out.width="100%", out.height="100%"}
knitr::include_graphics("figures/fig4.png")
```

## References

- [Diniz, M.A., Gresham, G., Kim, S. et al. Visualizing adverse events in clinical trials using correspondence analysis with R-package visae. BMC Med Res Methodol 21, 244 (2021). https://doi.org/10.1186/s12874-021-01368-w](https://bmcmedresmethodol.biomedcentral.com/articles/10.1186/s12874-021-01368-w).
/scratch/gouwar.j/cran-all/cranData/visae/vignettes/ca_biplots.Rmd
# Pair consecutive elements of X into a two-column "from-to" matrix, wrapping
# around from the last element back to the first; vectors of length two or
# less are returned unchanged.
FromTo <- function(X) {
  if (length(X) > 2) {
    X2 <- c(rep(X, each = 2)[-1], rep(X, each = 2)[1])
    X3 <- matrix(X2, ncol = 2, byrow = TRUE)
  } else {
    X3 <- X
  }
  X3
}
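# Illustrative usage sketch (hypothetical input; not part of the original
# source). Wrapped in `if (FALSE)` so it never runs when the package is built;
# it only documents the expected wrap-around pairing.
if (FALSE) {
  FromTo(c("A", "B", "C"))
  #>      [,1] [,2]
  #> [1,] "A"  "B"
  #> [2,] "B"  "C"
  #> [3,] "C"  "A"
}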
/scratch/gouwar.j/cran-all/cranData/viscomp/R/FromTo.R