#' Sets the default breaks for log10
#'
#' \code{xgx_breaks_log10} sets nice breaks for a log10 scale.
#' It improves on the default breaks function by ensuring there are at least
#' two breaks, and it will go by 3s (i.e. 1, 3, 10, 30, 100) when that
#' makes sense.
#'
#' For the extended breaks function, \code{weights} is a set of four weights for
#' \enumerate{
#' \item simplicity - how early in the Q order are you
#' \item coverage - labelings that don't extend outside the data:
#' range(data) / range(labels)
#' \item density (previously granularity) - how close to the number of
#' ticks do you get (default is 5)
#' \item legibility - has to do with fontsize and formatting to prevent
#' label overlap
#' }
#'
#' @references Talbot, Justin, Sharon Lin, and Pat Hanrahan.
#' "An extension of Wilkinson’s
#' algorithm for positioning tick labels on axes." IEEE Transactions
#' on visualization and computer graphics 16.6 (2010): 1036-1043.
#'
#' @param data_range range of the data
#'
#' @return numeric vector of breaks
#'
#' @examples
#' xgx_breaks_log10(c(1, 1000))
#' xgx_breaks_log10(c(0.001, 100))
#' xgx_breaks_log10(c(1e-4, 1e4))
#' xgx_breaks_log10(c(1e-9, 1e9))
#' xgx_breaks_log10(c(1, 2))
#' xgx_breaks_log10(c(1, 5))
#' xgx_breaks_log10(c(1, 10))
#' xgx_breaks_log10(c(1, 100))
#' xgx_breaks_log10(c(1, 1.01))
#' xgx_breaks_log10(c(1, 1.0001))
#' print(xgx_breaks_log10(c(1, 1.000001)), digits = 10)
#'
#' @importFrom labeling extended
#' @export
xgx_breaks_log10 <- function(data_range) {
data_min <- min(log10(data_range))
data_max <- max(log10(data_range))
n_breaks <- 5 # number of breaks to aim for
# preferred breaks, in log10-space
preferred_increment <- c(1, 0.5)
breaks <- labeling::extended(data_min, data_max, n_breaks, Q = preferred_increment)
breaks <- 10^breaks
# ensure that there are at least 2 breaks
# but also try to present "nice" breaks with only one significant digit
breaks1 <- unique(signif(breaks, 1))
breaks2 <- unique(signif(breaks, 2))
breaks3 <- unique(signif(breaks, 3))
if (length(breaks1) >= 2) {
breaks_out <- breaks1
} else if (length(breaks2) >= 2) {
breaks_out <- breaks2
} else if (length(breaks3) >= 2) {
breaks_out <- breaks3
} else {
breaks_out <- unique(breaks)
}
return(breaks_out)
}
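# Worked illustration of the "go by 3s" behavior noted above: half-decade
# breaks in log10 space land at 10^0.5 = 3.162..., and rounding to one
# significant digit turns them into the familiar 1, 3, 10, 30, 100 sequence.
signif(10^seq(0, 2, by = 0.5), 1)  # 1 3 10 30 100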
# file: xgxr/R/xgx_breaks_log10.R
#' Sets the default breaks for a time axis
#'
#' \code{xgx_breaks_time} sets the default breaks for a time axis,
#' given the units of the data and the units of the plot.
#' It is inspired by scales::extended_breaks
#'
#' For the extended breaks function, \code{weights} is a set of four weights for
#' \enumerate{
#' \item simplicity - how early in the Q order are you
#' \item coverage - labelings that don't extend outside the data:
#' range(data) / range(labels)
#' \item density (previously granularity) - how close to the number of ticks
#' do you get (default is 5)
#' \item legibility - has to do with fontsize and formatting to prevent
#' label overlap
#' }
#'
#' @references Talbot, Justin, Sharon Lin, and Pat Hanrahan.
#' "An extension of Wilkinson’s algorithm for positioning tick labels on axes."
#' IEEE Transactions on visualization and
#' computer graphics 16.6 (2010): 1036-1043.
#'
#' @param data_range range of the data
#' @param units_plot units to use in the plot
#' @param number_breaks number of breaks to aim for (default is 5)
#'
#' @return numeric vector of breaks
#'
#' @examples
#' xgx_breaks_time(c(0, 5), "h")
#' xgx_breaks_time(c(0, 6), "h")
#' xgx_breaks_time(c(-3, 5), "h")
#' xgx_breaks_time(c(0, 24), "h")
#' xgx_breaks_time(c(0, 12), "h")
#' xgx_breaks_time(c(1, 4), "d")
#' xgx_breaks_time(c(1, 12), "d")
#' xgx_breaks_time(c(1, 14), "d")
#' xgx_breaks_time(c(1, 50), "d")
#' xgx_breaks_time(c(1000, 3000), "d")
#' xgx_breaks_time(c(-21, 100), "d")
#' xgx_breaks_time(c(-1, 10), "w")
#'
#' @importFrom labeling extended
#' @export
xgx_breaks_time <- function(data_range, units_plot, number_breaks = 5) {
data_min <- min(data_range)
data_max <- max(data_range)
data_span <- data_max - data_min
preferred_increment_default <- c(1, 5, 2, 4, 3, 1)
weights_default <- c(0.25, 0.2, 0.5, 0.05)
weights_simple <- c(1, 0.2, 0.5, 0.05)
if (units_plot %in% c("h", "m") && data_span >= 48) {
preferred_increment <- c(24, 12, 6, 3)
weights <- weights_simple
} else if (units_plot %in% c("h", "m") && data_span >= 24) {
preferred_increment <- c(3, 12, 6, 2)
weights <- weights_simple
} else if (units_plot %in% c("h", "m") && data_span < 24) {
preferred_increment <- c(6, 3, 2, 1)
weights <- weights_simple
} else if (units_plot == "d" && data_span >= 12) {
preferred_increment <- c(7, 14, 28)
weights <- weights_simple
} else {
preferred_increment <- preferred_increment_default
weights <- weights_default
}
breaks <- labeling::extended(data_min, data_max, m = number_breaks,
Q = preferred_increment, w = weights)
return(breaks)
}
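# Illustrative call (exact values depend on labeling::extended's
# optimization): for an hourly axis spanning three days, the
# Q = c(24, 12, 6, 3) branch above steers breaks toward multiples of
# 12 or 24 hours.
xgx_breaks_time(c(0, 72), "h")  # e.g. 0, 24, 48, 72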
# file: xgxr/R/xgx_breaks_time.R
#' Check data for various issues
#'
#' \code{xgx_check_data} performs a series of checks on a PK or PKPD dataset.
#' It was inspired by the dataset preparation table from
#' \href{https://iqrtools.intiquan.com/doc/book/analysis-dataset-preparation.html}{IntiQuan}.
#'
#' The dataset must have the following columns
#' \itemize{
#' \item ID = unique subject identifier. USUBJID is another option
#' if ID is not there
#' \item EVID = event ID: 1 for dose, 0 otherwise
#' \item AMT = value of the dose
#' \item TIME = time of the measurement
#' \item DV = dependent value (linear scale); if DV is absent, LIDV and
#' then LNDV are checked instead
#' \item YTYPE = data measurement type for the dependent variable; if YTYPE
#' is absent, CMT is checked instead
#' }
#'
#' The dataset may also have additional columns
#' \itemize{
#' \item CENS = flag for censoring of the data because it's below the
#' limit of quantification (BLOQ)
#' \item MDV = missing dependent variable - will be counted and then
#' filtered out from the data check
#' }
#'
#' @param data the dataset to check; must contain the columns listed above
#' @param covariates the column names of the covariates to explore
#'
#' @return data.frame
#'
#' @examples
#' covariates <- c("WEIGHTB", "SEX")
#' check <- xgx_check_data(mad_missing_duplicates, covariates)
#'
#' @importFrom dplyr rename
#' @importFrom dplyr select
#' @importFrom dplyr filter
#' @importFrom dplyr count
#' @importFrom tibble tibble
#' @importFrom magrittr "%>%"
#' @importFrom dplyr group_by
#' @importFrom dplyr summarise
#' @importFrom dplyr transmute
#' @importFrom dplyr mutate
#' @importFrom dplyr ungroup
#' @importFrom dplyr summarise_all
#' @importFrom stats setNames
#' @importFrom dplyr bind_rows
#' @importFrom dplyr semi_join
#' @importFrom pander panderOptions
#' @importFrom pander pander
#' @importFrom utils head
#' @export
xgx_check_data <- function(data, covariates = NULL) {
# avoid CRAN note
ID <- EVID <- YTYPE <- MDV <- AMT <- DV <- TIME <- CENS <-
Value <- tot <- ntot <- pct <- Data_Check_Issue <- n <- NULL
# check for required column names in dataset
if (!("YTYPE" %in% names(data)) && ("CMT" %in% names(data))) {
warning("Setting YTYPE column equal to CMT\n")
data$YTYPE <- data$CMT
}
if (!("ID" %in% names(data)) && ("USUBJID" %in% names(data))) {
warning("Setting ID column equal to USUBJID\n")
data$ID <- data$USUBJID
}
if (!("DV" %in% names(data))) {
if ("LIDV" %in% names(data)) {
warning("Setting DV column equal to LIDV\n")
data$DV <- data$LIDV
} else if ("LNDV" %in% names(data)) {
warning("Setting DV column equal to LNDV\n")
data$DV <- data$LNDV
}
}
if (!("MDV" %in% names(data))) {
if ("EVID" %in% names(data)) {
data$MDV <- as.numeric(data$EVID != 0)
warning("Setting MDV column equal to as.numeric(EVID!=0)\n")
}
}
if (!("CENS" %in% names(data))) {
warning("Setting CENS column equal to 0\n")
data$CENS <- 0
}
required_names <- c("ID", "EVID", "AMT", "TIME", "DV", "YTYPE")
missing_cols <- setdiff(required_names, names(data))
if (length(missing_cols) > 0) {
missing_text <- paste(missing_cols, collapse = ",")
stop(paste0("These columns must be present in the dataset: ", missing_text))
}
# initialize output tibble
check <- list()
data_subset <- list()
i <- 0 #index for table
j <- 0 #index for list of data indices
# number of patients
num_patients <- length(unique(data$ID))
i <- i + 1
check[[i]] <- tibble::tibble(
Category = "Patients",
Description = "Number of Patients",
YTYPE = "-",
Statistic = paste0(num_patients),
Value = num_patients)
# number of patients with zero observations
  zero_obs <- data %>%
    dplyr::group_by(ID) %>%
    dplyr::summarise(n = sum(EVID == 0)) %>%
    dplyr::filter(n == 0)
num_zero_obs <- nrow(zero_obs)
i <- i + 1
check[[i]] <- tibble::tibble(
Category = "MDV",
Description = paste0("Number of patients with zero PK or PD observations"),
YTYPE = "all",
Statistic = paste0(num_zero_obs, " ", paste0(zero_obs$ID, collapse = ", ")),
Value = num_zero_obs)
# number of missing data points, to be filtered out from MDV
if ("MDV" %in% names(data)) {
mdv <- data %>%
dplyr::group_by(YTYPE) %>%
dplyr::summarise(n = sum(MDV == 1 & EVID == 0))
num_mdv <- sum(mdv$n)
if (num_mdv == 0) {
i <- i + 1
check[[i]] <- tibble::tibble(
Category = "MDV",
Description = paste0("Number of Missing Data Points (MDV==1 and EVID==0)"),
YTYPE = "all",
Statistic = "0",
Value = 0)
} else {
i <- i + 1
check[[i]] <- mdv %>%
dplyr::transmute(
Category = "MDV",
Description = paste0("Number of Missing Data Points (MDV==1 and EVID==0)"),
YTYPE = as.character(YTYPE),
Statistic = paste0(n),
Value = n)
message(paste0("removing ", nrow(num_mdv),
" points with MDV==1 & EVID==0 from dataset"))
data <- dplyr::filter(data, !(MDV == 1 & EVID == 0))
}
}
# number of doses
i <- i + 1
check[[i]] <- tibble::tibble(
Category = "Dose",
Description = paste0("Number of non-zero doses"),
YTYPE = "-",
Value = sum(data$AMT > 0),
Statistic = paste0(Value))
# number of zero doses
i <- i + 1
check[[i]] <- tibble::tibble(
Category = "Dose",
Description = paste0("Number of zero doses (AMT==0)"),
YTYPE = "-",
Value = sum(data$AMT == 0 & data$EVID == 1),
Statistic = paste0(Value))
# number of patients that have all zero doses or that never receive any dose
num_doses <- data %>%
dplyr::group_by(ID) %>%
dplyr::summarise(n = sum(AMT > 0))
i <- i + 1
check[[i]] <- tibble::tibble(
Category = "Dose",
Description = paste0("Number of patients that never received drug"),
YTYPE = "-",
Value = sum(num_doses$n == 0),
Statistic = paste0(Value))
# number of data points
num_datapoints <- data %>%
dplyr::group_by(ID, YTYPE) %>%
dplyr::count() %>%
dplyr::group_by(YTYPE) %>%
dplyr::summarise(tot = sum(n),
min = min(n),
median = median(n),
max = max(n))
i <- i + 1
check[[i]] <- num_datapoints %>%
dplyr::transmute(
Category = "DV",
Description = paste0("Number of Data Points"),
YTYPE = as.character(YTYPE),
Statistic = paste0(tot),
Value = tot)
i <- i + 1
check[[i]] <- num_datapoints %>%
dplyr::transmute(
Category = "DV",
Description = paste0("Number of Data Points per Individual"),
YTYPE = as.character(YTYPE),
Statistic = paste0("min = ", min, ", median = ", median,
", max = ", max),
Value = median)
# check for zero concentrations
  num_zero_datapoints <- data %>%
    dplyr::group_by(YTYPE) %>%
    dplyr::summarise(tot = sum(DV == 0 & MDV == 0, na.rm = TRUE))
i <- i + 1
check[[i]] <- num_zero_datapoints %>%
dplyr::transmute(
Category = "DV",
Description = paste0("Number of Data Points with zero value (DV==0)"),
YTYPE = as.character(YTYPE),
Statistic = paste0(tot),
Value = tot)
j <- j + 1
data_subset[[j]] <- data %>%
dplyr::filter(DV == 0 & MDV == 0) %>%
dplyr::mutate(Data_Check_Issue = "DV == 0")
# check for missing data
  num_na_datapoints <- data %>%
    dplyr::group_by(YTYPE) %>%
    dplyr::summarise(tot = sum(is.na(DV) & MDV == 0))
i <- i + 1
check[[i]] <- num_na_datapoints %>%
dplyr::transmute(
Category = "DV",
Description = paste0("Number of Data Points with NA (is.na(DV))"),
YTYPE = as.character(YTYPE),
Statistic = paste0(tot),
Value = tot)
j <- j + 1
data_subset[[j]] <- data %>%
dplyr::filter(is.na(DV) & MDV == 0) %>%
dplyr::mutate(Data_Check_Issue = "is.na(DV)")
# check for duplicate data
dup_time <- data %>%
dplyr::group_by(ID, YTYPE, TIME) %>%
dplyr::mutate(n = length(DV),
n = ifelse(n == 1, 0, n)) %>%
dplyr::ungroup()
i <- i + 1
check[[i]] <- dup_time %>%
dplyr::group_by(YTYPE) %>%
dplyr::summarise(ntot = sum(n)) %>%
dplyr::ungroup() %>%
dplyr::transmute(
Category = "DV+TIME",
Description = "Multiple measurements at same time",
YTYPE = as.character(YTYPE),
Statistic = paste0(ntot),
Value = ntot)
j <- j + 1
dup_time <- dup_time %>%
dplyr::filter(n >= 2)
  data_subset[[j]] <- data %>%
    dplyr::semi_join(dup_time, by = c("ID", "TIME", "YTYPE")) %>%
dplyr::mutate(Data_Check_Issue = "Duplicate Time Points")
# number of Censored data points
if ("CENS" %in% names(data)) {
num_cens <- data %>%
dplyr::group_by(YTYPE) %>%
dplyr::summarise(tot = sum(CENS == 1))
num_cens$pct <- round(num_cens$tot / num_datapoints$tot * 100)
i <- i + 1
check[[i]] <- num_cens %>%
dplyr::transmute(
Category = "CENS",
Description = paste0("Number of Censored Data Points"),
YTYPE = as.character(YTYPE),
Statistic = paste0(tot, " (", pct, "%)"),
Value = tot)
}
# columns with negative data
  neg <- data %>%
    dplyr::ungroup() %>%
    dplyr::select(DV, dplyr::all_of(covariates)) %>%
    dplyr::select_if(is.numeric) %>%
    dplyr::summarise_all(function(x) {sum(x < 0, na.rm = TRUE)})
nam <- names(neg)
neg <- neg %>%
as.numeric() %>%
stats::setNames(nam)
neg <- neg[neg > 0]
i <- i + 1
check[[i]] <- tibble::tibble(
Category = "All Columns",
Description = "Negative Values (number)",
YTYPE = "-",
Statistic = paste0(names(neg), ":", neg, collapse = ", "),
Value = sum(neg))
# columns with missing values
na <- data %>%
ungroup() %>%
dplyr::summarise_all(function(x) {sum(is.na(x))}) %>%
as.numeric() %>%
stats::setNames(names(data))
na <- na[na > 0]
i <- i + 1
check[[i]] <- tibble::tibble(
Category = "All Columns",
Description = "Missing Values (number)",
YTYPE = "-",
Statistic = paste0(names(na), ":", na, collapse = ", "),
Value = sum(na))
missing_summary <- check[[i]]$Statistic
# create summaries
check <- dplyr::bind_rows(check)
data_subset <- dplyr::bind_rows(data_subset) %>%
dplyr::select(Data_Check_Issue, ID, TIME, DV, CENS, YTYPE)
# covariates
cov_summary <- xgx_summarize_covariates(data, covariates)
# output
output <- list(summary = check,
cts_covariates = cov_summary$cts_covariates,
cat_covariates = cov_summary$cat_covariates,
data_subset = data_subset)
# print the summary
pander::panderOptions("table.split.table", Inf)
pander::panderOptions("table.split.cells", 60)
pander::panderOptions("table.alignment.default", "left")
cat("\nDATA SUMMARY\n")
pander::pander(check %>% dplyr::select(-Value))
if (length(output$cts_covariates) > 0) {
cat("CONTINUOUS COVARIATES\n")
pander::pander(output$cts_covariates)
} else {
cat("NO CONTINUOUS COVARIATES\n")
}
if (length(output$cat_covariates) > 0) {
cat("CATEGORICAL COVARIATES\n")
pander::panderOptions("table.split.cells", 100)
pander::pander(output$cat_covariates)
} else {
cat("NO CATEGORICAL COVARIATES\n")
}
  if (nrow(data_subset) > 0 && nrow(data_subset) <= 6) {
    cat("POSSIBLE DATA ISSUES IN THE FOLLOWING RECORDS\n")
    pander::pander(data_subset)
  } else if (nrow(data_subset) > 6) {
    cat("POSSIBLE DATA ISSUES - FIRST 6 RECORDS\n")
    pander::pander(utils::head(data_subset))
  }
cat("The following columns contained missing values\n")
cat(missing_summary)
return(output)
}
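# Hedged usage sketch with a hand-built toy dataset (hypothetical values;
# only the required columns are supplied, so MDV and CENS are derived with
# warnings, as coded above):
toy <- data.frame(ID = rep(1:2, each = 3),
                  TIME = rep(0:2, 2),
                  EVID = rep(c(1, 0, 0), 2),
                  AMT = rep(c(100, 0, 0), 2),
                  DV = c(NA, 5.2, 3.1, NA, 4.8, 2.2),
                  YTYPE = 1)
# check <- xgx_check_data(toy)  # prints the data summary tables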
# file: xgxr/R/xgx_check_data.R
#' Append filenames to bottom of the plot
#'
#' \code{xgx_dirs2char} returns a character string based on the dirs list.
#' The resulting caption gives the parent directory, script, and filename
#'
#' @param dirs list containing directories and filenames. It must contain
#' five fields
#' \enumerate{
#' \item parent_dir = Parent directory containing the Rscript and the Results
#' folder
#' \item rscript_dir = Subdirectory of parent_dir that contains the Rscript
#' used to generate the figure
#' \item rscript_name = Name of the Rscript used to generate the figure
#' \item results_dir = Subdirectory of parent_dir where the figure is stored
#' \item filename = Filename
#' }
#' @param include_time is logical with default TRUE. If TRUE, it includes
#' date / time in the output character
#'
#' @return character
#'
#' @examples
#' dirs <- list(parent_dir = "/your/parent/path/",
#' rscript_dir = "./Rscripts/",
#' rscript_name = "Example.R",
#' results_dir = "./Results/",
#' filename = "your_file_name.png")
#' caption <- xgx_dirs2char(dirs)
#'
#' @export
xgx_dirs2char <- function(dirs, include_time = TRUE) {
  # check that dirs is a list containing all of the required fields
  if (typeof(dirs) != "list") {
    stop("dirs variable must be a list")
  }
missing_filenames <- setdiff(c("parent_dir", "rscript_dir", "rscript_name",
"results_dir", "filename"),
names(dirs))
  if (length(missing_filenames) > 0) {
    stop(paste("Fields missing from dirs:",
               paste(missing_filenames, collapse = ", ")))
  }
output <- paste0(dirs$parent_dir, "\n",
file.path(dirs$rscript_dir, dirs$rscript_name), "\n",
file.path(dirs$results_dir, dirs$filename))
if (include_time) {
output <- paste0(output, "\n", "Created: ", Sys.time())
}
return(output)
}
# file: xgxr/R/xgx_dirs2char.R
#' Plot data with mean and confidence intervals
#'
#' @inheritParams xgx_stat_ci
#' @return ggplot2 plot layer
#'
#' @examples
#' data <- data.frame(x = rep(c(1, 2, 3), each = 20),
#' y = rep(c(1, 2, 3), each = 20) + stats::rnorm(60))
#' ggplot2::ggplot(data, ggplot2::aes(x = x, y = y)) +
#' xgx_geom_ci(conf_level = 0.95)
#'
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom stats rnorm
#' @export
xgx_geom_ci <- function(mapping = NULL, data = NULL, conf_level = 0.95,
distribution = "normal",
bins = NULL,
breaks = NULL,
geom = list("point", "line", "errorbar"),
position = "identity",
fun.args = list(),
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
...) {
return(list(xgx_stat_ci(mapping = mapping,
data = data,
conf_level = conf_level,
distribution = distribution,
bins = bins,
breaks = breaks,
geom = geom,
position = position,
fun.args = fun.args,
na.rm = na.rm,
show.legend = show.legend,
inherit.aes = inherit.aes,
...)))
}
# file: xgxr/R/xgx_geom_ci.R
#' Plot data with median and percent intervals
#'
#' @inheritParams xgx_stat_pi
#' @return ggplot2 plot layer
#'
#' @examples
#' data <- data.frame(x = rep(c(1, 2, 3), each = 20),
#' y = rep(c(1, 2, 3), each = 20) + stats::rnorm(60))
#' ggplot2::ggplot(data, ggplot2::aes(x = x, y = y)) +
#' xgx_geom_pi(percent_level = 0.95)
#'
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom stats rnorm
#' @export
xgx_geom_pi <- function(mapping = NULL, data = NULL, percent_level = 0.95,
geom = list("line", "ribbon"),
position = "identity",
fun.args = list(),
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
...) {
return(list(xgx_stat_pi(mapping = mapping,
data = data,
percent_level = percent_level,
geom = geom,
position = position,
fun.args = fun.args,
na.rm = na.rm,
show.legend = show.legend,
inherit.aes = inherit.aes,
...)))
}
# file: xgxr/R/xgx_geom_pi.R
#' Nice labels for log10.
#'
#' Returns a set of labels for ggplot
#'
#' @param breaks the breaks of the scale to label
#'
#' @return either character or expression
#'
#' @examples
#' print(xgx_labels_log10(c(1e-5, 1, 1e5)))
#'
#' @export
xgx_labels_log10 <- function(breaks) {
  labels <- as.character(breaks)
  # for breaks that are pure powers of 10 spanning an extreme range,
  # use scientific notation rather than writing out all of the zeros
  if (all(log10(breaks) == as.integer(log10(breaks)), na.rm = TRUE)
      && (min(breaks, na.rm = TRUE) < 0.001 ||
          max(breaks, na.rm = TRUE) > 9999)) {
    labels <- format(breaks, scientific = TRUE)
  }
  return(labels)
}
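# The power-of-ten test used above, in isolation: integer log10 values
# identify breaks that are pure powers of ten.
log10(c(1, 10, 100)) == as.integer(log10(c(1, 10, 100)))  # TRUE TRUE TRUE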
# file: xgxr/R/xgx_labels_log10.R
#' Sets the default minor_breaks for log10 scales
#'
#' \code{xgx_minor_breaks_log10} sets nice minor_breaks for log10 scale.
#'
#'
#' @param data_range range of the data
#'
#' @return numeric vector of breaks
#'
#' @examples
#' xgx_minor_breaks_log10(c(1, 1000))
#' xgx_minor_breaks_log10(c(0.001, 100))
#' xgx_minor_breaks_log10(c(1e-4, 1e4))
#' xgx_minor_breaks_log10(c(1e-9, 1e9))
#' xgx_minor_breaks_log10(c(1, 2))
#' xgx_minor_breaks_log10(c(1, 5))
#' xgx_minor_breaks_log10(c(1, 10))
#' xgx_minor_breaks_log10(c(1, 100))
#' xgx_minor_breaks_log10(c(1, 1.01))
#' xgx_minor_breaks_log10(c(1, 1.0001))
#' print(xgx_minor_breaks_log10(c(1, 1.000001)), digits = 10)
#'
#' @importFrom labeling extended
#' @export
xgx_minor_breaks_log10 <- function(data_range) {
  r1 <- range(log10(data_range))
  r <- r1
  r[1] <- floor(r[1])
  r[2] <- ceiling(r[2]) + 1
  minor_breaks <- c()
  # within each decade, place minor breaks at 2, 3, ..., 9 times 10^(i - 1)
  for (i in seq(r[1], r[2])) {
    minor_breaks <- c(minor_breaks, seq(2 * 10^(i - 1), 10^i - 10^(i - 1),
                                        by = 10^(i - 1)))
  }
  # keep only the minor breaks that fall within the data range
  minor_breaks <- minor_breaks[minor_breaks <= 10^r1[2]]
  minor_breaks <- minor_breaks[minor_breaks >= 10^r1[1]]
  return(minor_breaks)
}
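# Worked example of the loop above: within c(1, 100), the minor breaks are
# the 2..9 multiples of each decade that fall inside the range.
xgx_minor_breaks_log10(c(1, 100))  # 2 3 4 5 6 7 8 9 20 30 ... 90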
# file: xgxr/R/xgx_minor_breaks_log10.R
#' Create a new xgx plot
#'
#' @param data Default dataset to use for plot. If not already a data.frame,
#' will be converted to one by fortify.
#' @param mapping As in ggplot2; Default list of aesthetic mappings to use
#' for plot. Must define x, y, and group for xgx_spaghetti.
#' @param ... Other arguments passed on to methods. Not currently used.
#' @param environment If a variable defined in the aesthetic mapping is not
#' found in the data, ggplot will look for it in this environment. It defaults
#' to the environment in which \code{\link[ggplot2]{ggplot}} is called.
#'
#' @return ggplot2 object
#'
#' @examples
#' time <- rep(seq(1, 10), 5)
#' id <- sort(rep(seq(1, 5), 10))
#' conc <- exp(-time) * sort(rep(stats::rlnorm(5), 10))
#'
#' data <- data.frame(time = time, concentration = conc, id = id)
#' xgx_plot(data = data,
#' mapping = ggplot2::aes(x = time, y = concentration, group = id)) +
#' ggplot2::geom_line() +
#' ggplot2::geom_point()
#'
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 geom_line
#' @importFrom ggplot2 geom_point
#' @importFrom stats rlnorm
#' @export
xgx_plot <- function(data = NULL, mapping = ggplot2::aes(), ...,
environment = parent.frame()) {
gg <- ggplot2::ggplot(data = data, mapping = mapping, ...,
environment = environment) +
xgx_theme()
return(gg)
}
# file: xgxr/R/xgx_plot.R
#' Saving plot, automatically annotating the status and denoting the filenames
#'
#' @param width width of plot
#' @param height height of plot
#' @param dirs list of directories. If NULL or if directories missing, there
#' is default behavior below
#'
#' \enumerate{
#' \item parent_dir = Parent directory containing the Rscript and the Results
#' folder, default getwd()
#' \item rscript_dir = Subdirectory of parent_dir that contains the Rscript
#' used to generate the figure, default "./"
#' \item rscript_name = Name of the Rscript used to generate the figure,
#' default "Name_Of_Script_Here.R"
#' \item results_dir = Subdirectory of parent_dir where the figure is stored,
#' default "./"
#' \item filename_prefix = prefix of filename to be appended to filename_main
#' }
#'
#' @param filename_main main part of the filename, excluding prefix and suffix.
#' no default
#' @param status status to be annotated
#' @param g ggplot plot object, default is ggplot::last_plot()
#' @param filetype file extension for the saved figure (e.g. "png", "pdf")
#' @param status_x x location of the status in plot
#' @param status_y y location of the status in plot
#' @param status_fontcolor font color for status in plot
#' @param status_fontsize font size for status in plot
#' @param filenames_fontcolor font color for filenames info in plot
#' @param filenames_fontsize font size for filenames info in plot
#'
#' @return ggplot2 plot object
#'
#' @examples
#' directory = tempdir()
#' dirs <- list(parent_dir = directory,
#' rscript_dir = directory,
#' rscript_name = "example.R",
#' results_dir = directory,
#' filename_prefix = "example_")
#' data <- data.frame(x = 1:1000, y = stats::rnorm(1000))
#' ggplot2::ggplot(data = data, ggplot2::aes(x = x, y = y)) +
#' ggplot2::geom_point()
#' xgx_save(4, 4, dirs, "Example", "DRAFT")
#'
#' @importFrom ggplot2 last_plot
#' @importFrom ggplot2 ggsave
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 geom_point
#' @importFrom stats rnorm
#' @export
xgx_save <- function(width,
height,
dirs = NULL,
filename_main = NULL,
status = "DRAFT",
g = ggplot2::last_plot(),
filetype = "png",
status_x = Inf,
status_y = Inf,
status_fontsize = 7,
status_fontcolor = "grey",
filenames_fontsize = 11,
filenames_fontcolor = "black") {
if (typeof(dirs)!="list") {
stop("dirs variable must be a list")
}
if (is.null(dirs$parent_dir)) {
stop("The parent directory for your programs and results must be specified: dirs$parent_dir")
}
if (is.null(dirs$rscript_dir)) {
stop("The project directory where your R scripts are stored must be specified: dirs$rscript_dir")
}
if (is.null(dirs$rscript_name)) {
stop("The name of the R script that saves this plot must be specified: dirs$rscript_name")
}
if (is.null(dirs$results_dir)) {
stop("The results directory where your outputs are stored must be specified: dirs$results_dir")
}
if (is.null(dirs$filename_prefix)) {
dirs$filename_prefix <- ""
}
if (is.null(filename_main)) {
filename_main <- "unnamed_graph_"
}
filedir <- file.path(dirs$results_dir)
dirs$filename <- paste0(dirs$filename_prefix, filename_main, ".", filetype)
g <- g + xgx_annotate_filenames(dirs,
color = filenames_fontcolor,
size = filenames_fontsize)
g <- g + xgx_annotate_status(status, x = status_x, y = status_y,
color = status_fontcolor,
fontsize = status_fontsize)
ggplot2::ggsave(plot = g, width = width, height = height,
file.path(filedir, dirs$filename))
return(g)
}
# file: xgxr/R/xgx_save.R
#' Saving table as a csv file, also labeling the program that created the
#' table and where the table is stored
#'
#' @param data data.frame or table of results
#' @param dirs list of directories. If NULL or if directories missing, there
#' is default behavior below
#'
#' \enumerate{
#' \item parent_dir = Parent directory containing the Rscript and the
#' Results folder, default getwd()
#' \item rscript_dir = Subdirectory of parent_dir that contains the Rscript
#' used to generate the figure, default "./"
#'   \item rscript_name = Name of the Rscript used to generate the figure,
#'   default "Name_Of_Script_Here.R"
#'   \item results_dir = Subdirectory of parent_dir where the figure is stored,
#'   default "./"
#' \item filename_prefix = prefix of filename to be appended to filename_main
#' }
#' @param filename_main main part of the filename, excluding prefix and
#' extension. no default
#'
#' @return data.frame of the table with the caption rows appended
#'
#' @examples
#' directory = tempdir()
#' dirs <- list(parent_dir = directory,
#' rscript_dir = directory,
#' rscript_name = "example.R",
#' results_dir = directory,
#' filename_prefix = "example_")
#' data <- data.frame(x = c(1, 2), y = c(1, 2))
#' xgx_save_table(data, dirs = dirs, filename_main = "test")
#'
#' @importFrom dplyr bind_rows
#' @importFrom dplyr mutate_all
#' @importFrom utils write.csv
#' @importFrom magrittr "%>%"
#' @export
xgx_save_table <- function(data, dirs = NULL, filename_main = NULL) {
if (is.null(dirs$parent_dir)) {
stop("The parent directory for your programs and results must be specified: dirs$parent_dir")
}
if (is.null(dirs$rscript_dir)) {
stop("The project directory where your R scripts are stored must be specified: dirs$rscript_dir")
}
if (is.null(dirs$rscript_name)) {
stop("The name of the R script that saves this plot must be specified: dirs$rscript_name")
}
if (is.null(dirs$results_dir)) {
stop("The results directory where your outputs are stored must be specified: dirs$results_dir")
}
if (is.null(dirs$filename_prefix)) {
dirs$filename_prefix <- ""
}
  if (is.null(filename_main)) {
filename_main <- "unnamed_table_"
}
dirs$filename <- paste0(dirs$filename_prefix, filename_main, ".csv")
caption <- c("", dirs$parent_dir,
file.path(dirs$rscript_dir, dirs$rscript_name),
file.path(dirs$results_dir, dirs$filename),
file.path("Created: ", Sys.time()))
  # build five blank rows matching the table's columns, then fill the first
  # column with the caption lines
  caption_row <- data[1, ] %>%
    dplyr::mutate_all(function(x) {x <- ""})
  caption_row <- dplyr::bind_rows(caption_row, caption_row, caption_row,
                                  caption_row, caption_row)
  caption_row[, 1] <- caption
data_append <- data %>%
dplyr::mutate_all(as.character) %>%
dplyr::bind_rows(caption_row)
utils::write.csv(data_append, file.path(dirs$results_dir, dirs$filename),
quote = FALSE, row.names = FALSE)
return(data_append)
}
# file: xgxr/R/xgx_save_table.R
#' log10 scales the x axis with a "pretty" set of breaks
#'
#' \code{xgx_scale_x_log10} is similar to
#' \code{\link[ggplot2:scale_continuous]{scale_x_log10}},
#' but it uses what we believe to be a nicer spacing and set of tick marks.
#' It can be used the same way as
#' \code{\link[ggplot2:scale_continuous]{scale_x_log10}}
#'
#' @param breaks major breaks, default is a function defined here
#' @param minor_breaks minor breaks, default is a function defined here
#' @param labels function for setting the labels, defined here
#' @param ... other arguments passed to
#' \code{\link[ggplot2:scale_continuous]{scale_x_log10}}
#'
#' @return ggplot2 compatible scale object
#'
#' @examples
#' conc <- 10^(seq(-3, 3, by = 0.1))
#' ec50 <- 1
#' data <- data.frame(concentration = conc,
#' bound_receptor = 1 * conc / (conc + ec50))
#' ggplot2::ggplot(data, ggplot2::aes(x = concentration, y = bound_receptor)) +
#' ggplot2::geom_point() +
#' ggplot2::geom_line() +
#' xgx_scale_x_log10() +
#' xgx_scale_y_reverselog10()
#'
#' @importFrom ggplot2 scale_x_log10
#' @export
xgx_scale_x_log10 <- function(breaks = xgx_breaks_log10,
minor_breaks = NULL,
labels = xgx_labels_log10,
...) {
if (is.null(minor_breaks)) {
minor_breaks <- function(x) xgx_minor_breaks_log10(x)
}
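  # try the custom breaks; on error, fall back to ggplot2's default scale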
ret <- try(list(ggplot2::scale_x_log10(..., breaks = breaks,
minor_breaks = minor_breaks,
labels = labels)),
silent = TRUE)
if (inherits(ret, "try-error")) {
return(ggplot2::scale_x_log10(...))
} else {
return(ret)
}
}
# file: xgxr/R/xgx_scale_x_log10.R
#' Reverse-log transform for the x scale.
#'
#' \code{xgx_scale_x_reverselog10} is designed to be used with data that
#' approaches 100%.
#' A common example is receptor occupancy in drug development.
#' It is used when you want even spacing between 90, 99, 99.9, etc.
#'
#' @param labels if NULL, then the default is to use scales::percent()
#' @param accuracy if NULL, then use the default from scales::percent();
#' to round to the hundredths place, set accuracy = 0.01
#' @param ... other parameters passed to
#' \code{\link[ggplot2:scale_continuous]{scale_x_continuous}}
#'
#' @return ggplot2 compatible scale object
#'
#' @examples
#' conc <- 10^(seq(-3, 3, by = 0.1))
#' ec50 <- 1
#' data <- data.frame(concentration = conc,
#' bound_receptor = 1 * conc / (conc + ec50))
#' ggplot2::ggplot(data, ggplot2::aes(y = concentration, x = bound_receptor)) +
#' ggplot2::geom_point() +
#' ggplot2::geom_line() +
#' xgx_scale_y_log10() +
#' xgx_scale_x_reverselog10()
#'
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 geom_point
#' @importFrom ggplot2 geom_line
#' @importFrom ggplot2 scale_x_continuous
#' @importFrom scales percent_format
#' @importFrom scales trans_new
#' @export
xgx_scale_x_reverselog10 <- function(labels = NULL, accuracy = NULL, ...) {
reverselog <- scales::trans_new(
name = "reverselog",
transform = function(x) -log10(1 - x),
inverse = function(x) 1 - 10^-x,
breaks = function(x) c(0, c(100 - 10^(-100:1))) / 100)
if (is.null(labels)) {
labels = scales::percent_format(accuracy = accuracy)
}
ggplot2::scale_x_continuous(trans = reverselog,
labels = labels, ...)
}
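# Quick check of the reverse-log transform above: percentages approaching
# 100% become evenly spaced, e.g. 90%, 99%, 99.9% map to 1, 2, 3.
-log10(1 - c(0.9, 0.99, 0.999))  # 1 2 3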
# file: xgxr/R/xgx_scale_x_reverselog10.R
xgx_scale_time_units_ <- function(units_dataset, units_plot = NULL,
breaks = NULL,
labels = NULL, ...) {
# h = hours, d = days, w = weeks, m = months, y = years
if (is.null(units_plot)) {
units_plot <- units_dataset
}
  # allow the user to write out a longer string for the units;
  # only the first letter is used
units_plot <- units_plot %>%
tolower() %>%
substr(1, 1)
units_dataset <- units_dataset %>%
tolower() %>%
substr(1, 1)
if (!(units_dataset %in% c("h", "d", "w", "m", "y"))) {
stop("units_dataset must be hours, days, weeks, months, or years")
}
if (!(units_plot %in% c("h", "d", "w", "m", "y"))) {
stop("units_plot must be hours, days, weeks, months, or years")
}
day_scale <- data.frame(
h = 1 / 24,
d = 1,
w = 7,
m = 30.4375,
y = 365.25
)
input_scale <- day_scale[[units_dataset]]
output_scale <- day_scale[[units_plot]]
scale_factor <- output_scale / input_scale
if (is.null(breaks)) {
breaks <- function(data_range) {
xgx_breaks_time(data_range / scale_factor, units_plot) * scale_factor
}
}
if (is.null(labels)) {
labels <- function(breaks) {
breaks / scale_factor
}
}
xlabel_list <- data.frame(
h = "Hour",
d = "Day",
w = "Week",
m = "Month",
y = "Year"
)
xlabel <- paste0("Time (", xlabel_list[[units_plot]], "s)")
return(list(breaks = breaks, labels = labels, xlabel = xlabel))
}
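# Worked example of the conversion above: hourly data plotted in weeks gives
# scale_factor = 7 / (1 / 24) = 168, so a break labeled "1" (week) sits at
# x = 168 in the dataset's hour units.
7 / (1 / 24)  # 168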
#' Convert time units for plotting
#'
#' \code{xgx_scale_x_time_units} converts x axis scale from one time unit
#' to another.
#' Supported units include hours, days, weeks, months, and years, which
#' can also be called using just the first letter (h, d, w, m, y).
#'
#' Note: \code{xgx_scale_x_time_units} only scales the plot axis, all other
#' specifications must be on the original scale of the dataset (e.g. breaks,
#' position, width)
#'
#' @param units_dataset units of the input dataset, must be specified by user
#' as "h", "d", "w", "m", or "y"
#' @param units_plot units of the plot, will be units of the dataset if empty
#' @inheritParams ggplot2::continuous_scale
#' @param ... other parameters for
#' \code{\link[ggplot2:scale_continuous]{scale_x_continuous}}
#'
#' @return ggplot2 compatible scale object
#'
#' @examples
#' data <- data.frame(x = 1:1000, y = rnorm(1000))
#' ggplot2::ggplot(data = data, ggplot2::aes(x = x, y = y)) +
#' ggplot2::geom_point() +
#' xgx_scale_x_time_units(units_dataset = "hours", units_plot = "weeks")
#' @importFrom magrittr "%>%"
#' @importFrom ggplot2 scale_x_continuous
#' @importFrom ggplot2 xlab
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 geom_point
#' @export
xgx_scale_x_time_units <- function(units_dataset, units_plot = NULL,
breaks = NULL,
labels = NULL, ...) {
# h = hours, d = days, w = weeks, m = months, y = years
lst <- xgx_scale_time_units_(units_dataset, units_plot, breaks, labels, ...)
return(list(
ggplot2::scale_x_continuous(
breaks = lst$breaks,
labels = lst$labels, ...
),
ggplot2::xlab(lst$xlabel)
))
}
#' @rdname xgx_scale_x_time_units
#' @export
xgx_scale_y_time_units <-
function(units_dataset, units_plot = NULL, breaks = NULL, labels = NULL,
...) {
lst <- xgx_scale_time_units_(units_dataset, units_plot, breaks, labels, ...)
return(list(ggplot2::scale_y_continuous(
breaks = lst$breaks,
labels = lst$labels, ...
), ggplot2::ylab(lst$xlabel)))
}
# file: xgxr/R/xgx_scale_x_time_units.R
#' log10 scales the y axis with a "pretty" set of breaks
#'
#' \code{xgx_scale_y_log10} is similar to
#' \code{\link[ggplot2:scale_continuous]{scale_y_log10}},
#' but it uses what we believe to be a nicer spacing and set of tick marks.
#' It can be used the same way as
#' \code{\link[ggplot2:scale_continuous]{scale_y_log10}}
#'
#' @param breaks major breaks, default is a function defined here
#' @param minor_breaks minor breaks, default is a function defined here
#' @param labels function for setting the labels, defined here
#' @param ... other arguments passed to
#' \code{\link[ggplot2:scale_continuous]{scale_y_log10}}
#'
#' @return ggplot2 compatible scale object
#'
#' @examples
#' conc <- 10^(seq(-3, 3, by = 0.1))
#' ec50 <- 1
#' data <- data.frame(concentration = conc,
#' bound_receptor = 1 * conc / (conc + ec50))
#' ggplot2::ggplot(data, ggplot2::aes(y = concentration, x = bound_receptor)) +
#' ggplot2::geom_point() +
#' ggplot2::geom_line() +
#' xgx_scale_y_log10() +
#' xgx_scale_x_reverselog10()
#'
#' @importFrom ggplot2 scale_y_log10
#' @export
xgx_scale_y_log10 <- function(breaks = xgx_breaks_log10,
minor_breaks = NULL,
labels = xgx_labels_log10,
...) {
if (is.null(minor_breaks)) {
minor_breaks <- function(x) xgx_minor_breaks_log10(x)
}
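  # try the custom breaks; on error, fall back to ggplot2's default scale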
ret <- try(list(ggplot2::scale_y_log10(..., breaks = breaks,
minor_breaks = minor_breaks,
labels = labels)),
silent = TRUE)
if (inherits(ret, "try-error")) {
return(ggplot2::scale_y_log10(...))
} else {
return(ret)
}
}
# file: xgxr/R/xgx_scale_y_log10.R
#' Percent change log10 transform for the x and y scales.
#'
#' \code{xgx_scale_y_percentchangelog10} and \code{xgx_scale_x_percentchangelog10} are designed
#' to be used with percent change (PCHG) from baseline data (on a scale of -1 to +Inf).
#' Common examples include % weight loss, % reduction in LDL, % change in tumor diameter.
#' It is used when you have a wide range of data on a percent change scale,
#' especially data close to -100%, and/or several fold increase from baseline.
#'
#' @param breaks if NULL, then default is to use a variant of
#' 2^(labeling::extended(log2(PCHG + 1))) - 1, where PCHG represents the range of the data
#' @param minor_breaks if NULL, then default is to use nicely spaced log10(PCHG + 1) minor breaks
#' @param labels if NULL, then the default is to use scales::percent_format()
#' @param accuracy accuracy to use with scales::percent_format(), if NULL,
#' then the default is set to 1
#' @param n_breaks number of desired breaks, if NULL, then the default is set to 7
#' @param ... other parameters passed to
#' \code{\link[ggplot2:scale_continuous]{scale_y_continuous}}
#'
#' @return ggplot2 compatible scale object
#'
#' @examples
#' dat1 <- data.frame(x = rnorm(100), PCHG = exp(rnorm(100)) - 1)
#'
#' ggplot2::ggplot(dat1, ggplot2::aes(x = x, y = PCHG)) +
#' ggplot2::geom_point() +
#' xgx_theme() +
#' xgx_scale_y_percentchangelog10()
#'
#' @importFrom scales trans_new
#' @importFrom scales percent_format
#' @importFrom labeling extended
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 geom_boxplot
#' @importFrom ggplot2 scale_y_continuous
#' @importFrom dplyr mutate
#' @export
xgx_scale_y_percentchangelog10 <- function(breaks = NULL,
minor_breaks = NULL,
labels = NULL,
accuracy = 1,
n_breaks = 7,
...) {
if (is.null(breaks)){
breaks <- function(data_range) {
r <- range(log2(data_range + 1))
breaks <- 2^(labeling::extended(r[1], r[2], m = n_breaks, Q = c(1,2,4,8))) - 1
return(breaks)
}
}
if (is.null(minor_breaks)) {
minor_breaks <- function(x) xgx_minor_breaks_log10(x + 1) - 1
}
percentchangelog <- scales::trans_new(
name = "percentchangelog",
transform = function(x) log10(x + 1),
inverse = function(x) 10^(x) - 1)
if (is.null(labels)) {
labels = scales::percent_format(accuracy = accuracy)
}
ggplot2::scale_y_continuous(trans = percentchangelog,
labels = labels,
minor_breaks = minor_breaks,
breaks = breaks, ...)
}
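# Quick check of the transform above: on the log10(x + 1) scale, a 50%
# reduction and a 100% increase sit at equal distances from no change.
log10(c(-0.5, 0, 1) + 1)  # -0.301, 0, 0.301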
#' @rdname xgx_scale_y_percentchangelog10
#'
#'
#' @importFrom scales trans_new
#' @importFrom scales percent_format
#' @importFrom labeling extended
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 geom_boxplot
#' @importFrom ggplot2 scale_y_continuous
#' @importFrom dplyr mutate
#' @export
xgx_scale_x_percentchangelog10 <- function(breaks = NULL,
minor_breaks = NULL,
labels = NULL,
accuracy = 1,
n_breaks = 7,
...) {
if (is.null(breaks)){
breaks <- function(data_range) {
r <- range(log2(data_range + 1))
breaks <- 2^(labeling::extended(r[1], r[2], m = n_breaks, Q = c(1,2,4,8))) - 1
return(breaks)
}
}
if (is.null(minor_breaks)) {
minor_breaks <- function(x) xgx_minor_breaks_log10(x + 1) - 1
}
percentchangelog <- scales::trans_new(
name = "percentchangelog",
transform = function(x) log10(x + 1),
inverse = function(x) 10^(x) - 1)
if (is.null(labels)) {
labels = scales::percent_format(accuracy = accuracy)
}
ggplot2::scale_x_continuous(trans = percentchangelog,
labels = labels,
minor_breaks = minor_breaks,
breaks = breaks, ...)
}
# file: xgxr/R/xgx_scale_y_percentchangelog10.R
#' Reverse-log transform for the y scale.
#'
#' \code{xgx_scale_y_reverselog10} is designed to be used with data
#' that approaches 100%.
#' A common example is receptor occupancy in drug development.
#' It is used when you want even spacing between 90, 99, 99.9, etc.
#'
#' @param labels if NULL, then the default is to use scales::percent()
#' @param accuracy if NULL, then use the default from scales::percent();
#' to round to the hundredths place, set accuracy = 0.01
#' @param ... other parameters passed to
#' \code{\link[ggplot2:scale_continuous]{scale_y_continuous}}
#'
#' @return ggplot2 compatible scale object
#'
#' @examples
#' conc <- 10^(seq(-3, 3, by = 0.1))
#' ec50 <- 1
#' data <- data.frame(concentration = conc,
#' bound_receptor = 1 * conc / (conc + ec50))
#' ggplot2::ggplot(data, ggplot2::aes(x = concentration, y = bound_receptor)) +
#' ggplot2::geom_point() +
#' ggplot2::geom_line() +
#' xgx_scale_x_log10() +
#' xgx_scale_y_reverselog10()
#'
#' @importFrom scales trans_new
#' @importFrom scales percent_format
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 geom_point
#' @importFrom ggplot2 geom_line
#' @importFrom ggplot2 scale_y_continuous
#' @export
xgx_scale_y_reverselog10 <- function(labels = NULL, accuracy = NULL, ...) {
reverselog <- scales::trans_new(
name = "reverselog",
transform = function(x) -log10(1 - x),
inverse = function(x) 1 - 10^-x,
breaks = function(x) c(0, c(100 - 10^(-100:1))) / 100)
if (is.null(labels)) {
labels = scales::percent_format(accuracy = accuracy)
}
ggplot2::scale_y_continuous(trans = reverselog,
labels = labels, ...)
}
# file: xgxr/R/xgx_scale_y_reverselog10.R
#' Plot data with mean and confidence intervals
#'
#' \code{xgx_stat_ci} returns a ggplot layer plotting mean +/- confidence
#' intervals
#'
#' This function can be used to generate mean +/- confidence interval plots
#' for different distributions,
#' and multiple geoms with a single function call.
#'
#' @param mapping Set of aesthetic mappings created by `aes` or `aes_`.
#' If specified and `inherit.aes = TRUE` (the default), it is combined with the
#' default mapping at the top level of the plot. You must supply mapping if
#' there is no plot mapping.
#' @param data The data to be displayed in this layer. There are three options:
#'
#' If NULL, the default, the data is inherited from the plot data as specified
#' in the call to ggplot.
#'
#' A data.frame, or other object, will override the plot data. All objects
#' will be fortified to produce a data frame. See fortify for which variables
#' will be created.
#'
#' A function will be called with a single argument, the plot data. The return
#' value must be a data.frame., and will be used as the layer data.
#' @param conf_level The percentile for the confidence interval (should fall
#' between 0 and 1). The default is 0.95, which corresponds to a 95 percent
#' confidence interval.
#' @param distribution The distribution which the data follow, used for
#' calculating confidence intervals. The options are "normal", "lognormal",
#' and "binomial". The "normal" option will use the Student t Distribution
#' to calculate confidence intervals, the "lognormal" option will transform
#' data to the log space first. The "binomial" option will use the
#' \code{\link[binom:binom.confint]{binom.exact}} function to calculate the
#' confidence
#' intervals. Note: binomial data must be numeric and contain only 1's and 0's.
#' @param bins number of bins to cut up the x data, cuts data into quantiles.
#' @param breaks breaks to cut up the x data, if this option is used, bins is ignored
#' @param geom Use to override the default geom. Can be a list of multiple
#' geoms, e.g. list("point","line","errorbar"), which is the default.
#' @param position Position adjustment, either as a string, or the result of
#' a call to a position adjustment function.
#' @param fun.args Optional additional arguments passed on to the functions.
#' @param fun.data A function that is given the complete data and should return
#' a data frame with variables ymin, y, and ymax.
#' @param na.rm If FALSE, the default, missing values are removed with a
#' warning. If TRUE, missing values are silently removed.
#' @param orientation The orientation of the layer, passed on to ggplot2::stat_summary.
#' Only implemented for ggplot2 v.3.3.0 and later. The default ("x") summarizes y values over
#' x values (same behavior as ggplot2 v.3.2.1 or earlier). Setting \code{orientation = "y"} will
#' summarize x values over y values, which may be useful in some situations where you want to flip
#' the axes, e.g. to create forest plots. Setting \code{orientation = NA} will try to automatically
#' determine the orientation from the aesthetic mapping (this is more stable for ggplot2 v.3.3.2
#' compared to v.3.3.0).
#' See \code{\link[ggplot2:stat_summary]{stat_summary}} (v.3.3.0 or greater) for more information.
#' @param show.legend logical. Should this layer be included in the legends?
#' NA, the default, includes if any aesthetics are mapped. FALSE never
#' includes, and TRUE always includes.
#' @param inherit.aes If FALSE, overrides the default aesthetics, rather
#' than combining with them. This is most useful for helper functions that
#' define both data and aesthetics and shouldn't inherit behaviour from the
#' default plot specification, e.g. borders.
#' @param ... other arguments passed on to layer. These are often aesthetics,
#' used to set an aesthetic to a fixed value, like color = "red" or size = 3.
#' They may also be parameters to the paired geom/stat.
#'
#' @return ggplot2 plot layer
#'
#' @examples
#' # default settings for normally distributed data, 95% confidence interval,
#' data <- data.frame(x = rep(c(1, 2, 3), each = 20),
#' y = rep(c(1, 2, 3), each = 20) + stats::rnorm(60),
#' group = rep(1:3, 20))
#' xgx_plot(data, ggplot2::aes(x = x, y = y)) +
#' xgx_stat_ci(conf_level = 0.95)
#'
#' # try different geom
#' xgx_plot(data, ggplot2::aes(x = x, y = y)) +
#' xgx_stat_ci(conf_level = 0.95, geom = list("ribbon", "point", "line"))
#'
#' # plotting lognormally distributed data
#' data <- data.frame(x = rep(c(1, 2, 3), each = 20),
#' y = 10^(rep(c(1, 2, 3), each = 20) + stats::rnorm(60)),
#' group = rep(1:3, 20))
#' xgx_plot(data, ggplot2::aes(x = x, y = y)) +
#' xgx_stat_ci(conf_level = 0.95, distribution = "lognormal")
#'
#' # note: you DO NOT need to use both distribution = "lognormal"
#' # and scale_y_log10()
#' xgx_plot(data, ggplot2::aes(x = x, y = y)) +
#' xgx_stat_ci(conf_level = 0.95) + xgx_scale_y_log10()
#'
#' # plotting binomial data
#' data <- data.frame(x = rep(c(1, 2, 3), each = 20),
#' y = stats::rbinom(60, 1, rep(c(0.2, 0.6, 0.8),
#' each = 20)),
#' group = rep(1:3, 20))
#' xgx_plot(data, ggplot2::aes(x = x, y = y)) +
#' xgx_stat_ci(conf_level = 0.95, distribution = "binomial")
#'
#' # including multiple groups in same plot
#' xgx_plot(data, ggplot2::aes(x = x, y = y)) +
#' xgx_stat_ci(conf_level = 0.95, distribution = "binomial",
#' ggplot2::aes(color = factor(group)),
#' position = ggplot2::position_dodge(width = 0.5))
#'
#' # plotting ordinal or multinomial data
#' set.seed(12345)
#' data = data.frame(x = 120*exp(stats::rnorm(100,0,1)),
#' response = sample(c("Mild","Moderate","Severe"), 100, replace = TRUE),
#' covariate = sample(c("Male","Female"), 100, replace = TRUE))
#'
#' xgx_plot(data = data) +
#' xgx_stat_ci(mapping = ggplot2::aes(x = x, response = response, colour = covariate),
#' distribution = "ordinal", bins = 4) +
#' ggplot2::scale_y_continuous(labels = scales::percent_format()) + ggplot2::facet_wrap(~response)
#'
#' xgx_plot(data = data) +
#' xgx_stat_ci(mapping = ggplot2::aes(x = x, response = response, colour = response),
#' distribution = "ordinal", bins = 4) +
#' ggplot2::scale_y_continuous(labels = scales::percent_format()) + ggplot2::facet_wrap(~covariate)
#'
#' # Example plotting categorical vs categorical data
#' set.seed(12345)
#' data = data.frame(x = 120*exp(stats::rnorm(100,0,1)),
#' response = sample(c("Trt1", "Trt2", "Trt3"), 100, replace = TRUE),
#' covariate = factor(
#' sample(c("White","Black","Asian","Other"), 100, replace = TRUE),
#' levels = c("White", "Black", "Asian", "Other")))
#'
#' xgx_plot(data = data) +
#' xgx_stat_ci(mapping = ggplot2::aes(x = response, response = covariate),
#' distribution = "ordinal") +
#' xgx_stat_ci(mapping = ggplot2::aes(x = 1, response = covariate), geom = "hline",
#' distribution = "ordinal") +
#' ggplot2::scale_y_continuous(labels = scales::percent_format()) +
#' ggplot2::facet_wrap(~covariate) +
#' ggplot2::xlab("Treatment group") +
#' ggplot2::ylab("Percent of subjects by category")
#'
#' # Same example with orientation flipped (only works for ggplot2 v.3.3.0 or later)
#' # only run if ggplot2 v.3.3.0 or later
#' ggplot2_geq_v3.3.0 <- utils::compareVersion(
#' as.character(utils::packageVersion("ggplot2")), '3.3.0') >= 0
#'
#' if(ggplot2_geq_v3.3.0){
#'
#' xgx_plot(data = data) +
#' xgx_stat_ci(mapping = ggplot2::aes(y = response, response = covariate), orientation = "y",
#' distribution = "ordinal") +
#' xgx_stat_ci(mapping = ggplot2::aes(y = 1, response = covariate), orientation = "y",
#' geom = "vline", distribution = "ordinal") +
#' ggplot2::scale_x_continuous(labels = scales::percent_format()) +
#' ggplot2::facet_wrap(~covariate) +
#' ggplot2::ylab("Treatment group") +
#' ggplot2::xlab("Percent of subjects by category")
#'
#' }
#'
#'
#' @importFrom stats rnorm
#' @importFrom stats rbinom
#' @importFrom stats na.omit
#' @importFrom stats qt
#' @importFrom stats var
#' @importFrom binom binom.exact
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 layer
#' @importFrom ggplot2 position_dodge
#' @importFrom ggplot2 StatSummary
#'
#' @export
xgx_stat_ci <- function(mapping = NULL,
data = NULL,
conf_level = 0.95,
distribution = "normal",
bins = NULL,
breaks = NULL,
geom = list("point", "line", "errorbar"),
position = "identity",
fun.args = list(),
fun.data = NULL,
na.rm = FALSE,
orientation = "x",
show.legend = NA,
inherit.aes = TRUE,
...) {
lays <- list()
  # default summary function: confidence intervals via xgx_conf_int()
  if (is.null(fun.data)) {
    fun.data <- function(y) xgx_conf_int(y = y, conf_level = conf_level,
                                         distribution = distribution)
  }
# Default parameters
gg_params = list(
fun.args = fun.args,
fun.data = fun.data,
na.rm = na.rm,
...)
# Compare to ggplot2 version 3.3.0
# If less than 3.3.0, then don't include orientation option
ggplot2_geq_v3.3.0 <- utils::compareVersion(as.character(utils::packageVersion("ggplot2")), '3.3.0') >= 0
if(ggplot2_geq_v3.3.0){
gg_params$orientation = orientation
}else{
if(!(orientation %in% "x")){
warning('orientation other than "x" not supported for ggplot2 versions less than 3.3.0')
}
}
# Ordinal, binned or not binned
if(distribution %in% c("ordinal", "multinomial")){
ggproto_stat <- StatSummaryOrdinal
gg_params = append(gg_params, list(conf_level = conf_level,
distribution = distribution,
bins = bins,
breaks = breaks))
}else{
# Continuous Non-binned
if (is.null(bins) & is.null(breaks)) {
ggproto_stat <- ggplot2::StatSummary
}
# Continuous binned
else {
ggproto_stat <- StatSummaryBinQuant
gg_params = append(gg_params, list(bins = bins,
breaks = breaks))
}
}
for (igeom in geom) {
lay = ggplot2::layer(
stat = ggproto_stat,
data = data,
mapping = mapping,
geom = igeom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = gg_params
)
# Adjust aes to default xgx preference
if (igeom == "point") {
if (is.null(lay$aes_params$size)) lay$aes_params$size <- 2
}
else if (igeom == "line") {
if (is.null(lay$aes_params$size)) lay$aes_params$size <- 1
}
else if (igeom == "errorbar") {
if (is.null(lay$aes_params$size)) lay$aes_params$size <- 1
if (is.null(lay$geom_params$width)) lay$geom_params$width <- 0
}
else if (igeom == "ribbon") {
if(is.null(lay$aes_params$alpha)) lay$aes_params$alpha <- 0.25
}
else if (igeom == "pointrange") {
if(is.null(lay$aes_params$size)){
lay$aes_params$size <- 1
lay$geom$geom_params$fatten <- 2
}
}
lays[[paste0("geom_", igeom)]] <- lay
}
return(lays)
}
#' Compute confidence intervals
#'
#' \code{xgx_conf_int} returns a data frame with mean +/- confidence intervals
#'
#' @param y data to compute confidence interval of
#' @param conf_level The percentile for the confidence interval (should fall
#' between 0 and 1). The default is 0.95, which corresponds to a 95 percent
#' confidence interval.
#' @param distribution The distribution which the data follow, used for
#' calculating confidence intervals. The options are "normal", "lognormal",
#' and "binomial". The "normal" option will use the Student t Distribution
#' to calculate confidence intervals, the "lognormal" option will transform
#' data to the log space first. The "binomial" option will use the
#' \code{\link[binom:binom.confint]{binom.exact}} function to calculate the
#' confidence
#' intervals. Note: binomial data must be numeric and contain only 1's and 0's.
#'
#' @return data.frame
#'
#' @examples
#' # default settings for normally distributed data, 95% confidence interval,
#' data <- data.frame(x = rep(c(1, 2, 3), each = 20),
#' y = rep(c(1, 2, 3), each = 20) + stats::rnorm(60),
#' group = rep(1:3, 20))
#' xgx_conf_int(data$y)
#'
#' @importFrom stats rnorm
#' @importFrom stats rbinom
#' @importFrom stats na.omit
#' @importFrom stats qt
#' @importFrom stats var
#' @importFrom binom binom.exact
#' @importFrom DescTools MultinomCI
#' @export
xgx_conf_int = function(y, conf_level = 0.95, distribution = "normal") {
if (!(conf_level > 0.5 && conf_level < 1)) {
stop("conf_level should be greater than 0.5 and less than 1")
}
percentile_value <- conf_level + (1 - conf_level) / 2
y <- stats::na.omit(y)
if (distribution == "normal") {
mu <- mean(y)
qtt <- stats::qt(percentile_value, length(y))
    s_v <- sqrt(stats::var(y) / length(y))
conf_int_out <- data.frame(
y = mu,
ymin = mu - qtt * s_v,
ymax = mu + qtt * s_v
)
} else if (distribution == "lognormal") {
yy <- log(y)
mu <- mean(yy)
qtt <- stats::qt(percentile_value, length(yy))
s_v <- sqrt(stats::var(yy) / length(yy))
# e^mu = median value - http://jse.amstat.org/v13n1/olsson.html
conf_int_out <- data.frame(
y = exp(mu),
ymin = exp(mu - qtt * s_v),
ymax = exp(mu + qtt * s_v)
)
} else if (distribution == "binomial") {
stats <- binom::binom.exact(sum(y), length(y),
conf.level = conf_level)
conf_int_out <- data.frame(
y = mean(y),
ymin = stats$lower,
ymax = stats$upper)
} else if (distribution %in% c("multinomial", "ordinal")) {
    # assuming `y` has not yet been collapsed to the number of counts
    # per category
    count <- table(y)
stats <- as.data.frame(DescTools::MultinomCI(count, conf.level = conf_level))
conf_int_out <- data.frame(
y = stats$est,
ymin = stats$lwr.ci,
ymax = stats$upr.ci)
} else {
stop("distribution must be either normal, lognormal, binomial,
or multinomial/ordinal.")
}
return(conf_int_out)
}
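# Hedged check of the lognormal branch above: the returned center is the
# sample geometric mean, exp(mean(log(y))), an estimate of the median
# (y_demo is illustrative only).
set.seed(123)
y_demo <- stats::rlnorm(100, meanlog = 1)
all.equal(xgx_conf_int(y_demo, distribution = "lognormal")$y,
          exp(mean(log(y_demo))))  # TRUE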
#' Stat ggproto object for creating ggplot layers of binned confidence
#' intervals for probabilities of classes in ordinal data
#'
#' \code{StatSummaryOrdinal} returns a ggproto object for plotting mean +/- confidence intervals
#' for ordinal data. It also allows for binning values on the independent axis.
#'
#'
#' @return ggplot2 ggproto object
#'
#' @importFrom dplyr mutate
#' @importFrom dplyr summarize
#' @importFrom ggplot2 aes
#' @export
StatSummaryOrdinal <- ggplot2::ggproto("StatSummaryOrdinal", ggplot2::Stat,
required_aes = c("x", "response"),
extra_params = c("na.rm", "orientation"),
compute_group = function(data, scales, conf_level, distribution, bins, breaks,
fun.data = NULL,
fun.args = list()) {
return(data)
},
setup_params = function(self, data, params) {
params$flipped_aes <- has_flipped_aes(data, params)
required_aes <- self$required_aes
if(params$flipped_aes){
required_aes <- switch_orientation(self$required_aes)
}
# check required aesthetics
ggplot2:::check_required_aesthetics(
required_aes,
c(names(data), names(params)),
ggplot2:::snake_class(self)
)
# Make sure required_aes consists of the used set of aesthetics in case of
# "|" notation in self$required_aes
required_aes <- intersect(
names(data),
unlist(strsplit(required_aes, "|", fixed = TRUE))
)
# aes_to_group are the aesthetics that are different from response,
# it's assumed that these should split the data into groups for calculating CI,
# e.g. coloring by a covariate
#
# aes_not_to_group are aesthetics that are identical to response,
# it's assumed that these are only for applyng aesthetics to the end result,
# e.g. coloring by response category
params$aes_to_group <- c()
params$aes_not_to_group <- c()
# go through PANEL, colour, fill, linetype, shape
  if ((data %>% subset(, c(response, PANEL)) %>% unique() %>% dim)[1] == length(unique(data$response))) {
params$aes_not_to_group <- c(params$aes_not_to_group, "PANEL")
}else{
params$aes_to_group <- c(params$aes_to_group, "PANEL")
}
if(is.null(data$colour)){
}else if((data %>% subset(, c(response, colour)) %>% unique() %>% dim)[1] == length(unique(data$response))){
params$aes_not_to_group <- c(params$aes_not_to_group, "colour")
}else{
params$aes_to_group <- c(params$aes_to_group, "colour")
}
if(is.null(data$linetype)){
}else if((data %>% subset(, c(response, linetype)) %>% unique() %>% dim)[1] == length(unique(data$response))){
params$aes_not_to_group <- c(params$aes_not_to_group, "linetype")
}else{
params$aes_to_group <- c(params$aes_to_group, "linetype")
}
if(is.null(data$fill)){
}else if((data %>% subset(, c(response, fill)) %>% unique() %>% dim)[1] == length(unique(data$response))){
params$aes_not_to_group <- c(params$aes_not_to_group, "fill")
}else{
params$aes_to_group <- c(params$aes_to_group, "fill")
}
if(is.null(data$shape)){
}else if((data %>% subset(, c(response, shape)) %>% unique() %>% dim)[1] == length(unique(data$response))){
params$aes_not_to_group <- c(params$aes_not_to_group, "shape")
}else{
params$aes_to_group <- c(params$aes_to_group, "shape")
}
if(length(params$aes_not_to_group) == 0){
warning("In xgx_stat_ci: \n No aesthetics defined to differentiate response groups.\n Suggest to add color = response, linetype = response, or similar to aes() mapping.",
call. = FALSE)
}else{
message(paste0("In xgx_stat_ci: \n The following aesthetics are identical to response: ",
paste0(params$aes_not_to_group, collapse = ", "),
"\n These will be used for differentiating response groups in the resulting plot."))
}
if(length(params$aes_to_group) > 0){
message(paste0("In xgx_stat_ci: \n The following aesthetics are different from response: ",
paste0(params$aes_to_group, collapse = ", "),
"\n These will be used to divide the data into different groups before calculating summary statistics on the response."))
}
if("mapped_discrete" %in% attr(data$x, "class") & (!is.null(params$breaks) | !is.null(params$bins))){
message("In xgx_stat_ci: \n ignoring bins or breaks supplied with discrete x values")
params$breaks <- NULL
params$bins <- NULL
}
params
},
setup_data = function(self, data, params) {
data <- flip_data(data, params$flipped_aes)
# Define new grouping variable for which to split the data computation
# (excludes aesthetics that are identical to the Response variable)
if(is.null(params$aes_to_group)){
data <- data %>% mutate(group2 = 1)
}else{
groups <- unique(data %>% subset(, params$aes_to_group))
groups <- groups %>%
mutate(group2 = 1:dim(groups)[1])
data <- data %>% merge(groups)
}
if(is.null(params$breaks)){
if(is.null(params$bins)){
data <- data %>% mutate(x_bin = x)
median_x <- data %>%
subset(,c(x_bin, group2, x)) %>%
unique() %>%
ungroup() %>% group_by(x_bin, group2)
}else{
# Calculate percentages for each category across each bin
data <- data %>% mutate(x_bin = dplyr::ntile(data$x, params$bins))
}
}else{
data <- data %>% mutate(x_bin = cut(data$x, params$breaks))
}
  if (!is.null(params$breaks) || !is.null(params$bins)) {
# Get median x value for each bin
median_x <- data %>% ungroup() %>%
group_by(x_bin, group2) %>%
summarize(x = median(x), .groups = "keep")
}
# Get the number of each category in each bin
counts <- data %>% ungroup() %>%
group_by(x_bin, group2, response) %>%
summarize(count = length(x), .groups = "keep") %>%
merge(data %>% subset(,-c(x)),
by = c("response","group2","x_bin")) %>%
unique()
# Combine the x and y data
data <- merge(median_x, counts, by = c("x_bin", "group2"), all = TRUE)
# Now calculate the confidence intervals for the multinomial data
data <- data %>% group_by(x_bin, group2) %>%
mutate(x = median(x),
y=as.data.frame(DescTools::MultinomCI(count, params$conf_level))$est,
ymin=as.data.frame(DescTools::MultinomCI(count, params$conf_level))$lwr.ci,
ymax=as.data.frame(DescTools::MultinomCI(count, params$conf_level))$upr.ci) %>%
ungroup() %>% group_by(group, group2)
# if you want to use geom hline, then need yintercept defined
data <- data %>% mutate(yintercept = y)
data <- flip_data(data, params$flipped_aes)
return(data)
},
compute_layer = function(self, data, params, layout) {
data
},
compute_panel = function(self, data, scales, ...) {
data
}
)
#' Stat ggproto object for binning by quantile for xgx_stat_ci
#'
#' Source:
#' https://github.com/tidyverse/ggplot2/blob/351eb41623397dea20ed0059df62a4a5974d88cb/R/stat-summary-bin.R
#'
#' \code{StatSummaryBinQuant} returns a ggproto object for plotting mean +/- confidence bins
#'
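#' @examples
#' # a hedged sketch: this ggproto is normally reached through xgx_stat_ci()
#' # by supplying bins or breaks for a continuous x, not by direct use
#' \dontrun{
#' data <- data.frame(x = stats::rnorm(100), y = stats::rnorm(100))
#' xgx_plot(data, ggplot2::aes(x = x, y = y)) +
#'   xgx_stat_ci(bins = 4, geom = list("point", "errorbar"))
#' }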
#'
#' @return ggplot2 ggproto object
#'
#' @importFrom dplyr mutate
#' @importFrom dplyr summarize
#' @importFrom ggplot2 aes
#' @export
StatSummaryBinQuant <- ggplot2::ggproto("StatSummaryBinQuant", ggplot2::Stat,
required_aes = c("x", "y"),
extra_params = c("na.rm", "orientation"),
setup_params = function(data, params) {
# gg_util_url <- "https://raw.githubusercontent.com/tidyverse/ggplot2/7e5ff921c50fb0beb203b115397ea33fee410a54/R/utilities.r"
# eval(text = RCurl::getURL(gg_util_url, ssl.verifypeer = FALSE))
params$flipped_aes <- has_flipped_aes(data, params, ambiguous = TRUE)
params
},
compute_group = function(data, scales,
fun.data = NULL,
fun = NULL,
fun.max = NULL,
fun.min = NULL,
fun.args = list(),
bins = NULL,
binwidth = NULL,
breaks = NULL,
origin = NULL,
right = FALSE,
na.rm = FALSE,
flipped_aes = FALSE) {
# data <- flip_data(data, flipped_aes)
fun <- ggplot2:::make_summary_fun(fun.data, fun, fun.max, fun.min, fun.args)
# Use breaks if available instead of bins
    if (is.null(breaks)) {
      # calculate breaks from the number of bins, using quantiles of x
      breaks <- stats::quantile(data$x, probs = seq(0, 1, 1 / bins))
    }
data$bin <- cut(data$x, breaks, include.lowest = TRUE, labels = FALSE)
out <- ggplot2:::dapply(data, "bin", fun)
locs <- ggplot2:::bin_loc(breaks, out$bin)
out$x <- locs$mid
return(out)
}
)
#
# From ggplot2::ggplot_global
# Environment that holds various global variables and settings for ggplot,
# such as the current theme. It is not exported and should not be directly
# manipulated by other packages.
ggplot_global <- new.env(parent = emptyenv())
# The current theme. Defined here only as placeholder, and defined properly
# in file "theme-current.R". This setup avoids circular dependencies among
# the various source files.
ggplot_global$theme_current <- list()
# Element tree for the theme elements. Defined here only as placeholder, and
# defined properly in file "theme-elements.r".
ggplot_global$element_tree <- list()
# List of all aesthetics known to ggplot
# (In the future, .all_aesthetics should be removed in favor
# of direct assignment to ggplot_global$all_aesthetics, see below.)
.all_aesthetics <- c(
"adj", "alpha", "angle", "bg", "cex", "col", "color",
"colour", "fg", "fill", "group", "hjust", "label", "linetype", "lower",
"lty", "lwd", "max", "middle", "min", "pch", "radius", "sample", "shape",
"size", "srt", "upper", "vjust", "weight", "width", "x", "xend", "xmax",
"xmin", "xintercept", "y", "yend", "ymax", "ymin", "yintercept", "z"
)
ggplot_global$all_aesthetics <- .all_aesthetics
# Aesthetic aliases
# (In the future, .base_to_ggplot should be removed in favor
# of direct assignment to ggplot_global$base_to_ggplot, see below.)
.base_to_ggplot <- c(
"col" = "colour",
"color" = "colour",
"pch" = "shape",
"cex" = "size",
"lty" = "linetype",
"lwd" = "size",
"srt" = "angle",
"adj" = "hjust",
"bg" = "fill",
"fg" = "colour",
"min" = "ymin",
"max" = "ymax"
)
ggplot_global$base_to_ggplot <- .base_to_ggplot
ggplot_global$x_aes <- c("x", "xmin", "xmax", "xend", "xintercept",
"xmin_final", "xmax_final", "xlower", "xmiddle", "xupper", "x0")
ggplot_global$y_aes <- c("y", "ymin", "ymax", "yend", "yintercept", "ymin_final",
"ymax_final", "lower", "middle", "upper", "y0")
#
#
# From ggplot2 utilities on GitHub
#
#
"%||%" <- function(a, b) {
if (!is.null(a)) a else b
}
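# minimal usage sketch: NULL %||% 2 returns 2, while 1 %||% 2 returns 1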
is_mapped_discrete <- function(x) inherits(x, "mapped_discrete")
has_flipped_aes <- function(data, params = list(), main_is_orthogonal = NA,
range_is_orthogonal = NA, group_has_equal = FALSE,
ambiguous = FALSE, main_is_continuous = FALSE,
main_is_optional = FALSE) {
# Is orientation already encoded in data?
if (!is.null(data$flipped_aes)) {
not_na <- which(!is.na(data$flipped_aes))
if (length(not_na) != 0) {
return(data$flipped_aes[[not_na[1L]]])
}
}
# Is orientation requested in the params
if (!is.null(params$orientation) && !is.na(params$orientation)) {
return(params$orientation == "y")
}
x <- data$x %||% params$x
y <- data$y %||% params$y
xmin <- data$xmin %||% params$xmin
ymin <- data$ymin %||% params$ymin
xmax <- data$xmax %||% params$xmax
ymax <- data$ymax %||% params$ymax
  # Does a single x or y aesthetic correspond to a specific orientation
if (!is.na(main_is_orthogonal) && xor(is.null(x), is.null(y))) {
return(is.null(y) == main_is_orthogonal)
}
has_x <- !is.null(x)
has_y <- !is.null(y)
# Does a provided range indicate an orientation
if (!is.na(range_is_orthogonal)) {
if (!is.null(ymin) || !is.null(ymax)) {
return(!range_is_orthogonal)
}
if (!is.null(xmin) || !is.null(xmax)) {
return(range_is_orthogonal)
}
}
# If ambiguous orientation = NA will give FALSE
if (ambiguous && (is.null(params$orientation) || is.na(params$orientation))) {
return(FALSE)
}
# Is there a single actual discrete position
y_is_discrete <- is_mapped_discrete(y)
x_is_discrete <- is_mapped_discrete(x)
if (xor(y_is_discrete, x_is_discrete)) {
return(y_is_discrete != main_is_continuous)
}
# Does each group have a single x or y value
if (group_has_equal) {
if (has_x) {
if (length(x) == 1) return(FALSE)
x_groups <- vapply(split(data$x, data$group), function(x) length(unique(x)), integer(1))
if (all(x_groups == 1)) {
return(FALSE)
}
}
if (has_y) {
if (length(y) == 1) return(TRUE)
y_groups <- vapply(split(data$y, data$group), function(x) length(unique(x)), integer(1))
if (all(y_groups == 1)) {
return(TRUE)
}
}
}
# default to no
FALSE
}
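# minimal usage sketch: with an explicit orientation request,
# has_flipped_aes(data.frame(y = 1), list(orientation = "y")) returns TRUE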
flip_data <- function(data, flip = NULL) {
flip <- flip %||% any(data$flipped_aes) %||% FALSE
if (isTRUE(flip)) {
names(data) <- switch_orientation(names(data))
}
data
}
flipped_names <- function(flip = FALSE) {
x_aes <- ggplot_global$x_aes
y_aes <- ggplot_global$y_aes
if (flip) {
ret <- as.list(c(y_aes, x_aes))
} else {
ret <- as.list(c(x_aes, y_aes))
}
names(ret) <- c(x_aes, y_aes)
ret
}
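# minimal usage sketch: flipped_names(TRUE)$x is "y"; flipped_names(FALSE)$x is "x"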
switch_orientation <- function(aesthetics) {
ggplot_global <- list2env(
list(x_aes = c("x", "xmin", "xmax", "xend", "xintercept", "xmin_final", "xmax_final", "xlower", "xmiddle", "xupper", "x0"),
y_aes = c("y", "ymin", "ymax", "yend", "yintercept", "ymin_final", "ymax_final", "ylower", "ymiddle", "yupper", "y0")))
# We should have these as globals somewhere
x <- ggplot_global$x_aes
y <- ggplot_global$y_aes
x_aes <- match(aesthetics, x)
x_aes_pos <- which(!is.na(x_aes))
y_aes <- match(aesthetics, y)
y_aes_pos <- which(!is.na(y_aes))
if (length(x_aes_pos) > 0) {
aesthetics[x_aes_pos] <- y[x_aes[x_aes_pos]]
}
if (length(y_aes_pos) > 0) {
aesthetics[y_aes_pos] <- x[y_aes[y_aes_pos]]
}
aesthetics
}
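# minimal usage sketch: switch_orientation(c("x", "ymin")) returns c("y", "xmin")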
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/xgx_stat_ci.R |
#' Plot data with median and percent intervals
#'
#' \code{xgx_stat_pi} returns a ggplot layer plotting median +/- percent
#' intervals
#'
#'
#' @param mapping Set of aesthetic mappings created by `aes` or `aes_`.
#' If specified and `inherit.aes = TRUE` (the default), it is combined with the
#' default mapping at the top level of the plot. You must supply mapping if
#' there is no plot mapping.
#' @param data The data to be displayed in this layer. There are three options:
#'
#' If NULL, the default, the data is inherited from the plot data as specified
#' in the call to ggplot.
#'
#' A data.frame, or other object, will override the plot data. All objects
#' will be fortified to produce a data frame. See fortify for which variables
#' will be created.
#'
#' A function will be called with a single argument, the plot data. The return
#' value must be a data.frame., and will be used as the layer data.
#' @param percent_level The upper or lower percentile for the percent interval (should fall
#' between 0 and 1). The default is 0.95, which corresponds to the
#' (0.05, 0.95) interval. Supplying 0.05 would give the same result.
#' @param geom Use to override the default geom. Can be a list of multiple
#' geoms, e.g. list("line","ribbon"), which is the default.
#' @param position Position adjustment, either as a string, or the result of
#' a call to a position adjustment function.
#' @param bins number of bins to cut up the x data, cuts data into quantiles.
#' @param breaks breaks to cut up the x data, if this option is used, bins is ignored
#' @param fun.args Optional additional arguments passed on to the functions.
#' @param na.rm If FALSE, the default, missing values are removed with a
#' warning. If TRUE, missing values are silently removed.
#' @param show.legend logical. Should this layer be included in the legends?
#' NA, the default, includes if any aesthetics are mapped. FALSE never
#' includes, and TRUE always includes.
#' @param inherit.aes If FALSE, overrides the default aesthetics, rather
#' than combining with them. This is most useful for helper functions that
#' define both data and aesthetics and shouldn't inherit behaviour from the
#' default plot specification, e.g. borders.
#' @param ... other arguments passed on to layer. These are often aesthetics,
#' used to set an aesthetic to a fixed value, like color = "red" or size = 3.
#' They may also be parameters to the paired geom/stat.
#'
#' @return ggplot2 plot layer
#'
#' @examples
#' # default settings for normally distributed data, (5%,95%) interval,
#' data <- data.frame(x = rep(c(1, 2, 3), each = 20),
#' y = rep(c(1, 2, 3), each = 20) + stats::rnorm(60),
#' group = rep(1:3, 20))
#' xgx_plot(data, ggplot2::aes(x = x, y = y)) +
#' xgx_stat_pi(percent_level = 0.95)
#'
#' # try different geom
#' xgx_plot(data, ggplot2::aes(x = x, y = y)) +
#' xgx_stat_pi(percent_level = 0.95, geom = list("errorbar", "point", "line"))
#'
#' # including multiple groups in same plot
#' xgx_plot(data, ggplot2::aes(x = x, y = y)) +
#' xgx_stat_pi(percent_level = 0.95,
#' ggplot2::aes(color = factor(group), fill = factor(group)),
#' position = ggplot2::position_dodge(width = 0.5))
#'
#' # including multiple percent intervals in same plot
#' xgx_plot(data, ggplot2::aes(x = x, y = y)) +
#' xgx_stat_pi(percent_level = 0.90) +
#' xgx_stat_pi(percent_level = 0.80) +
#' xgx_stat_pi(percent_level = 0.70) +
#' xgx_stat_pi(percent_level = 0.60)
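#'
#' # binning the x variable into quantiles before computing the interval
#' xgx_plot(data, ggplot2::aes(x = x, y = y)) +
#'   xgx_stat_pi(percent_level = 0.95, bins = 2)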
#'
#' @importFrom stats rnorm
#' @importFrom ggplot2 stat_summary
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 position_dodge
#' @export
xgx_stat_pi <- function(mapping = NULL, data = NULL, percent_level = 0.95,
geom = list("line", "ribbon"),
position = "identity",
bins = NULL,
breaks = NULL,
fun.args = list(),
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
...) {
  if (!(percent_level >= 0 && percent_level <= 1)) {
    stop("percent_level should be between 0 and 1, inclusive")
}
  percent_int <- function(y, percent_level) {
    percentile_value <- max(percent_level, 1 - percent_level)
    y <- stats::na.omit(y)
    percent_int_out <- data.frame(
      y = stats::median(y),
      ymin = stats::quantile(y, 1 - percentile_value),
      ymax = stats::quantile(y, percentile_value)
    )
    return(percent_int_out)
  }
ret <- xgx_stat_ci(mapping = mapping,
data = data,
conf_level = NULL,
distribution = "normal",
bins = bins,
breaks = breaks,
geom = geom,
position = position,
fun.args = fun.args,
fun.data = function(y) percent_int(y, percent_level),
na.rm = na.rm,
show.legend = show.legend,
inherit.aes = inherit.aes,
...)
return(ret)
}
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/xgx_stat_pi.R |
#' Wrapper for stat_smooth
#'
#' \code{xgx_stat_smooth} and \code{xgx_geom_smooth} produce smooth fits through continuous or categorical data.
#' For categorical, ordinal, or multinomial data use \code{method = "polr"}.
#' This wrapper also works with nonlinear methods like \code{nls} and \code{nlsLM} for continuous data.
#'
#' @seealso \code{\link{predictdf.nls}} for information on how nls confidence intervals are calculated.
#'
#'
#' @param mapping Set of aesthetic mappings created by `aes` or `aes_`.
#' If specified and `inherit.aes = TRUE` (the default), it is combined with the
#' default mapping at the top level of the plot. You must supply mapping if
#' there is no plot mapping.
#' Warning: for `method = polr`, do not define `y` aesthetic, use `response` instead.
#' @param data The data to be displayed in this layer. There are three options:
#'
#' If NULL, the default, the data is inherited from the plot data as specified
#' in the call to ggplot.
#'
#' A data.frame, or other object, will override the plot data. All objects
#' will be fortified to produce a data frame. See fortify for which variables
#' will be created.
#'
#' A function will be called with a single argument, the plot data. The return
#' value must be a data.frame., and will be used as the layer data.
#' @param level The percentile for the confidence interval (should fall
#' between 0 and 1). The default is 0.95, which corresponds to a 95 percent
#' confidence interval.
#' @param geom Use to override the default geom. Can be a list of multiple
#' geoms, e.g. list("point","line","errorbar"), which is the default.
#' @param position Position adjustment, either as a string, or the result of
#' a call to a position adjustment function.
#'
#' @param method method (function) to use, eg. lm, glm, gam, loess, rlm.
#' Example: `"polr"` for ordinal data. `"nlsLM"` for nonlinear least squares.
#' If method is left as `NULL`, then a typical `StatSmooth` is applied,
#' with the corresponding defaults, i.e. for datasets with n < 1000 the default is loess.
#' For datasets with 1000 or more observations the default is gam.
#' @param formula formula to use in smoothing function, eg. y ~ x, y ~ poly(x, 2), y ~ log(x)
#' @param se display confidence interval around smooth? (TRUE by default, see level to control)
#' @param fullrange should the fit span the full range of the plot, or just the data
#' @param n number of points to evaluate smoother at
#' @param span Controls the amount of smoothing for the default loess smoother.
#' Smaller numbers produce wigglier lines, larger numbers produce smoother lines.
#' @param n_boot number of bootstraps to perform to compute confidence interval,
#' currently only used for method = "polr", default is 200
#' @param method.args Optional additional arguments passed on to the method.
#' @param na.rm If FALSE, the default, missing values are removed with a
#' warning. If TRUE, missing values are silently removed.
#' @param orientation The orientation of the layer, passed on to ggplot2::stat_summary.
#' Only implemented for ggplot2 v.3.3.0 and later. The default ("x") summarizes y values over
#' x values (same behavior as ggplot2 v.3.2.1 or earlier). Setting \code{orientation = "y"} will
#' summarize x values over y values, which may be useful in some situations where you want to flip
#' the axes, e.g. to create forest plots. Setting \code{orientation = NA} will try to automatically
#' determine the orientation from the aesthetic mapping (this is more stable for ggplot2 v.3.3.2
#' compared to v.3.3.0).
#' @param show.legend logical. Should this layer be included in the legends?
#' NA, the default, includes if any aesthetics are mapped. FALSE never
#' includes, and TRUE always includes.
#' @param inherit.aes If FALSE, overrides the default aesthetics, rather
#' than combining with them. This is most useful for helper functions that
#' define both data and aesthetics and shouldn't inherit behaviour from the
#' default plot specification, e.g. borders.
#' @param ... other arguments passed on to layer. These are often aesthetics,
#' used to set an aesthetic to a fixed value, like color = "red" or size = 3.
#' They may also be parameters to the paired geom/stat.
#'
#' @return ggplot2 plot layer
#'
#' @section Warning:
#' \code{nlsLM} uses \code{nls.lm} which implements the Levenberg-Marquardt
#' algorithm for fitting a nonlinear model, and may fail to converge for a
#' number of reasons. See \code{?nls.lm} for more information.
#'
#' \code{nls} uses Gauss-Newton method for estimating parameters,
#' and could fail if the parameters are not identifiable. If this happens
#' you will see the following warning message:
#' Warning message:
#' Computation failed in `stat_smooth()`:
#' singular gradient
#'
#' \code{nls} will also fail if used on artificial "zero-residual" data,
#' use \code{nlsLM} instead.
#'
#' @examples
#'
#' # Example with nonlinear least squares (method = "nlsLM")
#' Nsubj <- 10
#' Doses <- c(0, 25, 50, 100, 200)
#' Ntot <- Nsubj*length(Doses)
#' times <- c(0,14,30,60,90)
#'
#' dat1 <- data.frame(ID = 1:(Ntot),
#' DOSE = rep(Doses, Nsubj),
#' PD0 = stats::rlnorm(Ntot, log(100), 1),
#' Kout = exp(stats::rnorm(Ntot,-2, 0.3)),
#' Imax = 1,
#' ED50 = 25) %>%
#' dplyr::mutate(PDSS = PD0*(1 - Imax*DOSE/(DOSE + ED50))*exp(stats::rnorm(Ntot, 0.05, 0.3))) %>%
#' merge(data.frame(ID = rep(1:(Ntot), each = length(times)), Time = times), by = "ID") %>%
#' dplyr::mutate(PD = ((PD0 - PDSS)*(exp(-Kout*Time)) + PDSS),
#' PCHG = (PD - PD0)/PD0)
#'
#' gg <- ggplot2::ggplot(dat1 %>% subset(Time == 90),
#' ggplot2::aes(x = DOSE, y = PCHG)) +
#' ggplot2::geom_boxplot(ggplot2::aes(group = DOSE)) +
#' xgx_theme() +
#' xgx_scale_y_percentchangelog10() +
#' ggplot2::ylab("Percent Change from Baseline") +
#' ggplot2::xlab("Dose (mg)")
#'
#' gg +
#' xgx_stat_smooth(method = "nlsLM", formula = y ~ E0 + Emax*x/(ED50 + x),
#' method.args = list(
#' start = list(Emax = -0.50, ED50 = 25, E0 = 0),
#' lower = c(-Inf, 0, -Inf)
#' ),
#' se = TRUE)
#'
#' gg +
#' xgx_geom_smooth_emax()
#'
#' \dontrun{
#' # example with ordinal data (method = "polr")
#' set.seed(12345)
#' data = data.frame(x = 120*exp(stats::rnorm(100,0,1)),
#' response = sample(c("Mild","Moderate","Severe"), 100, replace = TRUE),
#' covariate = sample(c("Male","Female"), 100, replace = TRUE)) %>%
#' dplyr::mutate(y = (50 + 20*x/(200 + x))*exp(stats::rnorm(100, 0, 0.3)))
#'
#' # example coloring by the response categories
#' xgx_plot(data = data) +
#' xgx_stat_smooth(mapping = ggplot2::aes(x = x, response = response,
#' colour = response, fill = response),
#' method = "polr") +
#' ggplot2::scale_y_continuous(labels = scales::percent_format())
#'
#'
#' # example faceting by the response categories, coloring by a different covariate
#' xgx_plot(data = data) +
#' xgx_stat_smooth(mapping = ggplot2::aes(x = x, response = response,
#' colour = covariate, fill = covariate),
#' method = "polr", level = 0.80) +
#' ggplot2::facet_wrap(~response) +
#' ggplot2::scale_y_continuous(labels = scales::percent_format())
#' }
#'
#' @importFrom stats nls
#' @importFrom ggplot2 StatSmooth
#' @export
xgx_stat_smooth <- function(mapping = NULL,
data = NULL,
geom = "smooth",
position = "identity",
...,
method = NULL,
formula = NULL,
se = TRUE,
n = 80,
span = 0.75,
n_boot = 200,
fullrange = FALSE,
level = 0.95,
method.args = list(),
na.rm = FALSE,
orientation = "x",
show.legend = NA,
inherit.aes = TRUE) {
lays <- list()
# Assume OLS / LM / nls / nlsLM / glm etc. model
ggproto_stat <- ggplot2::StatSmooth
# Default parameters
gg_params = list(method = method,
formula = formula,
se = se,
n = n,
n_boot = n_boot,
fullrange = fullrange,
level = level,
na.rm = na.rm,
method.args = method.args,
span = span,
...)
# Compare to ggplot2 version 3.3.0
# If less than 3.3.0, then don't include orientation option
ggplot2_geq_v3.3.0 <- utils::compareVersion(as.character(utils::packageVersion("ggplot2")), '3.3.0') >= 0
if(ggplot2_geq_v3.3.0){
gg_params$orientation = orientation
}else{
if(!(orientation %in% "x")){
warning('orientation other than "x" not supported for ggplot2 versions less than 3.3.0, setting orientation to "x"')
}
gg_params$orientation = "x"
}
  # Ordinal model (guard against `method` being supplied as a function,
  # which would make the string comparison below error out)
  if (!is.null(method) && is.character(method) && method == "polr") {
ggproto_stat <- StatSmoothOrdinal
if(!(gg_params$orientation %in% c("y")) & !is.null(mapping$y)){
if(is.null(mapping$response) ){
mapping$response <- mapping$y
warning("response aesthetic is not defined for ordinal data, but y is, reassigning y to response")
}else{
warning("y aesthetic is not used for ordinal data when orientation = 'x'")
}
mapping$y <- NULL
}
if(!(gg_params$orientation %in% c("x")) & !is.null(mapping$x)){
if(is.null(mapping$response) ){
mapping$response <- mapping$x
warning("response aesthetic is not defined for ordinal data, but x is, reassigning x to response")
}else{
warning("x aesthetic is not used for ordinal data when orientation = 'y'")
}
mapping$x <- NULL
}
  }
for (igeom in geom) {
lay = ggplot2::layer(
stat = ggproto_stat,
data = data,
mapping = mapping,
geom = igeom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = gg_params
)
lays[[paste0("geom_", igeom)]] <- lay
}
return(lays)
}
##' @importFrom minpack.lm nlsLM
##' @export
minpack.lm::nlsLM
#' Wrapper for stat_smooth
#'
#' @rdname xgx_stat_smooth
#'
#' @export
#'
xgx_geom_smooth <- function(mapping = NULL,
data = NULL,
geom = "smooth",
position = "identity",
...,
method = NULL,
formula = NULL,
se = TRUE,
n = 80,
span = 0.75,
fullrange = FALSE,
level = 0.95,
method.args = list(),
na.rm = FALSE,
orientation = "x",
show.legend = NA,
inherit.aes = TRUE) {
return(list(xgx_stat_smooth(mapping = mapping,
data = data,
geom = geom,
position = position,
method = method,
formula = formula,
se = se,
n = n,
span = span,
fullrange = fullrange,
level = level,
method.args = method.args,
na.rm = na.rm,
orientation = orientation,
show.legend = show.legend,
inherit.aes = inherit.aes,
...)))
}
#' Plot Emax fit to data
#'
#' \code{xgx_geom_smooth_emax} uses minpack.lm::nlsLM, predictdf.nls, and stat_smooth to display an Emax model fit to the data
#'
#' @rdname xgx_stat_smooth
#'
#' @export
xgx_geom_smooth_emax <- function(mapping = NULL, data = NULL, geom = "smooth",
position = "identity", ..., method = "nlsLM", formula,
se = TRUE, n = 80, span = 0.75, fullrange = FALSE,
level = 0.95, method.args = list(), na.rm = FALSE,
orientation = "x", show.legend = NA, inherit.aes = TRUE){
if(missing(formula)) {
warning("Formula not specified.\nUsing default formula y ~ E0 + Emax*x/(ED50 + x),
initializing E0, Emax, and ED50 to 1,
and setting lower bound on ED50 to 0")
formula = y ~ E0 + Emax*x/(ED50 + x)
method.args$start = list(E0 = 1, Emax = 1, ED50 = 1)
method.args$lower = c(-Inf, -Inf, 0)
}
xgx_stat_smooth(mapping = mapping, data = data, geom = geom,
position = position, ..., method = method, formula = formula,
se = se, n = n, span = span, fullrange = fullrange,
level = level, method.args = method.args, na.rm = na.rm,
orientation = "x", show.legend = show.legend, inherit.aes = inherit.aes)
}
#' Prediction data frame from ggplot2
#'
#' Get predictions with standard errors into data frame
#'
#' @param model model object
#' @param xseq newdata
#' @param se Display confidence interval around smooth?
#' @param level Level of confidence interval to use
predictdf <- function(model, xseq, se, level) UseMethod("predictdf")
#' Prediction data frame for nls
#'
#' Get predictions with standard errors into data frame for use with geom_smooth
#'
#' \code{ggplot2::geom_smooth} produces confidence intervals by silently calling functions
#' of the form predictdf.method, where method is "loess", "lm", "glm" etc.
#' depending on what method is specified in the call to \code{geom_smooth}.
#' Currently \code{ggplot2} does not define a \code{predictdf.nls} function for method of type "nls",
#' and thus confidence intervals cannot be automatically generated by \code{geom_smooth}
#' for method = "nls". Here we define \code{predictdf.nls} for calculating the confidence
#' intervals of an object of type nls. \code{geom_smooth} will silently call this function
#' whenever method = "nls", and produce the appropriate confidence intervals.
#'
#' \code{predictdf.nls} calculates the CI for a model fit of class nls based on the "delta-method"
#' (see http://sia.webpopix.org/nonlinearRegression.html#confidence-intervals-and-prediction-intervals)
#'
#' CI = [ f(x0, beta) + qt(alpha/2, n - d) * se(f(x0, beta)),
#'        f(x0, beta) + qt(1 - alpha/2, n - d) * se(f(x0, beta)) ]
#'
#' where:
#' beta = vector of parameter estimates
#' x = independent variable
#' se(f(x0, beta)) = sqrt( delta(f)(x0, beta) * Var(beta) * (delta(f)(x0, beta))' )
#' delta(f) is the gradient of f
#'
#' @param model nls object
#' @param xseq newdata
#' @param se Display confidence interval around smooth?
#' @param level Level of confidence interval to use
#'
#' @return data frame with x and y values; if se is TRUE, the data frame also includes ymin and ymax
#'
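#' @examples
#' # a hedged sketch: geom_smooth() calls this method implicitly whenever
#' # method = "nls", so calling it directly is rarely needed
#' \dontrun{
#' df <- data.frame(x = rep(c(10, 25, 50, 100, 200), times = 4))
#' df$y <- 100 * df$x / (50 + df$x) + stats::rnorm(nrow(df), 0, 5)
#' model <- stats::nls(y ~ Emax * x / (ED50 + x), data = df,
#'                     start = list(Emax = 100, ED50 = 50))
#' predictdf.nls(model, xseq = seq(10, 200, length.out = 20),
#'               se = TRUE, level = 0.95)
#' }
#'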
#' @importFrom Deriv Deriv
#' @importFrom stats nls
#' @exportS3Method ggplot2::predictdf
predictdf.nls <- function(model, xseq, se, level) {
# function to calculate gradient wrt model parameters
# value is the function value
# grad is the gradient
fun_grad <- function(form, x, pars, se){
# extract the model parameters to the local environment
list2env(pars %>% as.list(), envir = environment())
ret <- list()
ret$value <- eval(form[[3L]]) # this is the value of the formula
if(se){
ret$grad <- list()
xvec <- x
for(i in 1:length(xvec)){
x = xvec[i]
ret$grad[[i]] <- eval(Deriv::Deriv(form, names(pars), cache.exp = FALSE)) %>% as.list()
if(is.null(names(ret$grad[[i]]))){
names(ret$grad[[i]]) <- names(pars)
}
}
ret$grad <- dplyr::bind_rows(ret$grad) %>% as.matrix
}
return(ret)
}
fg <- fun_grad(form = model$m$formula(), x = xseq, pars = model$m$getPars(), se)
f.new <- fg$value # value of function
pred <- data.frame(x = xseq, y = f.new)
if(se){
grad.new <- fg$grad # value of gradient
    vcov_mat <- stats::vcov(model)
    # delta-method variance: g' V g at each prediction point
    GS <- rowSums((grad.new %*% vcov_mat) * grad.new)
    alpha <- 1 - level
    deltaf <- sqrt(GS) * stats::qt(1 - alpha / 2, df = summary(model)$df[2])
pred$ymin <- f.new - deltaf
pred$ymax <- f.new + deltaf
  }
return(pred)
}
predictdf_polr_env <- new.env(parent = emptyenv())
predictdf_polr_env$data <- NULL
predictdf_polr_env$method <- NULL
predictdf_polr_env$formula <- NULL
predictdf_polr_env$method.args <- NULL
predictdf_polr_env$weight <- NULL
predictdf_polr_env$n_boot <- NULL
#' Prediction data frame for polr
#'
#' Get predictions with standard errors into data frame for use with geom_smooth
#'
#' \code{predictdf.polr} is used by xgx_geom_smooth when method = "polr"
#' to calculate confidence intervals via bootstraps.
#'
#' @param model object returned from polr
#' @param xseq sequence of x values for which to compute the smooth
#' @param se if TRUE then confidence intervals are returned
#' @param level confidence level for confidence intervals
#' @return data.frame with columns x, y, ymin, ymax, and response
#'
#'
#' @exportS3Method ggplot2::predictdf
predictdf.polr <- function(model, xseq, se, level){
data <- predictdf_polr_env$data
method <- predictdf_polr_env$method
formula <- predictdf_polr_env$formula
method.args <- predictdf_polr_env$method.args
weight <- predictdf_polr_env$weight
n_boot <- predictdf_polr_env$n_boot
x <- y <- response <- NULL
percentile_value <- level + (1 - level) / 2
pred.df_boot = list()
iter_failed = 0
for (iboot in 1:n_boot) {
new_pred <- tryCatch ({
# Boostrap by resampling entire dataset
# (prediction + residual doesn't work with ordinal data)
data_boot <- dplyr::sample_n(tbl = data,
size = nrow(data),
replace = TRUE)
base.args <- list(quote(formula), data = quote(data_boot), weights = weight)
model_boot <- do.call(method, c(base.args, method.args))
# Extract Bootstrapped Predictions
# predictdf.polr(model_boot, xseq, se, level)
pred <- stats::predict(model_boot, newdata = data.frame(x = xseq), type = "probs") %>%
data.frame() %>%
dplyr::mutate( x = xseq)
pred.df <- tidyr::pivot_longer(data = pred, cols = -x, names_to = "response", values_to = "y")
}, warning = function(w) {
"There was a problem in the sampling."
}
)
if (is.character(new_pred)) {
iter_failed <- 1 + iter_failed
next
}
pred.df_boot[[iboot]] <- new_pred
  }
  if (iter_failed > 0) {
    warning(paste0(iter_failed, " of ", n_boot,
                   " bootstrap iterations failed and were skipped."))
  }
pred.df_boot <- dplyr::bind_rows(pred.df_boot) %>%
dplyr::group_by(x, response) %>%
dplyr::summarize(ymin = quantile(stats::na.omit(y), 1 - percentile_value),
ymax = quantile(stats::na.omit(y), percentile_value), .groups = "keep") %>%
dplyr::ungroup()
pred <- stats::predict(model, newdata = data.frame(x = xseq), type = "probs") %>%
data.frame() %>%
dplyr::mutate( x = xseq)
pred.df <- tidyr::pivot_longer(data = pred, cols = -x, names_to = "response", values_to = "y")
pred.df_group <- merge(pred.df, pred.df_boot, by = c("x","response"))
  ret <- pred.df_group %>% subset(, c("x", "y", "ymin", "ymax", "response"))
  return(ret)
}
##' @importFrom gtable gtable
##' @export
gtable::gtable
#' Stat object for producing smooths through ordinal data
#'
#'
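#' @examples
#' # a hedged sketch: this ggproto is normally reached through
#' # xgx_stat_smooth(method = "polr") rather than used directly
#' \dontrun{
#' data <- data.frame(x = 120 * exp(stats::rnorm(100, 0, 1)),
#'                    response = sample(c("Mild", "Moderate", "Severe"),
#'                                      100, replace = TRUE))
#' xgx_plot(data = data) +
#'   xgx_stat_smooth(ggplot2::aes(x = x, response = response,
#'                                colour = response, fill = response),
#'                   method = "polr")
#' }
#'
#' @return ggplot2 ggproto object
#'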
#' @importFrom ggplot2 ggproto
#' @export
StatSmoothOrdinal <- ggplot2::ggproto(
"StatSmoothOrdinal",
ggplot2::Stat,
required_aes = c("x", "response"),
extra_params = c("na.rm", "orientation", "method","formula","se","n","span","fullrange","level","method.args","na.rm","n_boot","xseq"),
compute_group = function(data, params) {
return(data)
},
setup_params = function(self, data, params, ...) {
# params$flipped_aes <- has_flipped_aes(data, params, ambiguous = TRUE)
params$flipped_aes <- has_flipped_aes(data, params)
required_aes <- self$required_aes
if(params$flipped_aes){
required_aes <- switch_orientation(self$required_aes)
}
msg <- character()
if (is.null(params$formula)) {
params$formula <- response ~ x
msg <- c(msg, paste0("formula '", deparse(params$formula), "'"))
}
if (length(msg) > 0) {
message("`geom_smooth()` using ", paste0(msg, collapse = " and "))
}
# check required aesthetics
ggplot2:::check_required_aesthetics(
required_aes,
c(names(data), names(params)),
ggplot2:::snake_class(self)
)
# Make sure required_aes consists of the used set of aesthetics in case of
# "|" notation in self$required_aes
required_aes <- intersect(
names(data),
unlist(strsplit(required_aes, "|", fixed = TRUE))
)
# aes_to_group are the aesthetics that are different from response,
# it's assumed that these should split the data into groups for calculating CI,
# e.g. coloring by a covariate
#
# aes_not_to_group are aesthetics that are identical to response,
# it's assumed that these are only for applyng aesthetics to the end result,
# e.g. coloring by response category
params$aes_to_group <- c()
params$aes_not_to_group <- c()
# go through PANEL, colour, fill, linetype, shape
    if ((data %>% subset(, c(response, PANEL)) %>% unique() %>% dim)[1] == length(unique(data$response))) {
params$aes_not_to_group <- c(params$aes_not_to_group, "PANEL")
}else{
params$aes_to_group <- c(params$aes_to_group, "PANEL")
}
if(is.null(data$colour)){
}else if((data %>% subset(, c(response, colour)) %>% unique() %>% dim)[1] == length(unique(data$response))){
params$aes_not_to_group <- c(params$aes_not_to_group, "colour")
}else{
params$aes_to_group <- c(params$aes_to_group, "colour")
}
if(is.null(data$linetype)){
}else if((data %>% subset(, c(response, linetype)) %>% unique() %>% dim)[1] == length(unique(data$response))){
params$aes_not_to_group <- c(params$aes_not_to_group, "linetype")
}else{
params$aes_to_group <- c(params$aes_to_group, "linetype")
}
if(is.null(data$fill)){
}else if((data %>% subset(, c(response, fill)) %>% unique() %>% dim)[1] == length(unique(data$response))){
params$aes_not_to_group <- c(params$aes_not_to_group, "fill")
}else{
params$aes_to_group <- c(params$aes_to_group, "fill")
}
if(is.null(data$shape)){
}else if((data %>% subset(, c(response, shape)) %>% unique() %>% dim)[1] == length(unique(data$response))){
params$aes_not_to_group <- c(params$aes_not_to_group, "shape")
}else{
params$aes_to_group <- c(params$aes_to_group, "shape")
}
if(length(params$aes_not_to_group) == 0){
warning("In xgx_stat_smooth: \n No aesthetics defined to differentiate response groups.\n Suggest to add color = response, linetype = response, or similar to aes() mapping.",
call. = FALSE)
}else{
message(paste0("In xgx_stat_smooth: \n The following aesthetics are identical to response: ",
paste0(params$aes_not_to_group, collapse = ", "),
"\n These will be used for differentiating response groups in the resulting plot."))
}
if(length(params$aes_to_group) > 0){
message(paste0("In xgx_stat_smooth: \n The following aesthetics are different from response: ",
paste0(params$aes_to_group, collapse = ", "),
"\n These will be used to divide the data into different groups before calculating summary statistics on the response."))
}
params
},
setup_data = function(self, data, params, scales, xseq = NULL, method.args = list(), n_boot = 200) {
data <- flip_data(data, params$flipped_aes)
list2env(params, envir = environment())
percentile_value <- level + (1 - level) / 2
if(!is.factor(data$response)){
data$response <- factor(data$response)
message(paste0("In xgx_stat_smooth: \n response should be a factor, converting to factor using as.factor(response) with default levels"))
}
if (length(unique(data$x)) < 2) {
# Not enough data to perform fit
      return(data.frame())
}
if (is.null(data$weight)) data$weight <- 1
if (is.null(xseq)) {
if (is.integer(data$x)) {
if (fullrange) {
xseq <- scales$x$dimension()
} else {
xseq <- sort(unique(data$x))
}
} else {
if (fullrange) {
range <- scales$x$dimension()
} else {
range <- range(data$x, na.rm = TRUE)
}
xseq <- seq(range[1], range[2], length.out = n)
}
}
if (is.character(method)) {
if (identical(method, "polr")) {
method <- MASS::polr
} else {
method <- match.fun(method)
}
}
# base.args <- list(quote(formula), data = quote(data), weights = quote(weight))
# Define new grouping variable for which to split the data computation
# (excludes aesthetics that are identical to the Response variable)
if(is.null(params$aes_to_group)){
data <- data %>% dplyr::mutate(group2 = 1)
}else{
groups <- unique(data %>% subset(, params$aes_to_group))
groups <- groups %>%
dplyr::mutate(group2 = 1:dim(groups)[1])
data <- data %>% merge(groups)
}
prediction <- list()
for(igroup in unique(data$group2)){
idata <- data %>% subset(group2 == igroup)
idata <- idata %>%
mutate(response_orig = response) %>%
mutate(response = paste0("X", as.numeric(response)) %>%
factor())
base.args <- list(quote(formula), data = quote(idata), weights = quote(weight))
model <- do.call(method, c(base.args, method.args))
predictdf_polr_env$data <- idata
predictdf_polr_env$method <- method
predictdf_polr_env$formula <- formula
predictdf_polr_env$method.args <- method.args
predictdf_polr_env$weight <- quote(weight)
predictdf_polr_env$n_boot <- n_boot
iprediction <- predictdf.polr(model, xseq, se, level)
iprediction <- merge(iprediction, idata %>% subset(,-c(x)), by = "response")
iprediction <- iprediction %>%
mutate(response = response_orig,
response_orig = NULL)
prediction[[igroup]] <- iprediction
}
prediction <- dplyr::bind_rows(prediction)
prediction <- flip_data(prediction, params$flipped_aes)
return(prediction)
},
compute_layer = function(self, data, params, layout) {
data
},
compute_panel = function(data, params) {
data
}
)
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/xgx_stat_smooth.R |
#' Summarize Covariate information in a dataset
#'
#' \code{xgx_summarize_covariates}
#'
#' @param data, the dataset to check. Must contain a USUBJID or ID column
#' for subject id
#' @param covariates, the column names of covariates, to explore
#' @param n_cts, the number of unique values for a covariate to be treated as
#' continuous, default is 8
#'
#' @return list
#'
#' @examples
#' data <- data.frame(ID = 1:10, WT0 = rnorm(10, 70, 10),
#' SEX = round(runif(10)))
#' x <- xgx_summarize_covariates(data, c("WT0", "SEX"))
#'
#' @importFrom dplyr filter
#' @importFrom tibble tibble
#' @importFrom dplyr group_by
#' @importFrom dplyr count
#' @importFrom dplyr ungroup
#' @importFrom dplyr arrange
#' @importFrom stats quantile
#' @importFrom stats median
#' @importFrom dplyr bind_rows
#' @importFrom dplyr desc
#' @importFrom magrittr "%>%"
#' @export
xgx_summarize_covariates <- function(data, covariates = NULL, n_cts = 8) {
# avoid CRAN note
ID <- USUBJID <- n <- NULL
if ("USUBJID" %in% names(data)) {
data1 <- dplyr::filter(data, !duplicated(USUBJID))
} else if ("ID" %in% names(data)) {
data1 <- dplyr::filter(data, !duplicated(ID))
} else {
stop("data column USUBJID or ID is required")
}
icat <- 0
icts <- 0
catlist <- list()
ctslist <- list()
for (covk in covariates) {
x <- data1[[covk]]
xdistinct <- length(unique(x))
xmissing <- sum(is.na(x))
if (xdistinct >= n_cts) {
icts <- icts + 1
ctslist[[icts]] <- tibble::tibble(
Covariate = covk,
Nmissing = xmissing,
min = min(x, na.rm = TRUE),
`25th` = stats::quantile(x, 0.25, na.rm = TRUE),
median = stats::median(x, na.rm = TRUE),
`75th` = stats::quantile(x, 0.75, na.rm = TRUE),
max = max(x, na.rm = TRUE))
} else {
summ <- tibble::tibble(var = x) %>%
dplyr::group_by(var) %>%
dplyr::count() %>%
dplyr::ungroup() %>%
dplyr::arrange(dplyr::desc(n))
icat <- icat + 1
catlist[[icat]] <- tibble::tibble(
Covariate = covk,
Nmissing = xmissing,
Ndistinct = xdistinct,
`Value (Count)` = paste0(summ$var, " (", summ$n, ")", collapse = ", "))
}
}
# create summaries
cat_table <- dplyr::bind_rows(catlist)
cts_table <- dplyr::bind_rows(ctslist)
return(list(cts_covariates = cts_table,
cat_covariates = cat_table))
}
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/xgx_summarize_covariates.R |
#' Check data for various issues
#'
#' Calls \code{\link{xgx_check_data}}
#'
#' @return data.frame
#'
#' @examples
#' covariates <- c("WEIGHTB", "SEX")
#' check <- xgx_summarize_data(mad_missing_duplicates, covariates)
#' @inheritParams xgx_check_data
#' @export
xgx_summarize_data <- function(data, covariates = NULL) {
xgx_check_data(data, covariates)
}
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/xgx_summarize_data.R |
#' Calls the standard theme for xGx graphics
#'
#' @return xgx ggplot2 compatible theme
#'
#' @examples
#' conc <- 10^(seq(-3, 3, by = 0.1))
#' ec50 <- 1
#' data <- data.frame(concentration = conc,
#' bound_receptor = 1 * conc / (conc + ec50))
#' ggplot2::ggplot(data, ggplot2::aes(y = concentration, x = bound_receptor)) +
#' ggplot2::geom_point() +
#' ggplot2::geom_line() +
#' xgx_scale_y_log10() +
#' xgx_scale_x_reverselog10() +
#' xgx_theme()
#'
#' @importFrom ggplot2 theme_bw
#' @importFrom ggplot2 theme
#' @importFrom ggplot2 element_line
#' @export
xgx_theme <- function() {
minor_color <- "grey83"
major_color <- "grey83"
ggplot2::theme_bw() +
ggplot2::theme(
panel.grid.minor.x = ggplot2::element_line(color = minor_color),
panel.grid.minor.y = ggplot2::element_line(color = minor_color),
panel.grid.major.x = ggplot2::element_line(color = major_color),
panel.grid.major.y = ggplot2::element_line(color = major_color))
}
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/xgx_theme.R |
#' Sets the standard theme for xGx graphics
#'
#' xgx_theme_set
#'
#' @return xgx ggplot2 compatible theme
#'
#' @examples
#' conc <- 10^(seq(-3, 3, by = 0.1))
#' ec50 <- 1
#' data <- data.frame(concentration = conc,
#' bound_receptor = 1 * conc / (conc + ec50))
#' xgx_theme_set()
#' ggplot2::ggplot(data, ggplot2::aes(y = concentration, x = bound_receptor)) +
#' ggplot2::geom_point() +
#' ggplot2::geom_line() +
#' xgx_scale_y_log10() +
#' xgx_scale_x_reverselog10()
#'
#' @importFrom ggplot2 theme_set
#' @export
xgx_theme_set <- function() {
ggplot2::theme_set(xgx_theme())
}
| /scratch/gouwar.j/cran-all/cranData/xgxr/R/xgx_theme_set.R |
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 7
)
## ---- warning=FALSE, message=FALSE--------------------------------------------
library(xgxr)
library(ggplot2)
library(dplyr)
library(tidyr)
# flag for labeling figures as draft
status <- "DRAFT"
# ggplot settings
xgx_theme_set()
## ---- warning=FALSE, message=FALSE--------------------------------------------
pkpd_data <- case1_pkpd %>%
arrange(DOSE) %>%
subset(,-IPRED) %>%
mutate(TRTACT_low2high = factor(TRTACT, levels = unique(TRTACT)),
TRTACT_high2low = factor(TRTACT, levels = rev(unique(TRTACT))),
DAY_label = paste("Day", PROFDAY),
DAY_label = ifelse(DAY_label == "Day 0","Baseline",DAY_label))
LOQ = 0.05 #ng/ml
dose_max = as.numeric(max(pkpd_data$DOSE))
pk_data <- pkpd_data %>%
filter(CMT == 2) %>%
mutate(LIDVNORM = LIDV / as.numeric(DOSE))
pk_data_cycle1 <- pk_data %>%
filter(CYCLE == 1)
pd_data <- pkpd_data %>%
filter(CMT == 3)
pd_data_baseline_day85 <- pkpd_data %>%
filter(CMT == 3,
DAY_label %in% c("Baseline", "Day 85"))
pk_vs_pd_data <- pkpd_data %>%
filter(!is.na(LIDV)) %>%
subset(,-c(EVENTU,NAME)) %>%
spread(CMT,LIDV) %>%
rename(Concentration = `2`, Response = `3`)
NCA <- pk_data_cycle1 %>%
group_by(ID, DOSE) %>%
filter(!is.na(LIDV)) %>%
summarize(AUC_last = caTools::trapz(TIME, LIDV),
Cmax = max(LIDV)) %>%
tidyr::gather(PARAM,VALUE,-c(ID, DOSE)) %>%
ungroup() %>%
mutate(VALUE_NORM = VALUE / DOSE)
AUC_last <- NCA %>%
filter(PARAM == "AUC_last") %>%
rename(AUC_last = VALUE) %>%
subset(,-c(DOSE,PARAM,VALUE_NORM))
pk_vs_pd_data_day85 <- pk_vs_pd_data %>%
filter(DAY_label == "Day 85",
!is.na(Concentration),
!is.na(Response)) %>%
left_join(AUC_last)
time_units_dataset <- "hours"
time_units_plot <- "days"
trtact_label <- "Dose"
dose_label <- "Dose (mg)"
conc_label <- "Concentration (ng/ml)"
auc_label <- "AUCtau (h.(ng/ml))"
concnorm_label <- "Normalized Concentration (ng/ml)/mg"
sex_label <- "Sex"
w100_label <- "WEIGHTB>100"
pd_label <- "FEV1 (mL)"
cens_label <- "Censored"
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = pk_data_cycle1, aes(x = NOMTIME,
y = LIDV,
group = DOSE,
color = TRTACT_high2low)) +
xgx_geom_ci(conf_level = 0.95) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
labs(y = conc_label, color = trtact_label) +
xgx_annotate_status(status)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = pk_data_cycle1, aes(x = TIME, y = LIDV)) +
geom_line(aes(group = ID), color = "grey50", size = 1, alpha = 0.3) +
geom_point(aes(color = factor(CENS), shape = factor(CENS))) +
scale_shape_manual(values = c(1, 8)) +
scale_color_manual(values = c("grey50", "red")) +
xgx_geom_ci(aes(x = NOMTIME, color = NULL, group = NULL, shape = NULL), conf_level = 0.95) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
labs(y = conc_label, color = trtact_label) +
theme(legend.position = "none") +
facet_grid(.~TRTACT_low2high) +
xgx_annotate_status(status)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = pk_data_cycle1,
aes(x = NOMTIME,
y = LIDVNORM,
group = DOSE,
color = TRTACT_high2low)) +
xgx_geom_ci(conf_level = 0.95, alpha = 0.5, position = position_dodge(1)) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
labs(y = concnorm_label, color = trtact_label) +
xgx_annotate_status(status)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = NCA, aes(x = DOSE, y = VALUE_NORM)) +
geom_boxplot(aes(group = DOSE)) +
geom_smooth(method = "lm", color = "black") +
facet_wrap(~PARAM, scales = "free_y") +
labs(x = dose_label) +
theme(axis.title.y = element_blank()) +
xgx_annotate_status(status)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = NCA[!NCA$DOSE == 3 & !NCA$DOSE == 10 , ],
aes(x = DOSE, y = VALUE_NORM)) +
geom_boxplot(aes(group = DOSE)) +
geom_smooth(method = "lm", color = "black") +
facet_wrap(~PARAM, scales = "free_y") +
labs(x = dose_label) +
theme(axis.title.y = element_blank()) +
xgx_annotate_status(status)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = pk_data_cycle1, aes(x = NOMTIME,
y = LIDV,
group = WEIGHTB > 100,
color = WEIGHTB > 100)) +
xgx_geom_ci(conf_level = 0.95) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
facet_grid(.~DOSE) +
labs(y = conc_label, color = w100_label) +
xgx_annotate_status(status)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = pd_data, aes(x = NOMTIME,
y = LIDV,
group = DOSE,
color = TRTACT_high2low)) +
xgx_geom_ci(conf_level = 0.95) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
labs(y = pd_label, color = trtact_label) +
xgx_annotate_status(status)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = pd_data, aes(x = NOMTIME, y = LIDV, group = ID)) +
geom_line(alpha = 0.5) +
geom_point(alpha = 0.5) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
facet_grid(~TRTACT_low2high) +
labs(y = pd_label, color = trtact_label) +
xgx_annotate_status(status)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = pd_data_baseline_day85, aes(x = DOSE,
y = LIDV,
group = DOSE)) +
xgx_geom_ci(conf_level = 0.95) +
facet_grid(~DAY_label) +
labs(x = dose_label, y = pd_label, color = trtact_label) +
xgx_annotate_status(status)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = pd_data, aes(x = DOSE, y = LIDV, group = DOSE)) +
xgx_geom_ci(conf_level = 0.95) +
facet_grid(~DAY_label) +
labs(x = dose_label, y = pd_label, color = trtact_label) +
xgx_annotate_status(status)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = pd_data_baseline_day85, aes(x = DOSE,
y = LIDV,
group = WEIGHTB > 100,
color = WEIGHTB > 100)) +
xgx_geom_ci(conf_level = .95) +
facet_grid(~DAY_label) +
labs(x = dose_label, y = pd_label, color = w100_label) +
xgx_annotate_status(status)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
g = ggplot(data = pk_vs_pd_data_day85, aes(x = Concentration, y = Response)) +
geom_point(aes(color = TRTACT_high2low, shape = factor(CENS))) +
geom_smooth(color="black",shape=NULL) +
xgx_scale_x_log10() +
labs(x = conc_label, y = pd_label, color = trtact_label, shape = cens_label) +
xgx_annotate_status(status)
print(g)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
gAUC = g +
aes(x = AUC_last) +
xlab(auc_label)
print(gAUC)
## -----------------------------------------------------------------------------
sessionInfo()
| /scratch/gouwar.j/cran-all/cranData/xgxr/inst/doc/sad_pkpd.R |
---
title: "PKPD Single Ascending Dose example"
author: "Fariba Khanshan, Andrew Stein, Alison Margolskee"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
self_contained: yes
vignette: >
%\VignetteIndexEntry{PKPD Single Ascending Dose example}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 7
)
```
## Overview
This document contains exploratory plots for single ascending dose PK and PD
data as well as the R code that generates these graphs. The plots presented
here are based on simulated data.
## Setup
```{r, warning=FALSE, message=FALSE}
library(xgxr)
library(ggplot2)
library(dplyr)
library(tidyr)
# flag for labeling figures as draft
status <- "DRAFT"
# ggplot settings
xgx_theme_set()
```
## Load Dataset
```{r, warning=FALSE, message=FALSE}
pkpd_data <- case1_pkpd %>%
arrange(DOSE) %>%
subset(,-IPRED) %>%
mutate(TRTACT_low2high = factor(TRTACT, levels = unique(TRTACT)),
TRTACT_high2low = factor(TRTACT, levels = rev(unique(TRTACT))),
DAY_label = paste("Day", PROFDAY),
DAY_label = ifelse(DAY_label == "Day 0","Baseline",DAY_label))
LOQ = 0.05 #ng/ml
dose_max = as.numeric(max(pkpd_data$DOSE))
pk_data <- pkpd_data %>%
filter(CMT == 2) %>%
mutate(LIDVNORM = LIDV / as.numeric(DOSE))
pk_data_cycle1 <- pk_data %>%
filter(CYCLE == 1)
pd_data <- pkpd_data %>%
filter(CMT == 3)
pd_data_baseline_day85 <- pkpd_data %>%
filter(CMT == 3,
DAY_label %in% c("Baseline", "Day 85"))
pk_vs_pd_data <- pkpd_data %>%
filter(!is.na(LIDV)) %>%
subset(,-c(EVENTU,NAME)) %>%
spread(CMT,LIDV) %>%
rename(Concentration = `2`, Response = `3`)
NCA <- pk_data_cycle1 %>%
group_by(ID, DOSE) %>%
filter(!is.na(LIDV)) %>%
summarize(AUC_last = caTools::trapz(TIME, LIDV),
Cmax = max(LIDV)) %>%
tidyr::gather(PARAM,VALUE,-c(ID, DOSE)) %>%
ungroup() %>%
mutate(VALUE_NORM = VALUE / DOSE)
AUC_last <- NCA %>%
filter(PARAM == "AUC_last") %>%
rename(AUC_last = VALUE) %>%
subset(,-c(DOSE,PARAM,VALUE_NORM))
pk_vs_pd_data_day85 <- pk_vs_pd_data %>%
filter(DAY_label == "Day 85",
!is.na(Concentration),
!is.na(Response)) %>%
left_join(AUC_last)
time_units_dataset <- "hours"
time_units_plot <- "days"
trtact_label <- "Dose"
dose_label <- "Dose (mg)"
conc_label <- "Concentration (ng/ml)"
auc_label <- "AUCtau (h.(ng/ml))"
concnorm_label <- "Normalized Concentration (ng/ml)/mg"
sex_label <- "Sex"
w100_label <- "WEIGHTB>100"
pd_label <- "FEV1 (mL)"
cens_label <- "Censored"
```
## Provide an overview of the data
Summarize the data in a way that makes it easy to visualize the general trend of PK
over time and between doses. Using summary statistics can be helpful, e.g.
Mean +/- SE, or median, 5th & 95th percentiles. Consider either coloring by
dose or faceting by dose. Depending on the amount of data one graph may be
better than the other.
When looking at summaries of PK over time, there are several things to observe.
Note the number of doses and number of time points or sampling schedule.
Observe the overall shape of the average profiles. What is the average
Cmax per dose? Tmax? Does the elimination phase appear to be parallel across
the different doses? Is there separation between the profiles for different
doses? Can you make a visual estimate of the number of compartments that
would be needed in a PK model?
### Concentration over time, colored by Dose, mean +/- 95% CI
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pk_data_cycle1, aes(x = NOMTIME,
y = LIDV,
group = DOSE,
color = TRTACT_high2low)) +
xgx_geom_ci(conf_level = 0.95) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
labs(y = conc_label, color = trtact_label) +
xgx_annotate_status(status)
```
### Concentration over time, faceted by Dose, mean +/- 95% CI, overlaid on gray spaghetti plots
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pk_data_cycle1, aes(x = TIME, y = LIDV)) +
geom_line(aes(group = ID), color = "grey50", size = 1, alpha = 0.3) +
geom_point(aes(color = factor(CENS), shape = factor(CENS))) +
scale_shape_manual(values = c(1, 8)) +
scale_color_manual(values = c("grey50", "red")) +
xgx_geom_ci(aes(x = NOMTIME, color = NULL, group = NULL, shape = NULL), conf_level = 0.95) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
labs(y = conc_label, color = trtact_label) +
theme(legend.position = "none") +
facet_grid(.~TRTACT_low2high) +
xgx_annotate_status(status)
```
## Assess the dose linearity of exposure
### Dose Normalized Concentration over time, colored by Dose, mean +/- 95% CI
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pk_data_cycle1,
aes(x = NOMTIME,
y = LIDVNORM,
group = DOSE,
color = TRTACT_high2low)) +
xgx_geom_ci(conf_level = 0.95, alpha = 0.5, position = position_dodge(1)) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
labs(y = concnorm_label, color = trtact_label) +
xgx_annotate_status(status)
```
### NCA of dose normalized AUC and Cmax vs Dose
Observe the dose normalized AUC and Cmax over different doses. Does the
relationship appear to be constant across doses or do some doses stand
out from the rest? Can you think of reasons why some would stand out? For
example, the lowest dose may have dose normalized AUC much higher than the
rest, could this be due to BLQ observations? If the highest doses have dose
normalized AUC much higher than the others, could this be due to nonlinear
clearance, with clearance saturating at higher doses? If the highest doses
have dose normalized AUC much lower than the others, could there be saturation
of bioavailability, reaching the maximum absorbable dose?
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = NCA, aes(x = DOSE, y = VALUE_NORM)) +
geom_boxplot(aes(group = DOSE)) +
geom_smooth(method = "lm", color = "black") +
facet_wrap(~PARAM, scales = "free_y") +
labs(x = dose_label) +
theme(axis.title.y = element_blank()) +
xgx_annotate_status(status)
```

Dropping the two lowest doses, where BLQ observations can bias the dose
normalized exposure, makes it easier to judge dose linearity across the
remaining doses.
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = NCA[!(NCA$DOSE %in% c(3, 10)), ],
aes(x = DOSE, y = VALUE_NORM)) +
geom_boxplot(aes(group = DOSE)) +
geom_smooth(method = "lm", color = "black") +
facet_wrap(~PARAM, scales = "free_y") +
labs(x = dose_label) +
theme(axis.title.y = element_blank()) +
xgx_annotate_status(status)
```
## Explore covariate effects on PK
### Concentration over time, colored by categorical covariate, mean +/- 95% CI
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pk_data_cycle1, aes(x = NOMTIME,
y = LIDV,
group = WEIGHTB > 100,
color = WEIGHTB > 100)) +
xgx_geom_ci(conf_level = 0.95) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
facet_grid(.~DOSE) +
labs(y = conc_label, color = w100_label) +
xgx_annotate_status(status)
```
## PD marker over time, colored by Dose, mean +/- 95% CI by nominal time
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pd_data, aes(x = NOMTIME,
y = LIDV,
group = DOSE,
color = TRTACT_high2low)) +
xgx_geom_ci(conf_level = 0.95) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
labs(y = pd_label, color = trtact_label) +
xgx_annotate_status(status)
```
## PD marker over time, faceted by Dose, dots & lines grouped by individuals
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pd_data, aes(x = NOMTIME, y = LIDV, group = ID)) +
geom_line(alpha = 0.5) +
geom_point(alpha = 0.5) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
facet_grid(~TRTACT_low2high) +
labs(y = pd_label, color = trtact_label) +
xgx_annotate_status(status)
```
## Explore Dose-Response Relationship
One of the key questions when looking at PD markers is to determine if there
is a dose-response relationship, and if there is, what dose is necessary to
achieve the desired effect? Simple dose-response plots can give insight into
these questions.
### PD marker by Dose, for endpoint of interest, mean (95% CI) by Dose
Plot PD marker against dose. Using summary statistics can be helpful, e.g.
Mean +/- SE, or median, 5th & 95th percentiles.
Here are some questions to ask yourself when looking at Dose-Response plots:
Do you see any relationship? Does response increase (decrease) with
increasing dose? Are you able to detect a plateau or Emax (Emin) on the
effect? If so, around what dose does this occur?
Warning: Even if you don’t see an Emax, that doesn’t mean there isn’t one.
Be very careful about using linear models for Dose-Response relationships.
Extrapolating a linear fit outside of the observed dose range can suggest that
a higher dose is always better (even if it isn’t).
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pd_data_baseline_day85, aes(x = DOSE,
y = LIDV,
group = DOSE)) +
xgx_geom_ci(conf_level = 0.95) +
facet_grid(~DAY_label) +
labs(x = dose_label, y = pd_label, color = trtact_label) +
xgx_annotate_status(status)
```
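For a distribution-free view of the same display, `xgx_stat_pi` from this
package plots the median with a percentile interval. This is a sketch;
`percent = 0.9` spans the 5th to 95th percentiles.
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
# median and 90% percentile interval (5th to 95th percentiles) by dose
ggplot(data = pd_data_baseline_day85, aes(x = DOSE,
                                          y = LIDV,
                                          group = DOSE)) +
  xgx_stat_pi(percent = 0.9) +
  facet_grid(~DAY_label) +
  labs(x = dose_label, y = pd_label, color = trtact_label) +
  xgx_annotate_status(status)
```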
### PD marker by Dose, faceted by visit, mean (95% CI) by Dose
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pd_data, aes(x = DOSE, y = LIDV, group = DOSE)) +
xgx_geom_ci(conf_level = 0.95) +
facet_grid(~DAY_label) +
labs(x = dose_label, y = pd_label, color = trtact_label) +
xgx_annotate_status(status)
```
### Explore covariate effects on Dose-Response relationship
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pd_data_baseline_day85, aes(x = DOSE,
y = LIDV,
group = WEIGHTB > 100,
color = WEIGHTB > 100)) +
xgx_geom_ci(conf_level = .95) +
facet_grid(~DAY_label) +
labs(x = dose_label, y = pd_label, color = w100_label) +
xgx_annotate_status(status)
```
## Explore Exposure-Response Relationship
Plot PD marker against concentration. Do you see any relationship? Does
response increase (decrease) with increasing dose? Are you able to detect
a plateau or Emax (Emin) on the effect?
Warning: Even if you don’t see an Emax, that doesn’t mean there isn’t one.
Be very careful about using linear models for Dose-Response or
Exposure-Response relationships. Extrapolating a linear fit outside of the
observed dose range can suggest that a higher dose is always better (even if
it isn’t).
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
g <- ggplot(data = pk_vs_pd_data_day85, aes(x = Concentration, y = Response)) +
  geom_point(aes(color = TRTACT_high2low, shape = factor(CENS))) +
  geom_smooth(color = "black") +
xgx_scale_x_log10() +
labs(x = conc_label, y = pd_label, color = trtact_label, shape = cens_label) +
xgx_annotate_status(status)
print(g)
```
Plotting AUC vs response instead of concentration vs response may make more
sense in some situations. For example, when there is a large delay between
PK and PD it would be difficult to relate the time-varying concentration with
the response. If rich sampling is only done at a particular point in the
study, e.g. at steady state, then the AUC calculated on the rich profile
could be used as the exposure variable for a number of PD visits. If PK
samples are scarce, average Cmin could also be used as the exposure metric.
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
gAUC <- g +
aes(x = AUC_last) +
xlab(auc_label)
print(gAUC)
```
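The same approach extends to other exposure metrics. As a sketch, the NCA
above also contains Cmax, which can be joined in the same way as AUC_last and
swapped onto the x axis (an analogous join could bring in an average Cmin, if
trough samples were available):
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
# sketch: use Cmax from the NCA above as the exposure metric
Cmax <- NCA %>%
  filter(PARAM == "Cmax") %>%
  rename(Cmax = VALUE) %>%
  subset(,-c(DOSE, PARAM, VALUE_NORM))
# %+% replaces the data of the existing ggplot object with the joined data
gCmax <- g %+% left_join(pk_vs_pd_data_day85, Cmax, by = "ID") +
  aes(x = Cmax) +
  xlab("Cmax (ug/ml)")
print(gCmax)
```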
## R Session Info
```{r}
sessionInfo()
```
| /scratch/gouwar.j/cran-all/cranData/xgxr/inst/doc/sad_pkpd.Rmd |
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 7
)
## ----data_assignments, echo=TRUE, warning=FALSE, message=FALSE----------------
library(xgxr)
library(ggplot2)
library(dplyr)
# setting ggplot theme
xgx_theme_set()
## ---- echo=TRUE, warning=FALSE, message=FALSE---------------------------------
# units of dataset
time_units_dataset <- "hours"
time_units_plot <- "days"
dose_label <- "Dose (mg)"
conc_label <- "Concentration (ug/ml)"
concnorm_label <- "Normalized Concentration (ug/ml)/mg"
# covariates in the dataset
covariates <- c("WT")
# load dataset
data <- nlmixr_theo_sd
# make sure that the necessary columns are assigned
# five columns are required: TIME, LIDV, CMT, DOSE, DOSEREG
data <- data %>%
mutate(ID = ID) %>% #ID column
group_by(ID) %>%
mutate(TIME = TIME, #TIME column name
NOMTIME = as.numeric(as.character(cut(TIME,
breaks = c(-Inf, 0.1, 0.7, 1.5, 4, 8, 10.5, 15, Inf),
labels = c( 0, 0.5, 1, 2, 7, 9, 12, 24)))),
EVID = EVID, # EVENT ID >=1 is dose, 0 otherwise
CYCLE = 1, # CYCLE of PK data
LIDV = DV, # DEPENDENT VARIABLE column name
CENS = 0, # CENSORING column name
CMT = CMT, # COMPARTMENT column here (e.g. CMT or YTYPE)
DOSE = signif(max(AMT) * WT, 2), # DOSE column here (numeric value)
# convert mg/kg (in dataset) to mg
DOSEREG = DOSE) %>% # DOSE REGIMEN column here
ungroup()
# convert DOSEREG to factor for proper ordering in the plotting
# add LIDVNORM dose normalized concentration for plotting
data <- data %>%
arrange(DOSE) %>%
mutate(LIDVNORM = LIDV / DOSE,
DOSEREG = factor(DOSEREG, levels = unique(DOSEREG)),
DOSEREG_REV = factor(DOSEREG, levels = rev(unique(DOSEREG))))
# define order of treatment factor
# for plotting the PK data
data_pk <- filter(data, CMT == 2, TIME > 0)
# NCA
NCA <- data %>%
filter(CMT == 2, NOMTIME > 0, NOMTIME <= 24) %>%
group_by(ID) %>%
summarize(AUC_0_24 = caTools::trapz(TIME, LIDV),
Cmax_0_24 = max(LIDV),
Ctrough_0_24 = LIDV[length(LIDV)],
DOSE = DOSE[1],
WT = WT[1]) %>%
tidyr::gather(PARAM, VALUE, -c(ID, DOSE, WT)) %>%
mutate(VALUE_NORM = VALUE / DOSE) %>%
ungroup()
## -----------------------------------------------------------------------------
check <- xgx_check_data(data,covariates)
knitr::kable(check$summary)
knitr::kable(head(check$data_subset))
knitr::kable(check$cts_covariates)
knitr::kable(check$cat_covariates)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
glin <- ggplot(data = data_pk, aes(x = NOMTIME,
y = LIDV,
group = DOSE,
color = DOSEREG_REV)) +
stat_summary() +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
labs(y = conc_label, color = "Dose")
glog <- glin + scale_y_log10()
gridExtra::grid.arrange(glin, glog, nrow = 1)
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=4------------------
if (exists("data_pk_rich")) {
ggplot(data_pk_rich, aes(x = PROFTIME,
y = LIDV,
group = interaction(CYCLE,DOSE),
color = DOSEREG_REV)) +
facet_grid(~DAY_label, scales = "free_x") +
xgx_stat_ci() +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose")
}
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = data_pk, aes(x = TIME,
y = LIDV,
group = interaction(ID, CYCLE))) +
geom_line(size = 1, color = rgb(0.5, 0.5, 0.5), alpha = 0.3) +
geom_point(aes(color = factor(CENS), shape = factor(CENS)),
size = 2, alpha = 0.3) +
xgx_stat_ci(aes(x = NOMTIME, group = NULL, color = NULL)) +
facet_grid(.~DOSEREG) +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
ylab(conc_label) +
theme(legend.position = "none") +
scale_shape_manual(values = c(1, 8)) +
scale_color_manual(values = c("grey50", "red"))
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=5-------------------
ggplot(data = data_pk, aes(x = TIME,
y = LIDV,
group = interaction(ID, CYCLE),
color = factor(DOSEREG_REV),
shape = factor(CENS))) +
geom_line(size = 1, alpha = 0.5) +
geom_point() +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose", shape = "Censoring")
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=4-------------------
if (exists("data_pk_rich")) {
ggplot(data = data_pk_rich, aes(x = TIME,
y = LIDV,
group = interaction(ID, CYCLE),
color = DOSEREG_REV,
shape = factor(CENS))) +
geom_line(size = 1, alpha = 0.5) +
geom_point() +
facet_grid(~DAY_label, scales = "free_x") +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose", shape = "Censoring")
}
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = data_pk, aes(x = TIME,
y = LIDV,
group = interaction(ID, CYCLE),
color = factor(CENS),
shape = factor(CENS))) +
geom_line(size = 1, alpha = 0.5) +
geom_point() +
facet_grid(.~DOSEREG) +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
ylab(conc_label) +
scale_shape_manual(values = c(1, 8)) +
scale_color_manual(values = c("grey50", "red")) +
theme(legend.position = "none")
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=3-------------------
ggplot(data = data_pk, aes(x = NOMTIME,
y = LIDVNORM,
group = DOSEREG_REV,
color = DOSEREG_REV)) +
xgx_stat_ci() +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose")
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=4-------------------
if (exists("data_pk_rich")) {
ggplot(data_pk_rich, aes(x = NOMTIME,
y = LIDVNORM,
group = interaction(DOSE, CYCLE),
color = DOSEREG_REV)) +
xgx_stat_ci() +
facet_grid(~DAY_label,scales = "free_x") +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose")
}
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=7-------------------
ggplot(data = data_pk, aes(x = TIME, y = LIDV)) +
geom_line() +
geom_point(aes(color = factor(CENS), shape = factor(CENS))) +
facet_wrap(~ID + DOSEREG) +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
ylab(conc_label) +
theme(legend.position = "none") +
scale_shape_manual(values = c(1, 8)) +
scale_color_manual(values = c("black", "red"))
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=4-------------------
if (!exists("NCA")) {
warning("For PK data exploration, it is highly recommended to perform an NCA")
} else {
ggplot(data = NCA, aes(x = DOSE, y = VALUE_NORM)) +
geom_boxplot(aes(group = DOSE)) +
geom_smooth(method = "loess", color = "black") +
facet_wrap(~PARAM, scales = "free_y") +
xgx_scale_x_log10(breaks = unique(NCA$DOSE)) +
xgx_scale_y_log10() +
labs(x = dose_label, y = concnorm_label)
}
## ---- echo=TRUE, warning=FALSE, message=FALSE, fig.height=5-------------------
if (!exists("NCA")) {
warning("For covariate exploration, it is highly recommended to perform an NCA")
} else {
NCA_cts <- NCA[, c("PARAM", "VALUE", check$cts_covariates$Covariate)] %>%
tidyr::gather(COV, COV_VALUE, -c(PARAM, VALUE))
NCA_cat <- NCA[, c("PARAM", "VALUE", check$cat_covariates$Covariate)] %>%
tidyr::gather(COV, COV_VALUE, -c(PARAM, VALUE))
if (nrow(check$cts_covariates) >= 1) {
gg <- ggplot(data = NCA_cts, aes(x = COV_VALUE, y = VALUE)) +
geom_point() +
geom_smooth(method = "loess", color = "black") +
facet_grid(PARAM~COV,switch = "y", scales = "free_y") +
xgx_scale_x_log10() +
xgx_scale_y_log10() +
labs(x = "Covariate Value", y = "NCA Parameter Value")
print(gg)
}
if (nrow(check$cat_covariates) >= 1) {
gg <- ggplot(data = NCA_cat, aes(x = COV_VALUE, y = VALUE)) +
geom_boxplot() +
facet_grid(PARAM~COV, switch = "y", scales = "free_y") +
xgx_scale_y_log10() +
labs(x = "Covariate Value", y = "NCA Parameter Value")
print(gg)
}
}
| /scratch/gouwar.j/cran-all/cranData/xgxr/inst/doc/theoph.R |
---
title: "PK Exploration with nlmixr dataset"
author: "Andrew Stein, Fariba Khanshan, Alison Margolskee"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
self_contained: yes
vignette: >
%\VignetteIndexEntry{PK Exploration with nlmixr dataset for theophylline}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 7
)
```
## Overview
This document provides a template for exploring Single or Multiple Ascending
Dose PK data.
## Load Packages
```{r data_assignments, echo=TRUE, warning=FALSE, message=FALSE}
library(xgxr)
library(ggplot2)
library(dplyr)
# setting ggplot theme
xgx_theme_set()
```
## Load the dataset and assign columns. Take subsets of data that are needed
The dose in mg is obtained by multiplying the mg/kg dose in the dataset by body weight.
```{r, echo=TRUE, warning=FALSE, message=FALSE}
# units of dataset
time_units_dataset <- "hours"
time_units_plot <- "days"
dose_label <- "Dose (mg)"
conc_label <- "Concentration (ug/ml)"
concnorm_label <- "Normalized Concentration (ug/ml)/mg"
# covariates in the dataset
covariates <- c("WT")
# load dataset
data <- nlmixr_theo_sd
# make sure that the necessary columns are assigned
# five columns are required: TIME, LIDV, CMT, DOSE, DOSEREG
data <- data %>%
mutate(ID = ID) %>% #ID column
group_by(ID) %>%
mutate(TIME = TIME, #TIME column name
NOMTIME = as.numeric(as.character(cut(TIME,
breaks = c(-Inf, 0.1, 0.7, 1.5, 4, 8, 10.5, 15, Inf),
labels = c( 0, 0.5, 1, 2, 7, 9, 12, 24)))),
EVID = EVID, # EVENT ID >=1 is dose, 0 otherwise
CYCLE = 1, # CYCLE of PK data
LIDV = DV, # DEPENDENT VARIABLE column name
CENS = 0, # CENSORING column name
CMT = CMT, # COMPARTMENT column here (e.g. CMT or YTYPE)
DOSE = signif(max(AMT) * WT, 2), # DOSE column here (numeric value)
# convert mg/kg (in dataset) to mg
DOSEREG = DOSE) %>% # DOSE REGIMEN column here
ungroup()
# convert DOSEREG to factor for proper ordering in the plotting
# add LIDVNORM dose normalized concentration for plotting
data <- data %>%
arrange(DOSE) %>%
mutate(LIDVNORM = LIDV / DOSE,
DOSEREG = factor(DOSEREG, levels = unique(DOSEREG)),
DOSEREG_REV = factor(DOSEREG, levels = rev(unique(DOSEREG))))
# define order of treatment factor
# for plotting the PK data
data_pk <- filter(data, CMT == 2, TIME > 0)
# NCA
NCA <- data %>%
filter(CMT == 2, NOMTIME > 0, NOMTIME <= 24) %>%
group_by(ID) %>%
summarize(AUC_0_24 = caTools::trapz(TIME, LIDV),
Cmax_0_24 = max(LIDV),
Ctrough_0_24 = LIDV[length(LIDV)],
DOSE = DOSE[1],
WT = WT[1]) %>%
tidyr::gather(PARAM, VALUE, -c(ID, DOSE, WT)) %>%
mutate(VALUE_NORM = VALUE / DOSE) %>%
ungroup()
```
## Summary of the data issues and the covariates
```{r}
check <- xgx_check_data(data,covariates)
knitr::kable(check$summary)
knitr::kable(head(check$data_subset))
knitr::kable(check$cts_covariates)
knitr::kable(check$cat_covariates)
```
## Provide an overview of the data
Summarize the data in a way that is easy to visualize the general trend of
PK over time and between doses. Using summary statistics can be helpful,
e.g. Mean +/- SE, or median, 5th & 95th percentiles. Consider either coloring
by dose or faceting by dose. Depending on the amount of data one graph may
be better than the other.
When looking at summaries of PK over time, there are several things to observe.
Note the number of doses and number of time points or sampling schedule.
Observe the overall shape of the average profiles. What is the average Cmax
per dose? Tmax? Does the elimination phase appear to be parallel across the
different doses? Is there separation between the profiles for different
doses? Can you make a visual estimate of the number of compartments that would
be needed in a PK model?
### Concentration over Time, colored by dose, mean +/- 95% CI
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
glin <- ggplot(data = data_pk, aes(x = NOMTIME,
y = LIDV,
group = DOSE,
color = DOSEREG_REV)) +
stat_summary() +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
labs(y = conc_label, color = "Dose")
glog <- glin + scale_y_log10()
gridExtra::grid.arrange(glin, glog, nrow = 1)
```
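For a percentile-based summary instead of the mean, `xgx_stat_pi` plots the
median with a percentile interval (a sketch; `percent = 0.9` spans the 5th to
95th percentiles):
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
# median and 90% percentile interval by nominal time (sketch)
ggplot(data = data_pk, aes(x = NOMTIME,
                           y = LIDV,
                           group = DOSE,
                           color = DOSEREG_REV)) +
  xgx_stat_pi(percent = 0.9) +
  xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
  xgx_scale_y_log10() +
  labs(y = conc_label, color = "Dose")
```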
### Side-by-side comparison of first administered dose and steady state
For multiple dose studies, zoom in on key visits for a clearer picture of the
profiles. Look for accumulation (if any) between first administered dose and
steady state.
```{r , echo=TRUE, warning=FALSE, message=FALSE, fig.height=4}
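# template code: this chunk runs only if a rich-sampling subset
# (data_pk_rich) has been defined for the dataset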
if (exists("data_pk_rich")) {
ggplot(data_pk_rich, aes(x = PROFTIME,
y = LIDV,
group = interaction(CYCLE,DOSE),
color = DOSEREG_REV)) +
facet_grid(~DAY_label, scales = "free_x") +
xgx_stat_ci() +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose")
}
```
### Concentration over Time, faceted by dose, mean +/- 95% CI, overlaid on gray spaghetti plots
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = data_pk, aes(x = TIME,
y = LIDV,
group = interaction(ID, CYCLE))) +
geom_line(size = 1, color = rgb(0.5, 0.5, 0.5), alpha = 0.3) +
geom_point(aes(color = factor(CENS), shape = factor(CENS)),
size = 2, alpha = 0.3) +
xgx_stat_ci(aes(x = NOMTIME, group = NULL, color = NULL)) +
facet_grid(.~DOSEREG) +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
ylab(conc_label) +
theme(legend.position = "none") +
scale_shape_manual(values = c(1, 8)) +
scale_color_manual(values = c("grey50", "red"))
```
## Explore variability
Use spaghetti plots to visualize the extent of variability between individuals.
The wider the spread of the profiles, the higher the between subject
variability. Distinguish different doses by color, or separate into different
panels. If coloring by dose, do the individuals in the different dose groups
overlap across doses? Does there seem to be more variability at higher or
lower concentrations?
### Concentration over Time, colored by dose, dots and lines grouped by individual
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=5}
ggplot(data = data_pk, aes(x = TIME,
y = LIDV,
group = interaction(ID, CYCLE),
color = factor(DOSEREG_REV),
shape = factor(CENS))) +
geom_line(size = 1, alpha = 0.5) +
geom_point() +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose", shape = "Censoring")
```
### Side-by-side comparison of first administered dose and steady state
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=4}
if (exists("data_pk_rich")) {
ggplot(data = data_pk_rich, aes(x = TIME,
y = LIDV,
group = interaction(ID, CYCLE),
color = DOSEREG_REV,
shape = factor(CENS))) +
geom_line(size = 1, alpha = 0.5) +
geom_point() +
facet_grid(~DAY_label, scales = "free_x") +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose", shape = "Censoring")
}
```
### Concentration over Time, faceted by dose, lines grouped by individual
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = data_pk, aes(x = TIME,
y = LIDV,
group = interaction(ID, CYCLE),
color = factor(CENS),
shape = factor(CENS))) +
geom_line(size = 1, alpha = 0.5) +
geom_point() +
facet_grid(.~DOSEREG) +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
ylab(conc_label) +
scale_shape_manual(values = c(1, 8)) +
scale_color_manual(values = c("grey50", "red")) +
theme(legend.position = "none")
```
## Assess the dose linearity of exposure
### Dose Normalized Concentration over Time, colored by dose, mean +/- 95% CI
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = data_pk, aes(x = NOMTIME,
y = LIDVNORM,
group = DOSEREG_REV,
color = DOSEREG_REV)) +
xgx_stat_ci() +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose")
```
### Side-by-side comparison of first administered dose and steady state
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=4}
if (exists("data_pk_rich")) {
ggplot(data_pk_rich, aes(x = NOMTIME,
y = LIDVNORM,
group = interaction(DOSE, CYCLE),
color = DOSEREG_REV)) +
xgx_stat_ci() +
facet_grid(~DAY_label,scales = "free_x") +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose")
}
```
## Explore irregularities in profiles
Plot individual profiles in order to inspect them for any irregularities.
Inspect the profiles for outlying data points that may skew results or bias
conclusions. Looking at the shapes of the individual profiles now, do they
support your observations made about the mean profile (e.g. number of
compartments, typical Cmax, Tmax)?
Plotting individual profiles on top of gray spaghetti plots puts individual
profiles into context, and may help identify outlying individuals for further
inspection. Are there any individuals that appear to have very high or low
Cmax compared to others within the same dose group? What about the timing of
Cmax? What about the slope of the elimination phase? Does it appear that any
subjects could have received an incorrect dose?
### Concentration over Time, faceted by individual, individual line plots overlaid on gray spaghetti plots for that dose group
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=7}
ggplot(data = data_pk, aes(x = TIME, y = LIDV)) +
geom_line() +
geom_point(aes(color = factor(CENS), shape = factor(CENS))) +
facet_wrap(~ID + DOSEREG) +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
ylab(conc_label) +
theme(legend.position = "none") +
scale_shape_manual(values = c(1, 8)) +
scale_color_manual(values = c("black", "red"))
```
## NCA
### NCA of dose normalized AUC vs Dose
Observe the dose normalized AUC over different doses. Does the relationship
appear to be constant across doses or do some doses stand out from the rest?
Can you think of reasons why some would stand out? For example, the lowest
dose may have dose normalized AUC much higher than the rest, could this be
due to censored (BLQ) observations? If the highest doses have dose normalized AUC much
higher than the others, could this be due to nonlinear clearance, with
clearance saturating at higher doses? If the highest doses have dose normalized
AUC much lower than the others, could there be saturation of bioavailability,
reaching the maximum absorbable dose?
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=4}
if (!exists("NCA")) {
warning("For PK data exploration, it is highly recommended to perform an NCA")
} else {
ggplot(data = NCA, aes(x = DOSE, y = VALUE_NORM)) +
geom_boxplot(aes(group = DOSE)) +
geom_smooth(method = "loess", color = "black") +
facet_wrap(~PARAM, scales = "free_y") +
xgx_scale_x_log10(breaks = unique(NCA$DOSE)) +
xgx_scale_y_log10() +
labs(x = dose_label, y = concnorm_label)
}
```
## Covariate Effects
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=5}
if (!exists("NCA")) {
warning("For covariate exploration, it is highly recommended to perform an NCA")
} else {
NCA_cts <- NCA[, c("PARAM", "VALUE", check$cts_covariates$Covariate)] %>%
tidyr::gather(COV, COV_VALUE, -c(PARAM, VALUE))
NCA_cat <- NCA[, c("PARAM", "VALUE", check$cat_covariates$Covariate)] %>%
tidyr::gather(COV, COV_VALUE, -c(PARAM, VALUE))
if (nrow(check$cts_covariates) >= 1) {
gg <- ggplot(data = NCA_cts, aes(x = COV_VALUE, y = VALUE)) +
geom_point() +
geom_smooth(method = "loess", color = "black") +
facet_grid(PARAM~COV,switch = "y", scales = "free_y") +
xgx_scale_x_log10() +
xgx_scale_y_log10() +
labs(x = "Covariate Value", y = "NCA Parameter Value")
print(gg)
}
if (nrow(check$cat_covariates) >= 1) {
gg <- ggplot(data = NCA_cat, aes(x = COV_VALUE, y = VALUE)) +
geom_boxplot() +
facet_grid(PARAM~COV, switch = "y", scales = "free_y") +
xgx_scale_y_log10() +
labs(x = "Covariate Value", y = "NCA Parameter Value")
print(gg)
}
}
```
| /scratch/gouwar.j/cran-all/cranData/xgxr/inst/doc/theoph.Rmd |
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 7
)
## ---- message=FALSE-----------------------------------------------------------
library(tidyr)
library(dplyr)
library(ggplot2)
library(xgxr)
## -----------------------------------------------------------------------------
# xgx_create_rmarkdown(type = "pk", open_file = FALSE)
## ---- fig.height=7------------------------------------------------------------
#if (sessionInfo()$otherPkgs$ggplot2$Version == "2.2.1") {
# nsubj <- 50
# ntime <- 8
# time <- rep(c(1, 2, 4, 8, 12, 24, 36, 48), nsubj)
# id <- sort(rep(seq(1, nsubj), ntime))
# trt <- sort(rep(c(25, 50, 100, 150, 300), ntime * nsubj / 5))
# ka <- rep(rlnorm(nsubj, -0.5, 0.3), each = ntime)
# ke <- rep(rlnorm(nsubj, -3, 0.3), each = ntime)
# conc <- trt * (ka * ke / (ka - ke)) * (exp(-time * ke) - exp(-time * ka)) * (rep(stats::rlnorm(ntime * nsubj, 0.3, 0.1)))
# data <- data.frame(TIME = time, CONC = conc, ID = id, TRT = trt)
# xgx_PK_summary(data = data, labels = list(TRT = "Dose"),
# units_dataset = list(TIME = "Hours", CONC = "ng/mL", TRT = "mg"))
#} else {
# print("Currently only works with ggplot2 version 2.2.1 (on DaVinci), and not version 3")
#}
## -----------------------------------------------------------------------------
dirs <- list(
parent_dir = tempdir(),
rscript_dir = tempdir(),
rscript_name = "example.R",
results_dir = tempdir(),
filename_prefix = "example_")
data <- data.frame(x = 1:1000, y = stats::rnorm(1000))
g <- xgx_plot(data = data, aes(x = x, y = y)) +
geom_point()
xgx_save(width = 4, height = 4, dirs = dirs, filename_main = "example_plot", status = "DRAFT")
## -----------------------------------------------------------------------------
data <- data.frame(x = 1:1000, y = stats::rnorm(1000))
g <- xgx_plot(data = data, aes(x = x, y = y)) +
geom_point()
filename <- file.path(tempdir(), "png_example.png")
ggsave(filename, plot = g, height = 4, width = 4, dpi = 75)
xgx_annotate_status_png(filename, "./ExampleScript.R")
## -----------------------------------------------------------------------------
x <- data.frame(ID = c(1, 2), SEX = c("male", "female"))
data <- xgx_save_table(x, dirs = dirs, filename_main = "ExampleTable")
knitr::kable(data)
## -----------------------------------------------------------------------------
xgx_plot(mtcars, aes(x = cyl, y = mpg)) + geom_point()
## -----------------------------------------------------------------------------
theme_set(xgx_theme())
## Alternative, equivalent function:
xgx_theme_set()
## ---- fig.width=4, fig.height=2-----------------------------------------------
# time <- rep(seq(1,10),5)
# id <- sort(rep(seq(1,5), 10))
# conc <- exp(-time)*sort(rep(rlnorm(5),10))
#
# data <- data.frame(time = time, concentration = conc, id = factor(id))
# xgx_plot() + xgx_geom_spaghetti(data = data, mapping = aes(x = time, y = concentration, group = id, color = id))
#
# xgx_spaghetti(data = data, mapping = aes(x = time, y = concentration, group = id, color = id))
## ---- fig.width=4, fig.height=2-----------------------------------------------
data <- data.frame(x = rep(c(1, 2, 3), each = 20),
y = rep(c(1, 2, 3), each = 20) + stats::rnorm(60),
group = rep(1:3, 20))
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_ci(conf_level = .95)
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_pi(percent = .95)
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_ci(conf_level = .95, geom = list("pointrange","line"))
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_ci(conf_level = .95, geom = list("ribbon","line"))
xgx_plot(data,aes(x = x, y = y, group = group, color = factor(group))) +
xgx_stat_ci(conf_level = .95, alpha = 0.5,
position = position_dodge(width = 0.5))
## ---- fig.width=4, fig.height=2-----------------------------------------------
# plotting lognormally distributed data
data <- data.frame(x = rep(c(1, 2, 3), each = 20),
y = 10^(rep(c(1, 2, 3), each = 20) + stats::rnorm(60)),
group = rep(1:3, 20))
xgx_plot(data, aes(x = x, y = y)) +
xgx_stat_ci(conf_level = 0.95, distribution = "lognormal")
# note: you DO NOT need to use both distribution = "lognormal" and scale_y_log10()
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_ci(conf_level = 0.95) + xgx_scale_y_log10()
# plotting binomial data
data <- data.frame(x = rep(c(1, 2, 3), each = 20),
y = rbinom(60, 1, rep(c(0.2, 0.6, 0.8), each = 20)),
group = rep(1:3, 20))
xgx_plot(data, aes(x = x, y = y)) +
xgx_stat_ci(conf_level = 0.95, distribution = "binomial")
# Example plotting the percent of subjects in a categorical covariate group by treatment.
set.seed(12345)
data = data.frame(x = 120*exp(rnorm(100,0,1)),
response = sample(c("Trt1", "Trt2", "Trt3"), 100, replace = TRUE),
covariate = factor(sample(c("White","Black","Asian","Other"), 100, replace = TRUE),
levels = c("White", "Black", "Asian", "Other")))
xgx_plot(data = data) +
xgx_stat_ci(mapping = aes(x = response, response = covariate),
distribution = "ordinal") +
xgx_stat_ci(mapping = aes(x = 1, response = covariate), geom = "hline",
distribution = "ordinal") +
scale_y_continuous(labels = scales::percent_format()) +
facet_wrap(~covariate) +
xlab("Treatment group") + ylab("Percent of subjects by category")
## -----------------------------------------------------------------------------
# plotting
set.seed(12345)
data = data.frame(x = 120*exp(rnorm(100,0,1)),
response = sample(c("Mild","Moderate","Severe"), 100, replace = TRUE),
covariate = sample(c("Male","Female"), 100, replace = TRUE)) %>%
mutate(y = (50 + 20*x/(200 + x))*exp(rnorm(100, 0, 0.3)))
# plotting a lognormally distributed variable by quartiles of x
xgx_plot(data = data) +
xgx_stat_ci(mapping = aes(x = x, y = y, colour = covariate),
distribution = "lognormal", bins = 4)
# plotting ordinal or multinomial data, by quartiles of x
xgx_plot(data = data) +
xgx_stat_ci(mapping = aes(x = x, response = response, colour = covariate),
distribution = "ordinal", bins = 4) +
scale_y_continuous(labels = scales::percent_format()) + facet_wrap(~response)
xgx_plot(data = data) +
xgx_stat_ci(mapping = aes(x = x, response = response, colour = response),
distribution = "ordinal", bins = 4) +
scale_y_continuous(labels = scales::percent_format()) + facet_wrap(~covariate)
## -----------------------------------------------------------------------------
library(minpack.lm) # provides nlsLM, used by geom_smooth(method = "nlsLM") and the direct call below
set.seed(123456)
Nsubj <- 10
Doses <- c(0, 25, 50, 100, 200)
Ntot <- Nsubj*length(Doses)
times <- c(0,14,30,60,90)
dat1 <- data.frame(ID = 1:(Ntot),
DOSE = rep(Doses, Nsubj),
E0 = 50*rlnorm(Ntot, 0, 0.3),
Emax = 100*rlnorm(Ntot, 0, 0.3),
ED50 = 50*rlnorm(Ntot, 0, 0.3)) %>%
dplyr::mutate(Response = (E0 + Emax*DOSE/(DOSE + ED50))*rlnorm(Ntot, 0, 0.3) ) %>%
merge(data.frame(ID = rep(1:(Ntot), each = length(times)), Time = times), by = "ID")
gg <- xgx_plot(data = dat1, aes(x = DOSE, y = Response))
gg <- gg + geom_point()
gg
gg + geom_smooth(method = "nlsLM",
formula = y ~ E0 + Emax*x/(ED50 + x),
method.args = list(start = list(E0 = 1, ED50 = 1, Emax = 1),
lower = c(-Inf, 0, -Inf)))
## -----------------------------------------------------------------------------
gg + xgx_geom_smooth_emax()
gg +
xgx_geom_smooth_emax(geom = "ribbon", color = "black", fill = NA, linetype = "dashed") +
xgx_geom_smooth_emax(geom = "line", color = "red")
## -----------------------------------------------------------------------------
mod <- nlsLM(formula = Response ~ E0 + Emax * DOSE / (ED50 + DOSE),
data = dat1,
start = list(E0 = 1, ED50 = 1, Emax = 1),
lower = c(-Inf, 0, -Inf))
predict(mod,
newdata = data.frame(DOSE = c(0, 25, 50, 100, 200)),
se.fit = TRUE)
predict(mod,
newdata = data.frame(DOSE = c(0, 25, 50, 100, 200)),
se.fit = TRUE, interval = "confidence", level = 0.95)
## -----------------------------------------------------------------------------
# example with ordinal data (method = "polr")
set.seed(12345)
data = data.frame(x = 120*exp(stats::rnorm(100,0,1)),
response = sample(c("Mild","Moderate","Severe"), 100, replace = TRUE),
covariate = sample(c("Male","Female"), 100, replace = TRUE)) %>%
dplyr::mutate(y = (50 + 20*x/(200 + x))*exp(stats::rnorm(100, 0, 0.3)))
# example coloring by the response categories
xgx_plot(data = data) +
xgx_stat_smooth(mapping = ggplot2::aes(x = x, response = response,
colour = response, fill = response),
method = "polr") +
ggplot2::scale_y_continuous(labels = scales::percent_format())
# example faceting by the response categories, coloring by a different covariate
xgx_plot(data = data) +
xgx_stat_smooth(mapping = ggplot2::aes(x = x, response = response,
colour = covariate, fill = covariate),
method = "polr", level = 0.80) +
ggplot2::facet_wrap(~response) +
ggplot2::scale_y_continuous(labels = scales::percent_format())
## -----------------------------------------------------------------------------
df <- data.frame(x = c(0, stats::rlnorm(1000, 0, 1)),
y = c(0, stats::rlnorm(1000, 0, 3)))
xgx_plot(data = df, aes(x = x, y = y)) +
geom_point() +
xgx_scale_x_log10() +
xgx_scale_y_log10()
## ---- fig.height=3.5, warning=FALSE-------------------------------------------
conc <- 10^(seq(-3, 3, by = 0.1))
ec50 <- 1
data <- data.frame(concentration = conc,
bound_receptor = 1 * conc / (conc + ec50))
gy <- xgx_plot(data, aes(x = concentration, y = bound_receptor)) +
geom_point() +
geom_line() +
xgx_scale_x_log10() +
xgx_scale_y_reverselog10()
gx <- xgx_plot(data, aes(x = bound_receptor, y = concentration)) +
geom_point() +
geom_line() +
xgx_scale_y_log10() +
xgx_scale_x_reverselog10()
gridExtra::grid.arrange(gy, gx, nrow = 1)
## ---- fig.height=3.5, warning=FALSE-------------------------------------------
Nsubj <- 10
Doses <- c(0, 25, 50, 100, 200)
Ntot <- Nsubj*length(Doses)
times <- c(0,14,30,60,90)
dat1 <- data.frame(ID = 1:(Ntot),
DOSE = rep(Doses, Nsubj),
PD0 = rlnorm(Ntot, log(100), 1),
Kout = exp(rnorm(Ntot,-2, 0.3)),
Imax = 1,
ED50 = 25) %>%
dplyr::mutate(PDSS = PD0*(1 - Imax*DOSE/(DOSE + ED50))*exp(rnorm(Ntot, 0.05, 0.3)) ) %>%
merge(data.frame(ID = rep(1:(Ntot), each = length(times)), Time = times), by = "ID") %>%
dplyr::mutate(PD = ((PD0 - PDSS)*(exp(-Kout*Time)) + PDSS),
PCHG = (PD - PD0)/PD0)
ggplot2::ggplot(dat1 %>% subset(Time == 90),
ggplot2::aes(x = DOSE, y = PCHG, group = DOSE)) +
ggplot2::geom_boxplot() +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
ylab("Percent Change from Baseline") +
xlab("Dose (mg)")
ggplot2::ggplot(dat1,
ggplot2::aes(x = Time, y = PCHG, group = ID, color = factor(DOSE))) +
ggplot2::geom_line() +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
guides(color = guide_legend(title = "Dose (mg)")) +
ylab("Percent Change from Baseline")
dat2 <- data.frame(ID = 1:(Ntot),
DOSE = rep(Doses, Nsubj),
PD0 = rlnorm(Ntot, log(100), 1),
Kout = exp(rnorm(Ntot,-2, 0.3)),
Emax = 50*rlnorm(Ntot, 0, 0.3),
ED50 = 300) %>%
dplyr::mutate(PDSS = PD0*(1 + Emax*DOSE/(DOSE + ED50))*exp(rnorm(Ntot, -1, 0.3)) ) %>%
merge(data.frame(ID = rep(1:(Ntot), each = length(times)), Time = times), by = "ID") %>%
dplyr::mutate(PD = ((PD0 - PDSS)*(exp(-Kout*Time)) + PDSS),
PCHG = (PD - PD0)/PD0)
ggplot2::ggplot(dat2, ggplot2::aes(x = DOSE, y = PCHG, group = DOSE)) +
ggplot2::geom_boxplot() +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
ylab("Percent Change from Baseline") +
xlab("Dose (mg)")
ggplot2::ggplot(dat2,
ggplot2::aes(x = Time, y = PCHG, group = ID, color = factor(DOSE))) +
ggplot2::geom_line() +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
guides(color = guide_legend(title = "Dose (mg)")) +
ylab("Percent Change from Baseline")
## ---- fig.height=7------------------------------------------------------------
data <- data.frame(x = 1:1000, y = stats::rnorm(1000))
g <- xgx_plot(data = data, aes(x = x, y = y)) +
geom_point()
g1 <- g + xgx_scale_x_time_units(units_dataset = "hours", units_plot = "hours")
g2 <- g + xgx_scale_x_time_units(units_dataset = "hours", units_plot = "days")
g3 <- g + xgx_scale_x_time_units(units_dataset = "hours", units_plot = "weeks")
g4 <- g + xgx_scale_x_time_units(units_dataset = "hours", units_plot = "months")
gridExtra::grid.arrange(g1, g2, g3, g4, nrow = 2)
## ---- message=FALSE-----------------------------------------------------------
data <- mad_missing_duplicates %>%
filter(CMT %in% c(1, 2, 3)) %>%
rename(DV = LIDV,
YTYPE = CMT,
USUBJID = ID)
covariates <- c("WEIGHTB", "SEX")
check <- xgx_check_data(data, covariates)
knitr::kable(check$summary)
knitr::kable(head(check$data_subset))
## -----------------------------------------------------------------------------
covar <- xgx_summarize_covariates(data,covariates)
knitr::kable(covar$cts_covariates)
knitr::kable(covar$cat_covariates)
| /scratch/gouwar.j/cran-all/cranData/xgxr/inst/doc/xgxr_overview.R |
---
title: "xgxr Overview"
author: "Andrew Stein, Fariba Khanshan, Alison Margolskee"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
self_contained: yes
vignette: >
%\VignetteIndexEntry{xgxr Overview}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 7
)
```
## Overview
The xgxr package supports a structured approach to exploring PKPD data
([outlined here](https://opensource.nibr.com/xgx/)). It also contains helper
functions for enabling the modeler to follow best R practices (by appending
the program name, figure name location, and draft status to each plot) and
enabling the modeler to follow best graphical practices (by providing an xgx
theme that reduces chart ink, and by providing time-scale, log-scale, and
reverse-log-transform-scale functions for more readable axes).
## Required Packages
```{r, message=FALSE}
library(tidyr)
library(dplyr)
library(ggplot2)
library(xgxr)
```
<!-- ## Data exploration
This package offers two frameworks for exploring data
1. Generate an Rmarkdown shell using `xgx_create_rmarkdown()`, which the user then edits with their dataset and column names. This Rmarkdown document relies on the `dplyr` and `ggplot2` packages, in addition to some add-on functionality to `ggplot2` that is provided by the `xgx` package. The Rmarkdown document is designed to be easy for the modeler to customize as needed.
2. Create a single page of overview of the data using the function `xgx_PK_summary` (and other functions to be added). This function is meant to provide a quick overview of the data on one page, but is not as customizable as the Rmarkdown document
### Rmarkdown shell for data exploration
To create a shell Rmarkdown with the code for exploring data, use `xgx_create_rmarkdown`. Currently, only the "pk" type is implemented.
```{r}
# xgx_create_rmarkdown(type = "pk", open_file = FALSE)
```
### PK summary plots
Function to make key plots of pharmacokinetic (PK) data.
```{r, fig.height=7}
#if (sessionInfo()$otherPkgs$ggplot2$Version == "2.2.1") {
# nsubj <- 50
# ntime <- 8
# time <- rep(c(1, 2, 4, 8, 12, 24, 36, 48), nsubj)
# id <- sort(rep(seq(1, nsubj), ntime))
# trt <- sort(rep(c(25, 50, 100, 150, 300), ntime * nsubj / 5))
# ka <- rep(rlnorm(nsubj, -0.5, 0.3), each = ntime)
# ke <- rep(rlnorm(nsubj, -3, 0.3), each = ntime)
# conc <- trt * (ka * ke / (ka - ke)) * (exp(-time * ke) - exp(-time * ka)) * (rep(stats::rlnorm(ntime * nsubj, 0.3, 0.1)))
# data <- data.frame(TIME = time, CONC = conc, ID = id, TRT = trt)
# xgx_PK_summary(data = data, labels = list(TRT = "Dose"),
# units_dataset = list(TIME = "Hours", CONC = "ng/mL", TRT = "mg"))
#} else {
# print("Currently only works with ggplot2 version 2.2.1 (on DaVinci), and not version 3")
#}
```
-->
## Traceability: annotating and saving plots and tables
### Saving figures
Our best practices require that we mark plots as "DRAFT" if not yet final,
and also list the program that created the plot and the location where the
plot is stored. This helps with the traceability of the work, by ensuring
that the following information is available for every plot in a report: the
R script used to create the figure, the location where the figure is stored,
and the time and date when the figure was created. The key functions here are:
* `xgx_annotate_status` allows for the addition of text (like the word draft)
to the plots
* `xgx_annotate_filenames` allows for printing the filenames as a caption for
the plot. It requires an input list `dirs` with particular fields, as shown
below.
The function `xgx_save` calls both of the above functions and it is
illustrated below.
This function also requires the user to input a width and height for the graph.
This is because plots are often rendered with fonts so small that the x and
y axes are unreadable. We've found that the easiest way to set the font size
is "indirectly" by specifying the height and width of the graph. Note that if
you have a plot window open, you can get the height and width by typing
`dev.size()`.
```{r}
dirs <- list(
parent_dir = tempdir(),
rscript_dir = tempdir(),
rscript_name = "example.R",
results_dir = tempdir(),
filename_prefix = "example_")
data <- data.frame(x = 1:1000, y = stats::rnorm(1000))
g <- xgx_plot(data = data, aes(x = x, y = y)) +
geom_point()
xgx_save(width = 4, height = 4, dirs = dirs, filename_main = "example_plot", status = "DRAFT")
```
The function `xgx_save` works only with ggplot objects; it will not work with
figures created by other means. An alternative is to use
`xgx_annotate_status_png` to add the status and filename to existing png files.
```{r}
data <- data.frame(x = 1:1000, y = stats::rnorm(1000))
g <- xgx_plot(data = data, aes(x = x, y = y)) +
geom_point()
filename <- file.path(tempdir(), "png_example.png")
ggsave(filename, plot = g, height = 4, width = 4, dpi = 75)
xgx_annotate_status_png(filename, "./ExampleScript.R")
```
<!--  -->
### Saving tables
We also provide a function `xgx_save_table` for annotating the relevant
information to csv files. The annotated table is shown below.
```{r}
x <- data.frame(ID = c(1, 2), SEX = c("male", "female"))
data <- xgx_save_table(x, dirs = dirs, filename_main = "ExampleTable")
knitr::kable(data)
```
## Graphics helpers
### xgx theme
The `xgx_theme()` function includes the xGx recommended plot settings.
It sets the background to white with light grey lines for the major and
minor breaks. This minimizes chart ink as recommended by Edward Tufte.
You can add `xgx_theme()` to an existing `ggplot` object, or you can
call `xgx_plot()` in place of `ggplot()` for all of your plot initiations.
```{r}
xgx_plot(mtcars, aes(x = cyl, y = mpg)) + geom_point()
```
You may wish to set the theme to `xgx_theme` for your R session, as we do below.
```{r}
theme_set(xgx_theme())
## Alternative, equivalent function:
xgx_theme_set()
```
<!-- ### Spaghetti plot
Spaghetti plots combine dots and lines, grouped and colored by individuals onto one plot. Try out `xgx_geom_spaghetti` which combines `geom_point()` and `geom_line()` into one `geom`, grouping and coloring by the `group` aesthetic. Calling `xgx_spaghetti` further combines `xgx_plot()` and `xgx_geom_spaghetti()` into one line.
-->
```{r, fig.width=4, fig.height=2}
# time <- rep(seq(1,10),5)
# id <- sort(rep(seq(1,5), 10))
# conc <- exp(-time)*sort(rep(rlnorm(5),10))
#
# data <- data.frame(time = time, concentration = conc, id = factor(id))
# xgx_plot() + xgx_geom_spaghetti(data = data, mapping = aes(x = time, y = concentration, group = id, color = id))
#
# xgx_spaghetti(data = data, mapping = aes(x = time, y = concentration, group = id, color = id))
```
### Confidence intervals
The code for confidence intervals is a bit complex and hard to remember.
Rather than copy-pasting this code, we provide the function `xgx_stat_ci`
for calculating and plotting confidence intervals, with `xgx_geom_ci` as an
equivalent alias. `xgx_stat_ci` allows the definition of multiple `geom`
options in one function call, defined through a list; the default is
`geom = list("point","line","errorbar")`. Additional ggplot options can be
fed through the `ggplot` object call, or the `xgx_stat_ci` layer.
`xgx_stat_pi` and `xgx_geom_pi` work in a similar fashion but for percentile
intervals.
```{r, fig.width=4, fig.height=2}
data <- data.frame(x = rep(c(1, 2, 3), each = 20),
y = rep(c(1, 2, 3), each = 20) + stats::rnorm(60),
group = rep(1:3, 20))
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_ci(conf_level = .95)
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_pi(percent = .95)
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_ci(conf_level = .95, geom = list("pointrange","line"))
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_ci(conf_level = .95, geom = list("ribbon","line"))
xgx_plot(data,aes(x = x, y = y, group = group, color = factor(group))) +
xgx_stat_ci(conf_level = .95, alpha = 0.5,
position = position_dodge(width = 0.5))
```
The default settings calculate the confidence interval based on the
Student t Distribution (assuming normally distributed data). You can also
specify "lognormal"", "binomial"" or "multinomial"" for the `distribution`. The first will
perform the confidence interval operation on the log-scaled data, the second
uses the binomial exact confidence interval calculation from the `binom` package, and the
third uses `MultinomCI` from the `DescTools` package. The "multinomial"" option
is used for ordinal response or categorical data.
Note: you DO NOT need to use both `distribution = "lognormal"`
and `scale_y_log10()`, choose only one of these.
```{r, fig.width=4, fig.height=2}
# plotting lognormally distributed data
data <- data.frame(x = rep(c(1, 2, 3), each = 20),
y = 10^(rep(c(1, 2, 3), each = 20) + stats::rnorm(60)),
group = rep(1:3, 20))
xgx_plot(data, aes(x = x, y = y)) +
xgx_stat_ci(conf_level = 0.95, distribution = "lognormal")
# note: you DO NOT need to use both distribution = "lognormal" and scale_y_log10()
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_ci(conf_level = 0.95) + xgx_scale_y_log10()
# plotting binomial data
data <- data.frame(x = rep(c(1, 2, 3), each = 20),
y = rbinom(60, 1, rep(c(0.2, 0.6, 0.8), each = 20)),
group = rep(1:3, 20))
xgx_plot(data, aes(x = x, y = y)) +
xgx_stat_ci(conf_level = 0.95, distribution = "binomial")
# Example plotting the percent of subjects in a categorical covariate group by treatment.
set.seed(12345)
data = data.frame(x = 120*exp(rnorm(100,0,1)),
response = sample(c("Trt1", "Trt2", "Trt3"), 100, replace = TRUE),
covariate = factor(sample(c("White","Black","Asian","Other"), 100, replace = TRUE),
levels = c("White", "Black", "Asian", "Other")))
xgx_plot(data = data) +
xgx_stat_ci(mapping = aes(x = response, response = covariate),
distribution = "ordinal") +
xgx_stat_ci(mapping = aes(x = 1, response = covariate), geom = "hline",
distribution = "ordinal") +
scale_y_continuous(labels = scales::percent_format()) +
facet_wrap(~covariate) +
xlab("Treatment group") + ylab("Percent of subjects by category")
```
`xgx_stat_ci` can now also cut data by quantiles of `x` using the `bins` option, e.g. `bins = 4` will cut the data by quartiles of `x`. You can also supply your own breaks to cut the data, as sketched after the examples below.
```{r}
# plotting
set.seed(12345)
data = data.frame(x = 120*exp(rnorm(100,0,1)),
response = sample(c("Mild","Moderate","Severe"), 100, replace = TRUE),
covariate = sample(c("Male","Female"), 100, replace = TRUE)) %>%
mutate(y = (50 + 20*x/(200 + x))*exp(rnorm(100, 0, 0.3)))
# plotting a lognormally distributed variable by quartiles of x
xgx_plot(data = data) +
xgx_stat_ci(mapping = aes(x = x, y = y, colour = covariate),
distribution = "lognormal", bins = 4)
# plotting ordinal or multinomial data, by quartiles of x
xgx_plot(data = data) +
xgx_stat_ci(mapping = aes(x = x, response = response, colour = covariate),
distribution = "ordinal", bins = 4) +
scale_y_continuous(labels = scales::percent_format()) + facet_wrap(~response)
xgx_plot(data = data) +
xgx_stat_ci(mapping = aes(x = x, response = response, colour = response),
distribution = "ordinal", bins = 4) +
scale_y_continuous(labels = scales::percent_format()) + facet_wrap(~covariate)
```
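A minimal sketch of supplying custom cut points instead of quantile bins;
this assumes the `breaks` argument of `xgx_stat_ci`, with break values chosen
arbitrarily for the simulated data above:
```{r}
# sketch: user-defined breaks instead of quantile bins
xgx_plot(data = data) +
  xgx_stat_ci(mapping = aes(x = x, y = y, colour = covariate),
              distribution = "lognormal",
              breaks = c(0, 60, 120, 240, Inf))
```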
### Nonlinear smoothing (e.g. Emax), and ordinal response smoothing
The current ggplot2::geom_smooth does not allow for plotting confidence bands for method = "nls", as ggplot2 does not supply a `predictdf` for an object of class `nls`, which geom_smooth silently calls to calculate the ymin and ymax for the confidence bands. The xgxr package includes a definition of `predictdf.nls`, allowing for confidence bands for method = "nls".
```{r}
library(minpack.lm) # provides nlsLM, used by geom_smooth(method = "nlsLM") and the direct call below
set.seed(123456)
Nsubj <- 10
Doses <- c(0, 25, 50, 100, 200)
Ntot <- Nsubj*length(Doses)
times <- c(0,14,30,60,90)
dat1 <- data.frame(ID = 1:(Ntot),
DOSE = rep(Doses, Nsubj),
E0 = 50*rlnorm(Ntot, 0, 0.3),
Emax = 100*rlnorm(Ntot, 0, 0.3),
ED50 = 50*rlnorm(Ntot, 0, 0.3)) %>%
dplyr::mutate(Response = (E0 + Emax*DOSE/(DOSE + ED50))*rlnorm(Ntot, 0, 0.3) ) %>%
merge(data.frame(ID = rep(1:(Ntot), each = length(times)), Time = times), by = "ID")
gg <- xgx_plot(data = dat1, aes(x = DOSE, y = Response))
gg <- gg + geom_point()
gg
gg + geom_smooth(method = "nlsLM",
formula = y ~ E0 + Emax*x/(ED50 + x),
method.args = list(start = list(E0 = 1, ED50 = 1, Emax = 1),
lower = c(-Inf, 0, -Inf)))
```
xgxr also includes an Emax smooth function called `xgx_geom_smooth_emax` which utilizes the "nlsLM" method, and silently calls the `predictdf.nls` defined by xgxr.
```{r}
gg + xgx_geom_smooth_emax()
gg +
xgx_geom_smooth_emax(geom = "ribbon", color = "black", fill = NA, linetype = "dashed") +
xgx_geom_smooth_emax(geom = "line", color = "red")
```
xgxr also modifies the stats method `predict.nls` for `nls` objects in order to include confidence interval prediction. Upon loading the xgxr package, the `predict` method for class `nls` should be updated to the xgxr version, and include functionality to supply confidence intervals. In order to output the confidence intervals, be sure to specify `interval = "confidence"`. The output will contain a "fit" data.frame with values for "fit", "lwr" and "upr" representing the prediction and lower and upper confidence intervals.
```{r}
mod <- nlsLM(formula = Response ~ E0 + Emax * DOSE / (ED50 + DOSE),
data = dat1,
start = list(E0 = 1, ED50 = 1, Emax = 1),
lower = c(-Inf, 0, -Inf))
predict(mod,
newdata = data.frame(DOSE = c(0, 25, 50, 100, 200)),
se.fit = TRUE)
predict(mod,
newdata = data.frame(DOSE = c(0, 25, 50, 100, 200)),
se.fit = TRUE, interval = "confidence", level = 0.95)
```
xgxr also includes ordinal response smoothing as an option under the `xgx_stat_smooth` function, indicated by `method = "polr"`. This requires x values and response values to be defined in the mapping. This method also allows defining color, fill, facet, linetype, etc. by the response category, while preserving the ordinal response fit across these categories.
```{r}
# example with ordinal data (method = "polr")
set.seed(12345)
data = data.frame(x = 120*exp(stats::rnorm(100,0,1)),
response = sample(c("Mild","Moderate","Severe"), 100, replace = TRUE),
covariate = sample(c("Male","Female"), 100, replace = TRUE)) %>%
dplyr::mutate(y = (50 + 20*x/(200 + x))*exp(stats::rnorm(100, 0, 0.3)))
# example coloring by the response categories
xgx_plot(data = data) +
xgx_stat_smooth(mapping = ggplot2::aes(x = x, response = response,
colour = response, fill = response),
method = "polr") +
ggplot2::scale_y_continuous(labels = scales::percent_format())
# example faceting by the response categories, coloring by a different covariate
xgx_plot(data = data) +
xgx_stat_smooth(mapping = ggplot2::aes(x = x, response = response,
colour = covariate, fill = covariate),
method = "polr", level = 0.80) +
ggplot2::facet_wrap(~response) +
ggplot2::scale_y_continuous(labels = scales::percent_format())
```
### Nice log scale
This version of the log scale function shows the tick marks between the
major breaks (i.e. at 1, 2, 3, ... 10, instead of just 1 and 10). It also
uses $10^x$ notation when the labels are base 10 and are very small or
very large (< 0.001 or > 9999).
```{r}
df <- data.frame(x = c(0, stats::rlnorm(1000, 0, 1)),
y = c(0, stats::rlnorm(1000, 0, 3)))
xgx_plot(data = df, aes(x = x, y = y)) +
geom_point() +
xgx_scale_x_log10() +
xgx_scale_y_log10()
```
### Reverse log transform
This transform is useful for plotting data on a percentage scale that can
approach 100% (such as receptor occupancy data).
```{r, fig.height=3.5, warning=FALSE}
conc <- 10^(seq(-3, 3, by = 0.1))
ec50 <- 1
data <- data.frame(concentration = conc,
bound_receptor = 1 * conc / (conc + ec50))
gy <- xgx_plot(data, aes(x = concentration, y = bound_receptor)) +
geom_point() +
geom_line() +
xgx_scale_x_log10() +
xgx_scale_y_reverselog10()
gx <- xgx_plot(data, aes(x = bound_receptor, y = concentration)) +
geom_point() +
geom_line() +
xgx_scale_y_log10() +
xgx_scale_x_reverselog10()
gridExtra::grid.arrange(gy, gx, nrow = 1)
```
### Nice scale for percent change data
This transform is useful for plotting percent change from baseline data.
Percent change data can range from -100% to +Inf%, and depending on the range
of the data, a linear scale can lose the desired resolution. This transform
plots percent change data on a scale of log10(PCHG + 100%), similar to a
log scale of ratio to baseline.
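As a quick arithmetic check of the transform (with PCHG expressed as a
fraction), -50% and +100% map to equal distances below and above baseline:
```{r}
# log10(PCHG + 1): -50% and +100% are symmetric about baseline (0)
log10(c(-0.5, 0, 1) + 1)
```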
```{r, fig.height=3.5, warning=FALSE}
Nsubj <- 10
Doses <- c(0, 25, 50, 100, 200)
Ntot <- Nsubj*length(Doses)
times <- c(0,14,30,60,90)
dat1 <- data.frame(ID = 1:(Ntot),
DOSE = rep(Doses, Nsubj),
PD0 = rlnorm(Ntot, log(100), 1),
Kout = exp(rnorm(Ntot,-2, 0.3)),
Imax = 1,
ED50 = 25) %>%
dplyr::mutate(PDSS = PD0*(1 - Imax*DOSE/(DOSE + ED50))*exp(rnorm(Ntot, 0.05, 0.3)) ) %>%
merge(data.frame(ID = rep(1:(Ntot), each = length(times)), Time = times), by = "ID") %>%
dplyr::mutate(PD = ((PD0 - PDSS)*(exp(-Kout*Time)) + PDSS),
PCHG = (PD - PD0)/PD0)
ggplot2::ggplot(dat1 %>% subset(Time == 90),
ggplot2::aes(x = DOSE, y = PCHG, group = DOSE)) +
ggplot2::geom_boxplot() +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
ylab("Percent Change from Baseline") +
xlab("Dose (mg)")
ggplot2::ggplot(dat1,
ggplot2::aes(x = Time, y = PCHG, group = ID, color = factor(DOSE))) +
ggplot2::geom_line() +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
guides(color = guide_legend(title = "Dose (mg)")) +
ylab("Percent Change from Baseline")
dat2 <- data.frame(ID = 1:(Ntot),
DOSE = rep(Doses, Nsubj),
PD0 = rlnorm(Ntot, log(100), 1),
Kout = exp(rnorm(Ntot,-2, 0.3)),
Emax = 50*rlnorm(Ntot, 0, 0.3),
ED50 = 300) %>%
dplyr::mutate(PDSS = PD0*(1 + Emax*DOSE/(DOSE + ED50))*exp(rnorm(Ntot, -1, 0.3)) ) %>%
merge(data.frame(ID = rep(1:(Ntot), each = length(times)), Time = times), by = "ID") %>%
dplyr::mutate(PD = ((PD0 - PDSS)*(exp(-Kout*Time)) + PDSS),
PCHG = (PD - PD0)/PD0)
ggplot2::ggplot(dat2, ggplot2::aes(x = DOSE, y = PCHG, group = DOSE)) +
ggplot2::geom_boxplot() +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
ylab("Percent Change from Baseline") +
xlab("Dose (mg)")
ggplot2::ggplot(dat2,
ggplot2::aes(x = Time, y = PCHG, group = ID, color = factor(DOSE))) +
ggplot2::geom_line() +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
guides(color = guide_legend(title = "Dose (mg)")) +
ylab("Percent Change from Baseline")
```
### Scaling x-axis as a time scale
For time, it's often good for the x ticks to be spaced in a particular way.
For instance, for hours, subdividing in increments by 24, 12, 6, and 3 hours
can make more sense than by 10 or 100. Similarly for days, increments of 7
or 28 days are preferred over 5 or 10 days. `xgx_scale_x_time_units` allows
for this: you specify the units of the dataset and the units to display on the plot.
```{r, fig.height=7}
data <- data.frame(x = 1:1000, y = stats::rnorm(1000))
g <- xgx_plot(data = data, aes(x = x, y = y)) +
geom_point()
g1 <- g + xgx_scale_x_time_units(units_dataset = "hours", units_plot = "hours")
g2 <- g + xgx_scale_x_time_units(units_dataset = "hours", units_plot = "days")
g3 <- g + xgx_scale_x_time_units(units_dataset = "hours", units_plot = "weeks")
g4 <- g + xgx_scale_x_time_units(units_dataset = "hours", units_plot = "months")
gridExtra::grid.arrange(g1, g2, g3, g4, nrow = 2)
```
## Data checking
### Numerical check
We've found that during exploration, it can be extremely important to
check the dataset for issues. This can be done using the `xgx_check_data`
or `xgx_summarize_data` function (the two functions are identical).
```{r, message=FALSE}
data <- mad_missing_duplicates %>%
filter(CMT %in% c(1, 2, 3)) %>%
rename(DV = LIDV,
YTYPE = CMT,
USUBJID = ID)
covariates <- c("WEIGHTB", "SEX")
check <- xgx_check_data(data, covariates)
knitr::kable(check$summary)
knitr::kable(head(check$data_subset))
```
You can also get an overview of the covariates in the dataset
with `xgx_summarize_covariates`. The covariate summaries are also provided
in the `xgx_check_data` and `xgx_summarize_data` functions.
```{r}
covar <- xgx_summarize_covariates(data, covariates)
knitr::kable(covar$cts_covariates)
knitr::kable(covar$cat_covariates)
```
| /scratch/gouwar.j/cran-all/cranData/xgxr/inst/doc/xgxr_overview.Rmd |
---
title: "PKPD Single Ascending Dose example"
author: "Fariba Khanshan, Andrew Stein, Alison Margolskee"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
self_contained: yes
vignette: >
%\VignetteIndexEntry{PKPD Single Ascending Dose example}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 7
)
```
## Overview
This document contains exploratory plots for single ascending dose PK and PD
data as well as the R code that generates these graphs. The plots presented
here are based on simulated data.
## Setup
```{r, warning=FALSE, message=FALSE}
library(xgxr)
library(ggplot2)
library(dplyr)
library(tidyr)
# flag for labeling figures as draft
status <- "DRAFT"
# ggplot settings
xgx_theme_set()
```
## Load Dataset
```{r, warning=FALSE, message=FALSE}
pkpd_data <- case1_pkpd %>%
arrange(DOSE) %>%
subset(,-IPRED) %>%
mutate(TRTACT_low2high = factor(TRTACT, levels = unique(TRTACT)),
TRTACT_high2low = factor(TRTACT, levels = rev(unique(TRTACT))),
DAY_label = paste("Day", PROFDAY),
DAY_label = ifelse(DAY_label == "Day 0","Baseline",DAY_label))
LOQ = 0.05 #ng/ml
dose_max = as.numeric(max(pkpd_data$DOSE))
pk_data <- pkpd_data %>%
filter(CMT == 2) %>%
mutate(LIDVNORM = LIDV / as.numeric(DOSE))
pk_data_cycle1 <- pk_data %>%
filter(CYCLE == 1)
pd_data <- pkpd_data %>%
filter(CMT == 3)
pd_data_baseline_day85 <- pkpd_data %>%
filter(CMT == 3,
DAY_label %in% c("Baseline", "Day 85"))
pk_vs_pd_data <- pkpd_data %>%
filter(!is.na(LIDV)) %>%
subset(,-c(EVENTU,NAME)) %>%
spread(CMT,LIDV) %>%
rename(Concentration = `2`, Response = `3`)
NCA <- pk_data_cycle1 %>%
group_by(ID, DOSE) %>%
filter(!is.na(LIDV)) %>%
summarize(AUC_last = caTools::trapz(TIME, LIDV),
Cmax = max(LIDV)) %>%
tidyr::gather(PARAM,VALUE,-c(ID, DOSE)) %>%
ungroup() %>%
mutate(VALUE_NORM = VALUE / DOSE)
AUC_last <- NCA %>%
filter(PARAM == "AUC_last") %>%
rename(AUC_last = VALUE) %>%
subset(,-c(DOSE,PARAM,VALUE_NORM))
pk_vs_pd_data_day85 <- pk_vs_pd_data %>%
filter(DAY_label == "Day 85",
!is.na(Concentration),
!is.na(Response)) %>%
left_join(AUC_last)
time_units_dataset <- "hours"
time_units_plot <- "days"
trtact_label <- "Dose"
dose_label <- "Dose (mg)"
conc_label <- "Concentration (ng/ml)"
auc_label <- "AUCtau (h.(ng/ml))"
concnorm_label <- "Normalized Concentration (ng/ml)/mg"
sex_label <- "Sex"
w100_label <- "WEIGHTB>100"
pd_label <- "FEV1 (mL)"
cens_label <- "Censored"
```
## Provide an overview of the data
Summarize the data in a way that makes it easy to visualize the general trend of PK
over time and between doses. Using summary statistics can be helpful, e.g.
Mean +/- SE, or median, 5th & 95th percentiles. Consider either coloring by
dose or faceting by dose. Depending on the amount of data one graph may be
better than the other.
When looking at summaries of PK over time, there are several things to observe.
Note the number of doses and number of time points or sampling schedule.
Observe the overall shape of the average profiles. What is the average
Cmax per dose? Tmax? Does the elimination phase appear to be parallel across
the different doses? Is there separation between the profiles for different
doses? Can you make a visual estimate of the number of compartments that
would be needed in a PK model?
### Concentration over time, colored by Dose, mean +/- 95% CI
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pk_data_cycle1, aes(x = NOMTIME,
y = LIDV,
group = DOSE,
color = TRTACT_high2low)) +
xgx_geom_ci(conf_level = 0.95) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
labs(y = conc_label, color = trtact_label) +
xgx_annotate_status(status)
```
### Concentration over time, faceted by Dose, mean +/- 95% CI, overlaid on gray spaghetti plots
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pk_data_cycle1, aes(x = TIME, y = LIDV)) +
geom_line(aes(group = ID), color = "grey50", size = 1, alpha = 0.3) +
geom_point(aes(color = factor(CENS), shape = factor(CENS))) +
scale_shape_manual(values = c(1, 8)) +
scale_color_manual(values = c("grey50", "red")) +
xgx_geom_ci(aes(x = NOMTIME, color = NULL, group = NULL, shape = NULL), conf_level = 0.95) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
labs(y = conc_label, color = trtact_label) +
theme(legend.position = "none") +
facet_grid(.~TRTACT_low2high) +
xgx_annotate_status(status)
```
## Assess the dose linearity of exposure
### Dose Normalized Concentration over time, colored by Dose, mean +/- 95% CI
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pk_data_cycle1,
aes(x = NOMTIME,
y = LIDVNORM,
group = DOSE,
color = TRTACT_high2low)) +
xgx_geom_ci(conf_level = 0.95, alpha = 0.5, position = position_dodge(1)) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
labs(y = concnorm_label, color = trtact_label) +
xgx_annotate_status(status)
```
### NCA of dose normalized AUC and Cmax vs Dose
Observe the dose normalized AUC and Cmax over different doses. Does the
relationship appear to be constant across doses or do some doses stand
out from the rest? Can you think of reasons why some would stand out? For
example, the lowest dose may have dose normalized AUC much higher than the
rest, could this be due to BLQ observations? If the highest doses have dose
normalized AUC much higher than the others, could this be due to nonlinear
clearance, with clearance saturating at higher doses? If the highest doses
have dose normalized AUC much lower than the others, could there be saturation
of bioavailability, reaching the maximum absorbable dose?
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = NCA, aes(x = DOSE, y = VALUE_NORM)) +
geom_boxplot(aes(group = DOSE)) +
geom_smooth(method = "lm", color = "black") +
facet_wrap(~PARAM, scales = "free_y") +
labs(x = dose_label) +
theme(axis.title.y = element_blank()) +
xgx_annotate_status(status)
```
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = NCA[!(NCA$DOSE %in% c(3, 10)), ],
aes(x = DOSE, y = VALUE_NORM)) +
geom_boxplot(aes(group = DOSE)) +
geom_smooth(method = "lm", color = "black") +
facet_wrap(~PARAM, scales = "free_y") +
labs(x = dose_label) +
theme(axis.title.y = element_blank()) +
xgx_annotate_status(status)
```
## Explore covariate effects on PK
### Concentration over time, colored by categorical covariate, mean +/- 95% CI
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pk_data_cycle1, aes(x = NOMTIME,
y = LIDV,
group = WEIGHTB > 100,
color = WEIGHTB > 100)) +
xgx_geom_ci(conf_level = 0.95) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
facet_grid(.~DOSE) +
labs(y = conc_label, color = w100_label) +
xgx_annotate_status(status)
```
## PD marker over time, colored by Dose, mean (95% CI) by nominal time
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pd_data, aes(x = NOMTIME,
y = LIDV,
group = DOSE,
color = TRTACT_high2low)) +
xgx_geom_ci(conf_level = 0.95) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
labs(y = pd_label, color = trtact_label) +
xgx_annotate_status(status)
```
## PD marker over time, faceted by Dose, dots & lines grouped by individuals
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pd_data, aes(x = NOMTIME, y = LIDV, group = ID)) +
geom_line(alpha = 0.5) +
geom_point(alpha = 0.5) +
xgx_scale_y_log10() +
xgx_scale_x_time_units(units_dataset = time_units_dataset, units_plot = time_units_plot) +
facet_grid(~TRTACT_low2high) +
labs(y = pd_label, color = trtact_label) +
xgx_annotate_status(status)
```
## Explore Dose-Response Relationship
One of the key questions when looking at PD markers is to determine if there
is a dose-response relationship, and if there is, what dose is necessary to
achieve the desired effect? Simple dose-response plots can give insight into
these questions.
### PD marker by Dose, for endpoint of interest, mean (95% CI) by Dose
Plot PD marker against dose. Using summary statistics can be helpful, e.g.
Mean +/- SE, or median, 5th & 95th percentiles.
Here are some questions to ask yourself when looking at Dose-Response plots:
Do you see any relationship? Does response increase (decrease) with
increasing dose? Are you able to detect a plateau or Emax (Emin) on the
effect? If so, around what dose does this occur?
Warning: Even if you don’t see an Emax, that doesn’t mean there isn’t one.
Be very careful about using linear models for Dose-Response relationships.
Extrapolation outside of the observed dose range could indicate a higher dose
is always better (even if it isn’t).
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pd_data_baseline_day85, aes(x = DOSE,
y = LIDV,
group = DOSE)) +
xgx_geom_ci(conf_level = 0.95) +
facet_grid(~DAY_label) +
labs(x = dose_label, y = pd_label, color = trtact_label) +
xgx_annotate_status(status)
```
### PD marker by Dose, faceted by visit, mean (95% CI) by Dose
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pd_data, aes(x = DOSE, y = LIDV, group = DOSE)) +
xgx_geom_ci(conf_level = 0.95) +
facet_grid(~DAY_label) +
labs(x = dose_label, y = pd_label, color = trtact_label) +
xgx_annotate_status(status)
```
### Explore covariate effects on Dose-Response relationship
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = pd_data_baseline_day85, aes(x = DOSE,
y = LIDV,
group = WEIGHTB > 100,
color = WEIGHTB > 100)) +
xgx_geom_ci(conf_level = .95) +
facet_grid(~DAY_label) +
labs(x = dose_label, y = pd_label, color = w100_label) +
xgx_annotate_status(status)
```
## Explore Exposure-Response Relationship
Plot PD marker against concentration. Do you see any relationship? Does
response increase (decrease) with increasing dose? Are you able to detect
a plateau or Emax (Emin) on the effect?
Warning: Even if you don’t see an Emax, that doesn’t mean there isn’t one.
Be very careful about using linear models for Dose-Response or
Exposure-Response relationships. Extrapolation outside of the observed
dose range could indicate a higher dose is always better (even if it isn’t).
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
g = ggplot(data = pk_vs_pd_data_day85, aes(x = Concentration, y = Response)) +
geom_point(aes(color = TRTACT_high2low, shape = factor(CENS))) +
  geom_smooth(color = "black") +
xgx_scale_x_log10() +
labs(x = conc_label, y = pd_label, color = trtact_label, shape = cens_label) +
xgx_annotate_status(status)
print(g)
```
Plotting AUC vs response instead of concentration vs response may make more
sense in some situations. For example, when there is a large delay between
PK and PD it would be difficult to relate the time-varying concentration with
the response. If rich sampling is only done at a particular point in the
study, e.g. at steady state, then the AUC calculated on the rich profile
could be used as the exposure variable for a number of PD visits. If PK
samples are scarce, average Cmin could also be used as the exposure metric.
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
gAUC = g +
aes(x = AUC_last) +
xlab(auc_label)
print(gAUC)
```
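As a sketch of that last point, an average Cmin per subject could be derived from trough samples and joined to the response data in the same way as `AUC_last` above. The `TROUGH_FLAG` column below is hypothetical (it is not part of this dataset), so the code is illustrative only.
```{r, eval=FALSE}
# Illustrative sketch only: TROUGH_FLAG is a hypothetical indicator of trough
# (pre-dose) samples; this dataset does not contain such a column.
Cmin_avg <- pk_data %>%
  filter(TROUGH_FLAG == 1, !is.na(LIDV)) %>%
  group_by(ID) %>%
  summarize(Cmin_avg = mean(LIDV))
```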
## R Session Info
```{r}
sessionInfo()
```
| /scratch/gouwar.j/cran-all/cranData/xgxr/vignettes/sad_pkpd.Rmd |
---
title: "PK Exploration with nlmixr dataset"
author: "Andrew Stein, Fariba Khanshan, Alison Margolskee"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
self_contained: yes
vignette: >
%\VignetteIndexEntry{PK Exploration with nlmixr dataset for theophylline}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 7
)
```
## Overview
This document provides a template for exploring Single or Multiple Ascending
Dose PK data.
## Load Packages
```{r data_assignments, echo=TRUE, warning=FALSE, message=FALSE}
library(xgxr)
library(ggplot2)
library(dplyr)
# setting ggplot theme
xgx_theme_set()
```
## Load the dataset and assign columns. Take subsets of data that are needed
Dose (mg/kg) is multiplied by weight to obtain the dose in mg.
```{r, echo=TRUE, warning=FALSE, message=FALSE}
# units of dataset
time_units_dataset <- "hours"
time_units_plot <- "days"
dose_label <- "Dose (mg)"
conc_label <- "Concentration (ug/ml)"
concnorm_label <- "Normalized Concentration (ug/ml)/mg"
# covariates in the dataset
covariates <- c("WT")
# load dataset
data <- nlmixr_theo_sd
# make sure that the necessary columns are assigned
# five columns are required: TIME, LIDV, CMT, DOSE, DOSEREG
data <- data %>%
mutate(ID = ID) %>% #ID column
group_by(ID) %>%
mutate(TIME = TIME, #TIME column name
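         # NOMTIME: map observed times onto nominal sampling times by binning with cut()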
NOMTIME = as.numeric(as.character(cut(TIME,
breaks = c(-Inf, 0.1, 0.7, 1.5, 4, 8, 10.5, 15, Inf),
labels = c( 0, 0.5, 1, 2, 7, 9, 12, 24)))),
EVID = EVID, # EVENT ID >=1 is dose, 0 otherwise
CYCLE = 1, # CYCLE of PK data
LIDV = DV, # DEPENDENT VARIABLE column name
CENS = 0, # CENSORING column name
CMT = CMT, # COMPARTMENT column here (e.g. CMT or YTYPE)
DOSE = signif(max(AMT) * WT, 2), # DOSE column here (numeric value)
# convert mg/kg (in dataset) to mg
DOSEREG = DOSE) %>% # DOSE REGIMEN column here
ungroup()
# convert DOSEREG to factor for proper ordering in the plotting
# add LIDVNORM dose normalized concentration for plotting
data <- data %>%
arrange(DOSE) %>%
mutate(LIDVNORM = LIDV / DOSE,
DOSEREG = factor(DOSEREG, levels = unique(DOSEREG)),
DOSEREG_REV = factor(DOSEREG, levels = rev(unique(DOSEREG))))
# subset the PK observations (CMT == 2, TIME > 0) for plotting
data_pk <- filter(data, CMT == 2, TIME > 0)
# NCA
NCA <- data %>%
filter(CMT == 2, NOMTIME > 0, NOMTIME <= 24) %>%
group_by(ID) %>%
summarize(AUC_0_24 = caTools::trapz(TIME, LIDV),
Cmax_0_24 = max(LIDV),
Ctrough_0_24 = LIDV[length(LIDV)],
DOSE = DOSE[1],
WT = WT[1]) %>%
tidyr::gather(PARAM, VALUE, -c(ID, DOSE, WT)) %>%
mutate(VALUE_NORM = VALUE / DOSE) %>%
ungroup()
```
## Summary of the data issues and the covariates
```{r}
check <- xgx_check_data(data,covariates)
knitr::kable(check$summary)
knitr::kable(head(check$data_subset))
knitr::kable(check$cts_covariates)
knitr::kable(check$cat_covariates)
```
## Provide an overview of the data
Summarize the data in a way that makes it easy to visualize the general trend of
PK over time and between doses. Using summary statistics can be helpful,
e.g. Mean +/- SE, or median, 5th & 95th percentiles. Consider either coloring
by dose or faceting by dose. Depending on the amount of data one graph may
be better than the other.
When looking at summaries of PK over time, there are several things to observe.
Note the number of doses and number of time points or sampling schedule.
Observe the overall shape of the average profiles. What is the average Cmax
per dose? Tmax? Does the elimination phase appear to be parallel across the
different doses? Is there separation between the profiles for different
doses? Can you make a visual estimate of the number of compartments that would
be needed in a PK model?
### Concentration over Time, colored by dose, mean +/- 95% CI
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
glin <- ggplot(data = data_pk, aes(x = NOMTIME,
y = LIDV,
group = DOSE,
color = DOSEREG_REV)) +
stat_summary() +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
labs(y = conc_label, color = "Dose")
glog <- glin + scale_y_log10()
gridExtra::grid.arrange(gridExtra::arrangeGrob(glin, glog, nrow = 1))
```
### Side-by-side comparison of first administered dose and steady state
For multiple dose studies, zoom in on key visits for a clearer picture of the
profiles. Look for accumulation (if any) between first administered dose and
steady state.
```{r , echo=TRUE, warning=FALSE, message=FALSE, fig.height=4}
if (exists("data_pk_rich")) {
ggplot(data_pk_rich, aes(x = PROFTIME,
y = LIDV,
group = interaction(CYCLE,DOSE),
color = DOSEREG_REV)) +
facet_grid(~DAY_label, scales = "free_x") +
xgx_stat_ci() +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose")
}
```
### Concentration over Time, faceted by dose, mean +/- 95% CI, overlaid on gray spaghetti plots
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = data_pk, aes(x = TIME,
y = LIDV,
group = interaction(ID, CYCLE))) +
geom_line(size = 1, color = rgb(0.5, 0.5, 0.5), alpha = 0.3) +
geom_point(aes(color = factor(CENS), shape = factor(CENS)),
size = 2, alpha = 0.3) +
xgx_stat_ci(aes(x = NOMTIME, group = NULL, color = NULL)) +
facet_grid(.~DOSEREG) +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
ylab(conc_label) +
theme(legend.position = "none") +
scale_shape_manual(values = c(1, 8)) +
scale_color_manual(values = c("grey50", "red"))
```
## Explore variability
Use spaghetti plots to visualize the extent of variability between individuals.
The wider the spread of the profiles, the higher the between subject
variability. Distinguish different doses by color, or separate into different
panels. If coloring by dose, do the individuals in the different dose groups
overlap across doses? Does there seem to be more variability at higher or
lower concentrations?
### Concentration over Time, colored by dose, dots and lines grouped by individual
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=5}
ggplot(data = data_pk, aes(x = TIME,
y = LIDV,
group = interaction(ID, CYCLE),
color = factor(DOSEREG_REV),
shape = factor(CENS))) +
geom_line(size = 1, alpha = 0.5) +
geom_point() +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose", shape = "Censoring")
```
### Side-by-side comparison of first administered dose and steady state
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=4}
if (exists("data_pk_rich")) {
ggplot(data = data_pk_rich, aes(x = TIME,
y = LIDV,
group = interaction(ID, CYCLE),
color = DOSEREG_REV,
shape = factor(CENS))) +
geom_line(size = 1, alpha = 0.5) +
geom_point() +
facet_grid(~DAY_label, scales = "free_x") +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose", shape = "Censoring")
}
```
### Concentration over Time, faceted by dose, lines grouped by individual
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = data_pk, aes(x = TIME,
y = LIDV,
group = interaction(ID, CYCLE),
color = factor(CENS),
shape = factor(CENS))) +
geom_line(size = 1, alpha = 0.5) +
geom_point() +
facet_grid(.~DOSEREG) +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
ylab(conc_label) +
scale_shape_manual(values = c(1, 8)) +
scale_color_manual(values = c("grey50", "red")) +
theme(legend.position = "none")
```
## Assess the dose linearity of exposure
### Dose Normalized Concentration over Time, colored by dose, mean +/- 95% CI
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=3}
ggplot(data = data_pk, aes(x = NOMTIME,
y = LIDVNORM,
group = DOSEREG_REV,
color = DOSEREG_REV)) +
xgx_stat_ci() +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose")
```
### Side-by-side comparison of first administered dose and steady state
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=4}
if (exists("data_pk_rich")) {
ggplot(data_pk_rich, aes(x = NOMTIME,
y = LIDVNORM,
group = interaction(DOSE, CYCLE),
color = DOSEREG_REV)) +
xgx_stat_ci() +
facet_grid(~DAY_label,scales = "free_x") +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
labs(y = conc_label, color = "Dose")
}
```
## Explore irregularities in profiles
Plot individual profiles in order to inspect them for any irregularities.
Inspect the profiles for outlying data points that may skew results or bias
conclusions. Looking at the shapes of the individual profiles now, do they
support your observations made about the mean profile (e.g. number of
compartments, typical Cmax, Tmax)?
Plotting individual profiles on top of gray spaghetti plots puts individual
profiles into context, and may help identify outlying individuals for further
inspection. Are there any individuals that appear to have very high or low
Cmax compared to others within the same dose group? What about the timing of
Cmax? What about the slope of the elimination phase? Does it appear that any
subjects could have received an incorrect dose?
### Concentration over Time, faceted by individual, individual line plots overlaid on gray spaghetti plots for that dose group
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=7}
ggplot(data = data_pk, aes(x = TIME, y = LIDV)) +
geom_line() +
geom_point(aes(color = factor(CENS), shape = factor(CENS))) +
facet_wrap(~ID + DOSEREG) +
xgx_scale_x_time_units(time_units_dataset, time_units_plot) +
xgx_scale_y_log10() +
ylab(conc_label) +
theme(legend.position = "none") +
scale_shape_manual(values = c(1, 8)) +
scale_color_manual(values = c("black", "red"))
```
## NCA
### NCA of dose normalized AUC vs Dose
Observe the dose normalized AUC over different doses. Does the relationship
appear to be constant across doses or do some doses stand out from the rest?
Can you think of reasons why some would stand out? For example, the lowest
dose may have dose normalized AUC much higher than the rest, could this be
due to CENS observations? If the highest doses have dose normalized AUC much
higher than the others, could this be due to nonlinear clearance, with
clearance saturating at higher doses? If the highest doses have dose normalized
AUC much lower than the others, could there be saturation of bioavailability,
reaching the maximum absorbable dose?
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=4}
if (!exists("NCA")) {
warning("For PK data exploration, it is highly recommended to perform an NCA")
} else {
ggplot(data = NCA, aes(x = DOSE, y = VALUE_NORM)) +
geom_boxplot(aes(group = DOSE)) +
geom_smooth(method = "loess", color = "black") +
facet_wrap(~PARAM, scales = "free_y") +
xgx_scale_x_log10(breaks = unique(NCA$DOSE)) +
xgx_scale_y_log10() +
labs(x = dose_label, y = concnorm_label)
}
```
## Covariate Effects
```{r, echo=TRUE, warning=FALSE, message=FALSE, fig.height=5}
if (!exists("NCA")) {
warning("For covariate exploration, it is highly recommended to perform an NCA")
} else {
NCA_cts <- NCA[, c("PARAM", "VALUE", check$cts_covariates$Covariate)] %>%
tidyr::gather(COV, COV_VALUE, -c(PARAM, VALUE))
NCA_cat <- NCA[, c("PARAM", "VALUE", check$cat_covariates$Covariate)] %>%
tidyr::gather(COV, COV_VALUE, -c(PARAM, VALUE))
if (nrow(check$cts_covariates) >= 1) {
gg <- ggplot(data = NCA_cts, aes(x = COV_VALUE, y = VALUE)) +
geom_point() +
geom_smooth(method = "loess", color = "black") +
facet_grid(PARAM~COV,switch = "y", scales = "free_y") +
xgx_scale_x_log10() +
xgx_scale_y_log10() +
labs(x = "Covariate Value", y = "NCA Parameter Value")
print(gg)
}
if (nrow(check$cat_covariates) >= 1) {
gg <- ggplot(data = NCA_cat, aes(x = COV_VALUE, y = VALUE)) +
geom_boxplot() +
facet_grid(PARAM~COV, switch = "y", scales = "free_y") +
xgx_scale_y_log10() +
labs(x = "Covariate Value", y = "NCA Parameter Value")
print(gg)
}
}
```
| /scratch/gouwar.j/cran-all/cranData/xgxr/vignettes/theoph.Rmd |
---
title: "xgxr Overview"
author: "Andrew Stein, Fariba Khanshan, Alison Margolskee"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
self_contained: yes
vignette: >
%\VignetteIndexEntry{xgxr Overview}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 7
)
```
## Overview
The xgxr package supports a structured approach to exploring PKPD data
([outlined here](https://opensource.nibr.com/xgx/)). It also contains helper
functions for enabling the modeler to follow best R practices (by appending
the program name, figure name location, and draft status to each plot) and
enabling the modeler to follow best graphical practices (by providing an xgx
theme that reduces chart ink, and by providing time-scale, log-scale, and
reverse-log-transform-scale functions for more readable axes).
## Required Packages
```{r, message=FALSE}
library(tidyr)
library(dplyr)
library(ggplot2)
library(xgxr)
```
<!-- ## Data exploration
This package offers two frameworks for exploring data
1. Generate an Rmarkdown shell using `xgx_create_rmarkdown()`, which the user then edits with their dataset and column names. This Rmarkdown document relies on the `dplyr` and `ggplot2` packages, in addition to some add-on functionality to `ggplot2` that is provided by the `xgx` package. The Rmarkdown document is designed to be easy for the modeler to customize as needed.
2. Create a single page of overview of the data using the function `xgx_PK_summary` (and other functions to be added). This function is meant to provide a quick overview of the data on one page, but is not as customizable as the Rmarkdown document
### Rmarkdown shell for data exploration
To create a shell Rmarkdown with the code for exploring data, use `xgx_create_rmarkdown`. Currently, only the "pk" type is implemented.
```{r}
# xgx_create_rmarkdown(type = "pk", open_file = FALSE)
```
### PK summary plots
Function to make key plots of pharmacokinetic (PK) data.
```{r, fig.height=7}
#if (sessionInfo()$otherPkgs$ggplot2$Version == "2.2.1") {
# nsubj <- 50
# ntime <- 8
# time <- rep(c(1, 2, 4, 8, 12, 24, 36, 48), nsubj)
# id <- sort(rep(seq(1, nsubj), ntime))
# trt <- sort(rep(c(25, 50, 100, 150, 300), ntime * nsubj / 5))
# ka <- rep(rlnorm(nsubj, -0.5, 0.3), each = ntime)
# ke <- rep(rlnorm(nsubj, -3, 0.3), each = ntime)
# conc <- trt * (ka * ke / (ka - ke)) * (exp(-time * ke) - exp(-time * ka)) * (rep(stats::rlnorm(ntime * nsubj, 0.3, 0.1)))
# data <- data.frame(TIME = time, CONC = conc, ID = id, TRT = trt)
# xgx_PK_summary(data = data, labels = list(TRT = "Dose"),
# units_dataset = list(TIME = "Hours", CONC = "ng/mL", TRT = "mg"))
#} else {
# print("Currently only works with ggplot2 version 2.2.1 (on DaVinci), and not version 3")
#}
```
-->
## Traceability: annotating and saving plots and tables
### Saving figures
Our best practices require that we mark plots as "DRAFT" if not yet final,
and also list the program that created the plot and the location where the
plot is stored. This helps with the traceability of the work, by ensuring
that the following information is available for every plot in a report: the
R script used to create the figure, the location where the figure is stored,
and the time and date when the figure was created. The key functions here are:
* `xgx_annotate_status` allows for the addition of text (like the word draft)
to the plots
* `xgx_annotate_filenames` allows for printing the filenames as a caption for
the plot. It requires an input list `dirs` with particular fields, as shown
below.
The function `xgx_save` calls both of the above functions and it is
illustrated below.
This function also requires the user to input a width and height for the graph.
This is because often, the plots that are created have font that is so small
that it's impossible to read the x and y axes. We've found that the easiest
way to set the font size is "indirectly" by specifying the height and width
of the graph. Note that if you have a plot window open, you can get the
height and width by typing `dev.size()`
```{r}
dirs <- list(
parent_dir = tempdir(),
rscript_dir = tempdir(),
rscript_name = "example.R",
results_dir = tempdir(),
filename_prefix = "example_")
data <- data.frame(x = 1:1000, y = stats::rnorm(1000))
g <- xgx_plot(data = data, aes(x = x, y = y)) +
geom_point()
xgx_save(width = 4, height = 4, dirs = dirs, filename_main = "example_plot", status = "DRAFT")
```
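The status annotation can also be layered directly onto a plot without going through `xgx_save`, as in this minimal example (default placement assumed):
```{r}
g + xgx_annotate_status("DRAFT")
```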
The function `xgx_save` works only with ggplot objects. If the figure
that is created is not a ggplot object, it will not work. An alternative
is to use `xgx_annotate_status_png` to add the status and filename to png files.
```{r}
data <- data.frame(x = 1:1000, y = stats::rnorm(1000))
g <- xgx_plot(data = data, aes(x = x, y = y)) +
geom_point()
filename = file.path(tempdir(), "png_example.png")
ggsave(filename, plot = g, height = 4, width = 4, dpi = 75)
xgx_annotate_status_png(filename, "./ExampleScript.R")
```
<!--  -->
### Saving tables
We also provide a function `xgx_save_table` for annotating the relevant
information to csv files. The annotated table is shown below.
```{r}
x <- data.frame(ID = c(1, 2), SEX = c("male", "female"))
data <- xgx_save_table(x, dirs = dirs, filename_main = "ExampleTable")
knitr::kable(data)
```
## Graphics helpers
### xgx theme
The `xgx_theme()` function includes the xGx recommended plot settings.
It sets the background to white with light grey lines for the major and
minor breaks. This minimizes chart ink as recommended by Edward Tufte.
You can add `xgx_theme()` to an existing `ggplot` object, or you can
call `xgx_plot()` in place of `ggplot()` for all of your plot initiations.
```{r}
xgx_plot(mtcars, aes(x = cyl, y = mpg)) + geom_point()
```
You may wish to set the theme to `xgx_theme` for your R session, as we do below.
```{r}
theme_set(xgx_theme())
## Alternative, equivalent function:
xgx_theme_set()
```
<!-- ### Spaghetti plot
Spaghetti plots combine dots and lines, grouped and colored by individuals onto one plot. Try out `xgx_geom_spaghetti` which combines `geom_point()` and `geom_line()` into one `geom`, grouping and coloring by the `group` aesthetic. Calling `xgx_spaghetti` further combines `xgx_plot()` and `xgx_geom_spaghetti()` into one line.
-->
```{r, fig.width=4, fig.height=2}
# time <- rep(seq(1,10),5)
# id <- sort(rep(seq(1,5), 10))
# conc <- exp(-time)*sort(rep(rlnorm(5),10))
#
# data <- data.frame(time = time, concentration = conc, id = factor(id))
# xgx_plot() + xgx_geom_spaghetti(data = data, mapping = aes(x = time, y = concentration, group = id, color = id))
#
# xgx_spaghetti(data = data, mapping = aes(x = time, y = concentration, group = id, color = id))
```
### Confidence intervals
The code for confidence intervals is a bit complex and hard to remember.
Rather than copy-pasting this code, we provide the function `xgx_stat_ci`
for calculating and plotting default confidence intervals. `xgx_stat_ci` allows the definition of multiple `geom` options in one function call, defined through a list. The default is `geom = list("point","line","errorbar")`.
Additional ggplot options can be fed through the `ggplot` object call, or
the `xgx_stat_ci` layer. (Note that `xgx_stat_ci` and `xgx_geom_ci` are
equivalent). `xgx_stat_pi` and `xgx_geom_pi` work in a similar fashion but for percentile intervals.
```{r, fig.width=4, fig.height=2}
data <- data.frame(x = rep(c(1, 2, 3), each = 20),
y = rep(c(1, 2, 3), each = 20) + stats::rnorm(60),
group = rep(1:3, 20))
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_ci(conf_level = .95)
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_pi(percent = .95)
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_ci(conf_level = .95, geom = list("pointrange","line"))
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_ci(conf_level = .95, geom = list("ribbon","line"))
xgx_plot(data,aes(x = x, y = y, group = group, color = factor(group))) +
xgx_stat_ci(conf_level = .95, alpha = 0.5,
position = position_dodge(width = 0.5))
```
The default settings calculate the confidence interval based on the
Student t Distribution (assuming normally distributed data). You can also
specify "lognormal", "binomial", or "multinomial" for the `distribution`. The first will
perform the confidence interval operation on the log-scaled data, the second
uses the binomial exact confidence interval calculation from the `binom` package, and the
third uses `MultinomCI` from the `DescTools` package. The "multinomial" option
is used for ordinal response or categorical data.
Note: you DO NOT need to use both `distribution = "lognormal"`
and `scale_y_log10()`, choose only one of these.
```{r, fig.width=4, fig.height=2}
# plotting lognormally distributed data
data <- data.frame(x = rep(c(1, 2, 3), each = 20),
y = 10^(rep(c(1, 2, 3), each = 20) + stats::rnorm(60)),
group = rep(1:3, 20))
xgx_plot(data, aes(x = x, y = y)) +
xgx_stat_ci(conf_level = 0.95, distribution = "lognormal")
# note: you DO NOT need to use both distribution = "lognormal" and scale_y_log10()
xgx_plot(data,aes(x = x, y = y)) +
xgx_stat_ci(conf_level = 0.95) + xgx_scale_y_log10()
# plotting binomial data
data <- data.frame(x = rep(c(1, 2, 3), each = 20),
y = rbinom(60, 1, rep(c(0.2, 0.6, 0.8), each = 20)),
group = rep(1:3, 20))
xgx_plot(data, aes(x = x, y = y)) +
xgx_stat_ci(conf_level = 0.95, distribution = "binomial")
# Example plotting the percent of subjects in a categorical covariate group by treatment.
set.seed(12345)
data = data.frame(x = 120*exp(rnorm(100,0,1)),
response = sample(c("Trt1", "Trt2", "Trt3"), 100, replace = TRUE),
covariate = factor(sample(c("White","Black","Asian","Other"), 100, replace = TRUE),
levels = c("White", "Black", "Asian", "Other")))
xgx_plot(data = data) +
xgx_stat_ci(mapping = aes(x = response, response = covariate),
distribution = "ordinal") +
xgx_stat_ci(mapping = aes(x = 1, response = covariate), geom = "hline",
distribution = "ordinal") +
scale_y_continuous(labels = scales::percent_format()) +
facet_wrap(~covariate) +
xlab("Treatment group") + ylab("Percent of subjects by category")
```
`xgx_stat_ci` can now also cut data by quantiles of `x` using the `bins` option, e.g. `bins = 4` will cut the data by quartiles of `x`. You can also supply your own breaks to cut the data.
```{r}
# plotting
set.seed(12345)
data = data.frame(x = 120*exp(rnorm(100,0,1)),
response = sample(c("Mild","Moderate","Severe"), 100, replace = TRUE),
covariate = sample(c("Male","Female"), 100, replace = TRUE)) %>%
mutate(y = (50 + 20*x/(200 + x))*exp(rnorm(100, 0, 0.3)))
# plotting a lognormally distributed variable by quartiles of x
xgx_plot(data = data) +
xgx_stat_ci(mapping = aes(x = x, y = y, colour = covariate),
distribution = "lognormal", bins = 4)
# plotting ordinal or multinomial data, by quartiles of x
xgx_plot(data = data) +
xgx_stat_ci(mapping = aes(x = x, response = response, colour = covariate),
distribution = "ordinal", bins = 4) +
scale_y_continuous(labels = scales::percent_format()) + facet_wrap(~response)
xgx_plot(data = data) +
xgx_stat_ci(mapping = aes(x = x, response = response, colour = response),
distribution = "ordinal", bins = 4) +
scale_y_continuous(labels = scales::percent_format()) + facet_wrap(~covariate)
```
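For user-supplied cut points, the call might look like the sketch below. The `breaks` argument name is assumed from the description above rather than shown in these examples, so check `?xgx_stat_ci` for the exact interface.
```{r, eval=FALSE}
# Sketch: explicit cut points instead of quantile-based bins.
# The `breaks` argument name is an assumption; verify in ?xgx_stat_ci.
xgx_plot(data = data) +
  xgx_stat_ci(mapping = aes(x = x, y = y, colour = covariate),
              distribution = "lognormal",
              breaks = c(0, 50, 100, 200, 500))
```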
### Nonlinear smoothing (e.g. Emax), and ordinal response smoothing
The current ggplot2::geom_smooth does not allow for plotting confidence bands for method = "nls", as ggplot2 does not supply a `predictdf` for an object of class `nls`, which geom_smooth silently calls to calculate the ymin and ymax for the confidence bands. The xgxr package includes a definition of `predictdf.nls`, allowing for confidence bands for method = "nls".
```{r}
set.seed(123456)
Nsubj <- 10
Doses <- c(0, 25, 50, 100, 200)
Ntot <- Nsubj*length(Doses)
times <- c(0,14,30,60,90)
dat1 <- data.frame(ID = 1:(Ntot),
DOSE = rep(Doses, Nsubj),
E0 = 50*rlnorm(Ntot, 0, 0.3),
Emax = 100*rlnorm(Ntot, 0, 0.3),
ED50 = 50*rlnorm(Ntot, 0, 0.3)) %>%
dplyr::mutate(Response = (E0 + Emax*DOSE/(DOSE + ED50))*rlnorm(Ntot, 0, 0.3) ) %>%
merge(data.frame(ID = rep(1:(Ntot), each = length(times)), Time = times), by = "ID")
gg <- xgx_plot(data = dat1, aes(x = DOSE, y = Response))
gg <- gg + geom_point()
gg
gg + geom_smooth(method = "nlsLM",
formula = y ~ E0 + Emax*x/(ED50 + x),
method.args = list(start = list(E0 = 1, ED50 = 1, Emax = 1),
lower = c(-Inf, 0, -Inf)))
```
xgxr also includes an Emax smooth function called `xgx_geom_smooth_emax` which utilizes the "nlsLM" method, and silently calls the `predictdf.nls` defined by xgxr.
```{r}
gg + xgx_geom_smooth_emax()
gg +
xgx_geom_smooth_emax(geom = "ribbon", color = "black", fill = NA, linetype = "dashed") +
xgx_geom_smooth_emax(geom = "line", color = "red")
```
xgxr also modifies the stats method `predict.nls` for `nls` objects in order to include confidence interval prediction. Upon loading the xgxr package, the `predict` method for class `nls` should be updated to the xgxr version, and include functionality to supply confidence intervals. In order to output the confidence intervals, be sure to specify `interval = "confidence"`. The output will contain a "fit" data.frame with values for "fit", "lwr" and "upr" representing the prediction and lower and upper confidence intervals.
```{r}
mod <- nlsLM(formula = Response ~ E0 + Emax * DOSE / (ED50 + DOSE),
data = dat1,
start = list(E0 = 1, ED50 = 1, Emax = 1),
lower = c(-Inf, 0, -Inf))
predict(mod,
newdata = data.frame(DOSE = c(0, 25, 50, 100, 200)),
se.fit = TRUE)
predict(mod,
newdata = data.frame(DOSE = c(0, 25, 50, 100, 200)),
se.fit = TRUE, interval = "confidence", level = 0.95)
```
xgxr also includes ordinal response smoothing as an option under the `xgx_stat_smooth` function, indicated by `method = "polr"`. This requires a dataset of x values and response values, to be defined in the mapping. This method also allows defining of color, fill, facet, linetype, etc. by the response category, while preserving the ordinal response fit across these categories.
```{r}
# example with ordinal data (method = "polr")
set.seed(12345)
data = data.frame(x = 120*exp(stats::rnorm(100,0,1)),
response = sample(c("Mild","Moderate","Severe"), 100, replace = TRUE),
covariate = sample(c("Male","Female"), 100, replace = TRUE)) %>%
dplyr::mutate(y = (50 + 20*x/(200 + x))*exp(stats::rnorm(100, 0, 0.3)))
# example coloring by the response categories
xgx_plot(data = data) +
xgx_stat_smooth(mapping = ggplot2::aes(x = x, response = response,
colour = response, fill = response),
method = "polr") +
ggplot2::scale_y_continuous(labels = scales::percent_format())
# example faceting by the response categories, coloring by a different covariate
xgx_plot(data = data) +
xgx_stat_smooth(mapping = ggplot2::aes(x = x, response = response,
colour = covariate, fill = covariate),
method = "polr", level = 0.80) +
ggplot2::facet_wrap(~response) +
ggplot2::scale_y_continuous(labels = scales::percent_format())
```
### Nice log scale
This version of the log scale function shows the tick marks between the
major breaks (i.e. at 1, 2, 3, ... 10, instead of just 1 and 10). It also
uses $$10^x$$ notation when the labels are base 10 and are very small or
very large (<.001 or >9999)
```{r}
df <- data.frame(x = c(0, stats::rlnorm(1000, 0, 1)),
y = c(0, stats::rlnorm(1000, 0, 3)))
xgx_plot(data = df, aes(x = x, y = y)) +
geom_point() +
xgx_scale_x_log10() +
xgx_scale_y_log10()
```
### Reverse log transform
This transform is useful for plotting data on a percentage scale that can
approach 100% (such as receptor occupancy data).
```{r, fig.height=3.5, warning=FALSE}
conc <- 10^(seq(-3, 3, by = 0.1))
ec50 <- 1
data <- data.frame(concentration = conc,
bound_receptor = 1 * conc / (conc + ec50))
gy <- xgx_plot(data, aes(x = concentration, y = bound_receptor)) +
geom_point() +
geom_line() +
xgx_scale_x_log10() +
xgx_scale_y_reverselog10()
gx <- xgx_plot(data, aes(x = bound_receptor, y = concentration)) +
geom_point() +
geom_line() +
xgx_scale_y_log10() +
xgx_scale_x_reverselog10()
gridExtra::grid.arrange(gy, gx, nrow = 1)
```
### Nice scale for percent change data
This transform is useful for plotting percent change from baseline data.
Percent change data can range from -100% to +Inf%, and depending on the range
of the data, a linear scale can lose the desired resolution. This transform
plots percent change data on a scale of log10(PCHG + 100%), similar to a
log scale of ratio to baseline.
```{r, fig.height=3.5, warning=FALSE}
Nsubj <- 10
Doses <- c(0, 25, 50, 100, 200)
Ntot <- Nsubj*length(Doses)
times <- c(0,14,30,60,90)
dat1 <- data.frame(ID = 1:(Ntot),
DOSE = rep(Doses, Nsubj),
PD0 = rlnorm(Ntot, log(100), 1),
Kout = exp(rnorm(Ntot,-2, 0.3)),
Imax = 1,
ED50 = 25) %>%
dplyr::mutate(PDSS = PD0*(1 - Imax*DOSE/(DOSE + ED50))*exp(rnorm(Ntot, 0.05, 0.3)) ) %>%
merge(data.frame(ID = rep(1:(Ntot), each = length(times)), Time = times), by = "ID") %>%
dplyr::mutate(PD = ((PD0 - PDSS)*(exp(-Kout*Time)) + PDSS),
PCHG = (PD - PD0)/PD0)
ggplot2::ggplot(dat1 %>% subset(Time == 90),
ggplot2::aes(x = DOSE, y = PCHG, group = DOSE)) +
ggplot2::geom_boxplot() +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
ylab("Percent Change from Baseline") +
xlab("Dose (mg)")
ggplot2::ggplot(dat1,
ggplot2::aes(x = Time, y = PCHG, group = ID, color = factor(DOSE))) +
ggplot2::geom_line() +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
guides(color = guide_legend(title = "Dose (mg)")) +
ylab("Percent Change from Baseline")
dat2 <- data.frame(ID = 1:(Ntot),
DOSE = rep(Doses, Nsubj),
PD0 = rlnorm(Ntot, log(100), 1),
Kout = exp(rnorm(Ntot,-2, 0.3)),
Emax = 50*rlnorm(Ntot, 0, 0.3),
ED50 = 300) %>%
dplyr::mutate(PDSS = PD0*(1 + Emax*DOSE/(DOSE + ED50))*exp(rnorm(Ntot, -1, 0.3)) ) %>%
merge(data.frame(ID = rep(1:(Ntot), each = length(times)), Time = times), by = "ID") %>%
dplyr::mutate(PD = ((PD0 - PDSS)*(exp(-Kout*Time)) + PDSS),
PCHG = (PD - PD0)/PD0)
ggplot2::ggplot(dat2, ggplot2::aes(x = DOSE, y = PCHG, group = DOSE)) +
ggplot2::geom_boxplot() +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
ylab("Percent Change from Baseline") +
xlab("Dose (mg)")
ggplot2::ggplot(dat2,
ggplot2::aes(x = Time, y = PCHG, group = ID, color = factor(DOSE))) +
ggplot2::geom_line() +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
guides(color = guide_legend(title = "Dose (mg)")) +
ylab("Percent Change from Baseline")
```
### Scaling x-axis as a time scale
For time, it's often good for the x ticks to be spaced in a particular way.
For instance, for hours, subdividing in increments by 24, 12, 6, and 3 hours
can make more sense than by 10 or 100. Similarly for days, increments of 7
or 28 days are preferred over 5 or 10 days. `xgx_scale_x_time_units` allows
for this, taking the units of the input dataset and the desired units of the plot as arguments.
```{r, fig.height=7}
data <- data.frame(x = 1:1000, y = stats::rnorm(1000))
g <- xgx_plot(data = data, aes(x = x, y = y)) +
geom_point()
g1 <- g + xgx_scale_x_time_units(units_dataset = "hours", units_plot = "hours")
g2 <- g + xgx_scale_x_time_units(units_dataset = "hours", units_plot = "days")
g3 <- g + xgx_scale_x_time_units(units_dataset = "hours", units_plot = "weeks")
g4 <- g + xgx_scale_x_time_units(units_dataset = "hours", units_plot = "months")
gridExtra::grid.arrange(g1, g2, g3, g4, nrow = 2)
```
## Data checking
### Numerical check
We've found that during exploration, it can be extremely important to
check the dataset for issues. This can be done using the `xgx_check_data`
or `xgx_summarize_data` function (the two functions are identical).
```{r, message=FALSE}
data <- mad_missing_duplicates %>%
filter(CMT %in% c(1, 2, 3)) %>%
rename(DV = LIDV,
YTYPE = CMT,
USUBJID = ID)
covariates <- c("WEIGHTB", "SEX")
check <- xgx_check_data(data, covariates)
knitr::kable(check$summary)
knitr::kable(head(check$data_subset))
```
You can also get an overview of the covariates in the dataset
with `xgx_summarize_covariates`. The covariate summaries are also provided
in the `xgx_check_data` and `xgx_summarize_data` functions.
```{r}
covar <- xgx_summarize_covariates(data, covariates)
knitr::kable(covar$cts_covariates)
knitr::kable(covar$cat_covariates)
```
| /scratch/gouwar.j/cran-all/cranData/xgxr/vignettes/xgxr_overview.Rmd |
#' @title Akaike's An Information Criterion for excess hazard model with
#' baseline hazard following a B-splines function
#'
#' @description Calculates Akaike's ‘An Information Criterion’ for fitted
#' models from `xhaz`.
#'
#' @param object a fitted model object obtained from `xhaz` function
#'
#' @param ... optionally more fitted model objects obtained from `xhaz` function
#'
#' @param k numeric, the penalty per parameter to be used; the default \code{k = 2}
#' is the classical AIC.
#'
#' @return the value corresponds to the AIC calculated from the total
#' log-likelihood of the fitted model if just one object is provided.
#' If multiple objects are provided, a data.frame with columns corresponding to the
#' objects and rows representing the number of parameters in the model (df) and the AIC
#'
#'
#'
#' @examples
#' \donttest{
#' library("xhaz")
#'
#' # Giorgi et al. model: baseline excess hazard is a quadratic B-splines
#' # function with two interior knots, allowing here linear and
#' # proportional effects of the covariates on the
#' # baseline excess hazard.
#' levels(simuData$sex) <- c("male", "female")
#'
#' fitphBS <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, 6),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#' fitphBS
#' AIC(fitphBS)
#' }
#'
#'
#' @export
AIC.bsplines <- function(object, ..., k = 2) {
dots.object <- list(...)
if (length(dots.object) == 0) {
if (inherits(object, "bsplines")) {
df <- length(object$coefficients)
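      # AIC = k * (number of parameters) - 2 * logLik; element [2] of loglik
      # is the log-likelihood at convergence ([1] is the initial value)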
val <- (k * length(object$coefficients) - 2 * (object$loglik))[2]
} else{
stop("object must be a xhaz function output")
}
return(val)
} else{
object <- list(object, ...)
aic_bis <- function(i) {
if (inherits(object[[i]], "bsplines")) {
df <- length(object[[i]]$coefficients)
val <-
(k * length(object[[i]]$coefficients) - 2 * (object[[i]]$loglik))[2]
resval <- data.frame(df = df, AIC = val)
return(resval)
} else{
stop("object must be a xhaz function output")
}
}
val <- sapply(1:length(object), aic_bis)
Call <- match.call()
colnames(val) <- as.character(Call[-1])
return(val)
}
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/AIC.bsplines.R |
#' @title Akaike's An Information Criterion for excess hazard model with
#' baseline hazard following a piecewise constant function
#'
#' @description Calculates Akaike's ‘An Information Criterion’ for fitted
#' models from `xhaz`.
#'
#' @param object a fitted model object obtained from `xhaz` function
#'
#' @param ... optionally more fitted model objects obtained from `xhaz` function
#'
#' @param k numeric, the penalty per parameter to be used; the default \code{k = 2}
#' is the classical AIC.
#'
#' @return the value corresponds to the AIC calculated from the total
#' log-likelihood of the fitted model if just one object is provided.
#' If multiple objects are provided, a data.frame with columns corresponding to the
#' objects and rows representing the number of parameters in the model (df) and the AIC
#'
#' @examples
#' library("xhaz")
#'
#' # Esteve et al. model: baseline excess hazard is a piecewise constant
#' # function, with linear and proportional effects of the covariates on
#' # the baseline excess hazard.
#'
#' levels(simuData$sex) <- c("male", "female")
#'
#' set.seed(1980)
#' simuData2 <- simuData[sample(nrow(simuData), size = 500), ]
#'
#' fit.estv2 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData2,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, NA, NA, NA, 6),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant", pophaz = "classic")
#'
#'
#' fit.estv2
#'
#' AIC(fit.estv2)
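#'
#' # With several fitted models, AIC() returns a table with one column per
#' # model; for example, assuming a second nested fit `fit.estv1` on the
#' # same data (not created above):
#' # AIC(fit.estv1, fit.estv2)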
#'
#'
#' @export
AIC.constant <- function(object, ..., k = 2) {
dots.object <- list(...)
if (length(dots.object) == 0) {
if (inherits(object, "constant")) {
df <- length(object$coefficients)
val <-
(k * length(object$coefficients) - 2 * (object$loglik))[2]
} else{
stop("object must be a xhaz function output")
}
return(val)
} else{
object <- list(object, ...)
aic_bis <- function(i) {
if (inherits(object[[i]], "constant")) {
df <- length(object[[i]]$coefficients)
val <-
(k * length(object[[i]]$coefficients) - 2 * (object[[i]]$loglik))[2]
resval <- data.frame(df = df, AIC = val)
return(resval)
} else{
stop("object must be a xhaz function output")
}
}
val <- sapply(1:length(object), aic_bis)
Call <- match.call()
colnames(val) <- as.character(Call[-1])
return(val)
}
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/AIC.constant.R |
#' @title Bayesian Information Criterion for excess hazard model with
#' baseline hazard following a B-splines function
#'
#' @description Calculates the Bayesian Information Criterion for fitted
#' models from `xhaz`.
#'
#' @param object a fitted model object obtained from `xhaz` function
#'
#' @param ... optionally more fitted model objects obtained from `xhaz` function
#'
#' @return the value corresponds to the BIC calculated from the total
#' log-likelihood of the fitted model if just one object is provided.
#' If multiple objects are provided, a data.frame with columns corresponding to the
#' objects and rows representing the number of parameters in the model (df) and the BIC.
#'
#' @examples
#' \donttest{
#' library("xhaz")
#'
#' # Giorgi et al. model: baseline excess hazard is a quadratic B-splines
#' # function with two interior knots, allowing here linear and
#' # proportional effects of the covariates on the
#' # baseline excess hazard.
#' levels(simuData$sex) <- c("male", "female")
#'
#' fitphBS <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, 6),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#' fitphBS
#' BIC(fitphBS)
#' }
#' @export
BIC.bsplines <- function(object, ...) {
dots.object <- list(...)
if (length(dots.object) == 0) {
if (inherits(object, "bsplines")) {
df <- length(object$coefficients)
      # BIC penalty is log(n) per parameter; object$n is assumed to hold the
      # number of observations (log(length(object$n)) would always be 0)
      val <- (log(object$n) * length(object$coefficients) - 2 * (object$loglik))[2]
} else{
stop("object must be a xhaz function output")
}
return(val)
} else{
object <- list(object, ...)
aic_bis <- function(i) {
if (inherits(object[[i]], "bsplines")) {
df <- length(object[[i]]$coefficients)
        val <-
          (log(object[[i]]$n) * length(object[[i]]$coefficients) - 2 * (object[[i]]$loglik))[2]
resval <- data.frame(df = df, AIC = val)
return(resval)
} else{
stop("object must be a xhaz function output")
}
}
val <- sapply(1:length(object), aic_bis)
Call <- match.call()
colnames(val) <- as.character(Call[-1])
return(val)
}
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/BIC.bsplines.R |
#' @title Bayesian Information Criterion for excess hazard model with
#' baseline hazard following a piecewise constant function
#'
#' @description Calculates the Bayesian Information Criterion for fitted
#' models from `xhaz`.
#'
#' @param object a fitted model object obtained from `xhaz` function
#'
#' @param ... optionally more fitted model objects obtained from `xhaz` function
#'
#' @return the value corresponds to the BIC calculated from the total
#' log-likelihood of the fitted model if just one object is provided.
#' If multiple objects are provided, a data.frame with columns corresponding to the
#' objects and rows representing the number of parameters in the model (df) and the BIC.
#'
#' @examples
#' library("xhaz")
#'
#' # Esteve et al. model: baseline excess hazard is a piecewise constant
#' # function, with linear and proportional effects of the covariates on
#' # the baseline excess hazard.
#'
#' levels(simuData$sex) <- c("male", "female")
#'
#' set.seed(1980)
#' simuData2 <- simuData[sample(nrow(simuData), size = 500), ]
#' fit.estv2 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData2,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, NA, NA, NA, 6),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant", pophaz = "classic")
#'
#'
#' fit.estv2
#'
#' BIC(fit.estv2)
#'
#'
#' @export
BIC.constant <- function(object, ...) {
dots.object <- list(...)
if (length(dots.object) == 0) {
if (inherits(object, "constant")) {
df <- length(object$coefficients)
      # BIC penalty is log(n) per parameter; object$n is assumed to hold the
      # number of observations
      val <- (log(object$n) * length(object$coefficients) - 2 * (object$loglik))[2]
} else{
stop("object must be a xhaz function output")
}
return(val)
} else{
object <- list(object, ...)
aic_bis <- function(i) {
if (inherits(object[[i]], "constant")) {
df <- length(object[[i]]$coefficients)
        val <-
          (log(object[[i]]$n) * length(object[[i]]$coefficients) - 2 * (object[[i]]$loglik))[2]
resval <- data.frame(df = df, AIC = val)
return(resval)
} else{
stop("object must be a xhaz function output")
}
}
val <- sapply(1:length(object), aic_bis)
Call <- match.call()
colnames(val) <- as.character(Call[-1])
return(val)
}
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/BIC.constant.R |
# Retrieve the unexported `Csurvsplit` routine from the survival package at
# run time. The eval(parse(...)) indirection builds and evaluates the call
# `Csurvsplit <- survival:::Csurvsplit` instead of writing `:::` directly in
# the source (presumably to avoid the R CMD check note on `:::` usage).
Csurvsplit2 <- function() {
  newfunc <- eval(parse(text = paste0(
    "Csurvsplit", '<-survival:::', "Csurvsplit"
  )))
  newfunc
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/Csurvsplit2.R |
#' @title anova.bsplines function used for likelihood-ratio Test of two models
#' from xhaz function
#'
#' @description This function compute an analysis of deviance table for two
#' excess hazard models fitted using xhaz R package.
#'
#' @param object an object of class bsplines
#'
#' @param ... an object of class bsplines
#'
#' @param test a character string. The appropriate test is a likelihood-ratio
#' test; any other choice results in the error "Not yet implemented test!".
#'
#'
#' @keywords anova.bsplines
#'
#' @note As expected, the comparison of two or more excess hazard models by
#' anova will only be valid if they are fitted to the same
#' dataset, and if the compared models are nested. This may be a problem if
#' there are missing values.
#'
#' @return An object of class \code{anova} inheriting from class \code{matrix}.
#' The different columns contain respectively the degrees of freedom and the
#' log-likelihood values of the two nested models, the degree of freedom of the
#' chi-square statistic, the chi-square statistic and the p-value of the
#' likelihood ratio test.
#'
#' @seealso \code{\link{xhaz}}, \code{\link{summary.bsplines}}, \code{\link{print.constant}}
#'
#'
#' @author Juste Goungounga, Robert Darlin Mba, Nathalie Grafféo and
#' Roch Giorgi
#'
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group.
#' More accurate cancer-related excess mortality through correcting
#' background mortality for extra variables.
#' Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#' Mba RD, Goungounga JA, Grafféo N, Giorgi R; CENSUR working survival group.
#' Correcting inaccurate background mortality in excess hazard models
#' through breakpoints. BMC Med Res Methodol. 2020 Oct 29;20(1):268.
#' doi: 10.1186/s12874-020-01139-z. PMID: 33121436; PMCID: PMC7596976.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/33121436/}{PubMed})
#'
#'
#' Giorgi R, Abrahamowicz M, Quantin C, Bolard P, Esteve J, Gouvernet J,
#' Faivre J. A relative survival regression model using B-spline functions
#' to model non-proportional hazards.
#' Statistics in Medicine 2003; 22: 2767-84.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/12939785/}{PubMed})
#'
#'
#' @examples
#'
#' # load the data set in the package
#' \donttest{
#' library("survival")
#' library("numDeriv")
#' library("survexp.fr")
#' library("statmod")
#'
#' data("dataCancer", package = "xhaz") # load the data set in the package
#'
#' fit.phBS <- xhaz(
#' formula = Surv(obs_time_year, event) ~ ageCentre + immuno_trt,
#' data = dataCancer,
#' ratetable = survexp.fr::survexp.fr,
#' interval = c(0, NA, NA, max(dataCancer$obs_time_year)),
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#'
#'
#' fit.nphBS <- xhaz(
#' formula = Surv(obs_time_year, event) ~ ageCentre + qbs(immuno_trt),
#' data = dataCancer,
#' ratetable = survexp.fr::survexp.fr,
#' interval = c(0, NA, NA, max(dataCancer$obs_time_year)),
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#' anova(fit.phBS, fit.nphBS)
#' }
#'
#' @importFrom stats printCoefmat
#' @export
anova.bsplines <- function(object, ..., test = "LRT") {
if (test == "LRT") {
if (!inherits(object, "bsplines"))
stop("argument must be a xhaz fitted model")
args <- list(...)
if (length(args) >= 1 & any("bsplines" %in% unlist(lapply(1:length(args),
function(i) {
class(args[[i]])
})))) {
nmodels <- length(unlist(lapply(1:length(args),
function(i) {
inherits(args[[i]], "bsplines")
})))
} else{
nmodels <- 0
}
if (nmodels == 1) {
object2 <- args[[1]]
}else{
stop("The anova function compare only two models")
}
if (!inherits(object2, c("bsplines", "constant")))
stop("argument must be a xhaz fitted model")
if (length(object$loglik) > 1) {
pvalue <- 1 - pchisq(2*(abs(object$loglik[2] - object2$loglik[2])),
df = abs(length(object$coefficients) - length(object2$coefficients)))
}else if (length(object$loglik) == 1) {
pvalue <- 1 - pchisq(2*(abs(object$loglik[1] - object2$loglik[1])),
df = abs(length(object$coefficients) - length(object2$coefficients)))
}
cat("Assumption: Model 1 nested within Model 2
\n")
cat("Likelihood ratio test\n")
cat("Model 1: \n")
print(object$call[2][[1]])
cat("Model 2: \n")
print(object2$call[2][[1]])
df <- c(length(object$coef), length(object2$coef))
if (length(object$loglik) > 1) {
loglik <- c(object$loglik[2], object2$loglik[2])
dif.df <- c(NA, abs(length(object2$coef) - length(object$coef)))
      Chisq <- c(NA, round(2 * abs(object2$loglik[2] - object$loglik[2]),
                           3))
}else if (length(object$loglik) == 1) {
loglik <- c(object$loglik[1], object2$loglik[1])
dif.df <- c(NA, abs(length(object2$coef) - length(object$coef)))
      Chisq <- c(NA, round(2 * abs(object2$loglik[1] - object$loglik[1]),
                           3))
}
p.value <- c(NA, round(pvalue, 10))
x <- cbind(df, loglik, dif.df, Chisq, p.value)
colnames(x) <- c("Model.df", "loglik",
"Df", "Chisq", "Pr(>Chisq)")
class(x) <- c("anova","matrix", "array" )
printCoefmat(x,
P.values = TRUE,
digits = max(getOption("digits") - 2L, 3L),
signif.stars = TRUE,
na.print = "",
has.Pvalue = TRUE)
}else {
stop("Not yet implemented test!")
}
invisible(x)
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/anova.bsplines.R |
#' @title anova.constant function used for likelihood-ratio Test of two models
#' from xhaz function
#'
#' @description This function computes an analysis of deviance table for two
#' excess hazard models fitted using the xhaz R package.
#'
#' @param object an object of class constant
#'
#' @param ... an object of class constant
#'
#' @param test a character string. The appropriate test is a likelihood-ratio
#' test, all other choices result in Not yet implemented test.
#'
#' @keywords anova.constant
#'
#' @note As expected, the comparison by anova of two or more excess hazard
#' models is only valid if the models are fitted to the same dataset and are
#' nested. This may be a problem if there are missing values.
#'
#'
#'
#' @return An object of class \code{anova} inheriting from class \code{matrix}.
#' The different columns contain respectively the degrees of freedom and the
#' log-likelihood values of the two nested models, the degree of freedom of the
#' chi-square statistic, the chi-square statistic and the p-value of the
#' likelihood ratio test.
#'
#'
#' @seealso \code{\link{xhaz}}, \code{\link{summary.bsplines}}, \code{\link{print.constant}}
#'
#'
#' @author Juste Goungounga, Robert Darlin Mba, Nathalie Grafféo and Roch Giorgi
#'
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group.
#' More accurate cancer-related excess mortality through correcting
#' background mortality for extra variables.
#' Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#' Mba RD, Goungounga JA, Grafféo N, Giorgi R; CENSUR working survival group.
#' Correcting inaccurate background mortality in excess hazard models
#' through breakpoints. BMC Med Res Methodol. 2020 Oct 29;20(1):268.
#' doi: 10.1186/s12874-020-01139-z. PMID: 33121436; PMCID: PMC7596976.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/33121436/}{PubMed})
#'
#'
#' Giorgi R, Abrahamowicz M, Quantin C, Bolard P, Esteve J, Gouvernet J,
#' Faivre J. A relative survival regression model using B-spline functions
#' to model non-proportional hazards.
#' Statistics in Medicine 2003; 22: 2767-84.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/12939785/}{PubMed})
#'
#'
#' @examples
#' \donttest{
#' # load the data set in the package
#' library("survival")
#' library("numDeriv")
#' library("survexp.fr")
#'
#'
#'data("dataCancer") # load the data set in the package
#'
#' fit.ph <- xhaz(
#' formula = Surv(obs_time_year, event) ~ ageCentre + immuno_trt,
#' data = dataCancer,
#' ratetable = survexp.fr::survexp.fr,
#' interval = c(0, NA, NA, NA, max(dataCancer$obs_time_year)),
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date'),
#' baseline = "constant", pophaz = "classic")
#'
#'
#'
#' fit.ph2 <- xhaz(
#' formula = Surv(obs_time_year, event) ~ ageCentre ,
#' data = dataCancer,
#' ratetable = survexp.fr::survexp.fr,
#' interval = c(0, NA, NA, NA, max(dataCancer$obs_time_year)),
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date'),
#' baseline = "constant", pophaz = "classic")
#'
#' anova(fit.ph2, fit.ph)
#' }
#'
#' @importFrom stats printCoefmat
#' @export
anova.constant <- function(object, ...,
test = "LRT") {
if (test == "LRT") {
if (!inherits(object, "constant"))
stop("argument must be a xhaz fitted model")
args <- list(...)
if (length(args) >= 1 & any("constant" %in% unlist(lapply(1:length(args),
function(i) {
class(args[[i]])
})))) {
nmodels <- length(unlist(lapply(1:length(args),
function(i) {
inherits(args[[i]], "constant")
})))
} else{
nmodels <- 0
}
if (nmodels == 1) {
object2 <- args[[1]]
}else{
stop("The anova function compare only two models")
}
if (!inherits(object2, "constant"))
stop("argument must be a xhaz fitted model")
if (length(object$loglik) > 1) {
pvalue <- 1 - pchisq(2*(abs(object$loglik[2] - object2$loglik[2])),
df = abs(length(object$coefficients) - length(object2$coefficients)))
} else if (length(object$loglik) == 1) {
pvalue <- 1 - pchisq(2*(abs(object$loglik[1] - object2$loglik[1])),
df = abs(length(object$coefficients) - length(object2$coefficients)))
}
cat("Assumption: Model 1 nested within Model 2
\n")
cat("Likelihood ratio test\n")
cat("Model 1: \n")
print(object$call[2][[1]])
cat("Model 2: \n")
print(object2$call[2][[1]])
df <- c(length(object$coef), length(object2$coef))
if (length(object$loglik) > 1) {
loglik <- c(object$loglik[2], object2$loglik[2])
dif.df <- c(NA, abs(length(object2$coef) - length(object$coef)))
      Chisq <- c(NA, round(2 * abs(object2$loglik[2] - object$loglik[2]), 3))
} else if (length(object$loglik) == 1) {
loglik <- c(object$loglik[1], object2$loglik[1])
dif.df <- c(NA, abs(length(object2$coef) - length(object$coef)))
      Chisq <- c(NA, round(2 * abs(object2$loglik[1] - object$loglik[1]), 3))
}
p.value <- c(NA, round(pvalue, 10))
x <- cbind(df, loglik, dif.df, Chisq, p.value)
colnames(x) <- c("Model.df", "loglik",
"df", "Chisq", "Pr(>Chisq)")
class(x) <- c("anova","matrix", "array" )
printCoefmat(x,
P.values = TRUE,
digits = max(getOption("digits") - 2L, 3L),
signif.stars = TRUE,
na.print = "NA", has.Pvalue = TRUE)
}else {
stop("Not yet implemented test!")
}
invisible(x)
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/anova.constant.R |
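# Internal driver for the breakpoint model: splits each subject's follow-up
# time at the requested cut points (tosplit), refits the excess hazard model
# on the expanded data set with xhaz2, and flags the returned fit as coming
# from a split analysis (fit$splitting).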
breakpoint_with_cut <- function(formula = formula,
data = data,
ratetable = ratetable,
rmap = rmap,
baseline = baseline,
pophaz = pophaz,
only_ehazard = only_ehazard,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
interval = interval,
ratedata = ratedata,
subset = subset,
na.action = na.action,
init = init,
control = control,
optim = optim,
scale = scale,
trace = trace,
speedy = speedy,
nghq = nghq, m_int = m_int, rcall,...) {
time_elapsed0 <- as.numeric(base::proc.time()[3])
tsplitting <- splitting <- TRUE
tnewdata <- data
newdata2 <- tosplit(formula = formula,
add.rmap.cut = add.rmap.cut,
data = data, rmap = rmap,
interval = interval, subset = subset)
splitting <- FALSE
data <- newdata2$tdata2
fit <- xhaz2(formula = formula,
data = data,
ratetable = ratetable, rmap = rmap,
baseline = baseline,
pophaz = pophaz,
only_ehazard = only_ehazard,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
splitting = splitting,
interval = interval,
ratedata = ratedata,
subset = subset,
na.action = na.action,
init = init,
control = control,
optim = optim,
scale = scale,
trace = trace,
speedy = speedy,
nghq = nghq, m_int = m_int,
rcall = rcall, ...)
fit$splitting <- tsplitting
return(fit)
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/breakpoint_with_cut.R |
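# Internal utility: converts a long data.frame of hazard rates (one row per
# combination of the stratification variables) into a 'ratetable'-classed
# array with one dimension per variable in by.vars.
# Usage sketch (hypothetical values; rows must be ordered with the first
# variable of by.vars varying fastest, as produced by expand.grid):
#   DF <- expand.grid(age = 0:100, sex = c("male", "female"))
#   DF$haz <- rep(1e-3, nrow(DF))
#   rt <- longDF2ratetable(DF, value.var = "haz")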
longDF2ratetable <-
function(DF,
value.var = "haz",
by.vars = setdiff(names(DF), value.var)) {
univals <- lapply(DF[, by.vars], unique)
names(univals) <- NULL
dimvec <- sapply(DF[, by.vars], function(x) {
length(unique(x))
},
simplify = TRUE)
ar <- array(DF[, value.var], dim = dimvec)
dimnames(ar) <- univals
attr(ar, "class") <- "ratetable"
attr(ar, "dimid") <- colnames(DF)
ar
invisible()
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/classical_functions.R |
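# Internal helper: cumulative expected (population) hazard for subject i.
# The life table is walked year by year along attained age and calendar year,
# and each ratetable cell is weighted by the time spent in it (coef01 to
# coef06, computed by the caller); attained ages are capped at the last age
# class of the life table (nb.age.dc).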
cumpop <- function(i,
ratetable,
Indic00,
nb.anne00,
coefs00,
nb.age.dc,
dateDiag00,
dateAnniv00) {
Indic <- as.data.frame(Indic00)[i,]
coef01 <- coefs00$coef01[i]
coef02 <- coefs00$coef02[i]
coef03 <- coefs00$coef03[i]
coef04 <- coefs00$coef04[i]
coef05 <- coefs00$coef05[i]
coef06 <- coefs00$coef06[i]
nb.anne <- nb.anne00[i]
dateDiag <- dateDiag00[i]
dateAnniv <- dateAnniv00[i]
if (as.integer(nb.anne) != 0) {
if (dateAnniv != dateDiag) {
if (dateAnniv <= as.Date(paste0(format(dateDiag, '%Y'), "-12-31"))) {
if (coef06 != 0) {
indic01 <-
do.call(rbind, lapply(1:(nb.anne + 1), function(x)
rbind(Indic)))
indic01$age <- indic01$age + c(0:(nb.anne))
indic01$year <- indic01$year + c(0:(nb.anne))
indic02 <-
do.call(rbind, lapply(1:(nb.anne + 1), function(x)
rbind(Indic)))
indic02$age <- indic02$age + c(1:(nb.anne + 1))
indic02$year <- indic02$year + c(0:nb.anne)
indic03 <- do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic03$age <- indic03$age + c(1:nb.anne)
indic03$year <- indic03$year + c(1:nb.anne)
if (max(indic01$age) < nb.age.dc) {
indic06 <- indic03[nb.anne,]
indic06$age <- indic06$age + 1
indic06$year <- indic06$year + 1
} else{
indic01$age[indic01$age >= nb.age.dc] <- nb.age.dc
indic02$age[indic02$age >= nb.age.dc] <- nb.age.dc
indic03$age[indic03$age >= nb.age.dc] <- nb.age.dc
indic06 <- indic03[nb.anne,]
indic06$age <- indic06$age + 1
indic06$year <- indic06$year + 1
}
results <- sum(coef01 * ratetable[as.matrix(indic01)]) +
sum(coef02 * ratetable[as.matrix(indic02)]) +
sum(coef03 * ratetable[as.matrix(indic03)]) +
coef06 * ratetable[as.matrix(indic06)]
}
else{
if (coef05 != 0) {
indic01 <- do.call(rbind,
lapply(1:(nb.anne + 1),
function(x)
rbind(Indic)))
indic01$age <- indic01$age + c(0:(nb.anne))
indic01$year <- indic01$year + c(0:(nb.anne))
indic02 <- do.call(rbind,
lapply(1:nb.anne,
function(x)
rbind(Indic)))
indic02$age <- indic02$age + c(1:nb.anne)
indic02$year <- indic02$year + c(0:(nb.anne - 1))
indic03 <- do.call(rbind,
lapply(1:nb.anne,
function(x)
rbind(Indic)))
indic03$age <- indic03$age + c(1:nb.anne)
indic03$year <- indic03$year + c(1:nb.anne)
if (max(indic01$age) < nb.age.dc) {
indic05 <- indic02[nb.anne,]
indic05$age <- indic05$age + 1
indic05$year <- indic05$year + 1
} else{
indic01$age[indic01$age >= nb.age.dc] <- nb.age.dc
indic02$age[indic02$age >= nb.age.dc] <- nb.age.dc
indic03$age[indic03$age >= nb.age.dc] <- nb.age.dc
indic05 <- indic02[nb.anne,]
indic05$age <- indic05$age + 1
indic05$year <- indic05$year + 1
}
results <- sum(coef01 * ratetable[as.matrix(indic01)]) +
sum(coef02 * ratetable[as.matrix(indic02)]) +
sum(coef03 * ratetable[as.matrix(indic03)]) +
coef05 * ratetable[as.matrix(indic05)]
}
else{
if (coef04 != 0) {
indic01 <- do.call(rbind,
lapply(1:nb.anne,
function(x)
rbind(Indic)))
indic01$age <- indic01$age + c(0:(nb.anne - 1))
indic01$year <- indic01$year + c(0:(nb.anne - 1))
indic02 <-
do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic02$age <- indic02$age + c(1:nb.anne)
indic02$year <- indic02$year + c(0:(nb.anne - 1))
indic03 <- do.call(rbind,
lapply(1:nb.anne, function(x)rbind(Indic)))
indic03$age <- indic03$age + c(1:nb.anne)
indic03$year <- indic03$year + c(1:nb.anne)
if (max(indic01$age) < nb.age.dc) {
indic04 <- indic01[nb.anne,]
indic04$age <- indic04$age + 1
indic04$year <- indic04$year + 1
} else{
indic01$age[indic01$age >= nb.age.dc] <- nb.age.dc
indic02$age[indic02$age >= nb.age.dc] <- nb.age.dc
indic03$age[indic03$age >= nb.age.dc] <- nb.age.dc
indic04 <- indic01[nb.anne,]
indic04$age <- indic04$age + 1
indic04$year <- indic04$year + 1
}
results <- sum(coef01 * ratetable[as.matrix(indic01)]) +
sum(coef02 * ratetable[as.matrix(indic02)]) +
sum(coef03 * ratetable[as.matrix(indic03)]) +
coef04 * ratetable[as.matrix(indic04)]
}
else{
indic01 <-
do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic01$age <- indic01$age + c(0:(nb.anne - 1))
indic01$year <- indic01$year + c(0:(nb.anne - 1))
indic02 <-
do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic02$age <- indic02$age + c(1:nb.anne)
indic02$year <- indic02$year + c(0:(nb.anne - 1))
indic03 <-
do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic03$age <- indic03$age + c(1:nb.anne)
indic03$year <- indic03$year + c(1:nb.anne)
results <-
sum(coef01 * ratetable[as.matrix(indic01)]) +
sum(coef02 * ratetable[as.matrix(indic02)]) +
sum(coef03 * ratetable[as.matrix(indic03)])
}
}
}
}
else{
if (coef06 != 0) {
indic01 <-
do.call(rbind, lapply(1:(nb.anne + 1), function(x)
rbind(Indic)))
indic01$age <- indic01$age + c(0:(nb.anne))
indic01$year <- indic01$year + c(0:(nb.anne))
indic02 <- do.call(rbind, lapply(1:(nb.anne + 1), function(x)
rbind(Indic)))
indic02$age <- indic02$age + c(0:nb.anne)
indic02$year <- indic02$year + c(1:(nb.anne + 1))
indic03 <- do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic03$age <- indic03$age + c(1:nb.anne)
indic03$year <- indic03$year + c(1:nb.anne)
if (max(indic01$age) < nb.age.dc) {
indic06 <- indic03[nb.anne,]
indic06$age <- indic06$age + 1
indic06$year <- indic06$year + 1
} else{
indic01$age[indic01$age >= nb.age.dc] <- nb.age.dc
indic02$age[indic02$age >= nb.age.dc] <- nb.age.dc
indic03$age[indic03$age >= nb.age.dc] <- nb.age.dc
indic06 <- indic03[nb.anne,]
indic06$age <- indic06$age + 1
indic06$year <- indic06$year + 1
}
results <- sum(coef01 * ratetable[as.matrix(indic01)]) +
sum(coef02 * ratetable[as.matrix(indic02)]) +
sum(coef03 * ratetable[as.matrix(indic03)]) +
coef06 * ratetable[as.matrix(indic06)]
}
else{
if (coef05 != 0) {
indic01 <- do.call(rbind, lapply(1:(nb.anne + 1), function(x)
rbind(Indic)))
indic01$age <- indic01$age + c(0:(nb.anne))
indic01$year <- indic01$year + c(0:(nb.anne))
indic02 <- do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic02$age <- indic02$age + c(0:(nb.anne - 1))
indic02$year <- indic02$year + c(1:nb.anne)
indic03 <- do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic03$age <- indic03$age + c(1:nb.anne)
indic03$year <- indic03$year + c(1:nb.anne)
if (max(indic01$age) < nb.age.dc) {
indic05 <- indic02[nb.anne,]
indic05$age <- indic05$age + 1
indic05$year <- indic05$year + 1
} else{
indic01$age[indic01$age >= nb.age.dc] <- nb.age.dc
indic02$age[indic02$age >= nb.age.dc] <- nb.age.dc
indic03$age[indic03$age >= nb.age.dc] <- nb.age.dc
indic05 <- indic02[nb.anne,]
indic05$age <- indic05$age + 1
indic05$year <- indic05$year + 1
}
results <-
sum(coef01 * ratetable[as.matrix(indic01)]) +
sum(coef02 * ratetable[as.matrix(indic02)]) +
sum(coef03 * ratetable[as.matrix(indic03)]) +
coef05 * ratetable[as.matrix(indic05)]
} else{
if (coef04 != 0) {
indic01 <- do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic01$age <- indic01$age + c(0:(nb.anne - 1))
indic01$year <- indic01$year + c(0:(nb.anne - 1))
indic02 <- do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic02$age <- indic02$age + c(0:(nb.anne - 1))
indic02$year <- indic02$year + c(1:nb.anne)
indic03 <- do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic03$age <- indic03$age + c(1:nb.anne)
indic03$year <- indic03$year + c(1:nb.anne)
if (max(indic01$age) < nb.age.dc) {
indic04 <- indic01[nb.anne,]
indic04$age <- indic04$age + 1
indic04$year <- indic04$year + 1
} else{
indic01$age[indic01$age >= nb.age.dc] <- nb.age.dc
indic02$age[indic02$age >= nb.age.dc] <- nb.age.dc
indic03$age[indic03$age >= nb.age.dc] <- nb.age.dc
indic04 <- indic01[nb.anne,]
indic04$age <- indic04$age + 1
indic04$year <- indic04$year + 1
}
results <- sum(coef01 * ratetable[as.matrix(indic01)]) +
sum(coef02 * ratetable[as.matrix(indic02)]) +
sum(coef03 * ratetable[as.matrix(indic03)]) +
coef04 * ratetable[as.matrix(indic04)]
} else{
indic01 <- do.call(rbind,
lapply(1:nb.anne,
function(x)
rbind(Indic)))
indic01$age <- indic01$age + c(0:(nb.anne - 1))
indic01$year <- indic01$year + c(0:(nb.anne - 1))
indic02 <- do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic02$age <- indic02$age + c(0:(nb.anne - 1))
indic02$year <- indic02$year + c(1:nb.anne)
indic03 <- do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic03$age <- indic03$age + c(1:nb.anne)
indic03$year <- indic03$year + c(1:nb.anne)
results <- sum(coef01 * ratetable[as.matrix(indic01)]) +
sum(coef02 * ratetable[as.matrix(indic02)]) +
sum(coef03 * ratetable[as.matrix(indic03)])
}
}
}
}
} else{
if (coef04 == 0) {
indic01 <- do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic01$age <- indic01$age + c(0:(nb.anne - 1))
indic01$year <- indic01$year + c(0:(nb.anne - 1))
indic02 <- do.call(rbind, lapply(1:nb.anne, function(x) rbind(Indic)))
indic02$age <- indic02$age + c(0:(nb.anne - 1))
indic02$year <- indic02$year + c(1:(nb.anne))
if (max(indic01$age) < nb.age.dc) {
indic03 <- indic01[nb.anne,]
indic03$age <- indic03$age + 1
indic03$year <- indic03$year + 1
} else{
indic01$age[indic01$age >= nb.age.dc] <- nb.age.dc
indic02$age[indic02$age >= nb.age.dc] <- nb.age.dc
indic03 <- indic01[nb.anne,]
indic03$age <- indic03$age + 1
indic03$year <- indic03$year + 1
}
results <-
sum(coef01 * ratetable[as.matrix(indic01)]) +
sum(coef02 * ratetable[as.matrix(indic02)]) +
coef03 * ratetable[as.matrix(indic03)]
} else{
indic01 <- do.call(rbind, lapply(1:(nb.anne + 1), function(x)
rbind(Indic)))
indic01$age <- indic01$age + c(0:(nb.anne))
indic01$year <- indic01$year + c(0:(nb.anne))
indic02 <- do.call(rbind, lapply(1:nb.anne, function(x)
rbind(Indic)))
indic02$age <- indic02$age + c(0:(nb.anne - 1))
indic02$year <- indic02$year + c(1:(nb.anne))
if (max(indic01$age) < nb.age.dc) {
indic04 <- indic02[nb.anne,]
indic04$age <- indic04$age + 1
indic04$year <- indic04$year + 1
} else{
indic01$age[indic01$age >= nb.age.dc] <- nb.age.dc
indic02$age[indic02$age >= nb.age.dc] <- nb.age.dc
indic04 <- indic02[nb.anne,]
indic04$age <- indic04$age + 1
indic04$year <- indic04$year + 1
}
results <- sum(coef01 * ratetable[as.matrix(indic01)]) +
sum(coef02 * ratetable[as.matrix(indic02)]) +
coef04 * ratetable[as.matrix(indic04)]
}
}
} else{
if (dateAnniv != dateDiag) {
if (dateAnniv <= as.Date(paste0(format(dateDiag, '%Y'), "-12-31"))) {
if (coef03 != 0) {
indic01 <- Indic
indic02 <- Indic
indic02$age <- indic02$age + 1
indic03 <- Indic
indic03$age <- indic03$age + 1
indic03$year <- indic03$year + 1
results <- coef01 * ratetable[as.matrix(indic01)] +
coef02 * ratetable[as.matrix(indic02)] +
coef03 * ratetable[as.matrix(indic03)]
}
else{
if (coef02 != 0) {
indic01 <- Indic
indic02 <- Indic
indic02$age <- indic02$age + 1
results <- coef01 * ratetable[as.matrix(indic01)] +
coef02 * ratetable[as.matrix(indic02)]
} else{
indic01 <- Indic
results <- coef01 * ratetable[as.matrix(indic01)]
}
}
} else{
if (coef03 != 0) {
indic01 <- Indic
indic02 <- Indic
indic02$year <- indic02$year + 1
indic03 <- Indic
indic03$age <- indic03$age + 1
indic03$year <- indic03$year + 1
results <- coef01 * ratetable[as.matrix(indic01)] +
coef02 * ratetable[as.matrix(indic02)] +
coef03 * ratetable[as.matrix(indic03)]
}
else{
if (coef02 != 0) {
indic01 <- Indic
indic02 <- Indic
indic02$year <- indic02$year + 1
results <- coef01 * ratetable[as.matrix(indic01)] +
coef02 * ratetable[as.matrix(indic02)]
}
else{
indic01 <- Indic
results <- coef01 * ratetable[as.matrix(indic01)]
}
}
}
} else{
if (coef02 != 0) {
indic01 <- Indic
indic02 <- Indic
indic02$year <- indic02$year + 1
results <- coef01 * ratetable[as.matrix(indic01)] +
coef02 * ratetable[as.matrix(indic02)]
} else{
indic01 <- Indic
results <- coef01 * ratetable[as.matrix(indic01)]
}
}
}
return(as.numeric(results))
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/cumpop.R |
#' Simulated data with cause-of-death information and non-comparability bias in terms of individuals' expected hazard
#'
#' Simulated data
#'
#'
#' @docType data
#'
#' @usage data(dataCancer)
#'
#' @format This dataset contains the following variables:
#' \describe{
#' \item{obs_time}{Follow-up time (months)}
#' \item{obs_time_year}{Follow-up time (years)}
#' \item{event}{Vital status}
#' \item{age}{Age at diagnosis}
#'  \item{agegrp}{"<30", "30_60" and ">=60" age groups}
#'  \item{ageCentre}{Centered age at diagnosis}
#'  \item{sexx}{Sex (Female, Male)}
#'  \item{immuno_trt}{Treatment group}
#'  \item{year_date}{Date of diagnosis}
#' }
#'
#'
#' @keywords datasets
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group.
#' More accurate cancer-related excess mortality through correcting
#' background mortality for extra variables.
#' Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#' Mba RD, Goungounga JA, Grafféo N, Giorgi R; CENSUR working survival group.
#' Correcting inaccurate background mortality in excess hazard models
#' through breakpoints. BMC Med Res Methodol. 2020 Oct 29;20(1):268.
#' doi: 10.1186/s12874-020-01139-z. PMID: 33121436; PMCID: PMC7596976.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/33121436/}{PubMed})
#'
#'
#' @examples
#' data(dataCancer)
#' summary(dataCancer)
"dataCancer"
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/dataCancer.R |
#' @title duplicate function
#'
#' @description Duplicate data for survival analysis in the context of
#' competing risks, where an individual can experience only one of alternative
#' events, using the Lunn & McNeil (Biometrics, 1995) approaches.
#' Duplication of data proceeds as follows: Suppose that we study \code{J}
#' distinct types of events. Each observation concerning a given subject is
#' duplicated \code{J} times, with one row for each type of event. In addition,
#' \code{(J-1)} dummy variables are created, each indicating the type of event
#' in relation with that observation (\code{delta.j=1} if the event of type j
#' is the observed one and \code{0} otherwise).
#' Since, for a given subject, only the first occurring event is considered,
#' the status indicator equals \code{1} for that event and \code{0} for all the
#' others. In the case of a censored observation (dropout or administrative
#' censoring), the same principle applies: each subject's data are duplicated
#' \code{J} times, with \code{(J-1)} dummy variables and a status indicator
#' equal to \code{0} for all observations.
#'
#' @param status the censoring status indicator (numeric vector),
#' 0=alive, 1=dead.
#'
#'
#' @param event the indicator of the event type (numeric vector).
#' By default, the event==0 acts as the censoring indicator.
#'
#'
#' @param data a data frame containing the data to duplicate.
#'
#'
#' @keywords duplicate
#'
#' @return A data.frame containing the duplicated data with the new dummy
#' variables, named \code{delta.number_of_the_event}, indicating the type of
#' event.
#'
#' @author Roch Giorgi
#'
#' @references Lunn M and McNeil D. Applying Cox regression to competing risks.
#' Biometrics 1995;51:524-532
#' (\href{https://pubmed.ncbi.nlm.nih.gov/7662841/}{PubMed})
#'
#' @examples
#'
#' ## Create the simplest test data set
#' data1 <- data.frame(futime = c(1, 2, 5, 2, 1, 7, 3, 4, 8, 8),
#' fustat = c(0, 1, 1, 1, 0, 0, 1, 0, 1, 1),
#' firstevent = c(0, 2, 1, 2, 0, 0, 1, 0, 2, 2),
#' x = c(1, 0, 0, 1, 0, 1, 1, 1, 0, 0))
#'
#' ## Duplicate data1 with firstevent == 0 as the censoring indicator.
#' dupli.data <- duplicate(status=fustat, event=firstevent, data=data1)
#'
#'
#' data2 <- data.frame(futime = c(10, 2, 7, 3, 4, 9, 13, 2, 5, 9),
#' fustat = c(0, 1, 1, 1, 0, 0, 1, 0, 1, 1),
#' firstevent = c(3, 2, 1, 2, 3, 3, 1, 3, 2, 2),
#' x = c(1, 0, 0, 1, 0, 1, 1, 1, 0, 0))
#'
#'
#' ## Duplicate data2 with firstevent == 3 as the censoring indicator.
#'
#' dupli.data <- duplicate(status = fustat,
#' event = firstevent == 3,
#' data = data2)
#'
#'
#' # Joint modeling
#' coxph(Surv(futime, fustat) ~ delta.2 + x + delta.2:(x), data = dupli.data)
#'
#' coxph(Surv(futime, fustat) ~ delta.1 + x + delta.1:(x), data = dupli.data)
#'
#'
#' @export
duplicate <- function(status, event, data){
call <- match.call()
status <- as.character(call[[2]])
event <- as.character(call[[3]])
ref.e <- 0
if (length(call[[3]]) != 1) {
event <- as.character(eval(expression(call[[3]][[2]])))
ref.e <- call[[3]][[3]]
}
if (!is.numeric(data[, status]))
stop("Status variable is not numeric.")
if (!is.numeric(data[, event]))
stop("Event variable is not numeric.")
n.d <- length(unique(data[, event]))
data <- cbind(data.frame(tid = c(1:nrow(data))), data)
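  # Each row is replicated n.d times (one copy per event type); the temporary
  # id 'tid' tracks the original subject so that the status indicator can be
  # re-aligned below.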
d.data <- data.frame(mapply(rep, data, n.d, SIMPLIFY = FALSE))
d.data <- d.data[do.call(order, d.data), ]
dimnames(d.data)[[1]] <- 1:nrow(d.data)
temp.e <- sort(unique(data[, event]))
temp.e <- c(ref.e, sort(temp.e[!temp.e == ref.e]))
d.data[, event] <- rep(temp.e, nrow(d.data)/n.d)
d.data <- cbind(d.data,
mapply(
rep,
data.frame(delta = contr.treatment(as.factor(temp.e))),
nrow(data)))
d.data[, status] <- rep(0, nrow(d.data))
d.data[match(paste(data$tid, data[, event]),
paste(d.data$tid, d.data[, event])), status] <- data[, status]
d.data <- d.data[d.data[,event] != ref.e, -1]
dimnames(d.data)[[1]] <- 1:nrow(d.data)
class(d.data) <- c("data.frame","duplicate")
return(d.data)
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/duplicate.R |
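# Internal fitting routine for the piecewise-constant (Esteve-type) excess
# hazard model: builds the interval indicators and exposure times, then
# maximises the likelihood of the full model and of the null model (baseline
# only), either by Newton-Raphson (esteve.ph.maxim) or through optim
# (esteve.ph.optim.maxim).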
esteve.ph.fit <- function(x, y, ehazard, ehazardInt, int, covtest, bsplines,
init, control, event, Terms,strats, add.rmap,
add.rmap.cut, ageDiag, ageDC, optim, trace, speedy) {
k <- length(int) - 1
nvar <- ncol(x)
nrowx <- nrow(x)
nstrata <- ifelse(is.null(strats), 1, max(strats))
attr(Terms, "nstrata") <- nstrata
if (!is.null(add.rmap)) {
if (!is.factor(add.rmap)) {
stop("The alpha argument must be a factor.")
} else{
if (add.rmap.cut$breakpoint) {
nalpha <- (length(add.rmap.cut$cut) + 1)*nlevels(add.rmap)
}else{
nalpha <- nlevels(add.rmap)
}
}
} else{
nalpha <- 0
}
#Initialization of theta0
if (is.null(init)) {
theta0 <- c(rep(0, nvar + k * nstrata + nalpha))
} else{
if (length(unlist(init)) != (nvar + k * nstrata + nalpha)) {
stop("The number of initials values must the same as
\nthe number of parameters to estimate.")
} else{
theta0 <- unlist(init)
names(theta0) <- NULL
}
}
#Initialization of tau
tau <- c(rep(0, k * nstrata))
names(tau) <- rep(sapply(1:k,
function(i, int1) {
paste("[", int1[i],
"-",
int1[i + 1],
"[", sep = "")},
int1 = round(int, 2)), nstrata)
if (nalpha) {
if (nalpha == 1) {
names(theta0) <- c(dimnames(x)[[2]], names(tau), "alpha")
} else{
if (add.rmap.cut$breakpoint) {
break.levels <- levels(cut(ageDC, breaks = c(min(ageDC), add.rmap.cut$cut, max(ageDC))))
names(theta0) <- c(dimnames(x)[[2]],
names(tau),
c(unlist(
lapply(1:nlevels(add.rmap),
function(i)
paste(
paste0("alpha.",levels(add.rmap))[i],
break.levels,
# 1:(length(add.rmap.cut$cut) + 1),
sep = "_")))))
} else {
names(theta0) <- c(dimnames(x)[[2]],
names(tau),
paste0('alpha.',
levels(add.rmap)))
}
}
} else{
names(theta0) <- c(dimnames(x)[[2]], names(tau))
}
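  # integr1[i, j]: time spent by subject i in time interval j (exposure);
  # indic[i, j]: indicator that subject i's follow-up ends in interval j.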
integr1 <- matrix(rep(0, nrowx * k), ncol = k)
indic <- matrix(rep(0, nrowx * k), ncol = k)
  # The indicator function differs if there is a time-dependent covariate
  if (ncol(y) == 2) {
    integr1 <- sapply(1:k, function(i, int, y)
      (int[i + 1] < y[, 1]) * (int[i + 1] - int[i]) +
        (int[i] <= y[, 1] & y[, 1] <= int[i + 1]) * (y[, 1] - int[i]),
      int = int, y = y)
    indic <- sapply(1:k, function(i, int, y)
      (int[i] <= y[, 1] & y[, 1] < int[i + 1]),
      int = int, y = y)
  } else {
    integr1 <- sapply(1:k, function(i, int, y)
      ((y[, 1] <= int[i]) &
         (int[i + 1] < y[, 2])) * (int[i + 1] - int[i]) +
        ((y[, 1] <= int[i]) & (int[i] <= y[, 2] &
                                 y[, 2] <= int[i + 1])) * (y[, 2] - int[i]) +
        ((y[, 1] > int[i] & y[, 1] < int[i + 1]) &
           (int[i + 1] < y[, 2])) * (int[i + 1] - y[, 1]) +
        ((y[, 1] > int[i] & y[, 1] < int[i + 1]) &
           (int[i] <= y[, 2] & y[, 2] <= int[i + 1])) *
        (y[, 2] - y[, 1]),
      int = int, y = y)
    indic <- sapply(1:k, function(i, int, y)
      (int[i] <= y[, 2] & y[, 2] < int[i + 1]),
      int = int,
      y = y)
  }
#Full model
if (optim) {
Fmodel <- esteve.ph.optim.maxim(x = x, y = y,
theta0 = theta0,
nvar = nvar,
k = k,
indic = indic,
event = event,
integr1 = integr1,
ehazard = ehazard,
ehazardInt = ehazardInt,
control.iter.max = control$iter.max,
control.eps = control$eps,
Terms = Terms,
strats = strats,
nstrata = nstrata,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
ageDiag = ageDiag,
trace = trace,
speedy = speedy)
}
else{
Fmodel <- esteve.ph.maxim(x = x, y,
theta0 = theta0,
nvar = nvar,
k = k,
indic = indic,
event = event,
integr1 = integr1,
ehazard = ehazard,
ehazardInt = ehazardInt,
control.iter.max = control$iter.max,
control.eps = control$eps,
Terms = Terms,
strats = strats,
nstrata = nstrata,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
ageDiag = ageDiag,
trace = trace)
}
#Null model: only coefficients for the baseline
if (nvar > 0) {
theta0N <- Fmodel$theta0[(nvar + 1):(nvar + k * nstrata + nalpha)]
nvarN <- 0
xN <- as.matrix(rep(1, nrow(x)), ncol = 1)
if (optim) {
Nmodel <- esteve.ph.optim.maxim(x = xN, y = y,
theta0 = theta0N,
nvar = nvarN,
k = k,
indic = indic,
event = event,
integr1 = integr1,
ehazard = ehazard,
ehazardInt = ehazardInt,
control.iter.max = control$iter.max,
control.eps = control$eps,
Terms = Terms,
strats = strats,
nstrata = nstrata,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
ageDiag = ageDiag,
trace = trace,
speedy = speedy)
} else{
Nmodel <- esteve.ph.maxim(x = xN, y = y,
theta0 = theta0N,
nvar = nvarN,
k = k,
indic = indic,
event = event,
integr1 = integr1,
ehazard = ehazard,
ehazardInt = ehazardInt,
control.iter.max = control$iter.max,
control.eps = control$eps,
Terms = Terms,
strats = strats,
nstrata = nstrata,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
ageDiag = ageDiag,
trace = trace)
}
} else{
Nmodel <- Fmodel
}
if (sum(covtest) > 0) {
cov.test <- covtest <- rep(TRUE, ncol(x))
xT <- as.matrix(x[, grep("FALSE", as.character(covtest))])
theta0T <- c(Fmodel$theta0[grep("FALSE", as.character(covtest))],
Fmodel$theta0[(nvar + 1):(nvar + k * nstrata)])
nvarT <- ncol(xT)
Tmodel <- esteve.ph.maxim(xT, y,
theta0T,
nvarT,
k,
indic,
event,
integr1,
ehazard,
ehazardInt,
control$iter.max,
control$eps,
Terms,
strats,
nstrata,
add.rmap,
add.rmap.cut,
ageDiag,
trace = trace
)
dum <- c(rep(0, length(theta0)))
names(dum) <- names(theta0)
Tmodel$theta0 <- replace(dum,
grep("FALSE",
as.character(is.na(
match(names(theta0),
names(Tmodel$theta0))))),
Tmodel$theta0)
TF <- esteve.ph.maxim(x, y,
Tmodel$theta0,
nvar,
k,
indic,
event,
integr1,
ehazard,
ehazardInt,
control$iter.max,
control$eps,
Terms,
strats,
nstrata,
add.rmap,
add.rmap.cut,
ageDiag,
trace = trace)
loglik.test <- -2 * (Tmodel$ll - Fmodel$ll)
names(loglik.test) <- NULL
wald.test <- t(Fmodel$theta0 - Tmodel$theta0) %*%
(Fmodel$SD) %*% (Fmodel$theta0 - Tmodel$theta0)
score.test <- t(TF$FD) %*% solve(TF$SD) %*% TF$FD
Tmodel$wald.test <- wald.test
Tmodel$score.test <- score.test
Tmodel$loglik.test <- loglik.test
var <- solve(Fmodel$SD)
dimnames(var) <- list(names(theta0), names(theta0))
list(
coefficients = Fmodel$theta0,
var = var,
loglik = c(Nmodel$ll, Fmodel$ll),
wald.test = Tmodel$wald.test,
score.test = Tmodel$score.test,
loglik.test = Tmodel$loglik.test,
iterations = Fmodel$iter,
cov.test = cov.test,
cov.df = (ncol(x) - nvarT)
)
} else{
cov.test <- FALSE
var <- solve(Fmodel$SD)
dimnames(var) <- list(names(theta0), names(theta0))
if (optim) {
list(
coefficients = Fmodel$theta0,
var = var,
loglik = c(Nmodel$ll, Fmodel$ll),
iterations = Fmodel$iter,
cov.test = cov.test,
message = Fmodel$message,
convergence = Fmodel$convergence
)
} else{
list(
coefficients = Fmodel$theta0,
var = var,
loglik = c(Nmodel$ll, Fmodel$ll),
iterations = Fmodel$iter,
cov.test = cov.test
)
}
}
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/esteve_ph_fit_function.R |
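# Internal Newton-Raphson maximiser for the piecewise-constant excess hazard
# model: iterates theta0 <- theta0 + solve(SD, FD) with the analytic score
# (FD) and information matrix (SD) until the largest score component falls
# below control.eps. Only available without breakpoints on alpha; the
# breakpoint model must go through the optim-based maximiser.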
esteve.ph.maxim <- function(x, y, theta0, nvar, k, indic, event, integr1, ehazard,
ehazardInt, control.iter.max, control.eps, Terms,
strats, nstrata, add.rmap, add.rmap.cut, ageDiag, trace) {
ageDC <- ageDiag + y[, 1]
if (add.rmap.cut$breakpoint == FALSE) {
iter <- 0
FD <- 1
dum <- 1
if (!is.null(add.rmap)) {
nalpha <- nlevels(add.rmap)
} else{
nalpha = 0
}
if (is.null(strats)) {
strats <- rep(1, nrow(x))
}
while (max(abs(FD)) > control.eps) {
ll <- 0
SDP <- FDtau <- SDtau <- FDalpha <- SDalpha <- NULL
FDbeta <- SDbeta <- SDTauAlpha <- SDPBetaAlpha <- SDPBetaTau <- NULL
if (iter > control.iter.max)
stop(paste("Ran out of iterations",
control.iter.max,
"and did not converge."))
iter <- iter + 1
if (nvar > 0) {
rr <- exp(rowSums(t(theta0[1:nvar] * t(x))))
} else{
rr <- 1
}
if (nalpha) {
Ind.alpha <- outer(add.rmap, levels(add.rmap), '==')
nvarstrata <- (nvar + k * nstrata + 1):(nvar + k * nstrata + nalpha)
alpha <- t(t(Ind.alpha) * as.vector(exp(theta0[nvarstrata])))
} else{
alpha <- 1
}
for (h in 1:nstrata) {
tauid <- (nvar + (h - 1) * k + 1):(nvar + h * k)
tau <- t(exp(theta0[tauid]) * t(indic * (strats == h)))
tauInt <- t(exp(theta0[tauid]) * t(integr1 * (strats == h)))
chazard <- rowSums(tau * rr)
if (nalpha) {
ohazard <- chazard + rowSums(as.matrix(alpha * ehazard))
tll <- sum(-rr * rowSums(tauInt) -
rowSums(as.matrix(alpha * ehazardInt)) +
event * log(ohazard),
na.rm = TRUE)
} else{
ohazard <- chazard + ehazard
tll <- sum(-rr * rowSums(tauInt) -
rowSums(as.matrix(ehazardInt)) +
event * log(ohazard),na.rm = TRUE)
}
ll <- tll + ll
ohazard <- chazard + rowSums(as.matrix(alpha * ehazard))
tFDtau <- colSums(-tauInt * rr +
(event * tau * rr) / c(ohazard),
na.rm = T)
tSDtau <- diag(colSums((
-tauInt * rr +
event * tau * rr * (c(chazard + rowSums(
as.matrix(alpha * ehazard)
)) - tau * rr) / c(chazard + rowSums(as.matrix(alpha * ehazard))) ^ 2
), na.rm = T),
k,
k)
FDtau <- c(FDtau, tFDtau)
SDtau <- cbind(SDtau, tSDtau)
if (nalpha) {
tFDalpha <- colSums(-alpha * ehazardInt + event * (alpha * ehazard) /
c(chazard + rowSums(as.matrix(alpha * ehazard))),
na.rm = T)
TSDalpha <- diag(colSums(-alpha * ehazardInt +
event * alpha * ehazard *
(chazard + rowSums(
as.matrix(alpha * ehazard))
- alpha * ehazard) /
(c(chazard + rowSums(
as.matrix(alpha * ehazard))) ^ 2),
na.rm = T),
nalpha,
nalpha)
SDPTauAlpha <- -t(event * tau * rr) %*%
((alpha * ehazard) /
c(chazard + rowSums(as.matrix(alpha * ehazard))) ^ 2)
SDPBetaAlpha <- -t(x) %*% (event * alpha * (ehazard * chazard) /
(c(chazard +
rowSums(as.matrix(alpha * ehazard))
) ^ 2))
FDalpha <- c(FDalpha, tFDalpha)
SDalpha <- cbind(SDalpha, TSDalpha)
}
if (nvar != 0) {
tFDbeta <- -t(x) %*% (rr * rowSums(tauInt)) +
t(event * x) %*% (chazard /
c(chazard + rowSums(as.matrix(alpha * ehazard))))
tSDbeta <- -t(x) %*% (x * rr * rowSums(tauInt)) +
t(event * x) %*% (x * chazard *
rowSums(as.matrix(alpha * ehazard)) /
c(chazard + rowSums(as.matrix(alpha * ehazard))) ^ 2)
tSDPBetaTau <- -t(x) %*% (tauInt * rr) +
t(x) %*% (tau * event * rr *
(rowSums(as.matrix(alpha * ehazard))) /
c(chazard + rowSums(as.matrix(alpha * ehazard))) ^ 2)
FDbeta <- c(FDbeta, tFDbeta)
SDbeta <- cbind(SDbeta, tSDbeta)
SDPBetaTau <- cbind(SDPBetaTau, tSDPBetaTau)
}
}
if (nvar != 0) {
if (nalpha) {
FD <- c(FDbeta, FDtau, FDalpha)
SD <- -rbind( cbind(SDbeta, SDPBetaTau, SDPBetaAlpha),
cbind(t(SDPBetaTau), SDtau, SDPTauAlpha),
cbind(t(SDPBetaAlpha), t(SDPTauAlpha), SDalpha))
colnames(SD) <- names(theta0)
rownames(SD) <- names(theta0)
} else{
FD <- c(FDbeta, FDtau)
SD <- -rbind(cbind(SDbeta, SDPBetaTau), cbind(t(SDPBetaTau), SDtau))
colnames(SD) <- names(theta0)
rownames(SD) <- names(theta0)
}
} else{
if (nalpha) {
FD <- c(FDtau, FDalpha)
SD <- rbind(cbind(SDtau, SDPTauAlpha), cbind(t(SDPTauAlpha), SDalpha))
colnames(SD) <- names(theta0)
rownames(SD) <- names(theta0)
} else{
FD <- FDtau
SD <- -SDtau
colnames(SD) <- names(theta0)
rownames(SD) <- names(theta0)
}
}
dum <- try(solve(qr(SD), FD))
if (!is.numeric(dum)) {
stop("Matrix not definite positive. Check for colinearity in the data set.")
return(list(
theta0 = theta0,
ll = ll,
FD = FD,
SD = SD,
iter = iter
))
}
theta0 <- theta0 + dum
if (trace == TRUE) {
cat("#### Iteration number:", iter, "####", "\n")
cat("\n")
cat("## Inverse of Hessian matrix (-H) at",
paste('theta', iter, sep = '_'),
"====",
"\n")
print(solve(SD))
cat("\n")
cat("## Gradiant at", paste('theta', iter, sep = '_'), "====", "\n")
print(FD)
cat("\n")
cat("## Estimate values of",
paste('theta', iter, sep = '_'),
"====",
"\n")
print(theta0)
cat("\n")
}
# If 'covtest' are required.
# Used for the score test which needs only 1 iteration.
if (control.iter.max == 1) {
return(list(
theta0 = theta0,
ll = ll,
FD = FD,
SD = SD,
iter = iter
))
}
NULL
}
list(theta0 = theta0, ll = ll, FD = FD, SD = SD, iter = iter)
}else {
stop("Please use optim method for breakpoint model")
}
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/esteve_ph_maxim_function_new.R |
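# Internal maximiser based on optim (L-BFGS-B) with an analytic gradient.
# Handles both the standard model and the breakpoint model on the rescaling
# parameter alpha, and can parallelise the optimisation with optimParallel
# when speedy = TRUE.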
esteve.ph.optim.maxim <- function(x, y,
theta0,
nvar,
k,
indic,
event,
integr1,
ehazard,
ehazardInt,
control.iter.max,
control.eps,
Terms,
strats,
nstrata,
add.rmap,
add.rmap.cut,
ageDiag,
trace = 0,
speedy = FALSE)
{
ageDC <- ageDiag + y[, 1]
if (add.rmap.cut$breakpoint == FALSE) {
nvarStrata <- (nvar + k * nstrata + 1)
FD <- 1
no <- length(add.rmap)
if (is.null(strats)) {
strats <- rep(1, nrow(x))
}
if (!is.null(add.rmap)) {
nalpha <- nlevels(add.rmap)
} else{
nalpha = 0
}
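  # Log-likelihood of the excess hazard model at theta0 (piecewise-constant
  # baseline, optional rescaling parameter alpha); objective passed to optim.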
f <- function(theta0) {
ll <- FDbeta <- SDbeta <- 0
if (nvar > 0) {
rr <- exp(rowSums(t(theta0[1:nvar] * t(x))))
} else{
rr <- 1
}
if (nalpha) {
levels(add.rmap) <- 1:nlevels(add.rmap)
if (length(levels(add.rmap)) < 2) {
alpha0 <- exp(theta0[nvarStrata])
alpha <- exp(theta0[nvarStrata])
} else{
idxtheta0 <- (nvarStrata):(nvar + k * nstrata + nalpha)
alpha0 <- exp(theta0[idxtheta0])
Madd.rmap <- model.matrix( ~ add.rmap - 1)
alpha <- (Madd.rmap %*% alpha0)
}
}else{
alpha <- 1
}
for (h in 1:nstrata) {
indextau <- (nvar + (h - 1) * k + 1):(nvar + h * k)
tau <- t(exp(theta0[indextau]) * t(indic * (strats == h)))
tauInt <- t(exp(theta0[indextau]) * t(integr1 * (strats == h)))
chazard <- colSums(as.matrix(t(tau))) * rr
ohazard <- chazard + alpha * ehazard
eHazard <- alpha * ehazardInt
tll <- colSums(as.matrix(-rr * rowSums(tauInt)
- eHazard +
event * log(ohazard)))
ll <- tll + ll
}
return(ll)
}
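  # Analytic gradient of the log-likelihood with respect to the regression
  # coefficients (beta), the log baseline levels (tau) and, when present,
  # the log rescaling parameters (alpha).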
gradient <- function(theta0) {
FDbeta <- 0
FDtau <- FDalpha <- NULL
if (nvar > 0) {
rr <- exp(rowSums(t(theta0[1:nvar] * t(x))))
} else{
rr <- 1
}
if (nalpha) {
levels(add.rmap) <- 1:nlevels(add.rmap)
if (length(levels(add.rmap)) < 2) {
alpha0 <- exp(theta0[nvarStrata])
alpha <- exp(theta0[nvarStrata])
} else{
idxtheta0 <- (nvarStrata):(nvar + k * nstrata + nalpha)
alpha0 <- exp(theta0[idxtheta0])
Madd.rmap <- model.matrix( ~ add.rmap - 1)
alpha <- (Madd.rmap %*% alpha0)
}
} else{
alpha <- 1
}
for (h in 1:nstrata) {
indextau <- (nvar + (h - 1) * k + 1):(nvar + h * k)
tau <- t(exp(theta0[indextau]) * t(indic * (strats == h)))
tauInt <- t(exp(theta0[indextau]) * t(integr1 * (strats == h)))
chazard <- colSums(as.matrix(t(tau))) * rr
ohazard <- chazard + alpha * ehazard
tFDtau <- colSums(
as.matrix(-rr * tauInt + (event * tau * rr) / c(ohazard)))
if (nstrata == 1) {
FDtau <- tFDtau
}
else{
FDtau <- c(FDtau, tFDtau)
}
if (nalpha) {
ohazard <- chazard + alpha * ehazard
if (length(levels(add.rmap)) < 2) {
eHazard <- alpha * ehazardInt
tFDalpha <- colSums(as.matrix(-eHazard +
event * alpha * c(ehazard) / c(ohazard)))
FDalpha <- tFDalpha
} else{
Malpha <- t(alpha0 * t(Madd.rmap))
tFDalpha <- colSums(as.matrix(-Malpha * c(ehazardInt) +
event * Malpha * c(ehazard) / c(ohazard)))
FDalpha <- tFDalpha
}
}
if (nvar != 0) {
ohazard <- chazard + alpha * ehazard
tFDbeta <- -x * rr * rowSums(tauInt) + event * x * chazard / c(ohazard)
if (nstrata == 1) {
FDbeta <- tFDbeta
}
else{
FDbeta <- FDbeta + tFDbeta
}
}
else{
if (nalpha) {
FD <- c(FDtau, FDalpha)
}else{
FD <- c(FDtau)
}
}
}
if (nvar != 0) {
if (nalpha) {
FD <- c(colSums(as.matrix(FDbeta)), FDtau, FDalpha)
} else{
FD <- c(colSums(as.matrix(FDbeta)), FDtau)
}
}
FD
}
if (is.null(trace)) {
trace <- 0
}
if (nalpha) {
nalpha <- 0
theta0[1:(nvar + k * nstrata)] <- optim(par = theta0[1:(nvar + k * nstrata)],
fn = f,
gr = gradient,
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 500,
fnscale = -1,
trace = trace))$par
if (length(levels(add.rmap)) < 2) {
nalpha <- 1
} else{
nalpha <- nlevels(add.rmap)
}
}
if(speedy) {
max_cores <- detectCores()
used_cores <- min(max_cores, (max_cores - 2))
if (used_cores < 2) {
stop("We didn't detect enough cores for speedy")
}
cl <- makeCluster(used_cores)
setDefaultCluster(cl = cl)
res <- optimParallel(
par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 1000,
fnscale = -1,
trace = trace),
parallel = list(loginfo = TRUE)
)
setDefaultCluster(cl = NULL)
stopCluster(cl)
}else{
res <- optim(par = theta0,
fn = f,
gr = gradient,
method = "L-BFGS-B",
hessian = TRUE,
control = list(REPORT = 1,
maxit = 1000,
fnscale = -1,
trace = trace))
}
ll <- res$value
theta0 <- res$par
FD <- -gradient(theta0)
SD <- -res$hessian
iter <- res$counts[2]
message <- res$message
convergence <- res$convergence
return(
list(
theta0 = theta0,
ll = ll,
FD = FD,
SD = SD,
iter = iter,
convergence = convergence,
message = message
)
)
}
else{
nvarStrata <- (nvar + k * nstrata + 1)
FD <- 1
no <- length(add.rmap)
if (is.null(strats)) {
strats <- rep(1, nrow(x))
}
if (!is.null(add.rmap)) {
nalpha <- (length(add.rmap.cut$cut) + 1)*nlevels(add.rmap)
} else{
nalpha <- 0
}
f <- function(theta0) {
ll <- FDbeta <- SDbeta <- 0
if (nvar > 0) {
rr <- exp(rowSums(t(theta0[1:nvar] * t(x))))
} else{
rr <- 1
}
if (nalpha) {
levels(add.rmap) <- 1:nlevels(add.rmap)
if (length(levels(add.rmap)) < 2) {
alpha0 <- exp(theta0[nvarStrata])
alpha <- exp(theta0[nvarStrata])
} else{
idxtheta0 <- (nvarStrata):(nvar + k * nstrata + (length(add.rmap.cut$cut) + 1)*nlevels(add.rmap))
alpha0 <- exp(theta0[idxtheta0])
interval <- sort(c( add.rmap.cut$cut, max(ageDC) + 1, min(ageDC) - 1))
nbcut <- length(add.rmap.cut$cut) + 1
colnames_Madd.rmap <- c(unlist(
lapply(1:nlevels(add.rmap),
function(i)
paste(
paste0("add.rmap",1:nlevels(add.rmap))[i],
1:nbcut, sep = "_"))))
ageDCgroup <- cut(ageDC, breaks = c(0, add.rmap.cut$cut, c(max(ageDC) + 1)))
Madd.rmap <-
do.call("cbind", lapply(1:ncol(model.matrix( ~ add.rmap + 0)), function(i)
c((model.matrix( ~ add.rmap + 0))[, i]) * (model.matrix( ~ ageDCgroup - 1))))
colnames(Madd.rmap) <- c(colnames_Madd.rmap)
alpha <- (Madd.rmap %*% alpha0)
}
}else{
alpha <- 1
}
for (h in 1:nstrata) {
indextau <- (nvar + (h - 1) * k + 1):(nvar + h * k)
tau <- t(exp(theta0[indextau]) * t(indic * (strats == h)))
tauInt <- t(exp(theta0[indextau]) * t(integr1 * (strats == h)))
chazard <- colSums(as.matrix(t(tau))) * rr
ohazard <- chazard + alpha * ehazard
eHazard <- alpha * ehazardInt
tll <- colSums(as.matrix(-rr * rowSums(tauInt)
- eHazard +
event * log(ohazard)))
ll <- tll + ll
}
return(ll)
}
gradient <- function(theta0) {
FDbeta <- 0
FDtau <- FDalpha <- NULL
if (nvar > 0) {
rr <- exp(rowSums(t(theta0[1:nvar] * t(x))))
} else{
rr <- 1
}
if (nalpha) {
levels(add.rmap) <- 1:nlevels(add.rmap)
if (length(levels(add.rmap)) < 2) {
alpha0 <- exp(theta0[nvarStrata])
alpha <- exp(theta0[nvarStrata])
} else{
idxtheta0 <- (nvarStrata):(nvar + k * nstrata + (length(add.rmap.cut$cut) + 1)*nlevels(add.rmap))
alpha0 <- exp(theta0[idxtheta0])
interval <- sort(c(add.rmap.cut$cut, max(ageDC) + 1, min(ageDC) - 1))
nbcut <- length(add.rmap.cut$cut) + 1
colnames_Madd.rmap <- c(unlist(
lapply(1:nlevels(add.rmap),
function(i)
paste(
paste0("add.rmap",1:nlevels(add.rmap))[i],
1:nbcut, sep = "_"))))
ageDCgroup <- cut(ageDC, breaks = c(0, add.rmap.cut$cut, c(max(ageDC) + 1)))
Madd.rmap <-
do.call("cbind", lapply(1:ncol(model.matrix( ~ add.rmap + 0)), function(i)
c((model.matrix( ~ add.rmap + 0))[, i]) * (model.matrix( ~ ageDCgroup - 1))))
colnames(Madd.rmap) <- c(colnames_Madd.rmap)
alpha <- (Madd.rmap %*% alpha0)
}
} else{
alpha <- 1
}
for (h in 1:nstrata) {
indextau <- (nvar + (h - 1) * k + 1):(nvar + h * k)
tau <- t(exp(theta0[indextau]) * t(indic * (strats == h)))
tauInt <- t(exp(theta0[indextau]) * t(integr1 * (strats == h)))
chazard <- colSums(as.matrix(t(tau))) * rr
ohazard <- chazard + alpha * ehazard
tFDtau <- colSums(
as.matrix(-rr * tauInt + (event * tau * rr) / c(ohazard)))
if (nstrata == 1) {
FDtau <- tFDtau
}
else{
FDtau <- c(FDtau, tFDtau)
}
if (nalpha) {
ohazard <- chazard + alpha * ehazard
if (length(levels(add.rmap)) < 2) {
eHazard <- alpha * ehazardInt
tFDalpha <- colSums(as.matrix(-eHazard +
event * alpha * c(ehazard) / c(ohazard)))
FDalpha <- tFDalpha
} else{
Malpha <- t(alpha0 * t(Madd.rmap))
tFDalpha <- colSums(as.matrix(-Malpha * c(ehazardInt) +
event * Malpha * c(ehazard) / c(ohazard)))
FDalpha <- tFDalpha
}
}
if (nvar != 0) {
ohazard <- chazard + alpha * ehazard
tFDbeta <- -x * rr * rowSums(tauInt) + event * x * chazard / c(ohazard)
if (nstrata == 1) {
FDbeta <- tFDbeta
}
else{
FDbeta <- FDbeta + tFDbeta
}
}
else{
if (nalpha) {
FD <- c(FDtau, FDalpha)
}else{
FD <- c(FDtau)
}
}
}
if (nvar != 0) {
if (nalpha) {
FD <- c(colSums(as.matrix(FDbeta)), FDtau, FDalpha)
} else{
FD <- c(colSums(as.matrix(FDbeta)), FDtau)
}
}
FD
}
if (is.null(trace)) {
trace <- 0
}
if (nalpha) {
theta0 <- optim(
par = theta0,
fn = f,
gr = gradient,
method = "L-BFGS-B",
control = list(
REPORT = 1,
maxit = 500,
fnscale = -1,
trace = trace
)
)$par
if (length(levels(add.rmap)) < 2) {
nalpha <- 1
} else{
nalpha <- nlevels(add.rmap)
}
}
if(speedy) {
max_cores <- detectCores()
used_cores <- min(max_cores, (max_cores - 2))
if (used_cores < 2) {
stop("We didn't detect enough cores for speedy")
}
cl <- makeCluster(used_cores)
setDefaultCluster(cl = cl)
res <- optimParallel(
par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 1000,
fnscale = -1,
trace = trace),
parallel = list(loginfo = TRUE)
)
setDefaultCluster(cl = NULL)
stopCluster(cl)
}else{
res <- optim(par = theta0,
fn = f,
gr = gradient,
method = "L-BFGS-B",
hessian = TRUE,
control = list(REPORT = 1,
maxit = 1000,
fnscale = -1,
trace = trace))
}
ll <- res$value
theta0 <- res$par
FD <- -gradient(theta0)
SD <- -res$hessian
iter <- res$counts[2]
message <- res$message
convergence <- res$convergence
return(
list(
theta0 = theta0,
ll = ll,
FD = FD,
SD = SD,
iter = iter,
convergence = convergence,
message = message
)
)
}
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/esteve_ph_optim_maxim_function.R |
#' @title exphaz function
#'
#' @description Calculate the expected hazard and survival.
#'
#' @param formula a formula object of the \code{\link{Surv}} function with the
#' response on the left of a \code{~} operator and the terms on the right. The
#' response must be a survival object as returned by the \code{\link{Surv}}
#' function (\code{time} in first and \code{status} in second).
#' @note \code{Time} is OBLIGATORY in YEARS.
#'
#'
#' @param data a data frame in which to interpret the variables named in the
#' formula
#'
#' @param ratetable a rate table stratified by \code{age}, \code{sex},
#' \code{year} (if missing, ratedata is used)
#'
#' @param rmap a list that maps data set names to the ratetable names.
#'
#' @param ratedata a data frame of the hazards mortality in general population.
#'
#' @param only_ehazard a boolean argument (by default, \code{only_ehazard=TRUE}).
#' If \code{TRUE}, the cumulative population hazard is not provided.
#'
#' @param subset an expression indicating which subset of the rows in data
#' should be used in the fit. All observations are included by default
#'
#' @param na.action a missing-data filter function. The default is na.fail,
#' which returns an error if any missing values are found. An alternative is
#' na.exclude, which deletes observations that contain one or more missing
#' values.
#'
#'
#' @param scale a numeric argument specifying if the ratetable contains death
#' rates per day (default \code{scale = 365.2425}) or death rates per
#' year (\code{scale = 1}).
#'
#' @return An object of class \code{list} containing the following components:
#'
#'
#' \item{ehazard}{expected hazard calculated from the matching \code{ratetable}.}
#'
#' \item{ehazardInt}{cumulative expected hazard calculated from the matching \code{ratetable}. if \code{only_ehazard=TRUE}, this quantity is not provided.}
#'
#' \item{dateDiag}{date of diagnosis}
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group.
#' More accurate cancer-related excess mortality through correcting
#' background mortality for extra variables.
#' Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#' Mba RD, Goungounga JA, Grafféo N, Giorgi R; CENSUR working survival group.
#' Correcting inaccurate background mortality in excess hazard models
#' through breakpoints. BMC Med Res Methodol. 2020 Oct 29;20(1):268.
#' doi: 10.1186/s12874-020-01139-z. PMID: 33121436; PMCID: PMC7596976.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/33121436/}{PubMed})
#'
#' @examples
#'
#' library(survexp.fr)
#' library(xhaz)
#' fit.haz <- exphaz(
#' formula = Surv(obs_time_year, event) ~ 1,
#' data = dataCancer,
#' ratetable = survexp.fr, only_ehazard = TRUE,
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date')
#' )
#'
#' @export
exphaz <- function(formula = formula(data),
data = sys.parent(),
ratetable, rmap = list(age = NULL, sex = NULL, year = NULL),
ratedata = sys.parent(),
only_ehazard = TRUE,
subset,
na.action,
scale = 365.2425) {
Call <- match.call()
m <- match.call(expand.dots = FALSE)
indx <- match(c("formula", "data", "subset", "na.action"),
names(Call),
nomatch = 0)
if (indx[1] == 0)
stop("A formula argument is required")
temp <- Call[c(1, indx)]
temp[[1]] <- as.name("model.frame")
special <- c("strata")
Terms <- if (missing(data)) {
terms(formula, special)
}
else{
terms(formula, special, data = data)
}
temp$formula <- Terms
m <- eval(temp, sys.parent())
if (missing(na.action))
na.action <- NULL
ehazardInt <- NULL
# controls on data & ratetable parameters
if (missing(ratedata) & missing(ratetable)) {
stop("Missing rate table from general population.")
}
if (missing(data)) {
stop("Missing data data frame in which to interpret
the variables named in the formula.")
} else{
    if (is.na(match(rmap$age, names(data))))
      stop("Must have information for age in the data set.")
    if (is.na(match(rmap$sex, names(data))))
      stop("Must have information for sex in the data set.")
    if (is.na(match(rmap$year, names(data))))
      stop("Must have information for date in the data set.")
}
if (!missing(ratetable)) {
if (is.ratetable(ratetable)) {
varlist <- attr(ratetable, "dimid")
if (is.null(varlist)) {
varlist <- names(attr(ratetable, "dimnames"))
}
if (is.null(attributes(ratetable)$dimid)) {
attributes(ratetable)$dimid <- varlist
}
}
else{
stop("Invalid rate table")
}
varsexID <- try(which(varlist == 'sex'))
conditionVsex <-
attr(ratetable, which = "dimnames")[[varsexID]]
if (any(!conditionVsex %in% c('male', 'female'))) {
conditionVsex <- c('male', 'female')
}
if (!missing(rmap)) {
rcall <- substitute(rmap)
if (!is.call(rcall) || rcall[[1]] != as.name("list"))
stop("Invalid rcall argument")
}
else
rcall <- NULL
temp01 <- match(names(rcall)[-1], varlist)
if (any(is.na(temp01)))
stop("Variable not found in the ratetable:",
(names(rcall))[is.na(temp01)])
temp02 <- match(as.vector(unlist(rmap)), names(data))
if (any(is.na(temp02))) {
stop("Variable not found in the data set:",
(names(rcall))[is.na(temp02)])
}
}
myvarnames <- colnames(model.matrix(Terms, m)[,-1, drop = FALSE])
Y <- model.extract(m, "response")
if (!inherits(Y, "Surv"))
stop("Response must be a survival object.")
strats <- attr(Terms, "specials")$strata
dropx <- NULL
attr(Terms, "intercept") <- 1
if (length(dropx)) {
X <- model.matrix(Terms[-dropx], m)[, -1, drop = FALSE]
} else{
X <- model.matrix(Terms, m)[, -1, drop = FALSE]
}
###If there is a time-dependent covariate
if (ncol(Y) == 2) {
time <- Y[, 1]
event <- Y[, 2]
} else{
time <- Y[, 2] - Y[, 1]
event <- Y[, 3]
}
ageDiag <- data[, rmap$age]
if (missing(ratetable)) {
exphaz <- exphaz_years(
ageDiag = ageDiag,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = "rescaled",
only_ehazard = only_ehazard
)
    ehazard <- exphaz$ehazard
    ehazardInt <- try(exphaz$ehazardInt, TRUE)
    return(list(ehazard = ehazard,
                ehazardInt = ehazardInt))
} else{
exphaz <- exphaz_years(
ageDiag = ageDiag,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = "rescaled",
only_ehazard = only_ehazard
)
if (only_ehazard) {
ehazard <- exphaz$ehazard
dateDiag <- exphaz$dateDiag
return(list(ehazard = ehazard,
dateDiag = dateDiag))
}else {
ehazard <- exphaz$ehazard
ehazardInt <- exphaz$ehazardInt
dateDiag <- exphaz$dateDiag
return(list(ehazard = ehazard,
ehazardInt = ehazardInt,
dateDiag = dateDiag))
}
}
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/exphaz.R |
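# Internal helper: computes each subject's expected hazard at the end of
# follow-up and, unless only_ehazard = TRUE, the cumulative expected hazard,
# by matching attained age, sex and calendar year to the supplied ratetable
# (or to ratedata when no ratetable is given).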
exphaz_years <- function(ageDiag,
time,
data,
rmap,
ratetable,
ratedata = NULL,
add.rmap = NULL,
varlist,
temp01,
scale,
pophaz,
only_ehazard){
if (missing(ratetable)) {
ageDC <- ageDiag + floor(time)
ageDC <- (ageDC >= nrow(ratedata)) * (nrow(ratedata) - 1) +
(ageDC < nrow(ratedata)) * ageDC
ehazard <- (data[, rmap$sex] == "male") *
ratedata[(ageDC + 1), 1] + (data[, rmap$sex] == "female") *
ratedata[(ageDC + 1), 2]
    ratedataInt <- apply(ratedata, 2, cumsum)
    ehazardInt <- NULL
    if (!is.null(add.rmap)) {
ehazardInt_u <- (data[, rmap$sex] == "male") * (ageDC >= 1) *
ratedataInt[ageDC, 1] +
ratedata[(ageDC + 1), 1] * (ageDC - trunc(ageDC)) +
(data[, rmap$sex] == "female") * (ageDC >= 1) *
(ratedataInt[ageDC, 2]) +
ratedata[ageDC + 1, 2] * (ageDC - trunc(ageDC))
ehazardInt_l <-
(data[, rmap$sex] == "male") * (ageDiag >= 1) *
(ratedataInt[ageDiag, 1]) +
ratedata[ageDiag + 1, 1] * (ageDiag - trunc(ageDiag)) +
(data[, rmap$sex] == "female") * (ageDiag >= 1) *
(ratedataInt[ageDiag, 2]) +
ratedata[ageDiag + 1, 2] * (ageDiag - trunc(ageDiag))
ehazardInt <- ehazardInt_u - ehazardInt_l
}
return(list(ehazard = ehazard,
ehazardInt = ehazardInt))
} else{
if (!missing(ratedata)) {
stop("Don't provide ratedata if ratetable is available!")
}
ageDC <- ageDiag + time
if (max(ageDC) > 115) {
stop(
"Please check the scale used for time of follow-up: it must be in year; also check the age of diagnosis for older patients.\n"
)
}
    year <- as.integer(format(data[, rmap$year] +
                                as.difftime(time * scale,
                                            units = "days"),
                              "%Y"))
RT <- attr(ratetable, which = "dimnames")
RTyear <- which(varlist == 'year')
if (max(year) > max(RT[[RTyear]]))
year[c(year > max(RT[[RTyear]]))] <-
max(RT[[which(varlist == 'year')]])
nb.age.dc <- length(RT[[which(varlist == 'age')]])
newdata01 <- data[, as.vector(unlist(rmap))[temp01]]
names(newdata01) <- varlist
newdata01$age <- trunc(ageDC)
newdata01$year <- year
if (length(levels(newdata01$sex)) < 2) {
stop("It is required to provide the two levels for variable sex, i.e. male and female.")
}
Indic01 <- sapply(1:ncol(newdata01), function(i)
apply(outer(newdata01[, temp01[i]], RT[[temp01[i]]], "=="), 1,
function(x)
which(x)))
colnames(Indic01) <- names(newdata01)
ehazard <- mapply(function(i) {
return(ratetable[matrix(Indic01[i, ], nrow = 1)])
}, 1:length(ageDC)) * scale
if (only_ehazard) {
dateDiag <- data[, rmap$year]
return(list(ehazard = ehazard,
dateDiag = dateDiag))
} else{
      # table matching the life table and the data
      newdata02 <- data[, as.vector(unlist(rmap))[temp01]]
names(newdata02) <- varlist
newdata02$age <- trunc(ageDiag)
newdata02$year <- as.integer(format(data[, rmap$year], '%Y'))
Indic02 <-
sapply(1:length(temp01), function(i)
apply(outer(newdata02[, temp01[i]],
RT[[temp01[i]]], "=="),
1,
function(x)
which(x)))
colnames(Indic02) <- names(newdata02)
dateDC <-
data[, rmap$year] + as.difftime(time * scale, units = "days")
diffAge <- ceiling(data[, rmap$age]) - data[, rmap$age]
dateAnniv <-
data[, rmap$year] + as.difftime(diffAge * scale, units = "days")
nb.anne <- trunc(time)
dateapdiag <- as.Date(data[, rmap$year] + scale)
nbj <- difftime(dateapdiag,
data[, rmap$year],
units = "days")
coef01 <-
coef02 <-
coef03 <-
coef04 <-
coef05 <- coef06 <- numeric(length = length(nb.anne))
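      # coef01..coef06 store, for each subject, the numbers of days spent in
      # the successive calendar-year segments delimited by the diagnosis date,
      # the next birthday (dateAnniv) and the 31 December year ends, up to the
      # exit date (dateDC); they weight the age- and period-specific rates
      # when the expected hazard is cumulated. The four cases below (id1-id4)
      # distinguish whether a full year of follow-up elapsed (nb.anne != 0)
      # and whether the birthday falls on the diagnosis date.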
id1 <- which(nb.anne != 0 &
dateAnniv != data[, rmap$year])
if (length(id1) != 0) {
id1AF <- intersect(id1, which(dateAnniv <= as.Date(paste0(
format(data[, rmap$year], '%Y'), "-12-31"
))))
if (length(id1AF) != 0) {
coef01[id1AF] <- difftime(dateAnniv[id1AF],
data[id1AF, rmap$year],
units = "days")
coef02[id1AF] <- difftime(as.Date(paste0(format(data[id1AF, rmap$year], '%Y'), "-12-31")),
dateAnniv[id1AF], units = "days")
coef03[id1AF] <-
nbj[id1AF] - coef01[id1AF] - coef02[id1AF]
id11 <- intersect(id1AF, which(dateDC <= as.Date(paste(
format(dateAnniv + nbj * nb.anne, '%Y'),
format(dateAnniv, '%m-%d'),
sep = '-'
))))
if (length(id11) != 0) {
res <- paste(format(data[id11, rmap$year] + nbj[id11] * nb.anne[id11], '%Y'),
format(data[id11, rmap$year], '%m-%d'),
sep = '-')
res2 <- try(class(as.Date(res)), TRUE)
if (inherits(res2, "try-error")) {
resfin <- paste(format(data[id11, rmap$year] + nbj[id11] * nb.anne[id11], '%Y'),
format(data[id11, rmap$year] + 1, '%m-%d'),
sep = '-')
} else{
resfin <- paste(format(data[id11, rmap$year] + nbj[id11] * nb.anne[id11], '%Y'),
format(data[id11, rmap$year], '%m-%d'),
sep = '-')
}
coef04[id11] <-
difftime(dateDC[id11], as.Date(resfin), units = "days")
coef04[id11][which(coef04[id11] < 0)] <- 0
}
id12 <- intersect(id1AF,
which(dateDC > as.Date(paste(
format(dateAnniv + nbj * nb.anne, '%Y'),
format(dateAnniv, '%m-%d'),
sep = '-'
)) &
dateDC <= as.Date(paste0(
format(data[, rmap$year] + nbj * nb.anne, '%Y'),
"-12-31"
))))
if (length(id12) != 0) {
coef05[id12] <- difftime(dateDC[id12], as.Date(paste(
format(dateAnniv[id12] + nbj[id12] * nb.anne[id12], '%Y'),
format(dateAnniv[id12], '%m-%d'),
sep = '-'
)), units = "days")
}
id13 <- intersect(id1AF, which(dateDC >= as.Date(paste0(
format(data[, rmap$year] + nbj * (1 + nb.anne), '%Y'), "-01-01"
))))
if (length(id13) != 0) {
coef06[id13] <- difftime(dateDC[id13],
as.Date(paste0(
format(data[id13, rmap$year] +
nbj[id13] * (1 + nb.anne[id13]), '%Y'),
"-01-01"
)),
units = "days")
}
}
id1FA <- intersect(id1,
which(dateAnniv > as.Date(paste0(
format(data[, rmap$year], '%Y'), "-12-31"
))))
if (length(id1FA) != 0) {
coef01[id1FA] <- difftime(as.Date(paste0(format(data[id1FA, rmap$year], '%Y'), "-12-31")),
data[id1FA, rmap$year], units = "days")
coef02[id1FA] <- difftime(dateAnniv[id1FA],
as.Date(paste0(
format(data[id1FA, rmap$year] + nbj[id1FA],
'%Y'),
"-01-01"
)),
units = "days")
coef03[id1FA] <-
nbj[id1FA] - coef01[id1FA] - coef02[id1FA]
id14 <- intersect(id1FA, which(dateDC <= as.Date(paste0(
format(data[, rmap$year] + nbj * nb.anne, '%Y'),
"-12-31"
))))
if (length(id14) != 0) {
coef04[id14] <- difftime(dateDC[id14],
as.Date(paste(
format(data[id14, rmap$year] +
nbj[id14] * nb.anne[id14], '%Y'),
format(data[id14, rmap$year], '%m-%d'),
sep = '-'
)),
units = "days")
coef04[id14][which(coef04[id14] < 0)] <- 0
}
id15 <- intersect(id1FA,
which(dateDC <= as.Date(paste(
format(dateAnniv + nbj * nb.anne, '%Y'),
format(dateAnniv, '%m-%d'),
sep = '-'
)) &
dateDC >= as.Date(paste0(
format(data[, rmap$year] +
nbj * (1 + nb.anne), '%Y'),
"-01-01"
))))
if (length(id15) != 0) {
          coef05[id15] <- difftime(dateDC[id15],
as.Date(paste0(
format(data[id15, rmap$year] +
nbj[id15] * (1 + nb.anne[id15]),
'%Y'), "-01-01"
)),
units = "days")
}
id16 <- intersect(id1FA,
which(dateDC > as.Date(paste(
format(dateAnniv + nbj * nb.anne, '%Y'),
format(dateAnniv, '%m-%d'),
sep = '-'
))))
if (length(id16) != 0) {
          coef06[id16] <- difftime(dateDC[id16], as.Date(paste(
format(dateAnniv[id16] + nbj[id16] * nb.anne[id16], '%Y'),
format(dateAnniv[id16], '%m-%d'),
sep = '-'
)), units = "days")
}
}
}
id2 <- which(nb.anne != 0 &
dateAnniv == data[, rmap$year])
if (length(id2) != 0) {
coef01[id2] <-
difftime(as.Date(paste0(format(data[id2, rmap$year], '%Y'),
"-12-31")),
data[id2, rmap$year], units = "days")
coef02[id2] <- nbj[id2] - coef01[id2]
      # subjects who exited on or before 31 December of the last follow-up year
id21 <- intersect(id2,
which(dateDC <= as.Date(paste0(
format(data[, rmap$year] + nbj * nb.anne, '%Y'),
"-12-31"
))))
if (length(id21) != 0) {
coef03[id21] <- difftime(dateDC[id21],
as.Date(data[id21, rmap$year] +
nbj[id21] * nb.anne[id21]),
units = "days")
coef03[id21][which(coef03[id21] < 0)] <- 0
}
id22 <- intersect(id2, which(dateDC >= as.Date(paste0(
format(data[, rmap$year] + nbj * (1 + nb.anne), '%Y'), "-01-01"
))))
if (length(id22) != 0) {
coef04[id22] <- difftime(dateDC[id22],
as.Date(paste0(
format(data[id22, rmap$year] +
nbj[id22] * (1 + nb.anne[id22]),
'%Y'),
"-01-01"
)),
units = "days")
}
}
id3 <- which(nb.anne == 0 & dateAnniv != data[, rmap$year])
if (length(id3) != 0) {
id3AF <- intersect(id3,
which(dateAnniv <= as.Date(paste0(
format(data[, rmap$year], '%Y'), "-12-31"
))))
if (length(id3AF) != 0) {
id31 <- intersect(id3AF, which(dateDC <= as.Date(paste(
format(dateAnniv, '%Y'),
format(dateAnniv, '%m-%d'),
sep = '-'
))))
if (length(id31) != 0) {
coef01[id31] <- difftime(dateDC[id31],
data[id31, rmap$year],
units = "days")
}
id32 <- intersect(id3AF, which(dateDC > as.Date(paste(
format(dateAnniv, '%Y'),
format(dateAnniv, '%m-%d'),
sep = '-'
)) &
dateDC <= as.Date(paste0(
format(data[, rmap$year], '%Y'), "-12-31"
))))
if (length(id32) != 0) {
coef01[id32] <- difftime(dateAnniv[id32],
data[id32, rmap$year],
units = "days")
coef02[id32] <- difftime(dateDC[id32],
as.Date(paste(
format(dateAnniv[id32], '%Y'),
format(dateAnniv[id32], '%m-%d'),
sep = '-'
)),
units = "days")
}
id33 <- intersect(id3AF,
which(dateDC >= as.Date(paste0(
format(data[, rmap$year] +
nbj * (1 + nb.anne),
'%Y'),
"-01-01"
))))
if (length(id33) != 0) {
coef01[id33] <- difftime(dateAnniv[id33],
data[id33, rmap$year],
units = "days")
coef02[id33] <- difftime(as.Date(paste0(format(
data[id33, rmap$year], '%Y'
), "-12-31")),
dateAnniv[id33],
units = "days")
coef03[id33] <- difftime(dateDC[id33],
as.Date(paste0(
format(data[id33, rmap$year] +
nbj[id33] * (1 + nb.anne[id33]), '%Y'),
"-01-01"
)),
units = "days")
}
}
id3FA <- intersect(id3,
which(dateAnniv >= as.Date(paste0(
format(data[, rmap$year] + nbj, '%Y'),
"-01-01"
))))
if (length(id3FA) != 0) {
id34 <- intersect(id3FA,
which(dateDC <= as.Date(paste0(
format(data[, rmap$year], '%Y'),
"-12-31"
))))
if (length(id34) != 0) {
coef01[id34] <- difftime(dateDC[id34],
data[id34, rmap$year],
units = "days")
}
id35 <- intersect(id3FA,
which(dateDC <= as.Date(paste(
format(dateAnniv, '%Y'),
format(dateAnniv, '%m-%d'),
sep = '-'
)) &
dateDC >= as.Date(paste0(
format(data[, rmap$year] +
nbj * (1 + nb.anne), '%Y'),
"-01-01"
))))
if (length(id35) != 0) {
coef01[id35] <-
difftime(as.Date(paste0(format(
data[id35, rmap$year], '%Y'
),
"-12-31")),
data[id35, rmap$year],
units = "days")
coef02[id35] <- difftime(dateDC[id35],
as.Date(paste0(
format(data[id35, rmap$year] +
nbj[id35] * (1 + nb.anne[id35]), '%Y'),
"-01-01"
)),
units = "days")
}
id36 <- intersect(id3FA,
which(dateDC > as.Date(paste(
format(dateAnniv, '%Y'),
format(dateAnniv, '%m-%d'),
sep = '-'
))))
if (length(id36) != 0) {
coef01[id36] <- difftime(as.Date(paste0(format(
data[id36, rmap$year], '%Y'
), "-12-31")),
data[id36, rmap$year], units = "days")
coef02[id36] <- difftime(dateAnniv[id36],
as.Date(paste0(
format(data[id36, rmap$year] +
nbj[id36], '%Y'), "-01-01"
)),
units = "days")
coef03[id36] <- difftime(dateDC[id36], as.Date(paste(
format(dateAnniv[id36], '%Y'),
format(dateAnniv[id36], '%m-%d'),
sep = '-'
)),
units = "days")
}
}
}
id4 <- which(nb.anne == 0 & dateAnniv == data[, rmap$year])
if (length(id4) != 0) {
id41 <- intersect(id4,
which(dateDC <= as.Date(paste0(
format(data[, rmap$year], '%Y'), "-12-31"
))))
if (length(id41) != 0) {
coef01[id41] <- difftime(dateDC[id41],
data[id41, rmap$year],
units = "days")
}
id42 <- intersect(id4, which(dateDC >= as.Date(paste0(
format(data[, rmap$year] + nbj * (1 + nb.anne), '%Y'),
"-01-01"
))))
if (length(id42) != 0) {
coef01[id42] <- difftime(as.Date(paste0(format(data[id42, rmap$year], '%Y'), "-12-31")),
data[id42, rmap$year], units = "days")
coef02[id42] <- difftime(dateDC[id42],
as.Date(paste0(
format(data[id42, rmap$year] +
nbj[id42] * (1 + nb.anne[id42]),
'%Y'), "-01-01"
)),
units = "days")
}
}
coefs <-
data.frame(coef01, coef02, coef03, coef04, coef05, coef06)
dateDiag <- data[, rmap$year]
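      # Cumulate the expected hazard subject by subject: cumpop() (defined
      # elsewhere in the package) walks the life table over the nb.anne
      # complete years of follow-up and the residual segments weighted by the
      # day counts gathered in coefs.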
ehazardInt <- mapply(
FUN = cumpop,
1:nrow(data),
MoreArgs = list(
ratetable,
Indic02,
nb.anne,
coefs,
nb.age.dc,
dateDiag,
dateAnniv
)
)
return(list(
ehazard = ehazard,
ehazardInt = ehazardInt,
dateDiag = dateDiag
))
}
}
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/exphaz_years.R |
#' @import statmod
giorgi.tdph.fit <- function(x, y, ehazard, ehazardInt, int, covtest, bsplines,
init, control, event, Terms, strats, add.rmap,
add.rmap.cut, ageDiag, ageDC, optim, trace, speedy,
nghq = nghq) {
k <- 3
nrowx <- nrow(x)
ehazard <- c(ehazard)
event <- c(event)
  y[, 1] <- c(y[, 1])
cst <- 1
cpti <- 1
cptj <- 1
alpha <- NULL
#Coefficients for the basis functions
int23 <- int[2] * int[3]
int4232 <- (int[4] - int[2]) * (int[3] - int[2])
int4243 <- (int[4] - int[2]) * (int[4] - int[3])
int424353 <- int4243 * (int[4] - int[3])
int5343 <- (int[4] - int[3]) ^ 2
#Basis functions
spline1 <- c(1, -2 / int[2], 1 / (int[2] ^ 2),
0, 0, 0, 0, 0, 0)
spline2 <- c(0,
(1 / int[2]) + (int[3] / int23),
(-1 / int[2] ^ 2) - (1 / int23),
(int[3] / (int[3] - int[2])),
-2 / (int[3] - int[2]),
1 / (int[3] * (int[3] - int[2])),
0, 0, 0)
spline3 <- c(0, 0,
1 / int23,
-((int[4] * int[2]) / int4232),
(1 / (int[3] - int[2])) +
(int[4] / int4232) +
(int[2] / int4232),
-1 / (int[3] * (int[3] - int[2])) - 1 / int4232,
int[4] ^ 2 / int4243,
-2 * int[4] / int4243,
1 / int4243)
spline4 <- c(0, 0, 0,
int[2] ^ 2 / int4232,
-2 * int[2] / int4232,
1 / int4232,
-(int[2] * int[4] * (int[4] - int[3]) +
int[3] * int[4] * (int[4] - int[2])) / int424353,
(2 * int[4] * int[4] - 2 * int[2] * int[3]) / int424353,
(int[2] + int[3] - 2 * int[4]) / int424353)
spline5 <- c(0, 0, 0, 0, 0, 0,
int[3] ^ 2 / int5343,
-2 * int[3] / int5343,
1 / int5343)
#The resulting B-spline function
p <- matrix(c(spline1, spline2, spline3, spline4, spline5),
nrow = 5,
              byrow = TRUE)
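  # Each row of p holds the piecewise-quadratic coefficients (constant, u,
  # u^2) of one elementary spline on the three intervals of 'int', so
  # p[i, ((cst - 1) * 3 + 1):(cst * 3)] evaluates elementary spline i on
  # interval cst.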
knot <- c(int[c(-1,-length(int))])
delta <- sort(c(rep(c(int[1], int[length(int)]), k), knot))
P <- splines::splineDesign(knots = delta, x = y[, 1], ord = k)
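  # P: n x 5 design matrix of the quadratic B-spline basis (ord = k = 3,
  # i.e. degree 2) evaluated at each follow-up time.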
#Boundaries of the integral
boundmax <- matrix(0, nrow(y), k)
boundmin <- matrix(0, nrow(y), k)
for (i in 1:k) {
boundmax[, i] <- (y[, 1] > int[i + 1]) * int[i + 1] +
(y[, 1] <= int[i + 1] & y[, 1] >= int[i]) * y[, 1]
boundmin[, i] <- (y[, 1] >= int[i]) * int[i]
}
if (!is.null(add.rmap)) {
if (!is.factor(add.rmap)) {
stop("The alpha argument must be a factor.")
} else{
nalpha <- nlevels(add.rmap)
}
} else{
nalpha <- 0
}
#Reorganize 'x' with TD covariates following by PH covariates.
theta0 <- c(rep(0, nalpha + 5 + 5 * (length(bsplines))))
thetaPH <- NULL
nTD <- ncol(as.matrix(x[, grep("TRUE", as.character(bsplines))]))
nPH <- (length(bsplines) - nTD)
if (nalpha) {
#Initialization of theta0
if (!is.null(init)) {
if (length(unlist(init)) != (5 + nTD * 5 + nPH + nalpha))
stop("The number of initials values must the same as
\nthe number of parameters to estimate.")
}
} else{
#Initialization of theta0
if (!is.null(init)) {
if (length(unlist(init)) != (5 + nTD * 5 + nPH))
stop("The number of initials values must the same as
\nthe number of parameters to estimate.")
}
}
x_new <- x
if (nTD != length(bsplines)) {
if (!is.null(init)) {
if (nPH != 0) {
dummyF <- dimnames(x)[[2]][grep("FALSE", as.character(bsplines))]
dummyF <- sapply(1:nPH,
function(i, init, dummyF)
unlist(init[grep(dummyF[i], names(init))]),
init = init,
dummyF = dummyF)
}
else {
dummyF <- NULL
}
if (nTD != 0) {
dummyT <- dimnames(x)[[2]][grep("TRUE", as.character(bsplines))]
dummyT <- sapply(1:nTD,
function(i, init, dummyT)
unlist(init[grep(dummyT[i], names(init))]),
init = init,
dummyT = dummyT)
}
else{
dummyT <- NULL
}
indxAlpha <- (5 + nTD * 5 + nPH + 1) : (5 + nTD * 5 + nPH + nalpha)
if (nalpha) {
if (nTD != 0 & nPH != 0) {
theta0 <- c(unlist(init[1][1:5]),
dummyT,
dummyF,
unlist(init[length(init)]))
}
else{
if (nTD == 0 &
nPH != 0) {
theta0 <-
c(unlist(init[1][1:5]), dummyF, unlist(init[indxAlpha#length(init)
]))
}
if (nTD != 0 &
nPH == 0) {
theta0 <-
c(unlist(init[1][1:5]), dummyT, unlist(init[indxAlpha#length(init)
]))
}
}
} else{
if (nTD != 0 & nPH != 0) {
theta0 <- c(unlist(init[1][1:5]), dummyT, dummyF)
} else{
if (nTD == 0 & nPH != 0) {
theta0 <- c(unlist(init[1][1:5]), dummyF)
}
if (nTD != 0 &
nPH == 0) {
theta0 <- c(unlist(init[1][1:5]), dummyT)
}
}
}
names(theta0) <- NULL
thetaPH <- theta0[(5 + 5 * nTD + 1):(5 + 5 * nTD + nPH)]
}
else{
theta0 <- c(rep(0, 5 + 5 * nTD + nPH + nalpha))
thetaPH <- theta0[(5 + 5 * nTD + 1):(5 + 5 * nTD + nPH)]
}
if (nTD != 0 & nPH != 0) {
namesx <- c(dimnames(x)[[2]][grep("TRUE", as.character(bsplines))],
dimnames(x)[[2]][grep("FALSE", as.character(bsplines))])
x <- cbind(as.matrix(x[, grep("TRUE", as.character(bsplines))]),
as.matrix(x[, grep("FALSE", as.character(bsplines))]))
}
else{
if (nTD == 0 & nPH != 0) {
namesx <- c(dimnames(x)[[2]][grep("FALSE", as.character(bsplines))])
x <- as.matrix(x[, grep("FALSE", as.character(bsplines))])
}
if (nTD != 0 & nPH == 0) {
namesx <- c(dimnames(x)[[2]][grep("TRUE", as.character(bsplines))])
x <- as.matrix(x[, grep("TRUE", as.character(bsplines))])
}
}
dimnames(x)[[2]] <- namesx
}
else{
if (!is.null(init)) {
theta0 <- c(unlist(init))
names(theta0) <- NULL
if (nalpha) {
thet0 <- theta0[5 + 5 * nTD + nPH]
if (nPH != 0) {
        thetaPH <- theta0[(5 + 5 * nTD + 1):(5 + 5 * nTD + nPH)]
}
else{
thetaPH <- 0
}
}
else{
if (nPH != 0) {
thetaPH <- theta0[(5 + 5 * nTD + 1):(5 + 5 * nTD + nPH)]
} else{
thetaPH <- 0
}
}
} else{
if (nalpha) {
theta0 <- c(rep(0, 5 + 5 * nTD + nPH + nalpha))
thet0 <- theta0[5 + 5 * nTD + nPH]
      thetaPH <- theta0[(5 + 5 * nTD + 1):(5 + 5 * nTD + nPH)]
}
else{
theta0 <- c(rep(0, 5 + 5 * nTD + nPH))
thetaPH <- theta0[(5 + 5 * nTD + 1):(5 + 5 * nTD + nPH)]
}
}
namesx <- c(dimnames(x)[[2]][grep("TRUE", as.character(bsplines))],
dimnames(x)[[2]][grep("FALSE", as.character(bsplines))])
x <- cbind(as.matrix(x[, grep("TRUE", as.character(bsplines))]),
as.matrix(x[, grep("FALSE", as.character(bsplines))]))
dimnames(x)[[2]] <- namesx
}
#Integral function using Gauss-Legendre quadrature
IntGL <- function(f, bound, cst, cpti, cptj, theta, x, nTD, p, nghq = nghq) {
# default nghq=12
GL <- gauss.quad(n = nghq, kind = "legendre")
e <- matrix(GL$nodes,ncol = 1)
w <- matrix(GL$weights,ncol = 1)
dif <- t(bound[1, ]) - t(bound[2, ])
addbound <- t(bound[2, ]) + t(bound[1, ])
nodes2 <- 0.5 * (matrix(rep(addbound, nghq), ncol = nghq) + t(e %*% dif))
weights2 <- t(as.vector(w) %*% (dif))
GL$nodes2 <- c(nodes2)
GL$weights2 <- weights2
fx <- matrix(c(rep(0, nrow(nodes2) * ncol(nodes2))), ncol = ncol(nodes2)) +
f(nodes2, cst, cpti, cptj, theta, x, nTD, p)
fx <- colSums(t(as.matrix((fx))*weights2))
fx
}
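  # In IntGL(), bound is a 2 x n matrix (row 1: upper limits, row 2: lower
  # limits); the function returns, for each subject, the Gauss-Legendre
  # approximation of the integral of f over [bound[2, ], bound[1, ]].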
#Used for the integral calculus of the disease-related mortality
#hazard function ("corrected hazard")
Int.chazard <- function(u, cst, cpti, cptj, theta, x, nTD, p) {
tau.bs <- 0
beta.bs <- 0
nghq <- dim(u)[2]
beta.bs <- matrix(0, nrow(x), nghq)
for (i in 1:5) {
#
tau.bs <- tau.bs +
theta[i] * (p[i, ((cst - 1) * 3 + 1)] +
p[i, ((cst - 1) * 3 + 2)] * u +
p[i, ((cst - 1) * 3 + 3)] * u ^ 2)
}
#The number of col is 12 or nghq because the shape of IntGL function
if (nTD != 0) {
for (niv in 1:nTD) {
for (h in 1:5) {
beta.bs <- beta.bs +
theta[6 + ((h - 1) * nTD) + niv - 1] *
x[, niv] * (p[h, ((cst - 1) * 3 + 1)] +
p[h, ((cst - 1) * 3 + 2)] * u +
p[h, ((cst - 1) * 3 + 3)] * u ^ 2)
}
}
}
exp(tau.bs + beta.bs)
}
#Used for the integral calculus of the first derivative
#of the baseline function
Int.FDbase <- function(u, cst, cpti, cptj, theta, x, nTD, p) {
tau.bs <- 0
beta.bs <- 0
nghq <- dim(u)[2]
beta.bs <- matrix(0, nrow(x), nghq)
for (i in 1:5) {
tau.bs <- tau.bs +
theta[i] *
(p[i, ((cst - 1) * 3 + 1)] +
p[i, ((cst - 1) * 3 + 2)] * u +
p[i, ((cst - 1) * 3 + 3)] * u ^ 2)
}
if (nTD != 0) {
for (niv in 1:nTD) {
for (h in 1:5) {
beta.bs <- beta.bs +
theta[6 + ((h - 1) * nTD) + niv - 1] *
x[, niv] * (p[h, ((cst - 1) * 3 + 1)] +
p[h, ((cst - 1) * 3 + 2)] * u +
p[h, ((cst - 1) * 3 + 3)] * u ^ 2)
}
}
}
((p[cpti, ((cst - 1) * 3 + 1)] +
p[cpti, ((cst - 1) * 3 + 2)] * u +
p[cpti, ((cst - 1) * 3 + 3)] * u ^ 2) *
exp(tau.bs + beta.bs))
}
#Used for the integral calculus of the second derivative of the baseline function
Int.SDbase <- function(u, cst, cpti, cptj, theta, x, nTD, p) {
tau.bs <- 0
beta.bs <- 0
nghq <- dim(u)[2]
beta.bs <- matrix(0, nrow(x), nghq)
for (i in 1:5) {
tau.bs <- tau.bs +
theta[i] *
(p[i, ((cst - 1) * 3 + 1)] +
p[i, ((cst - 1) * 3 + 2)] * u +
p[i, ((cst - 1) * 3 + 3)] * u ^ 2)
}
if (nTD != 0) {
for (niv in 1:nTD) {
for (h in 1:5) {
beta.bs <- beta.bs +
theta[6 + ((h - 1) * nTD) + niv - 1] *
x[, niv] *
(p[h, ((cst - 1) * 3 + 1)] +
p[h, ((cst - 1) * 3 + 2)] * u +
p[h, ((cst - 1) * 3 + 3)] * u ^ 2)
}
}
}
(p[cpti, ((cst - 1) * 3 + 1)] +
p[cpti, ((cst - 1) * 3 + 2)] * u +
p[cpti, ((cst - 1) * 3 + 3)] * u ^ 2) *
(p[cptj, ((cst - 1) * 3 + 1)] +
p[cptj, ((cst - 1) * 3 + 2)] * u +
p[cptj, ((cst - 1) * 3 + 3)] * u ^ 2) *
exp(tau.bs + beta.bs)
}
#Full model
if (optim) {
Fmodel <- giorgi.tdph.optim.maxim(x,
theta0,
nTD,
nPH,
event,
ehazard,
ehazardInt,
P,
p,
k,
nrowx,
IntGL,
cpti,
cptj,
cst,
boundmin,
boundmax,
Int.chazard,
Int.FDbase,
Int.SDbase,
int,
control$iter.max,
control$eps,
Terms,
                                        add.rmap,
                                        add.rmap.cut,
                                        trace,
                                        speedy,
nghq = nghq)
}
else{
Fmodel <- giorgi.tdph.maxim(x,
theta0,
nTD,
nPH,
event,
ehazard,
ehazardInt,
P,
p,
k,
nrowx,
IntGL,
cpti,
cptj,
cst,
boundmin,
boundmax,
Int.chazard,
Int.FDbase,
Int.SDbase,
int,
control, nghq = nghq)
}
#Tested model: if the likelihood ratio test of PH is required
if (sum(covtest) > 0) {
nPH.Full <- nPH
nTD.Full <- nTD
cov.test <- covtest
theta0 <- Fmodel$coefficients
nPH <- sum(covtest) + nPH
nTD <- length(covtest) - nPH
#Reorganize 'x' with TD covariates following by the new PH (tested)
#and old PH covariates.
    dummy <- seq_along(bsplines)
attr(Terms, "term.labels") <- colnames(x)
xTDarg <- ((bsplines == TRUE)*(covtest == FALSE))*dummy
xPH1arg <- ((bsplines == TRUE)*(covtest == TRUE))*dummy
xPH2arg <- ((bsplines == FALSE)*(covtest == FALSE))*dummy
namesx <- c(attr(Terms, "term.labels")[xTDarg],
attr(Terms, "term.labels")[xPH1arg],
attr(Terms, "term.labels")[xPH2arg])
xTD <- matrix(x[, attr(Terms, "term.labels")[xTDarg]])
xPH1 <- matrix(x[, attr(Terms, "term.labels")[xPH1arg]])
xPH2 <- matrix(x[, attr(Terms, "term.labels")[xPH2arg]])
if (nrow(xTD) != 0) {
x <- cbind(xTD, xPH1)
if (nrow(xPH2) != 0) {
x <- cbind(x, xPH2)
}
}
else {
x <- cbind(xPH1)
if (nrow(xPH2) != 0) {
x <- cbind(x, xPH2)
}
}
dimnames(x)[[2]] <- namesx
if (nPH.Full != 0)
thetaPH <- theta0[(5 + 5 * nTD.Full + 1):length(theta0)]
thetaPH <- c(rep(0, ncol(xPH1)), thetaPH)
if (nTD >= 1) {
vec <- c(rep(0, 5 * nTD))
for (i in 1:nTD) {
vectnTD <- ((i - 1) * 5 + 1):((i - 1) * 5 + 5)
vec[vectnTD] <- grep(dimnames(x)[[2]][i], names(theta0))
}
thetaTD <- theta0[vec]
if (nalpha) {
theta0 <- c(theta0[1:5], thetaTD, thetaPH, alpha)
} else{
theta0 <- c(theta0[1:5], thetaTD, thetaPH)
}
names(theta0) <- NULL
}
else{
theta0 <- c(theta0[1:5], thetaPH)
}
if (optim) {
Tmodel <- giorgi.tdph.optim.maxim(x,
theta0,
nTD,
nPH,
event,
ehazard,
ehazardInt,
P,
p,
k,
nrowx,
IntGL,
cpti,
cptj,
cst,
boundmin,
boundmax,
Int.chazard,
Int.FDbase,
Int.SDbase,
int,
control$iter.max,
control$eps,
Terms,
                                          add.rmap,
                                          add.rmap.cut,
                                          trace,
                                          speedy,
nghq = nghq)
}
else{
Tmodel <- giorgi.tdph.maxim(x,
theta0,
nTD,
nPH,
event,
ehazard,
ehazardInt,
P,
p,
k,
nrowx,
IntGL,
nghq,
cpti,
cptj,
cst,
boundmin,
boundmax,
Int.chazard,
Int.FDbase,
Int.SDbase,
int,
control)
}
list(
coefficients = Fmodel$coefficients,
var = Fmodel$var,
loglik = c(Tmodel$loglik, Fmodel$loglik),
loglik.test = (-2 * (Tmodel$loglik - Fmodel$loglik)),
iterations = Fmodel$iterations,
cov.test = cov.test,
cov.df = (4 * abs(nPH.Full - nPH)),
message = Fmodel$message,
convergence = Fmodel$convergence,
p = p,
nTD = nTD.Full,
nPH = nPH.Full,
nalpha = nalpha
)
}
else{
list(
coefficients = Fmodel$coefficients,
var = Fmodel$var,
loglik = Fmodel$loglik,
iterations = Fmodel$iterations,
cov.test = FALSE,
message = Fmodel$message,
convergence = Fmodel$convergence,
nTD = nTD,
nPH = nPH,
nalpha = nalpha,
p = p
)
}
}
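# A minimal, hedged sketch of the Gauss-Legendre rescaling performed by
# IntGL() above: nodes on [-1, 1] are mapped to the integration interval and
# the weights are scaled accordingly. Wrapped in if (FALSE) so that nothing
# runs when the package is loaded; the integrand exp() and the interval
# [0, 1] are illustrative assumptions only.
if (FALSE) {
  GL <- statmod::gauss.quad(n = 12, kind = "legendre")
  a <- 0
  b <- 1
  u <- 0.5 * (b - a) * GL$nodes + 0.5 * (a + b)    # rescaled nodes
  approx <- 0.5 * (b - a) * sum(GL$weights * exp(u))
  all.equal(approx, exp(1) - 1)                    # TRUE up to quadrature error
}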
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/giorgi.tdph.fit.R |
giorgi.tdph.maxim <- function(x, theta0, nTD, nPH, event, ehazard, ehazardInt,
P, p, k, nrowx, IntGL, nghq = nghq, cpti, cptj, cst, boundmin,
boundmax, Int.chazard, Int.FDbase, Int.SDbase,
int, control, speedy = FALSE) {
control.iter.max <- control$iter.max
control.eps <- control$eps
name.bs <- dimnames(x)[[2]]
name <- c(rep(0, 5 + 5 * (nTD) + nPH))
name[1:5] <- c("qbs base ( 1 )", "qbs base ( 2 )", "qbs base ( 3 )",
"qbs base ( 4 )", "qbs base ( 5 )")
if (nTD >= 1) {
name[6:(5 + (5 * nTD))] <- sapply(1:nTD,
function(i, name.bs)
rep(
{paste("qbs",
name.bs[i],
collapse = "") },
5),
name.bs = name.bs)
value <- c(rep(1:5, nTD))
name[6:(5 + (5 * nTD))] <- sapply(1:(5 * (nTD)),
function(i, name, value) {
paste(name[5 + i],
"(", value[i], ")",
collapse = "")},
name = name, value = value)
if (nPH != 0)
name[(5 + (5 * nTD) + 1):(5 + (5 * nTD) + nPH)] <-
name.bs[(nTD + 1):(nTD + nPH)]
}
else
name[(5 + 1):(5 + nPH)] <- name.bs[(nTD + 1):(nTD + nPH)]
covxx <- matrix(0, nrow(x), ncol(x) ^ 2)
covxx <- matrix(sapply(1:ncol(x),
function(i, xx)
xx * xx[, i], xx = x),
ncol = ncol(x) ^ 2)
iter <- 0
FD <- 1
SD <- NULL
#Maximlisation loop
while (sum(abs(FD)) > (control.eps))
{
if (iter > control.iter.max)
stop(paste("Ran out of iterations", control.iter.max,
"and did not converge." ))
iter <- iter + 1
#Calculation of the disease-related mortality hazard function with the
#TD parameters, and with the PH parameters if the are some
rrBetaPH <- 1
rrBetaTD <- 1
rrTauTD <- exp(colSums(theta0[1:5] * t(P)))
if (nPH != 0) {
thetaPH <- theta0[(5 + 5 * nTD + 1):(5 + 5 * nTD + nPH)]
rrBetaPH <-
exp(colSums(as.matrix((thetaPH * t(x[, (1 + nTD):(nTD + nPH)])))))
}
if (nTD != 0) {
rrBetaTD <- c(rep(0, nrowx))
rrBetaTD <-
exp(rowSums(
sapply(1:5, function(i, theta, nTD, x, P)
colSums(as.matrix((theta[(6 + (i - 1) * nTD):(5 + i * nTD)] *
t(x[, 1:nTD] * P[, i])))),
theta = theta0[1:(5 + 5 * nTD)],
nTD = nTD, x = x, P = P)
))
}
chazardTDPH <- rrTauTD * rrBetaPH * rrBetaTD
int.chazard <- matrix(0, nrowx, k)
int.FDbase <- matrix(0, nrowx, 5 * k)
int.SDbase <- matrix(0, 5 * nrowx, 5 * k)
#Loop for integral calculation
for (cst in 1:k) {
int.chazard[, cst] <-
IntGL(Int.chazard, t(matrix(c(
boundmax[, cst], boundmin[, cst]
), ncol = 2)), cst, cpti, cptj, theta0[1:(5 + 5 * nTD)], x, nTD, p, nghq = nghq)
for (cpti in 1:5) {
int.FDbase[, (cpti - 1) * k + cst] <-
IntGL(Int.FDbase,
t(matrix(c(boundmax[, cst], boundmin[, cst]), ncol = 2)),
cst, cpti, cptj,
theta0[1:(5 + 5 * nTD)],
x,
nTD,
p, nghq)
for (cptj in cpti:5) {
int.SDbase[((cpti - 1) * nrowx + 1):(cpti * nrowx),
(cptj - 1) * k + cst] <-
IntGL(Int.SDbase,
t(matrix(c(boundmax[, cst], boundmin[, cst] ), ncol = 2)),
cst, cpti, cptj,
theta0[1:(5 + 5 * nTD)],
x,
nTD,
p, nghq = nghq)
int.SDbase[((cptj - 1) * nrowx + 1):(cptj * nrowx),
(cpti - 1) * k + cst] <-
int.SDbase[((cpti - 1) * nrowx + 1):(cpti * nrowx),
(cptj - 1) * k + cst]
}
}
}
#First derivative for the baseline
FDbase <-
sapply(1:5, function(i,
event,
int.FDbase,
chazardTDPH,
rrBetaPH,
ehazard,
nrowx,
P)
- sum(rrBetaPH * int.FDbase[, ((i - 1) * 3 + 1):(i * 3)]) +
colSums(as.matrix(
(event[1:nrowx] * P[, i] * chazardTDPH /
(chazardTDPH + ehazard)))),
event = event, int.FDbase = int.FDbase, chazardTDPH = chazardTDPH,
rrBetaPH = rrBetaPH, ehazard = ehazard,
nrowx = nrowx, P = P)
#Second derivative for the baseline
SDbase <- sapply(1:5, function(i, rrBetaPH, int.SDbase)
(rrBetaPH) * rowSums(int.SDbase[, ((i - 1) * 3 + 1):(i * 3)]),
rrBetaPH = rrBetaPH, int.SDbase = int.SDbase)
SDbase <- sapply(1:5,
function(i, SDbase, nrowx)
colSums(
as.matrix(
(SDbase[
((i - 1) * nrowx + 1):((i - 1) * nrowx + nrowx),
]))),
SDbase = SDbase, nrowx = nrowx)
covpp <- matrix(0, nrow(x), ncol(P) ^ 2)
covpp <- matrix(sapply(1:ncol(P), function(i, PP)
PP * PP[, i], PP = P), ncol = ncol(P) ^ 2)
SDbase <- SDbase - colSums(event[1:nrowx] * covpp * chazardTDPH *
ehazard / (chazardTDPH + ehazard) ^ 2)
    # NOTE: the second-derivative matrix is ordered as follows:
    #  - components of the baseline;
    #  - components of the TD covariates: columns for the first elementary
    #    spline of the nTD covariates, followed by the columns for the
    #    second elementary spline, and so on for the 5 elementary splines;
    #  - components of the PH covariates, if any.
#If there are some TD covariates
if (nTD != 0) {
#First derivative for the TD beta
FDbTD <- lapply(1:nTD, function(i, int.FDbase, x)
int.FDbase * x[, i], int.FDbase = int.FDbase, x = x)
dum <- matrix(0, 5, nTD)
for (j in 1:nTD) {
dum[, j] <- sapply(1:5,
function(i, j, FDbTD, rrBetaPH)
- sum(
(rrBetaPH) *
FDbTD[[j]][,(3 * (i - 1) + 1):(3 * i)]),
j = j,
FDbTD = FDbTD,
rrBetaPH = rrBetaPH)
}
FDbTD <- c(t(dum) + matrix(
sapply(1:5,
function(i,
event,
chazardTDPH,
x,
nTD,
ehazard,
P)
colSums(as.matrix((event * x[, 1:nTD] * P[, i] * chazardTDPH /
(chazardTDPH + ehazard)))),
ehazard = ehazard,
event = event,
chazardTDPH = chazardTDPH,
x = x,
nTD = nTD,
P = P),
nrow = nTD
))
FD <- c(FDbase, FDbTD)
#Second partial derivative
SDP <-
matrix(
sapply(1:nTD, function(i, int.SDbase, x, nTD)
int.SDbase * x[, i], int.SDbase = int.SDbase, x = x, nTD = nTD),
ncol = ncol(int.SDbase) * nTD
)
dummy1 <-
sapply(1:(5 * nTD), function(i, SDP, rrBetaPH)
(rrBetaPH) * rowSums(SDP[, ((i - 1) * 3 + 1):(i * 3)]), SDP = SDP,
rrBetaPH = rrBetaPH)
dummy1 <-
lapply(1:5, function(i, dummy1, nrowx, nTD)
matrix(colSums(as.matrix(
(dummy1[((i - 1) * nrowx + 1):((i - 1) * nrowx + nrowx), ]))),
ncol = nTD), dummy1 = dummy1, nrowx = nrowx, nTD = nTD)
dummy2 <- sapply(1:nTD, function(i,
event,
chazardTDPH,
covpp,
ehazard,
nrowx,
x)
(-colSums(as.matrix((event[1:nrowx] * x[, i] * covpp * chazardTDPH *
ehazard / (chazardTDPH + ehazard) ^ 2)))),
event = event, chazardTDPH = chazardTDPH, x = x, nrowx = nrowx,
ehazard = ehazard, covpp = covpp)
SDP <- dummy2 + do.call("rbind", dummy1)
SDPTD <-
matrix(sapply(1:5, function(i, SDP)
(SDP[((i - 1) * 5 + 1):(i * 5), ]), SDP = SDP), 5, 5 * nTD)
#Second derivative for the TD beta
covxxTD <- matrix(0, nrow(x), (nTD ^ 2))
covxxTD <-
matrix(sapply(1:(nTD), function(i, xx, nTD)
xx[, 1:nTD] * xx[, i], xx = x, nTD = nTD),
ncol = (nTD ^ 2))
SDbTD <-
matrix(
sapply(1:(nTD ^ 2), function(i, int.SDbase, covxxTD)
int.SDbase * covxxTD[, i], int.SDbase = int.SDbase,
covxxTD = covxxTD),
ncol = ncol(int.SDbase) * (nTD ^ 2)
)
n2 <- sapply(1:(5 * nTD ^ 2), function(i, SDbTD, rrBetaPH)
(rrBetaPH) * rowSums(SDbTD[, ((i - 1) * 3 + 1):(i * 3)]),
SDbTD = SDbTD,
rrBetaPH = rrBetaPH)
n1 <- lapply(1:5, function(i, n2, nrowx, nTD)
matrix(colSums(as.matrix(
(n2[((i - 1) * nrowx + 1):((i - 1) * nrowx + nrowx), ]))),
ncol = (nTD ^ 2)), n2 = n2, nrowx = nrowx, nTD = nTD)
n3 <- sapply(1:ncol(covpp), function(i,
event,
chazardTDPH,
nrowx,
covxxTD,
covpp,
ehazard)
(-colSums(as.matrix((event[1:nrowx] * covxxTD * covpp[, i] *
chazardTDPH * ehazard /
(chazardTDPH + ehazard) ^ 2)))),
event = event, chazardTDPH = chazardTDPH, ehazard = ehazard,
nrowx = nrowx, covxxTD = covxxTD, covpp = covpp)
SDbTD <- t(n3) + c(do.call("rbind", n1))
if (nTD == 1) {
SDbTD <- matrix(SDbTD, 5, 5)
}
else{
SDbTD <-
do.call("rbind",
lapply(1:25, function(i, SDbTD, nTD)
matrix(c(SDbTD[i, ]), ncol = nTD, nrow = nTD),
SDbTD = SDbTD, nTD = nTD))
SDbTD <-
matrix(sapply(1:5, function(i, SDbTD, nTD)
(SDbTD[((i - 1) * (5 * nTD) + 1):(i * (5 * nTD)), ]),
SDbTD = SDbTD, nTD = nTD),
5 * nTD,
5 * nTD)
NULL
}
SD <- rbind(cbind(SDbase, SDPTD), cbind(t(SDPTD), SDbTD))
#If there are some PH covariates associated
if (nPH != 0) {
FDbPH <- colSums(as.matrix((-x[, (1 + nTD):(nTD + nPH)] * rrBetaPH *
rowSums(int.chazard) +
event * x[, (1 + nTD):(nTD + nPH)] *
(chazardTDPH /
(chazardTDPH + ehazard)))))
FD <- c(FD, FDbPH)
SDPPH <- sapply(1:5, function(i, int.FDbase)
rowSums(int.FDbase[, (3 * (i - 1) + 1):(3 * i)]),
int.FDbase = int.FDbase)
SDPPH <- sapply(1:nPH, function(i, SDPPH, rrBetaPH, x, nTD)
- colSums(as.matrix(((-rrBetaPH) * x[, nTD + i] * SDPPH))),
SDPPH = SDPPH, rrBetaPH = rrBetaPH, x = x, nTD = nTD)
SDPPH1 <-
sapply(1:nPH, function(i,
event,
chazardTDPH,
nrowx,
x,
nTD,
P,
ehazard)
- colSums(as.matrix((event[1:nrowx] * x[, nTD + i] * P *
chazardTDPH *ehazard /
(chazardTDPH + ehazard) ^ 2))),
event = event, chazardTDPH = chazardTDPH, nrowx = nrowx,
x = x, nTD = nTD, P = P, ehazard = ehazard)
SDPPH <- SDPPH + SDPPH1
covxxTDPH <- matrix(0, nrowx, (nTD * nPH))
covxxTDPH <- matrix(sapply(1:(nTD), function(i, xx, nTD, nPH)
xx[, (nTD + 1):(nTD + nPH)] * xx[, i], xx = x,
nTD = nTD, nPH = nPH),
ncol = (nTD * nPH))
SDPbTDPH1 <- matrix(t(sapply(1:ncol(covxxTDPH),
function(i,
event,
chazardTDPH,
nrowx,
covxxTDPH,
ehazard,
P)
- colSums(as.matrix((event[1:nrowx] * covxxTDPH[, i] * P *
chazardTDPH * ehazard /
(chazardTDPH + ehazard) ^ 2))),
event = event, chazardTDPH = chazardTDPH, nrowx = nrowx,
covxxTDPH = covxxTDPH, ehazard = ehazard, P = P)
), nPH, 5 * nTD)
int.FDbTD <- matrix(
sapply(1:nTD, function(i, int.FDbase, x)
int.FDbase * x[, i], int.FDbase = int.FDbase, x = x),
ncol = nTD * ncol(int.FDbase)
)
int.FDbTD <- matrix(
sapply(1:(5 * nTD),
function(i, int.FDbTD, rrBetaPH, x, nTD, nPH)
(-colSums(as.matrix(
(-x[, (nTD + 1):(nTD + nPH)] * (rrBetaPH) *
rowSums(int.FDbTD[, ((i - 1) * 3 + 1):(i * 3)]))))),
int.FDbTD = int.FDbTD, rrBetaPH = rrBetaPH, x = x,
nTD = nTD, nPH = nPH),
ncol = nTD)
SDPbTDPH <- SDPbTDPH1 +
c(do.call("cbind",
lapply(1:5, function(i, int.FDbTD, nPH)
int.FDbTD[((i - 1) * nPH + 1):((i - 1) * nPH + nPH), ],
int.FDbTD = int.FDbTD, nPH)))
covxxPH <- matrix(0, nrowx, (nPH ^ 2))
covxxPH <- matrix(sapply(1:(nPH), function(i, xx, nTD, nPH)
xx[, (nTD + 1):(nTD + nPH)] * xx[, nTD + i],
xx = x,
nTD = nTD,
nPH = nPH),
ncol = (nPH ^ 2))
SDbPH <- (-colSums(as.matrix(
(-covxxPH * rrBetaPH * rowSums(int.chazard) +
(event * covxxPH * (chazardTDPH * ehazard) /
(chazardTDPH + ehazard) ^ 2)))))
SDbPH <- matrix(SDbPH, nPH, nPH)
SD <- rbind(cbind(SDbase, SDPTD, SDPPH),
(cbind(t(SDPTD), SDbTD, t(SDPbTDPH))),
(cbind(t(SDPPH), SDPbTDPH, SDbPH)))
NULL
}
}
#If all the covariates are PH
else{
FDbPH <- colSums(-x[, (1 + nTD):(nTD + nPH)] * rrBetaPH *
rowSums(int.chazard) +
event * x[, (1 + nTD):(nTD + nPH)] *
(chazardTDPH / (chazardTDPH + ehazard)))
FD <- c(FDbase, FDbPH)
SDPPH <-
sapply(1:5, function(i, int.FDbase)
rowSums(int.FDbase[, (3 * (i - 1) + 1):(3 * i)]),
int.FDbase = int.FDbase)
SDPPH <-
sapply(1:nPH, function(i, SDPPH, rrBetaPH, x, nTD)
- colSums(as.matrix(((-rrBetaPH) * x[, nTD + i] * SDPPH))),
SDPPH = SDPPH, rrBetaPH = rrBetaPH, x = x, nTD = nTD)
SDPPH1 <-
sapply(1:nPH, function(i,
event,
chazardTDPH,
nrowx,
x,
nTD,
P,
ehazard)
- colSums(as.matrix((event[1:nrowx] * x[, nTD + i] * P *
chazardTDPH * ehazard /
(chazardTDPH + ehazard) ^ 2))),
event = event, chazardTDPH = chazardTDPH, nrowx = nrowx, x = x,
nTD = nTD, P = P, ehazard = ehazard)
SDPPH <- SDPPH + SDPPH1
covxxPH <- matrix(0, nrowx, (nPH ^ 2))
covxxPH <-
matrix(sapply(1:(nPH), function(i, xx, nTD, nPH)
xx[, (nTD + 1):(nTD + nPH)] * xx[, nTD + i], xx = x,
nTD = nTD, nPH = nPH),
ncol = (nPH ^ 2))
SDbPH <-
(-colSums(as.matrix(
(-covxxPH * rrBetaPH * rowSums(int.chazard) +
(event * covxxPH * (chazardTDPH * ehazard) /
(chazardTDPH + ehazard) ^ 2)))))
SDbPH <- matrix(SDbPH, nPH, nPH)
SD <- rbind(cbind(SDbase, SDPPH), (cbind(t(SDPPH), SDbPH)))
NULL
}
diff <- try(solve(qr(SD), FD))
if (!is.numeric(diff))
stop("Matrix not definite positive. Check for colinearity in the data set.")
theta0 <- diff + theta0
NULL
}
logLik <- colSums(as.matrix(((-rrBetaPH * rowSums(int.chazard)) -
exp(0) * ehazardInt +
(event[1:nrowx] * log(chazardTDPH +
ehazard)))))
if (nTD != 0) {
v <- c(rep(0, (5 * nTD)))
for (i in 1:nTD) {
v[((i - 1) * 5 + 1):((i - 1) * 5 + 5)] <- sapply(1:5,
function(j,
i,
theta0,
nTD)
theta0[
(6 + (j - 1) * nTD) +
(i - 1)],
i = i,
theta0 = theta0,
nTD)
}
theta0[6:(5 + (5 * nTD))] <- v
}
names(theta0) <- name
list(
coefficients = theta0,
varcov = try(solve(SD), TRUE),
std_err = try(sqrt(diag(solve(SD))), TRUE),
loglik = logLik,
iterations = iter,
intervalles = int,
nPH = nPH,
nTD = nTD
)
}
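# Hedged toy illustration (not executed at load time) of the update rule in
# the loop above: theta0 <- theta0 + solve(qr(SD), FD) is a Newton-Raphson
# step with score vector FD and information matrix SD. The quadratic
# objective below is purely hypothetical.
if (FALSE) {
  theta <- 0
  for (iter in 1:25) {
    FD <- -2 * (theta - 2)   # score of f(theta) = -(theta - 2)^2
    SD <- 2                  # information, i.e. minus the second derivative
    theta <- theta + solve(SD, FD)
    if (abs(FD) < 1e-9) break
  }
  theta                      # converges to the maximiser, 2
}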
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/giorgi.tdph.maxim.R |
#' @import statmod
#' @import numDeriv
giorgi.tdph.optim.maxim <- function(x,
theta0,
nTD,
nPH,
event,
ehazard,
ehazardInt,
P,
p,
k,
nrowx,
IntGL,
cpti,
cptj,
cst,
boundmin,
boundmax,
Int.chazard,
Int.FDbase,
Int.SDbase,
int,
control.iter.max,
control.eps,
Terms,
add.rmap,
add.rmap.cut,
trace,
speedy = FALSE,
nghq = nghq) {
if (is.null(trace)) {
trace <- 0
}
alpha <- NULL
no <- length(add.rmap)
k <- 3
nrowx <- nrow(x)
ehazard <- c(ehazard)
event <- c(event)
cst <- 1
cpti <- 1
cptj <- 1
rrBetaPH <- 1
rrBetaTD <- 1
int.chazard <- matrix(0, nrowx, k)
int.FDbase <- matrix(0, nrowx, 5 * k)
int.SDbase <- matrix(0, 5 * nrowx, 5 * k)
#Loop for integral calculation
for (cst in 1:k) {
int.chazard[, cst] <-
IntGL(Int.chazard, t(matrix(c(
boundmax[, cst], boundmin[, cst]
), ncol = 2)), cst, cpti, cptj, theta0[1:(5 + 5 * nTD)], x, nTD, p, nghq = nghq)
}
name.bs <- dimnames(x)[[2]]
name <- c(rep(0, 5 + 5 * (nTD) + nPH))
name[1:5] <- c("qbs base ( 1 )",
"qbs base ( 2 )",
"qbs base ( 3 )",
"qbs base ( 4 )",
"qbs base ( 5 )")
if (!is.null(add.rmap)) {
nalpha <- nlevels(add.rmap)
rescale <- TRUE
} else{
nalpha <- 0
rescale <- FALSE
}
if (rescale) {
if (nTD >= 1) {
name[6:(5 + (5 * nTD))] <- sapply(1:nTD,
function(i, name.bs)
rep({paste("qbs",
name.bs[i],
collapse = "")},
5),
name.bs = name.bs)
value <- c(rep(1:5, nTD))
name[6:(5 + (5 * nTD))] <- sapply(1:(5 * (nTD)),
function(i, name, value) {
paste(name[5 + i],
"(", value[i], ")",
collapse = "")
},
name = name,
value = value)
#name alpha
if (nalpha == 1) {
name[(5 + (5 * nTD) + 1):(5 + (5 * nTD) + nalpha)] <- "log(alpha)"
}else{
name[(5 + (5 * nTD) + 1):(5 + (5 * nTD) + nalpha)] <- paste0(
'log(alpha.', levels(add.rmap), ")")
}
#name alpha
if (nPH != 0) {
name[(5 + (5 * nTD) + 1):(5 + (5 * nTD) + nPH)] <- name.bs[
(nTD + 1):(nTD + nPH)]
if (nalpha == 1) {
name[(5 + 1 + (5 * nTD) + nPH):(nalpha + 5 + (5 * nTD) + nPH)] <-
"log(alpha)"
}else{
name[
(1 + 5 + (5 * nTD) + nPH):(nalpha + 5 + (5 * nTD) + nPH)
] <- paste0('log(alpha.', levels(add.rmap), ")")
}
}
}
else{
if (nalpha == 1) {
name[(5 + 1):(5 + nPH)] <- name.bs[(nTD + 1):(nTD + nPH)]
name[(1 + 5 + nPH):(nalpha + 5 + nPH)] <- rep("alpha", nalpha)
}else{
name[(5 + 1):(5 + nPH)] <- name.bs[(nTD + 1):(nTD + nPH)]
name[(5 + 1 + nPH):(nalpha + 5 + nPH)] <- paste0('alpha.',
levels(add.rmap))
}
}
##If no nalpha
} else{
if (nTD >= 1) {
name[6:(5 + (5 * nTD))] <-
sapply(1:nTD, function(i, name.bs)
rep({
paste("qbs", name.bs[i], collapse = "")
}, 5), name.bs = name.bs)
value <- c(rep(1:5, nTD))
name[6:(5 + (5 * nTD))] <- sapply(1:(5 * (nTD)),
function(i, name, value) {
paste(name[5 + i],
"(",
value[i],
")",
collapse = "")
},
name = name,
value = value)
if (nPH != 0) {
name[(5 + (5 * nTD) + 1):(5 + (5 * nTD) + nPH)] <- name.bs[
(nTD + 1):(nTD + nPH)]
}
}
else{
name[(5 + 1):(5 + nPH)] <- name.bs[(nTD + 1):(nTD + nPH)]
}
}
covxx <- matrix(0, nrow(x), ncol(x) ^ 2)
covxx <- matrix(
sapply(1:ncol(x),
function(i, xx)
xx * xx[, i],
xx = x),
ncol = ncol(x) ^ 2)
f <- function(theta0) {
logLik <- 0
rrBetaPH <- 1
rrBetaTD <- 1
rrTauTD <- exp(colSums(as.matrix(theta0[1:5] * t(P))))
if (rescale) {
levels(add.rmap) <- 1:nlevels(add.rmap)
if (length(levels(add.rmap)) < 2) {
alpha <- alpha0 <- (theta0[(5 + 5 * nTD + nPH + 1)])
} else{
no <- length(add.rmap)
alpha0 <- (theta0[
(5 + 5 * nTD + nPH + 1):(5 + 5 * nTD + nPH + nalpha)])
Madd.rmap <- t(sapply(1:no, function(i, add.rmap) {
ligne <- rep(0, nlevels(add.rmap))
ligne[as.numeric(add.rmap[i])] <- 1
return(ligne)
}, add.rmap = add.rmap))
alpha <- (Madd.rmap %*% alpha0)
}
} else{
alpha <- 0
}
rrBetaPH <- 1
rrBetaTD <- 1
rrTauTD <- exp(colSums(as.matrix(theta0[1:5] * t(P))))
if (nPH != 0) {
thetaPH <- theta0[(5 + 5 * nTD + 1):(5 + 5 * nTD + nPH)]
rrBetaPH <- exp(colSums(as.matrix(
thetaPH * t(x[, (1 + nTD):(nTD + nPH)]))))
}
if (nTD != 0) {
rrBetaTD <- c(rep(0, nrowx))
rrBetaTD <-
exp(rowSums(
sapply(1:5, function(i, theta, nTD, x, P)
colSums(as.matrix(
theta[(6 + (i - 1) * nTD):(5 + i * nTD)] *
t(x[, 1:nTD] * P[, i]))), theta = theta0[1:(5 + 5 * nTD)],
nTD = nTD, x = x, P = P)
))
}
chazardTDPH <- rrTauTD * rrBetaPH * rrBetaTD
int.chazard <- matrix(0, nrowx, k)
int.FDbase <- matrix(0, nrowx, 5 * k)
int.SDbase <- matrix(0, 5 * nrowx, 5 * k)
#Loop for integral calculation
for (cst in 1:k) {
int.chazard[, cst] <-
IntGL(Int.chazard, t(matrix(
c(boundmax[, cst], boundmin[, cst]),
ncol = 2)),
cst,
cpti,
cptj,
theta0[1:(5 + 5 * nTD)],
x,
nTD,
p, nghq = nghq)
}
if (rescale) {
tlogLik <- colSums(as.matrix((-rrBetaPH * rowSums(int.chazard)) -
exp(alpha) * ehazardInt +
event[1:nrowx] * log(chazardTDPH +
exp(alpha) *
ehazard)))
}
else{
tlogLik <- colSums(as.matrix(((-rrBetaPH * rowSums(int.chazard)) -
exp(0) * ehazardInt +
event[1:nrowx] *
log(chazardTDPH + ehazard))))
}
logLik <- logLik + tlogLik
return(logLik)
}
gradient <- function(theta0) {
FD <- FDbPH <- FDbase <- FDbTD <- 0
FDalpha <- 0
if (rescale) {
levels(add.rmap) <- 1:nlevels(add.rmap)
if (length(levels(add.rmap)) < 2) {
alpha <- alpha0 <- (theta0[(5 + 5 * nTD + nPH + 1)])
} else{
no <- length(add.rmap)
alpha0 <- (theta0[
(5 + 5 * nTD + nPH + 1):(5 + 5 * nTD + nPH + nalpha)])
Madd.rmap <- t(sapply(1:no, function(i, add.rmap) {
ligne <- rep(0, nlevels(add.rmap))
ligne[as.numeric(add.rmap[i])] <- 1
return(ligne)
}, add.rmap = add.rmap))
alpha <- (Madd.rmap %*% alpha0)
}
} else{
alpha <- 0
}
rrBetaPH <- 1
rrBetaTD <- 1
rrTauTD <- exp(colSums(as.matrix(theta0[1:5] * t(P))))
if (nPH != 0) {
thetaPH <- theta0[(5 + 5 * nTD + 1):(5 + (5 * nTD) + nPH)]
rrBetaPH <- exp(colSums(as.matrix(
thetaPH * t(x[, (1 + nTD):(nTD + nPH)]))))
}
if (nTD != 0) {
rrBetaTD <- c(rep(0, nrowx))
rrBetaTD <-
exp(rowSums(
sapply(1:5, function(i, theta, nTD, x, P)
colSums(as.matrix(theta[(6 + (i - 1) * nTD):(5 + i * nTD)] *
t(x[, 1:nTD] * P[, i]))),
theta = theta0[1:(5 + 5 * nTD)], nTD = nTD, x = x, P = P)
))
}
chazardTDPH <- rrTauTD * rrBetaPH * rrBetaTD
int.chazard <- matrix(0, nrowx, k)
int.FDbase <- matrix(0, nrowx, 5 * k)
int.SDbase <- matrix(0, 5 * nrowx, 5 * k)
#Loop for integral calculation
for (cst in 1:k) {
int.chazard[, cst] <-
IntGL(Int.chazard, t(matrix(c(
boundmax[, cst], boundmin[, cst]
), ncol = 2)), cst, cpti, cptj, theta0[1:(5 + 5 * nTD)], x, nTD, p, nghq = nghq)
for (cpti in 1:5) {
int.FDbase[, (cpti - 1) * k + cst] <- IntGL(
Int.FDbase,
t(matrix(c(boundmax[, cst],
boundmin[, cst]),
ncol = 2)),
cst,
cpti,
cptj,
theta0[1:(5 + 5 * nTD)],
x,
nTD,
p, nghq = nghq)
for (cptj in cpti:5) {
int.SDbase[((cpti - 1) * nrowx + 1):(cpti * nrowx),
(cptj - 1) * k + cst] <- IntGL(Int.SDbase,
t(matrix(
c(boundmax[, cst],
boundmin[, cst]),
ncol = 2)),
cst,
cpti,
cptj,
theta0[1:(5 + 5 * nTD)],
x,
nTD,
p, nghq = nghq)
int.SDbase[((cptj - 1) * nrowx + 1):(cptj * nrowx),
(cpti - 1) * k + cst] <- int.SDbase[(
(cpti - 1) * nrowx + 1):(cpti * nrowx),
(cptj - 1) * k + cst]
}
}
}
#First derivative for the baseline
if (rescale) {
FDbase <- sapply(1:5, function(i,
event,
int.FDbase,
chazardTDPH,
rrBetaPH,
ehazard,
nrowx,
P)
-sum(rrBetaPH * int.FDbase[, ((i - 1) * 3 + 1):(i * 3)]) +
colSums(as.matrix(
event[1:nrowx] * P[, i] *
chazardTDPH / (chazardTDPH + exp(alpha) * ehazard))),
event = event, int.FDbase = int.FDbase,
chazardTDPH = chazardTDPH, rrBetaPH = rrBetaPH,
ehazard = ehazard, nrowx = nrowx,
P = P)
} else{
FDbase <-
sapply(1:5, function(i,
event,
int.FDbase,
chazardTDPH,
rrBetaPH,
ehazard,
nrowx,
P)
-sum(rrBetaPH * int.FDbase[, ((i - 1) * 3 + 1):(i * 3)]) +
colSums(as.matrix(event[1:nrowx] * P[, i] *
chazardTDPH / (chazardTDPH + ehazard))),
event = event, int.FDbase = int.FDbase,
chazardTDPH = chazardTDPH, rrBetaPH = rrBetaPH,
ehazard = ehazard, nrowx = nrowx, P = P)
}
if (nTD != 0) {
# First derivative for the TD beta
FDbTD <- lapply(1:nTD,
function(i, int.FDbase, x)
int.FDbase * x[, i]
,int.FDbase = int.FDbase,
x = x)
dum <- matrix(0, 5, nTD)
for (j in 1:nTD) {
dum[, j] <- sapply(1:5,
function(i, j, FDbTD, rrBetaPH)
-sum((rrBetaPH) * FDbTD[[j]][, (3 * (i - 1) + 1):(3 * i)]),
j = j,
FDbTD = FDbTD,
rrBetaPH = rrBetaPH)
}
if (rescale) {
FDbTD <-
c(t(dum) + matrix(
sapply(1:5, function(i,
event,
chazardTDPH,
x,
nTD,
ehazard,
P)
colSums(as.matrix(
event * x[, 1:nTD] * P[, i] * chazardTDPH /
(chazardTDPH + exp(alpha) * ehazard))),
ehazard = ehazard, event = event,
chazardTDPH = chazardTDPH,
x = x, nTD = nTD, P = P),
nrow = nTD
))
} else{
FDbTD <- c(t(dum) + matrix(
sapply(1:5, function(i,
event,
chazardTDPH,
x,
nTD,
ehazard,
P)
colSums(as.matrix(event * x[, 1:nTD] * P[, i] *
chazardTDPH / (chazardTDPH + ehazard))),
ehazard = ehazard,
event = event,
chazardTDPH = chazardTDPH,
x = x,
nTD = nTD, P = P),
nrow = nTD
))
}
if (rescale) {
if (length(levels(add.rmap)) < 2) {
FDalpha <- colSums(as.matrix(
exp(alpha) * (-ehazardInt) + event[1:nrowx] *
c((exp(alpha)) * ehazard) / c(chazardTDPH +
exp(alpha) * ehazard)))
} else{
Malpha <- t(alpha0 * t(Madd.rmap))
FDalpha <- colSums(as.matrix(
-exp(Malpha) * c(ehazardInt) + event[1:nrowx] *
c(exp(Malpha) * ehazard) / c(chazardTDPH + exp(Malpha) *
ehazard)))
}
FD <- c(FDbase , FDalpha, FDbTD)
} else{
FD <- c(FDbase, FDbTD)
}
#If there are some PH covariates associated
if (nPH != 0) {
if (rescale) {
FDbPH <- colSums(as.matrix(
(-x[, (1 + nTD):(nTD + nPH)] * rrBetaPH *
rowSums(int.chazard) + event[1:nrowx] *
x[, (1 + nTD):(nTD + nPH)] *
(chazardTDPH / (chazardTDPH + (exp(alpha)) * ehazard)))))
if (length(levels(add.rmap)) < 2) {
FDalpha <- colSums(as.matrix(
(exp(alpha)) * (-ehazardInt) + event[1:nrowx] *
c((exp(alpha)) * ehazard) / c(chazardTDPH +
(exp(alpha)) * ehazard)))
} else{
Malpha <- t(alpha0 * t(Madd.rmap))
FDalpha <- colSums(as.matrix(
-exp(Malpha) * c(ehazardInt) + event[1:nrowx] *
c(exp(Malpha) * ehazard) / c(chazardTDPH +
exp(Malpha) * ehazard)))
}
FD <- c(FDbase, FDbTD, FDbPH, FDalpha)
} else{
FDbPH <- colSums(as.matrix(
-x[, (1 + nTD):(nTD + nPH)] *
rrBetaPH * rowSums(int.chazard) +
event * x[, (1 + nTD):(nTD + nPH)] *
(chazardTDPH / (chazardTDPH + ehazard))))
FD <- c(FDbase, FDbTD, FDbPH)
}
} else{
if (rescale) {
FD <- c(FDbase , FDalpha, FDbTD)
} else{
FD <- c(FDbase, FDbTD)
}
}
}
#If all the covariates are PH
else{
if (rescale) {
if (length(levels(add.rmap)) < 2) {
FDbPH <- colSums(as.matrix(
-x[, (1 + nTD):(nTD + nPH)] * rrBetaPH *
rowSums(int.chazard) + event * x[, (1 + nTD):(nTD + nPH)] *
(chazardTDPH / (chazardTDPH + ehazard * exp(alpha)))))
FDalpha <- colSums(as.matrix(
-exp(alpha) * ehazardInt + event[1:nrowx] * exp(alpha) *
c(ehazard) / c(chazardTDPH + exp(alpha) * ehazard)))
} else{
##If there is more than j =1 alpha
Malpha <- t(alpha0 * t(Madd.rmap))
FDbPH <- colSums(as.matrix(-x[, (1 + nTD):(nTD + nPH)] * rrBetaPH *
rowSums(int.chazard) +
event * x * chazardTDPH /
c(chazardTDPH +
ehazard * exp(alpha))))
FDalpha <- colSums(as.matrix(
-exp(Malpha) * ehazardInt + event * exp(Malpha) *
c(ehazard) / c(chazardTDPH + exp(alpha) * ehazard)))
}
FD <- c(FDbase, FDbPH, FDalpha)
} else{
FDbPH <- colSums(as.matrix(
-x[, (1 + nTD):(nTD + nPH)] * rrBetaPH *
rowSums(int.chazard) + event *
x[, (1 + nTD):(nTD + nPH)] *
(chazardTDPH / (chazardTDPH + ehazard))))
FD <- c(FDbase, FDbPH)
}
FD
}
}
#initialization
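  # Multi-stage strategy: a first maximisation is run without the rescaling
  # parameters (alpha temporarily stripped and rescale switched off); alpha
  # is reinstated further below, initialised at 0.1, and the model is
  # re-maximised from the first-stage estimates.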
if (rescale) {
if ((length(levels(add.rmap)) < 2) & (length(levels(add.rmap)) >= 1)) {
nalphaLev <- 1
} else{
if (length(levels(add.rmap)) >= 2) {
nalphaLev <- nlevels(add.rmap)
}
}
theta0 <- theta0[1:(length(theta0) - nalphaLev)]
nalpha <- 0
rescale <- FALSE
if (nTD != 0 & nPH != 0) {
theta0 <- optim(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
lower = c(rep(-10, 5), rep(-5, (5) * nTD),
rep((-5), nPH),
rep(-5, nlevels(add.rmap))),
upper = c(rep(2, 5), rep(2, 5 * nTD), rep(3, nPH),
rep(5, nlevels(add.rmap))),
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 400,
fnscale = -1,
trace = trace))$par
} else{
if (nTD == 0 & nPH != 0) {
if (speedy) {
max_cores <- parallel::detectCores()
used_cores <- min(max_cores, (max_cores - 2))
if (used_cores < 2) {
stop("We didn't detect enough cores for speedy")
}
cl <- makeCluster(used_cores)
setDefaultCluster(cl = cl)
theta0 <- optimParallel(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 400,
fnscale = -1,
trace = trace),
parallel = list(loginfo = TRUE))
setDefaultCluster(cl = NULL)
stopCluster(cl)
} else{
theta0 <- optim(par = theta0,
fn = f,
gr = gradient,
lower = c(rep(-10, 5), rep(-5, (5) * nTD),
rep((-5), nPH), rep(-5, nalpha)),
upper = c(rep(2, 5), rep(2, 5 * nTD), rep(3, nPH),
rep(5, nalpha)),
hessian = TRUE,
control = list(maxit = 400),
method = "L-BFGS-B")$par
}
} else {
if (nTD != 0 & nPH == 0) {
theta0 <- optim(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
lower = c(rep(-10, 5),
rep(-5 * nTD),
rep(-5, nalpha)),
upper = c(rep(2, 5),
rep(2, 5 * nTD),
rep(5, nalpha)),
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 400,
fnscale = -1,
trace = trace))$par
}
}
}
}else{
if (nTD != 0 & nPH != 0) {
theta0 <- optim(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
lower = c(rep(-10, 5),
rep(-5, (5) * nTD),
rep((-5), nPH)),
upper = c(rep(10, 5),
rep(5, 5 * nTD),
rep(5, nPH)),
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 400,
fnscale = -1,
trace = trace))$par
}
else{
if (nTD == 0 & nPH != 0) {
theta0 <- optim(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
method = "L-BFGS-B",
lower = c(rep(-10, 5), rep((-5), nPH)),
upper = c(rep(2, 5), rep(3, nPH)),
control = list(REPORT = 1,
maxit = 500,
fnscale = -1,
trace = trace))$par
}else{
if (nTD != 0 & nPH == 0) {
theta0 <- optim(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
lower = c(rep(-10), rep(-5 * nTD)),
upper = c(rep(2, 5), rep(2, 5 * nTD)),
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 500,
fnscale = -1,
trace = trace))$par
}
}
}
}
if (rescale == FALSE) {
if (nTD != 0 & nPH != 0) {
res <- optim(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 5000,
fnscale = -1,
trace = trace))
}else{
if (nTD == 0 & nPH != 0) {
if (speedy) {
max_cores <- parallel::detectCores()
used_cores <- min(max_cores, (max_cores - 2))
if (used_cores < 2) {
stop("We didn't detect enough cores for speedy")
}
cl <- makeCluster(used_cores)
setDefaultCluster(cl = cl)
res <- optimParallel(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 5000,
fnscale = -1,
trace = trace),
parallel = list(loginfo = TRUE))
setDefaultCluster(cl = NULL)
stopCluster(cl)
}
else{
res <- optim(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 5000,
fnscale = -1,
trace = trace))
}
}else{
if (nTD != 0 & nPH == 0) {
res <- optim( par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 5000,
fnscale = -1,
trace = trace))
}
}
}
}
if ((length(levels(add.rmap)) < 2) & length(levels(add.rmap)) >= 1) {
nalpha <- 1
rescale <- TRUE
} else{
if (length(levels(add.rmap)) >= 2) {
rescale <- TRUE
nalpha <- nlevels(add.rmap)
} else{
if (length(levels(add.rmap)) < 1) {
rescale <- FALSE
nalpha <- 0
}
}
}
if (rescale) {
theta0 <- c(res$par, rep(0.1, nalpha))
if (nTD != 0 & nPH != 0) {
res <- optim(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 1000,
fnscale = -1,
trace = trace))
}
else{
if (nTD == 0 & nPH != 0) {
if (speedy) {
max_cores <- parallel::detectCores()
used_cores <- min(max_cores, (max_cores - 2))
if (used_cores < 2) {
stop("We didn't detect enough cores for speedy")
}
cl <- parallel::makeCluster(used_cores)
setDefaultCluster(cl = cl)
res <- optimParallel(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 5000,
fnscale = -1,
trace = trace),
parallel = list(loginfo = TRUE))
setDefaultCluster(cl = NULL)
stopCluster(cl)
}
else{
res <- optim(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 5000,
fnscale = -1,
trace = trace))
}
} else{
if (nTD != 0 & nPH == 0) {
res <- optim(par = theta0,
fn = f,
gr = gradient,
hessian = TRUE,
method = "L-BFGS-B",
control = list(REPORT = 1,
maxit = 5000,
fnscale = -1,
trace = FALSE))
}
}
}
}
logLik <- res$value
theta0 <- res$par
FD <- -gradient(theta0)
SD <- -numDeriv::hessian(f, theta0)
iter <- res$counts[2]
convergence <- res$convergence
message <- res$message
names(theta0) <- name
return(list(coefficients = theta0,
varcov = try(solve(SD), TRUE),
std_err = try(sqrt(diag(solve(SD))), TRUE),
loglik = logLik,
iterations = iter,
intervalles = int,
convergence = convergence,
message = message,
nTD = nTD,
nPH = nPH,
nalpha = nalpha))
}
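# Hedged sketch (hypothetical normal log-likelihood, simulated data, not run
# at load time): the standard errors returned above come from inverting
# minus the numerical Hessian of the log-likelihood at the optimum, e.g.
if (FALSE) {
  x_obs <- rnorm(100)
  loglik <- function(par) {
    sum(dnorm(x_obs, mean = par[1], sd = exp(par[2]), log = TRUE))
  }
  fit <- optim(c(0, 0), loglik, method = "L-BFGS-B",
               control = list(fnscale = -1))
  info <- -numDeriv::hessian(loglik, fit$par)
  sqrt(diag(solve(info)))  # asymptotic standard errors
}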
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/giorgi.tdph.optim.maxim.R |
#' @title plot.bsplines
#'
#' @description Plots the log hazard ratio functions of a non-proportional
#' hazards model fitted with B-splines.
#'
#'
#' @param x An object of class xhaz
#'
#' @param cov the covariates for which a plot is required.
#'
#' @param conf.int a vector of logical values indicating whether (if TRUE)
#' confidence intervals will be plotted. The default is to do so if the plot
#' concerns only one curve.
#'
#' @param baseline a vector of logical values indicating whether (if \code{baseline = TRUE})
#' to plot the curve for the baseline group. Default is FALSE, except if cov
#' is unspecified.
#'
#' @param xrange vector indicating the minimum and the maximum values of the
#' x axis. By default, these values are automatically calculated for the first
#' plot (i.e before the use of add argument).
#'
#' @param yrange vector indicating the minimum and the maximum values of the y
#' axis. By default, these values are automatically calculated for the
#' first plot (i.e before the use of add argument).
#'
#' @param xlegend value indicating the location of the legend over x axis.
#' By default, location at the left of the plot.
#'
#' @param ylegend value indicating the location of the legend over y axis.
#' By default, location at the top of the plot
#'
#' @param glegend vector of names attributed to each line of the excess hazard
#' to be displayed in the plot. If (\code{baseline = TRUE}), glegend is \code{"baseline"}.
#'
#' @param xaxs the x axis style, as listed in 'par'. Survival curves are
#' traditionally drawn with the curve touching the bounding box on the left
#' edge, but not touching it on the right edge. This corresponds to neither
#' of the two standard S axis styles of "e" (neither touches) or "i" (both touch).
#' If xaxs is missing or NULL the internal axis style is used (xaxs = "i") but
#' only after the right endpoint has been extended.
#'
#' @param add a logical value indicating whether to add the survival curves to the
#' current plot (if \code{add = TRUE}). Default is FALSE.
#'
#' @param col a vector of integers specifying colors for each curve. The default
#' value is 1.
#'
#' @param lty a vector of integers specifying line types for each curve. The
#' default value is fixed by the number of covariates (plus 1 if \code{baseline = TRUE}).
#'
#' @param lwd a vector of numeric values for line widths. The default value is 1.
#'
#' @param ... additional arguments affecting the plot function
#'
#' @keywords plot.bsplines
#'
#' @return This function produces a plot of the log hazard ratio
#' functions of a non-proportional hazards model.
#'
#' @author Juste Goungounga, Robert Darlin Mba, Nathalie Grafféo and Roch Giorgi
#' @export
#'
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group.
#' More accurate cancer-related excess mortality through correcting
#' background mortality for extra variables.
#' Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#' Mba RD, Goungounga JA, Grafféo N, Giorgi R; CENSUR working survival group.
#' Correcting inaccurate background mortality in excess hazard models
#' through breakpoints. BMC Med Res Methodol. 2020 Oct 29;20(1):268.
#' doi: 10.1186/s12874-020-01139-z. PMID: 33121436; PMCID: PMC7596976.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/33121436/}{PubMed})
#'
#'
#' Giorgi R, Abrahamowicz M, Quantin C, Bolard P, Esteve J, Gouvernet J,
#' Faivre J. A relative survival regression model using B-spline functions
#' to model non-proportional hazards.
#' Statistics in Medicine 2003; 22: 2767-84.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/12939785/}{PubMed})
#'
#' @examples
#' \donttest{
#' # load the data set in the package
#' library("xhaz")
#' library("survexp.fr")
#'
#'data("dataCancer", package = "xhaz") # load the data set in the package
#'
#' fit.nphBS <- xhaz(
#' formula = Surv(obs_time_year, event) ~ ageCentre + qbs(immuno_trt),
#' data = dataCancer,
#' ratetable = survexp.fr,
#' interval = c(0, NA, NA, max(dataCancer$obs_time_year)),
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#' plot(fit.nphBS, cov = "immuno_trt", col = "blue", baseline = FALSE)
#' }
#' @importFrom graphics plot lines grid legend
plot.bsplines <- function(x,
cov,
conf.int = TRUE,
baseline = FALSE,
xrange,
yrange,
xlegend,
ylegend,
glegend,
xaxs = NULL,
add = FALSE,
col = 1,
lty = 1,
lwd = 1,
...) {
rsfit <- x
covlev <- levels(rsfit$data[, cov])
if (inherits(rsfit, "bsplines")) {
int <- rsfit$int
n.tau <- 5
id_coefvov <- (which(stringr::str_detect(names(
rsfit$coefficients
),
pattern = cov)))
nTD <- length(id_coefvov) / 5
if (missing(cov)) {
if (ncol(attr(rsfit$terms, "factors")) != sum(rsfit$bsplines))
stop(
"At least one covariate is not TD. You must have to specify the TD covariates for which plot is required."
)
coef <- rsfit$coef
ncov <-
        (length(coef) - n.tau) / 5 # /5 because there are five parameters per TD covariate
baseline <- TRUE
npar <- ncov + 1 #All the tested variables plus the baseline
cov <- (n.tau + 1):(length(coef)) #Location of the variables
names.cov <- names(coef)[cov]
}
else {
coef <- rsfit$coef
if (is.character(cov)) {
dum <- 0
for (i in 1:length(cov))
dum <- sum(length(grep(cov[i],
as.character(
names(rsfit$coef)
))) / n.tau + dum)
#if (dum != length(cov))
if (rsfit$nTD == 0)
stop(
"At least one covariate is not TD. You must have to specify the TD covariates for which plot is required."
)
npar <-
          length(cov) # used to account for the baseline when it is needed
covv <- c(1:length(cov))
for (i in 1:length(cov)) {
covv[i] <-
grep(stringr::str_sub(cov[i], start = 1, end = nchar(cov[i]) - 1),
as.character(attr(rsfit$terms, "term.labels")))
}
names.cov <- attr(rsfit$terms, "term.labels")[covv]
id_cov <- which(stringr::str_detect(names(rsfit$coefficients), pattern = cov))
cov <- names(rsfit$coefficients)[id_cov]
}
else{
if (length(grep("T", as.character(rsfit$bsplines[cov]))) != length(rsfit$bsplines[cov]))
stop(
"At least one covariate is not TD. You must have to specify the TD covariates for which plot is required."
)
npar <-
          length(cov) # used to account for the baseline when it is needed
names.cov <- attr(rsfit$terms, "term.labels")[cov]
cov <-
c(sapply(1:npar, function(i, names.cov, coef)
(grep(names.cov[i], as.character(names(
coef
)))), names.cov = names.cov, coef = rsfit$coef))
}
ncov <-
        length(cov) / 5 # there are 5 coefficients per covariate
coef <- c(coef[1:n.tau], coef[cov])
var <-
rsfit$var[c(1:(n.tau + sum(length(id_coefvov) / 5) * 5)), c(1:(n.tau +
sum(length(id_coefvov) / 5) * 5))]
if (baseline == TRUE)
npar <-
          length(cov) + 1 # used to account for the baseline when it is needed
if (any(is.na(cov)) ||
length(cov) > ncov * 5 || length(cov) < 1)
stop("Invalid variable requested")
}
if (missing(conf.int)) {
if (npar > 1) {
conf.int <- FALSE
} else {
conf.int <- TRUE
}
  }
if (length(conf.int) < npar)
conf.int <- c(conf.int, rep(FALSE, npar - length(conf.int)))
else if (length(conf.int) > npar)
conf.int <- conf.int[seq(npar)]
rsfit.coef <-
c(rsfit$coef[cov], rsfit$coef[(ncol(attr(rsfit$terms, "factors")) + 1):(length(rsfit$coef))])
x.time <- seq(0, int[4], 0.01) #c(0:int[4])
knots <- c(0, 0, int[1], int[2], int[3], int[4], int[4], int[4])
splinesbase <- splines::spline.des(knots, x.time, 3)$design
logHRbeta <- matrix(rep(0, length(x.time) * ncov), ncol = ncov)
for (n in 1:ncov) {
dum <- t(coef[((n - 1) * 5 + 6):((n - 1) * 5 + 10)] * t(splinesbase))
logHRbeta[, n] <- rowSums(dum)
}
if (baseline == FALSE) {
logHRi <- logHRbeta
}
else{
dum <- t(coef[1:n.tau] * t(splinesbase))
logHRtau <- rowSums(dum)
logHRi <- cbind(logHRtau, logHRbeta)
}
if (missing(xrange))
xrange <- c(0, max(x.time))
if (is.null(xaxs)) {
xrange <- 1.04 * xrange
xaxs <- "i"
}
ncov2 <- ncol(logHRi)
if (length(col) != npar) {
col <- rep(col, length = ncov2)
}
if (missing(lty))
lty <- seq(ncov2)
else if (length(lty) != ncov2)
lty <- rep(lty, length = ncov2)
if (length(lwd) != npar)
lwd <- rep(lwd, length = ncov2)
if (any(conf.int)) {
lcibeta <- matrix(rep(0, length(x.time) * ncol(logHRi)), ncol = ncov)
lcubeta <-
matrix(rep(0, length(x.time) * ncol(logHRi)), ncol = ncov)
varB <- diag(var)[(n.tau + 1):(n.tau + n.tau * nTD)]
VcovB <-
var[(n.tau + 1):(n.tau + n.tau * nTD), (n.tau + 1):(n.tau + n.tau * nTD)]
for (n in 1:ncov) {
varBB <-
c(varB[n], varB[n + nTD], varB[n + (2 * nTD)], varB[n + (3 * nTD)], varB[n +
(4 * nTD)])
w1 <- t(varBB * t((splinesbase ^ 2)))
w1 <- rowSums(w1)
w2 <- {
0
}
for (i in 1:(n.tau - 1)) {
for (j in (i + 1):n.tau) {
w2 <-
w2 + 2 * ((splinesbase[, i] * splinesbase[, j]) * VcovB[((i - 1) * ncov +
n), ((j - 1) * ncov + n)])
}
lcibeta[, n] <-
(logHRbeta[, n] - abs(qnorm((
1 - rsfit$level
) / 2)) * sqrt(w1 + w2))
lcubeta[, n] <-
(logHRbeta[, n] + abs(qnorm((
1 - rsfit$level
) / 2)) * sqrt(w1 + w2))
}
}
if (baseline == FALSE) {
lcii <- lcibeta
lcui <- lcubeta
}
else{
vartau <- diag(var)[1:n.tau]
Vcovtau <- var[1:n.tau, 1:n.tau]
w1t <- t(vartau * t(splinesbase ^ 2))
w1t <- rowSums(w1t)
w2t <- {
0
}
for (i in 1:(n.tau - 1)) {
for (j in (i + 1):n.tau) {
w2t <- w2t + 2 * ((splinesbase[, i] * splinesbase[, j]) * Vcovtau[j, i])
}
}
lcitau <-
(logHRtau - abs(qnorm((
1 - rsfit$level
) / 2)) * sqrt(w1t + w2t))
lcutau <-
(logHRtau + abs(qnorm((
1 - rsfit$level
) / 2)) * sqrt(w1t + w2t))
lcii <- cbind(lcitau, lcibeta)
lcui <- cbind(lcutau, lcubeta)
}
if (!add) {
if (missing(yrange))
yrange <-
c(min(logHRi, lcii, lcui) * 0.96, c(max(logHRi, lcii, lcui)) * 0.96)
plot(xrange, yrange, type = "n", xaxs = xaxs, ...)
}
}
else if (!add) {
if (missing(yrange))
yrange <- c(min(logHRi) * 0.96, c(max(logHRi)) * 0.96)
plot(xrange, yrange, type = "n", xaxs = xaxs, ...)
}
if (any(conf.int)) {
sapply(1:ncol(logHRi), function(i) {
lines(x.time,
logHRi[, i],
lty = lty[i],
col = col[i],
lwd = lwd[i])
lines(
x.time,
lcii[, i],
lty = ifelse(ncov == 1,
lty[i] +
ifelse(add, 0, 1),
lty[i]),
col = col[i],
lwd = lwd[i]
)
lines(
x.time,
lcui[, i],
lty = ifelse(ncov == 1,
lty[i] +
ifelse(add, 0, 1), lty[i]),
col = col[i],
lwd = lwd[i]
)
})
} else {
sapply(1:ncol(logHRi), function(i) {
lines(x.time,
logHRi[, i],
lty = lty[i],
col = col[i],
lwd = lwd[i])
})
}
if (missing(glegend)) {
glegend <- paste(names.cov,
c(covlev)[2:(length(covlev))])
}
if (baseline == TRUE) {
names.cov <- c(c("baseline"), glegend)
} else {
names.cov <- glegend
}
if (missing(ylegend) & missing(xlegend)) {
legend(
"bottomleft",
legend = names.cov[1:ncol(logHRi)],
lty = lty,
lwd = lwd,
col = col,
bty = "n"
)
} else{
legend(
x = xlegend,
y = ylegend,
legend = names.cov[1:ncol(logHRi)],
lty = lty,
lwd = lwd,
col = col,
bty = "n"
)
}
} else{
stop("only implemented for time-dependent covariate effect")
}
invisible()
}
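
# Illustrative sketch (not part of the package API; the helper name and inputs
# are hypothetical): the pointwise confidence bands computed above follow from
# the variance of a linear combination of B-spline basis functions,
#   var(sum_i c_i B_i(t)) = sum_i B_i(t)^2 var(c_i)
#                           + 2 * sum_{i<j} B_i(t) B_j(t) cov(c_i, c_j),
# i.e. the quadratic form diag(B %*% V %*% t(B)).
if (FALSE) {
  loghr_band <- function(coef, vcov, basis, level = 0.95) {
    # basis: n x p matrix of B-spline values at the plotting times
    est <- drop(basis %*% coef)                     # log hazard ratio curve
    se  <- sqrt(rowSums((basis %*% vcov) * basis))  # pointwise standard errors
    z   <- abs(qnorm((1 - level) / 2))
    cbind(est = est, lower = est - z * se, upper = est + z * se)
  }
}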
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/plot.bsplines.R |
#' @title plots of excess hazard and net Survival from an
#' \code{predxhaz} object
#'
#' @description Function to plot excess hazard or net survival
#'
#'
#' @param x An object of class predxhaz
#'
#' @param what allow to choose between excess hazard
#' (\code{what="hazard"}) or net survival (\code{what="survival"}).
#'
#' @param ... additional arguments affecting the plot function
#'
#' @keywords plot.predxhaz
#'
#' @return The return of this function produces graphics of the excess hazard,
#' net survival, or time-dependent effects, when the times.pts argument is
#' provided in the prediction call.
#'
#' @author Juste Goungounga, Robert Darlin Mba, Nathalie Grafféo and Roch Giorgi
#'
#' @importFrom graphics plot lines grid legend
#'
#' @export
#'
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group.
#' More accurate cancer-related excess mortality through correcting
#' background mortality for extra variables.
#' Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#' Mba RD, Goungounga JA, Grafféo N, Giorgi R; CENSUR working survival group.
#' Correcting inaccurate background mortality in excess hazard models
#' through breakpoints. BMC Med Res Methodol. 2020 Oct 29;20(1):268.
#' doi: 10.1186/s12874-020-01139-z. PMID: 33121436; PMCID: PMC7596976.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/33121436/}{PubMed})
#'
#' @examples
#' \donttest{
#' data("dataCancer")
#' # load the data set in the package
#' library("survival")
#' library("numDeriv")
#' library("survexp.fr")
#' data("simuData", package = "xhaz") # load the data sets 'simuData'
#'
#' #define the levels of variable sex
#' levels(simuData$sex) <- c("male", "female")
#'
#' # Esteve et al. model
#'
#'
#' fit.estv1 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData, ratetable = survexp.us,
#' interval = c(0, NA, NA, NA, NA, NA, max(simuData$time_year)),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant", pophaz = "classic")
#'
#'
#' predict_est <- predict(object = fit.estv1,
#' new.data = simuData,
#' times.pts = c(seq(0, 4, 0.1)),
#' baseline = TRUE)
#'
#' plot(predict_est, what = "survival",
#' xlab = "time since diagnosis (year)",
#' ylab = "net survival", ylim = c(0, 1))
#'
#' data("dataCancer", package = "xhaz") # load the data set in the package
#'
#' fit.phBS <- xhaz(
#' formula = Surv(obs_time_year, event) ~ ageCentre + immuno_trt,
#' data = dataCancer, ratetable = survexp.fr::survexp.fr,
#' interval = c(0, NA, NA, max(dataCancer$obs_time_year)),
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#'
#' predict_mod1 <- predict(object = fit.phBS, new.data = dataCancer,
#' times.pts = c(seq(0, 10, 0.1)), baseline = FALSE)
#'
#' old.par <- par(no.readonly = TRUE)
#' par(mfrow = c(2, 1))
#'
#'
#' plot(predict_mod1, what = "survival",
#' xlab = "time since diagnosis (year)",
#' ylab = "net survival", ylim = c(0, 1))
#'
#' plot(predict_mod1, what = "hazard",
#' xlab = "time since diagnosis (year)",
#' ylab = "excess hazard")
#'
#' par(old.par)
#' }
plot.predxhaz <- function(x, what = "survival", ...){
if (any(class(x) == "predxhaz")) {
time <- sapply(1:length(x), function(i)unique(x[[i]]$times.pts))
if (what == "survival") {
survival <- sapply(1:length(x), function(i) mean(x[[i]]$survival))
plot(time, survival, type = "l",...)
grid()
} else if (what == "hazard") {
hazard <-
sapply(1:length(x), function(i) {
(sum(x[[i]]$hazard * x[[i]]$survival) / sum(x[[i]]$survival))
})
if (attr(x, "baseline") == "constant") {
plot(time, hazard, type = "s",...)
grid()
}else {
plot(time, hazard, type = "l",...)
grid()
}
} else if (what == "beta") {
stop("not yet implemented")
}
} else {
stop("not yet implemented")
}
}
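
# Illustrative sketch (hypothetical inputs): for what = "hazard", the curve
# plotted above is the survival-weighted average of the individual excess
# hazards at each time point, sum(h_i * S_i) / sum(S_i), i.e. the hazard
# corresponding to the pooled net survival curve.
if (FALSE) {
  pred_t <- data.frame(hazard   = c(0.10, 0.20, 0.15),
                       survival = c(0.90, 0.70, 0.80))
  with(pred_t, sum(hazard * survival) / sum(survival))  # pooled excess hazard
}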
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/plot.predxhaz.R |
#' @title Predictions of excess hazard and net Survival from a \code{bsplines}
#' object
#'
#' @description Function to predict excess hazard and net survival based on
#' an object of class \code{bsplines}. The function allows predictions at
#' several time points, provided they do not exceed the maximum follow-up
#' time of the fitted baseline model.
#'
#'
#' @param object an object of class \code{bsplines}
#'
#' @param new.data a new data frame containing the covariates used in the model
#'
#' @param times.pts times (in years) at which to calculate the excess hazard.
#' The default value is NULL; in this case, the time variable must be provided
#' in new.data
#'
#' @param baseline if \code{TRUE} (the default), predictions are made for the
#' baseline; set \code{baseline = FALSE} to estimate the net survival with
#' covariate effects
#'
#' @param ... additional arguments affecting the predictions of excess hazard
#' and net survival
#'
#' @keywords predict.bsplines
#'
#' @return An object of class predxhaz, which is a list of data.frame. Each
#' element of the list contains the estimates of hazard and survival at a fixed
#' time point. The return of this function can be used to produce graphics of
#' excess hazard or net survival, when times.pts argument is provided. This
#' object contains:
#'
#' \item{times.pts}{the time values (in years) at which the excess hazard
#' and/or the net survival have been estimated}
#'
#' \item{hazard}{the excess hazard values based on the model of interest}
#'
#' \item{survival}{the net survival values based on the model of interest}
#'
#'
#'
#' @author Juste Goungounga, Robert Darlin Mba, Nathalie Grafféo and Roch Giorgi
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group.
#' More accurate cancer-related excess mortality through correcting
#' background mortality for extra variables.
#' Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#' Mba RD, Goungounga JA, Grafféo N, Giorgi R; CENSUR working survival group.
#' Correcting inaccurate background mortality in excess hazard models
#' through breakpoints. BMC Med Res Methodol. 2020 Oct 29;20(1):268.
#' doi: 10.1186/s12874-020-01139-z. PMID: 33121436; PMCID: PMC7596976.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/33121436/}{PubMed})
#'
#'
#'
#'
#' @seealso \code{\link{xhaz}}, \code{\link{print.bsplines}}, \code{\link{print.constant}}
#'
#' @examples
#'
#' \donttest{
#' library("survival")
#' library("numDeriv")
#' library("survexp.fr")
#' library("splines")
#' data("dataCancer", package = "xhaz") # load the data set in the package
#'
#' fit.phBS <- xhaz(
#' formula = Surv(obs_time_year, event) ~ ageCentre + immuno_trt,
#' data = dataCancer, ratetable = survexp.fr,
#' interval = c(0, NA, NA, max(dataCancer$obs_time_year)),
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#'
#' print(fit.phBS)
#'
#'
#' predicted <- predict(object = fit.phBS,
#' new.data = dataCancer[1:10,],
#' times.pts = c(seq(0,10,1)),
#' baseline = TRUE)
#'
#'
#' #a list of predicted hazard and survival at different time points
#' print(predicted)
#'
#'
#' #predicted hazard and survival at time points 10 years
#' print(predicted[[10]])
#' }
#' @export
predict.bsplines <- function(object,
new.data = NULL,
times.pts = NULL,
baseline = TRUE,
...) {
Call <- match.call()
  if (any(object$bsplines))
    stop("predict.bsplines is not yet implemented for the non-proportional hazards setting\n")
int <- (object$interval)
if (inherits(object, "bsplines")) {
coeffBS <- object$coefficients[1:5]
if (is.null(new.data)) {
new.data <- object$data
xx <- as.data.frame(
model.matrix((object$terms),
(new.data)))[, -1, drop = FALSE]
if (is.null(times.pts)) {
times.pts_init <- times.pts
m <- eval(object$terms, sys.parent())
m[[1]] <- as.name("model.frame")
times.pts <- object$data[,toString(as.name(m[[2]][[2]]))]
}else{
times.pts_init <- times.pts
m <- eval(object$terms, sys.parent())
m[[1]] <- as.name("model.frame")
index_time <- which(colnames(object$data) %in% c(toString(as.name(m[[2]][[2]]))))
time_name <- colnames(object$data)[index_time]
time_data <- data.frame(times.pts)
new.data <- lapply(1:nrow(time_data),
function(i){
my_colnames <- c(colnames(new.data), time_name)
my_new.data <- data.frame(cbind(new.data,
rep(time_data[i,],
nrow(new.data))))
colnames(my_new.data) <- my_colnames
return(my_new.data)
})
}
}else{
if (is.null(times.pts)) {
times.pts_init <- times.pts
m <- eval(object$terms, sys.parent())
m[[1]] <- as.name("model.frame")
times.pts <- try(new.data[,toString(as.name(m[[2]][[2]]))], TRUE)
if (inherits(times.pts, "try-error"))
stop("Need to provides time variable in the new.data or in the time.pts parameter")
index_event <- which(colnames(object$data) %in% c(toString(as.name(m[[2]][[3]]))))
index_time <- which(colnames(object$data) %in% c(toString(as.name(m[[2]][[2]]))))
event_name <- colnames(object$data)[index_event]
event_data <- data.frame(rep(0, nrow(new.data)))
colnames(event_data) <- event_name
time_name <- colnames(object$data)[index_time]
time_data <- data.frame(times.pts)
colnames(time_data) <- time_name
new.data <- data.frame(cbind(new.data, event_data, time_data))
xx <- as.data.frame(model.matrix((object$terms),(new.data)))[, -1, drop = FALSE]
}else {
times.pts_init <- times.pts
m <- eval(object$terms, sys.parent())
m[[1]] <- as.name("model.frame")
index_event <- which(colnames(object$data) %in% c(toString(as.name(m[[2]][[3]]))))
index_time <- which(colnames(object$data) %in% c(toString(as.name(m[[2]][[2]]))))
event_name <- colnames(object$data)[index_event]
event_data <- data.frame(rep(0, nrow(new.data)))
colnames(event_data) <- event_name
time_name <- colnames(object$data)[index_time]
time_data <- data.frame(times.pts)
colnames(time_data) <- time_name
new.data <- lapply(1:nrow(time_data),
function(i){
my_colnames <- c(colnames(new.data),event_name, time_name)
my_new.data <- data.frame(cbind(new.data,
event_data,
rep(time_data[i,],
nrow(new.data))))
colnames(my_new.data) <- my_colnames
return(my_new.data)
})
xx <- as.data.frame(model.matrix((object$terms),
(new.data[[1]])))[, -1, drop = FALSE]
}
}
if (object$pophaz == "classic") {
nalpha <- 0
} else if (object$pophaz == "rescaled" |
object$pophaz == "corrected") {
indxAlpha <- which(stringr::str_detect(names(object$coefficients),
pattern = "alpha"))
nalpha <- length(indxAlpha)
}
nPH <- object$nPH
nTD <- object$nTD
coeffPred <- object$coefficients[(5 + 5*nTD + 1):(5 + 5*nTD + nPH )]
k <- 3
knot <- c(int[2], int[3])
delta <- sort(c(rep(c(int[1], int[4]), k), knot))
object$linear.predictors <- exp(as.matrix(xx) %*% coeffPred)
rrBetaZ <- t(object$linear.predictors)
CMUint <- list()
for (i in 1:length(times.pts)) {
CMUint[[i]] <- exp(-(
integrate(function(times.pts, coeffBS)
(exp(
apply((coeffBS) * t(
splines::spline.des(knots = delta, x = times.pts, ord = k)$design
), 2, sum)
)), 0, times.pts[i], coeffBS)$value
))
}
CHBSplines <-
exp(apply((coeffBS) * t(
splines::splineDesign(
knots = delta,
x = times.pts,
ord = k,
outer.ok = FALSE
)
), 2, sum))
if (is.null(times.pts_init)) {
mypred <- suppressWarnings(round(data.frame(times.pts = times.pts,
hazard = CHBSplines,
survival = unlist(CMUint)), 4))
class(mypred) <- c("data.frame","predxhaz")
} else{
mypred <- lapply(1:nrow(time_data),
function(i){
suppressWarnings(
round(data.frame(times.pts = rep(times.pts[[i]], nrow(new.data[[i]])),
hazard = rep(CHBSplines[i], nrow(new.data[[i]])),
survival = rep(CMUint[[i]], nrow(new.data[[i]]))), 4))
})
class(mypred) <- c("list","predxhaz")
}
attributes(mypred)$call <- Call
attributes(mypred)$baseline <- object$baseline
attributes(mypred)$pophaz <- object$pophaz
attributes(mypred)$coefficients <- object$coefficients
    attributes(mypred)$interval <- object$interval
if (max(times.pts) > max(attr(mypred, "interval"))) {
stop( "time must be inferior or equal to max value in interval specified to estimate the model parameter")
}
if (baseline) {
return(mypred)
} else{
if (is.null(times.pts_init)) {
mypred$hazard <- c(mypred$hazard * rrBetaZ)
mypred$survival <- c(mypred$survival^rrBetaZ)
}else{
for (i in 1:length(times.pts)) {
mypred[[i]]$hazard <- c(mypred[[i]]$hazard * rrBetaZ)
mypred[[i]]$survival <- c(mypred[[i]]$survival^rrBetaZ)
}
}
return(mypred)
}
}
invisible()
}
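
# Illustrative sketch (knots and coefficients below are assumed values, not
# taken from a fit): the baseline net survival returned above is
#   S0(t) = exp(-integral_0^t exp(sum_i c_i B_i(u)) du),
# with the quadratic B-spline basis evaluated by splines::spline.des(), as in
# the integrate() call of the method.
if (FALSE) {
  library(splines)
  knots  <- c(0, 0, 0, 2, 4, 6, 6, 6)  # boundary knots repeated, 2 interior
  coefBS <- rep(-1, 5)                 # 5 basis coefficients (assumed values)
  haz0 <- function(u) {
    B <- spline.des(knots, u, ord = 3)$design
    exp(drop(B %*% coefBS))            # baseline excess hazard at times u
  }
  surv0 <- function(t) exp(-integrate(haz0, 0, t)$value)
  surv0(3)
}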
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/predict.xhaz.bsplines.R |
#' @title Predictions of excess hazard and net Survival from an \code{constant}
#' object
#'
#' @description Function to predict excess hazard and net survival based on an
#' object of class \code{constant}. The function allows predictions at
#' several time points, provided they do not exceed the maximum follow-up
#' time of the fitted baseline model.
#'
#'
#' @param object An object of class constant
#'
#' @param new.data a new data frame containing the covariates used in the model
#'
#' @param times.pts times (in years) at which to calculate the excess hazard.
#' The default value is NULL; in this case, the time variable must be provided
#' in new.data
#'
#' @param baseline if \code{TRUE} (the default), predictions are made for the
#' baseline; set \code{baseline = FALSE} to estimate the net survival with
#' covariate effects
#'
#' @param ... additional arguments affecting the predictions of excess hazard
#' and net survival
#'
#' @keywords predict.constant
#'
#' @return An object of class predxhaz. The return of this function can be
#' used to produce graphics of the excess hazard or net survival, when the
#' times.pts argument is provided. This object contains:
#'
#'
#' \item{times.pts}{the time values (in years) at which the excess hazard and/or the net survival have been estimated}
#'
#' \item{hazard}{the excess hazard values based on the model of interest}
#'
#' \item{survival}{the net survival values based on the model of interest}
#'
#'
#'
#' @author Juste Goungounga, Robert Darlin Mba, Nathalie Grafféo and Roch Giorgi
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group.
#' More accurate cancer-related excess mortality through correcting
#' background mortality for extra variables.
#' Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#' Mba RD, Goungounga JA, Grafféo N, Giorgi R; CENSUR working survival group.
#' Correcting inaccurate background mortality in excess hazard models
#' through breakpoints. BMC Med Res Methodol. 2020 Oct 29;20(1):268.
#' doi: 10.1186/s12874-020-01139-z. PMID: 33121436; PMCID: PMC7596976.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/33121436/}{PubMed})
#'
#'
#'
#' @seealso \code{\link{xhaz}}, \code{\link{print.bsplines}}, \code{\link{print.constant}}
#'
#' @examples
#'
#' # load the data set in the package
#' library("xhaz")
#' library("numDeriv")
#'
#' # load the data sets 'simuData
#'
#' data("simuData", package = "xhaz")
#'
#' #define the levels of variable sex
#' levels(simuData$sex) <- c("male", "female")
#'
#' # Esteve et al. model
#'
#' set.seed(1980)
#' simuData2 <- simuData[sample(nrow(simuData), size = 500), ]
#'
#' fit.estv2 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData2,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, NA, NA, NA, 6),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant", pophaz = "classic")
#'
#'
#' predict_est <- predict(object = fit.estv2,
#' new.data = simuData2,
#' times.pts = c(seq(0, 4, 1)),
#' baseline = TRUE)
#' predict_est
#'
#' @export
predict.constant <- function(object,
new.data = NULL,
times.pts = NULL,
baseline = TRUE,
...) {
Call <- match.call()
int <- (object$interval)
if (inherits(object, "constant")) {
coeff <- object$coefficients
if (object$pophaz == "classic") {
length_alpha <- 0
} else if (object$pophaz == "corrected" | object$pophaz == "rescaled") {
indxAlpha <- which(stringr::str_detect(names(object$coefficients),
pattern = "alpha"))
length_alpha <- length(indxAlpha)
}
nvar <- length(object$coef) - length(int) + 1 - length_alpha
coeffPred <- object$coef[1:nvar]
coefPWSE <- exp(object$coef[(nvar + 1):(length(object$coefficients) -
length_alpha)])
if (is.null(new.data)) {
new.data <- object$data
xx <- as.data.frame(model.matrix((object$terms),
(new.data)))[,-1, drop = FALSE]
if (is.null(times.pts)) {
times.pts_init <- times.pts
m <- eval(object$terms, sys.parent())
m[[1]] <- as.name("model.frame")
times.pts <- object$data[, toString(as.name(m[[2]][[2]]))]
} else{
times.pts_init <- times.pts
m <- eval(object$terms, sys.parent())
m[[1]] <- as.name("model.frame")
index_time <-
which(colnames(object$data) %in% c(toString(as.name(m[[2]][[2]]))))
time_name <- colnames(object$data)[index_time]
time_data <- data.frame(times.pts)
new.data <- lapply(1:nrow(time_data),
function(i) {
my_colnames <- c(colnames(new.data), time_name)
my_new.data <-
data.frame(cbind(new.data,
rep(time_data[i, ],
nrow(new.data))))
colnames(my_new.data) <- my_colnames
return(my_new.data)
})
}
} else {
if (is.null(times.pts)) {
times.pts_init <- times.pts
m <- eval(object$terms, sys.parent())
m[[1]] <- as.name("model.frame")
times.pts <-
try(new.data[, toString(as.name(m[[2]][[2]]))], TRUE)
if (inherits(times.pts, "try-error"))
stop("Need to provides time variable in the new.data or in the time.pts parameter")
index_event <-
which(colnames(object$data) %in% c(toString(as.name(m[[2]][[3]]))))
index_time <-
which(colnames(object$data) %in% c(toString(as.name(m[[2]][[2]]))))
event_name <- colnames(object$data)[index_event]
event_data <- data.frame(rep(0, nrow(new.data)))
colnames(event_data) <- event_name
time_name <- colnames(object$data)[index_time]
time_data <- data.frame(times.pts)
colnames(time_data) <- time_name
new.data <-
data.frame(cbind(new.data, event_data, time_data))
xx <-
as.data.frame(model.matrix((object$terms), (new.data)))[,-1, drop = FALSE]
} else {
times.pts_init <- times.pts
m <- eval(object$terms, sys.parent())
m[[1]] <- as.name("model.frame")
index_event <-
which(colnames(object$data) %in% c(toString(as.name(m[[2]][[3]]))))
index_time <-
which(colnames(object$data) %in% c(toString(as.name(m[[2]][[2]]))))
event_name <- colnames(object$data)[index_event[1]]
event_data <- data.frame(rep(0, nrow(new.data)))
colnames(event_data) <- event_name
time_name <- colnames(object$data)[index_time[1]]
time_data <- data.frame(times.pts)
colnames(time_data) <- time_name
new.data <- lapply(1:nrow(time_data),
function(i) {
my_colnames <- c(colnames(new.data), event_name, time_name)
my_new.data <-
data.frame(cbind(new.data,
event_data,
rep(time_data[i, ],
nrow(new.data))))
colnames(my_new.data) <- my_colnames
return(my_new.data)
})
xx <- as.data.frame(model.matrix((object$terms),
(new.data[[1]])))[,-1, drop = FALSE]
}
}
rrBetaZ <- exp(t(apply(coeffPred * xx, 1, sum)))
i <- 1:(length(int) - 1)
hazcst <- sapply(1:(length(int) - 1),
function(i, times.pts) {
condI <- (times.pts >= int[i])
condII <- (times.pts < int[i + 1])
conditionId <- condI & condII
return(coefPWSE[i] * ifelse(conditionId, 1, 0))
},
times.pts = times.pts)
cum.rate <- sapply(1:(length(int) - 1),
function(i, times.pts) {
condI <- (times.pts >= int[i])
condII <- (times.pts < int[i + 1])
condIII <- (times.pts >= int[i + 1])
conditionId <- condI & condII
TI <-
ifelse(conditionId, (times.pts - int[i]), 0)
TII <-
ifelse(condIII, (int[i + 1] - int[i]), 0)
return(coefPWSE[i] * (TI + TII))
},
times.pts = times.pts)
haz_cst <- unlist(rowSums(hazcst))
surv_cst <- exp(-as.matrix(rowSums(cum.rate)))
if (is.null(times.pts_init)) {
mypred <- suppressWarnings(round(
data.frame(
times.pts = times.pts,
hazard = haz_cst,
survival = surv_cst
),
4
))
class(mypred) <- c("data.frame", "predxhaz")
} else{
mypred <- lapply(1:nrow(time_data),
function(i) {
suppressWarnings(round(
data.frame(
times.pts = rep(times.pts[[i]], nrow(new.data[[i]])),
hazard = rep(haz_cst[i], nrow(new.data[[i]])),
survival = rep(surv_cst[[i]], nrow(new.data[[i]]))
),
4
))
})
class(mypred) <- c("list", "predxhaz")
}
attributes(mypred)$call <- Call
attributes(mypred)$baseline <- object$baseline
attributes(mypred)$pophaz <- object$pophaz
attributes(mypred)$coefficients <- object$coefficients
    attributes(mypred)$interval <- object$interval
if (max(times.pts) > max(attr(mypred, "interval"))) {
stop(
"time must be inferior or equal to max value in interval specified to estimate the model parameter"
)
}
if (baseline) {
return(mypred)
}
else{
if (is.null(times.pts_init)) {
mypred$hazard <- c(mypred$hazard * rrBetaZ)
mypred$survival <- c(mypred$survival ^ rrBetaZ)
} else{
for (i in 1:length(times.pts)) {
mypred[[i]]$hazard <- c(mypred[[i]]$hazard * rrBetaZ)
mypred[[i]]$survival <- c(mypred[[i]]$survival ^ rrBetaZ)
}
}
return(mypred)
}
}
invisible()
}
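
# Illustrative sketch (hypothetical cuts and rates): with a piecewise-constant
# baseline, the cumulative excess hazard at time t adds the fully crossed
# intervals plus the fraction of the current one, exactly as the cum.rate
# computation above does, and S0(t) = exp(-H0(t)).
if (FALSE) {
  cumhaz_pw <- function(t, cuts, rates) {
    # cuts: interval boundaries c(0, ..., max); rates: one value per interval
    sum(rates * pmax(pmin(t, cuts[-1]) - cuts[-length(cuts)], 0))
  }
  cuts  <- c(0, 1, 2, 6)
  rates <- c(0.30, 0.20, 0.10)
  exp(-cumhaz_pw(1.5, cuts, rates))  # baseline net survival at t = 1.5
}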
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/predict.xhaz.constant.R |
#' @title A print.bsplines Function used to print an object of class \code{bsplines}
#'
#' @description This function presents the estimated coefficients of the
#' baseline excess hazard and of the covariate effects
#'
#' @param x an object of class \code{bsplines}
#'
#' @param digits minimal number of significant digits.
#'
#' @param ... additional parameters which can be used in the \code{print}
#' function
#'
#' @return Estimated parameters of the model in different scales for interpretation purposes.
#'
#' @keywords print.bsplines
#'
#' @seealso \code{\link{xhaz}}, \code{\link{plot.predxhaz}}, \code{\link{print.constant}}
#'
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group.
#' More accurate cancer-related excess mortality through correcting
#' background mortality for extra variables.
#' Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#' Mba RD, Goungounga JA, Grafféo N, Giorgi R; CENSUR working survival group.
#' Correcting inaccurate background mortality in excess hazard models
#' through breakpoints. BMC Med Res Methodol. 2020 Oct 29;20(1):268.
#' doi: 10.1186/s12874-020-01139-z. PMID: 33121436; PMCID: PMC7596976.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/33121436/}{PubMed})
#'
#' @examples
#'
#' \donttest{
#'
#' library("xhaz")
#' library("survival")
#' library("numDeriv")
#' library("survexp.fr")
#' library("splines")
#' data("dataCancer", package = "xhaz") # load the data set in the package
#'
#' fit.phBS <- xhaz(
#' formula = Surv(obs_time_year, event) ~ ageCentre + immuno_trt,
#' data = dataCancer, ratetable = survexp.fr,
#' interval = c(0, NA, NA, max(dataCancer$obs_time_year)),
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#' print(fit.phBS)
#' }
#'
#' @importFrom stats printCoefmat
#'
#'
#' @export
print.bsplines <-
function(x, digits = max(options()$digits - 4, 3), ...)
{
cl <- try(x$call)
if (!is.null(cl)) {
cat("Call:\n")
dput(cl)
cat("\n")
}
if (!is.null(x$fail)) {
cat(" xhaz.bsplines failed.", x$fail, "\n")
return()
}
savedig <- options(digits = digits)
on.exit(options(savedig))
if (!is.null(x$add.rmap)) {
nalpha <- nlevels(x$add.rmap)
} else{
nalpha <- 0
}
if (nalpha) {
if (nalpha == 1) {
indxAlpha <- which(stringr::str_detect(names(x$coefficients),
pattern = "alpha"))
x$mycoef <- x$coefficients
names(x$mycoef)[indxAlpha] <- "alpha"
names(x$coefficients)[indxAlpha] <- "log(alpha)"
}
else{
x$mycoef <- x$coefficients
indxAlpha <- which(stringr::str_detect(names(x$coefficients),
pattern = "alpha"))
names(x$coefficients)[c(indxAlpha)] <- paste("log(",
paste0('alpha.',
levels(x$add.rmap)),
")",
sep = "")
names(x$mycoef)[c(indxAlpha)] <-
paste0('alpha.', levels(x$add.rmap))
}
coef <- c(x$coefficients)
} else{
coef <- c((x$coefficients))
}
x$var <- x$varcov
# WARNING: works for one single alpha
if (nalpha) {
se <- sqrt((c(diag(x$var[1:(length(diag(x$var)) - 1),
1:(length(diag(x$var)) - 1)]),
(x$var[length(diag(x$var)),
length(diag(x$var))]))))
}
else{
se <- sqrt((diag(x$var)))
}
if (is.null(coef) | is.null(se))
stop("Input is not valid")
if (nalpha) {
tmp <- cbind(coef,
se,
(c((x$coef[1:length(x$coef) - 1]),
(x$coef[length(x$coef)])) -
abs(qnorm((
1 - x$level
) / 2)) * sqrt((diag(
x$var
)))),
(c((x$coef[1:length(x$coef) - 1]),
(x$coef[length(x$coef)])) +
abs(qnorm((
1 - x$level
) / 2)) * sqrt((diag(
x$var
)))),
coef / se,
signif(1 - pchisq((coef / se) ^ 2, 1), digits - 1))
dimnames(tmp) <- list(names(coef),
c(
"coef",
"se(coef)",
paste("lower", x$level, sep = " "),
paste("upper", x$level, sep = " "),
"z",
"Pr(>|z|)"
))
} else{
tmp <-
cbind(coef,
se,
(coef - abs(qnorm((
1 - x$level
) / 2)) * se),
(coef + abs(qnorm((
1 - x$level
) / 2)) * se),
coef / se,
signif(1 - pchisq((coef / se) ^ 2, 1), digits - 1))
dimnames(tmp) <- list(names(coef),
c(
"coef",
"se(coef)",
paste("lower", x$level, sep = " "),
paste("upper", x$level, sep = " "),
"z",
"Pr(>|z|)"
))
}
cat("\n")
printCoefmat(
tmp,
P.values = TRUE,
digits = digits,
signif.stars = TRUE,
na.print = "NA",
...
)
nPH <- x$nPH
nTD <- x$nTD
if (nPH > 0) {
tmp <- exp(tmp)[(5 + 5 * nTD + 1):(5 + 5 * nTD + nPH + nalpha), c(1, 3, 4)]
if (is.null(dim(tmp))) {
tmp <- t(as.matrix(tmp))
}
dimnames(tmp) <-
list(c(names(coef[(5 + 5 * nTD + 1):(5 + 5 * nTD + nPH)]),
names(x$mycoef[(5 + 5 * nTD + nPH + 1):(5 + 5 * nTD + nPH + nalpha)])),
c(
"exp(coef)",
paste("lower", x$level, sep = " "),
paste("upper", x$level, sep = " ")
))
cat("\n")
if (x$pophaz != "classic") {
if (x$pophaz == "rescaled") {
cat(
"Excess hazard and expected hazard ratio(s) \n(proportional effect variable(s) for exess hazard ratio(s) )\n"
)
print(tmp, digits = digits + 1)
cat("\n")
cat(
"Excess hazard ratio(s)\n(proportional effect variable(s) for exess hazard ratio(s))\n"
)
lines_al <-
which(stringr::str_detect(rownames(tmp), pattern = "alpha"))
print(tmp[-c(lines_al), ], digits = digits + 1)
cat("\n")
cat("and rescaled parameter on population hazard \n")
tmp_new_alpha <- matrix(tmp[c(lines_al), ], nrow = 1)
dimnames(tmp_new_alpha)[[1]] <- "alpha"
dimnames(tmp_new_alpha)[[2]] <- dimnames(tmp)[[2]]
print(tmp_new_alpha, digits = digits + 1)
}
} else{
cat(
"Excess hazard ratio(s) \n(proportional effect variable(s) for exess hazard ratio(s) )\n"
)
print(tmp, digits = digits + 1)
}
}
cat("\n")
cat("number of observations:",
paste0(format(x$n), "; "),
"number of events:",
x$n.events)
cat("\n")
cat(
"log-likelihood: ",
format(x$loglik),
" (for ",
length(x$coef),
" degree(s) of freedom)"
)
cat("\n")
if (sum(x$cov.test) > 0) {
cat("\n")
cat(
" Likelihood ratio test of PH effect for '",
gsub("\\(|\\)", "",
as.character(stringr::str_remove((
attr(x$terms, "term.labels")[x$cov.test]
),
"qbs"))),
"'=",
format(round(x$loglik.test, 2)),
",\n on ",
x$cov.df,
" degree(s) of freedom,"
)
cat(" p=",
format(1 - pchisq(x$loglik.test, x$cov.df)), sep =
"")
}
invisible()
}
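
# Illustrative note: the Wald p-values printed above use the chi-square form
# 1 - pchisq((coef/se)^2, 1), which is numerically identical to the usual
# two-sided normal form 2 * (1 - pnorm(abs(coef/se))).
if (FALSE) {
  z <- 1.96
  c(chisq = 1 - pchisq(z^2, 1), normal = 2 * (1 - pnorm(z)))
}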
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/print.xhaz.bsplines.R |
#' @title A print.constant Function used to print an object of class constant
#'
#' @description This function presents the estimated coefficients of the
#' baseline excess hazard and of the covariate effects
#'
#' @param x an object of class xhaz.constant
#'
#' @param digits minimal number of significant digits.
#'
#' @param ci_type method for confidence intervals calculation
#'
#' @param ... additional parameters which can be used in the \code{print}
#' function
#'
#' @return Estimated parameters of the model in different scales for interpretation purposes.
#'
#'
#' @keywords print.constant
#'
#' @seealso \code{\link{xhaz}}, \code{\link{summary.constant}}, \code{\link{print.bsplines}}
#'
#' @examples
#'
#' library("numDeriv")
#' library("survexp.fr")
#'
#' data("simuData","rescaledData", "dataCancer")
#' # load the data sets 'simuData', 'rescaledData' and 'dataCancer'.
#'
#' # Esteve et al. model: baseline excess hazard is a piecewise function
#' # linear and proportional effects for the covariates on
#' # baseline excess hazard.
#'
#' levels(simuData$sex) <- c("male", "female")
#' set.seed(1980)
#' simuData2 <- simuData[sample(nrow(simuData), size = 500), ]
#'
#' fit.estv2 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData2,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, NA, NA, NA, 6),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant", pophaz = "classic")
#'
#'
#' print(fit.estv2)
#'
#'
#' @importFrom stats printCoefmat
#'
#'
#' @export
print.constant <-
function(x,
ci_type = "lognormal",
digits = max(options()$digits - 4, 3),
...)
{
if (is.null(x$coefficients)) {
return(x)
}
cl <- try(x$call)
if (!is.null(cl)) {
cat("Call:\n")
dput(cl)
cat("\n")
}
if (!is.null(x$fail)) {
cat(" Esteveph failed.", x$fail, "\n")
return()
}
savedig <- options(digits = digits)
on.exit(options(savedig))
if (!is.null(x$add.rmap)) {
nalpha <- nlevels(x$add.rmap)
if (nalpha == 1) {
indxAlpha <- which(stringr::str_detect(names(x$coefficients),
pattern = "alpha"))
names(x$coef)[indxAlpha] <- "log(alpha)"
}
else{
indxAlpha <- which(stringr::str_detect(names(x$coefficients),
pattern = "alpha"))
if (x$add.rmap.cut$breakpoint == TRUE) {
initnames <- names(x$coef)[c(indxAlpha)]
names(x$coef)[c(indxAlpha)] <- paste("log(", initnames,
")",
sep = "")
} else {
names(x$coef)[c(indxAlpha)] <- paste("log(", paste0('alpha.',
levels(x$add.rmap)),
")",
sep = "")
}
nalpha <- length(indxAlpha)
}
} else{
nalpha <- 0
}
nstrata <- ifelse(is.null(attr(x$terms, "nstrata")),
1,
attr(x$terms, "nstrata"))
nvar <-
length(x$coef) - nstrata * (length(x$interval) - 1) - nalpha
coef <- x$coef
x$var <- x$varcov
se <- numeric(length(coef))
if (nvar > 0) {
if (nvar != 1) {
se[1:nvar] <- sqrt(diag(x$var[1:nvar, 1:nvar]))
}
else{
se[1:nvar] <- sqrt(x$var[1])
}
}
if (ci_type == "delta.method") {
if (!is.null(x$add.rmap)) {
se[(nvar + 1):(length(x$coef) - length(indxAlpha))] <- c(sqrt(exp(x$coef[(nvar + 1):(length(x$coef) - length(indxAlpha))]) %*%
x$var[(nvar + 1):(length(x$coef) - length(indxAlpha)),
(nvar + 1):(length(x$coef) - length(indxAlpha))] %*%
exp(x$coef[(nvar + 1):(length(x$coef) - length(indxAlpha))])))
coef[(nvar + 1):(length(x$coef) - length(indxAlpha))] <-
exp(coef[(nvar + 1):(length(x$coef) - length(indxAlpha))])
if (length(indxAlpha) > 1) {
se[c(indxAlpha)] <- sqrt(diag(x$var[c(indxAlpha), c(indxAlpha)]))
} else{
se[c(indxAlpha)] <- (sqrt(x$var[c(indxAlpha), c(indxAlpha)]))
}
} else{
se[(nvar + 1):length(x$coef)] <- sqrt(exp(x$coef[(nvar + 1):length(x$coef)]) %*%
x$var[(nvar + 1):length(x$coef),
(nvar + 1):length(x$coef)] %*%
exp(x$coef[(nvar + 1):length(x$coef)]))
coef[(nvar + 1):length(x$coef)] <-
exp(coef[(nvar + 1):length(x$coef)])
}
} else if (ci_type == "lognormal") {
#lognormal based CI
if (!is.null(x$add.rmap)) {
se[(nvar + 1):(length(x$coef) - length(indxAlpha))] <- c(sqrt(diag((
exp(2 * x$coef[(nvar + 1):(length(x$coef) - length(indxAlpha))] +
x$var[(nvar + 1):(length(x$coef) - length(indxAlpha)),
(nvar + 1):(length(x$coef) - length(indxAlpha))])
) *
(
exp(x$var[(nvar + 1):(length(x$coef) - length(indxAlpha)),
(nvar + 1):(length(x$coef) - length(indxAlpha))]) - 1
))))
coef[(nvar + 1):(length(x$coef) - length(indxAlpha))] <-
exp(coef[(nvar + 1):(length(x$coef) - length(indxAlpha))] + 1 / 2 * diag(x$var[(nvar + 1):(length(x$coef) - length(indxAlpha)), (nvar + 1):(length(x$coef) - length(indxAlpha))]))
if (length(indxAlpha) > 1) {
se[c(indxAlpha)] <- (sqrt(diag(x$var[c(indxAlpha), c(indxAlpha)])))
} else{
se[c(indxAlpha)] <- (sqrt(x$var[c(indxAlpha), c(indxAlpha)]))
}
} else{
se[(nvar + 1):length(x$coef)] <- sqrt(diag(exp(2 * x$coef[(nvar + 1):length(x$coef)] + (x$var[(nvar + 1):length(x$coef),
(nvar + 1):length(x$coef)])) * (exp(x$var[(nvar + 1):length(x$coef),
(nvar + 1):length(x$coef)]) - 1)))
coef[(nvar + 1):length(x$coef)] <-
exp(coef[(nvar + 1):length(x$coef)] + 1 / 2 * diag(x$var[(nvar + 1):length(x$coef), (nvar + 1):length(x$coef)]))
}
}
if (is.null(coef) | is.null(se))
stop("Input is not valid")
tmp <-
cbind(
coef,
se,
coef - abs(qnorm((1 - x$level) / 2)) * se,
coef + abs(qnorm((1 - x$level) / 2)) * se,
coef / se,
signif(1 - pchisq((coef / se) ^ 2, 1), digits - 1)
)
dimnames(tmp) <-
list(names(coef),
c(
"coef",
"se(coef)",
paste("lower", x$level, sep = " "),
paste("upper", x$level, sep = " "),
"z",
"Pr(>|z|)"
))
cat("\n")
printCoefmat(
tmp,
P.values = TRUE,
digits = digits,
signif.stars = TRUE,
na.print = "NA",
...
)
if (nvar > 0) {
if (!is.null(x$add.rmap)) {
index <- c(1:nvar, indxAlpha)
nalpha <- nlevels(x$add.rmap)
if (nalpha == 1) {
names(x$coef)[indxAlpha] <- "alpha"
}
else{
if (x$add.rmap.cut$breakpoint == FALSE) {
names(x$coef)[c(indxAlpha)] <- paste0('alpha.', levels(x$add.rmap))
} else {
names(x$coef)[c(indxAlpha)] <- initnames
}
}
}
coef <- x$coef
if (!is.null(x$add.rmap)) {
if (ci_type == "delta.method") {
se_alpha <- sapply(1:length(indxAlpha), function(i)
(matrix(exp(x$coef[c(indxAlpha[i])])) %*% sqrt(x$var[indxAlpha[i], indxAlpha[i]])))
coef_alpha <- exp(coef[indxAlpha])
mlevel <- abs(qnorm((1 - x$level) / 2))
tmp_new <- cbind((exp(coef[index])),
c(exp(coef[1:nvar] - abs(qnorm((1 - x$level) / 2
)) * se[1:nvar]),
c(coef_alpha - mlevel * c(se_alpha))),
c(exp(coef[1:nvar] + abs(qnorm((1 - x$level) / 2
)) * se[1:nvar]),
coef_alpha + mlevel * c(se_alpha)))
} else {
#lognormal based
if (length(indxAlpha) > 1) {
se_alpha <- sqrt(diag(x$var[indxAlpha, indxAlpha]))
} else{
se_alpha <- sqrt((x$var[indxAlpha, indxAlpha]))
}
coef_alpha <- exp(coef[indxAlpha])
mlevel <- abs(qnorm((1 - x$level) / 2))
tmp_new <- cbind(c(exp(coef[1:nvar]), coef_alpha),
c(exp(coef[1:nvar] - abs(qnorm((1 - x$level) / 2
)) *
se[1:nvar]),
exp(c(
coef[indxAlpha] - mlevel * c(se_alpha)
))),
c(exp(coef[1:nvar] + abs(qnorm((1 - x$level) / 2
)) *
se[1:nvar]),
exp(coef[indxAlpha] + mlevel * c(se_alpha))))
}
} else{
index <- c(1:nvar)
tmp_new <- cbind(exp(coef[1:nvar]),
exp(coef[1:nvar] - abs(qnorm((1 - x$level) / 2
)) * se[1:nvar]),
exp(coef[1:nvar] + abs(qnorm((1 - x$level) / 2
)) * se[1:nvar]))
}
dimnames(tmp_new) <- list(names(coef[index]),
c(
"exp(coef)",
paste("lower", x$level, sep = " "),
paste("upper", x$level, sep = " ")
))
cat("\n")
if (x$pophaz != "classic") {
if (x$pophaz == "rescaled") {
cat("\n")
cat(
"Excess hazard ratio(s)\n(proportional effect variable(s) for exess hazard ratio(s))\n"
)
lines_al <-
which(stringr::str_detect(rownames(tmp_new), pattern = "alpha"))
print(tmp_new[-c(lines_al), ], digits = digits + 1)
cat("\n")
cat("and rescaled parameter on population hazard \n")
tmp_new_alpha <- matrix(tmp_new[c(lines_al), ], nrow = 1)
dimnames(tmp_new_alpha)[[1]] <- "alpha"
dimnames(tmp_new_alpha)[[2]] <- dimnames(tmp_new)[[2]]
print(tmp_new_alpha, digits = digits + 1)
} else if (x$pophaz == "corrected" &
x$add.rmap.cut$breakpoint == FALSE) {
cat("\n")
cat(
"Excess hazard hazard ratio(s)\n(proportional effect variable(s) for exess hazard ratio(s))\n"
)
lines_al <-
which(stringr::str_detect(rownames(tmp_new), pattern = "alpha"))
print(tmp_new[-c(lines_al), ], digits = digits + 1)
cat("\n")
cat("and corrected scale parameters on population hazard \n")
print(tmp_new[c(lines_al), ], digits = digits + 1)
} else if (x$pophaz == "corrected" &
x$add.rmap.cut$breakpoint == TRUE) {
cat("\n")
cat(
"Excess hazard hazard ratio(s)\n(proportional effect variable(s) for exess hazard ratio(s))\n"
)
lines_al <-
which(stringr::str_detect(rownames(tmp_new), pattern = "alpha"))
print(tmp_new[-c(lines_al), ], digits = digits + 1)
cat("\n")
cat(
"and corrected scale parameters on population hazard \n (non proportional correction using breakpoint approach)\n"
)
cat("\n")
print(tmp_new[c(lines_al), ], digits = digits + 1)
if (!is.na(x$add.rmap.cut$cut[1])) {
n_break <- length(x$add.rmap.cut$cut)
n_int <- 1 + n_break
}
cat("\n")
}
} else{
cat("\n")
cat(
"Excess hazard hazard ratio(s)\n(proportional effect variable(s) for exess hazard ratio(s))\n"
)
print(tmp_new, digits = digits + 1)
}
logtest <- (-2 * (x$loglik[1] - x$loglik[2]))
df <- length(x$coef)
cat("\n")
cat("number of observations:",
paste0(format(x$n), "; "),
"number of events:",
x$n.events)
cat("\n")
cat(
"Likelihood ratio test: ",
format(round(logtest, 2)),
" on ",
df,
" degree(s) of freedom,",
" p=",
format(1 - pchisq(logtest, df)),
sep = ""
)
cat("\n")
}
if (sum(x$cov.test) > 0) {
cat("\n")
cat("Results of tests for '",
names(coef)[grep("T", as.character(x$cov.test))], "' equal to 0")
cat("\n")
cat(
" Likelihood ratio test=",
format(round(x$loglik.test, 2)),
" on ",
x$cov.df,
" degree(s) of freedom,",
" p=",
format(1 - pchisq(x$loglik.test, x$cov.df)),
sep = ""
)
cat("\n")
cat(
" Wald test=",
format(round(x$wald.test, 2)),
" on ",
x$cov.df,
" degree(s) of freedom,",
" p=",
format(1 - pchisq(x$wald.test, x$cov.df)),
sep = ""
)
cat("\n")
cat(
" Score test=",
format(round(x$score.test, 2)),
" on ",
x$cov.df,
" degree(s) of freedom,",
" p=",
format(1 - pchisq(x$score.test, x$cov.df)),
sep = ""
)
cat("\n")
cat("number of observations:",
format(x$n),
"number of events:",
x$n.events)
cat("\n")
}
    if (any(tmp_new[, paste("lower", x$level, sep = " ")] < 0)) {
      warning("\nlower ", x$level, " CI approximation may be incorrect")
    }
invisible()
}
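
# Illustrative sketch (hypothetical values, single-parameter case): the two
# ci_type options back-transform a log-scale estimate mu with variance s2
# differently. "lognormal" reports the lognormal mean exp(mu + s2/2) with
# standard error sqrt(exp(2*mu + s2) * (exp(s2) - 1)); "delta.method" reports
# exp(mu) with standard error exp(mu) * sqrt(s2).
if (FALSE) {
  mu <- log(0.25); s2 <- 0.04
  c(lognormal    = exp(mu + s2 / 2),
    se_lognormal = sqrt(exp(2 * mu + s2) * (exp(s2) - 1)),
    delta        = exp(mu),
    se_delta     = exp(mu) * sqrt(s2))
}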
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/print.xhaz.constant.R |
#' @title A print.predxhaz Function used to print an object of class predxhaz
#'
#' @description This function prints the output of the predict function
#'
#' @param x an object of class predxhaz
#'
#' @param ... other parameters used for print function
#'
#' @return an object of class data.frame containing the following components:
#'
#'
#' \item{times.pts}{The time at which the estimations of excess hazard and net
#' survival are predicted}
#'
#' \item{hazard}{the predicted excess hazard at the fixed times}
#'
#' \item{survival}{the predicted net survival at the fixed times}
#'
#' @keywords print.predxhaz
#'
#' @examples
#'
#' \donttest{
#'
#' library("xhaz")
#' library("survexp.fr")
#' library("splines")
#'
#' data("dataCancer", package = "xhaz") # load the data set in the package
#'
#' fit.phBS <- xhaz(
#' formula = Surv(obs_time_year, event) ~ ageCentre + immuno_trt,
#' data = dataCancer, ratetable = survexp.fr,
#' interval = c(0, NA, NA, max(dataCancer$obs_time_year)),
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#'
#' fit.phBS
#'
#'
#' predicted <- predict(object = fit.phBS,
#' new.data = dataCancer[1:10,],
#' times.pts = c(seq(0,10,1)),
#' baseline = TRUE)
#'
#'
#'
#' #a list of predicted hazard and survival at different time points
#' print(predicted)
#'
#'
#' #predicted hazard and survival at time points 10 years
#' print(predicted[[10]])
#' }
#'
#' @export
print.predxhaz <- function(x, ...)
{
if (any(class(x) == "predxhaz")) {
cl <- try(attributes(x)$call)
if (!is.null(cl)) {
cat("Call:\n")
dput(cl)
cat("\n")
}
attributes(x) <- NULL
print(x, ...)
invisible()
}
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/print.xhaz.predxhaz.R |
#' @title qbs function
#'
#' @description a function indicating which covariates have a time-dependent
#' effect in the formula.
#'
#' @param x a covariate to be considered in the \code{xhaz} formula with a
#' time-dependant effect. Quadratic B-splines with two interior knots are used.
#'
#' @return No return value, called for side effects.
#'
#' @keywords qbs
#'
#' @examples
#'
#' \donttest{
#'
#' library("xhaz")
#' library("numDeriv")
#' library("survexp.fr")
#' library("splines")
#'
#' fit.tdphBS <- xhaz(
#' formula = Surv(obs_time_year, event) ~ ageCentre + qbs(immuno_trt),
#' data = dataCancer, ratetable = survexp.fr,
#' interval = c(0, NA, NA, max(dataCancer$obs_time_year)),
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#' print(fit.tdphBS)
#' }
#'
#' @export
qbs <- function(x) {
  # evaluate and return the covariate unchanged; xhaz() detects qbs() in the
  # formula to flag a time-dependent effect for this covariate
  x
}
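
# Illustrative sketch (not the package's actual parsing code): a wrapper such
# as qbs() leaves a visible mark in the formula's term labels, so a fitting
# function can locate the time-dependent terms with a simple pattern match.
if (FALSE) {
  f <- Surv(time, status) ~ age + qbs(trt)
  labs <- attr(terms(f), "term.labels")
  grepl("^qbs\\(", labs)  # TRUE flags the time-dependent term
}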
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/qbs.R |
#' Simulated data with cause-of-death information and non-comparability bias in terms of individuals' expected hazard
#'
#' Simulated data
#'
#'
#' @docType data
#'
#' @usage data(rescaledData)
#'
#' @format This dataset contains the following variables:
#' \describe{
#' \item{time}{Follow-up time (months)}
#' \item{status}{Vital status}
#' \item{age}{Age at diagnosis}
#'  \item{age.c}{Centered age}
#'  \item{sex}{Sex (Female, Male)}
#'  \item{hormTh}{Treatment group variable}
#'  \item{date}{Date of diagnosis}
#'
#' }
#'
#'
#' @keywords datasets
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#'
#' @examples
#' data(rescaledData)
#' summary(rescaledData)
"rescaledData"
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/rescaledData.R |
#' Simulated data with cause-of-death information in a long-term follow-up setting, without non-comparability bias in terms of individuals' expected hazard
#'
#' Simulated data
#'
#'
#' @docType data
#'
#' @usage data(simuData)
#'
#' @format This dataset contains the following variables:
#' \describe{
#' \item{age}{Age at diagnosis}
#' \item{agec}{Centered age}
#'  \item{sex}{Sex (Female, Male)}
#'  \item{race}{Race}
#'  \item{date}{Date of diagnosis}
#' \item{time}{Follow-up time (months)}
#' \item{time_year}{Follow-up time (years)}
#' \item{status}{Vital status}
#'
#' }
#'
#'
#' @keywords datasets
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group.
#' More accurate cancer-related excess mortality through correcting
#' background mortality for extra variables.
#' Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#'
#'
#' @examples
#' data(simuData)
#' summary(simuData)
"simuData"
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/simuData.R |
#' @title A summary.bsplines Function used to summarize an object of class \code{bsplines}
#'
#' @description This function presents the estimated coefficients of the
#' baseline excess hazard and of the covariate effects
#'
#' @param object an object of class \code{bsplines}
#'
#'
#' @param ... additional parameters which can be used in the \code{summary}
#' function
#'
#' @return Estimated parameters of the model in different scales for interpretation purposes.
#'
#' @keywords summary.bsplines
#'
#' @seealso \code{\link{xhaz}}, \code{\link{summary.bsplines}}, \code{\link{plot.bsplines}}
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group.
#' More accurate cancer-related excess mortality through correcting
#' background mortality for extra variables.
#' Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#' Mba RD, Goungounga JA, Grafféo N, Giorgi R; CENSUR working survival group.
#' Correcting inaccurate background mortality in excess hazard models
#' through breakpoints. BMC Med Res Methodol. 2020 Oct 29;20(1):268.
#' doi: 10.1186/s12874-020-01139-z. PMID: 33121436; PMCID: PMC7596976.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/33121436/}{PubMed})
#'
#'
#' @examples
#'
#' \donttest{
#' library("xhaz")
#' library("survival")
#' library("numDeriv")
#' library("survexp.fr")
#' library("splines")
#'
#' data("dataCancer", package = "xhaz") # load the data set in the package
#'
#' fit.phBS <- xhaz(
#' formula = Surv(obs_time_year, event) ~ ageCentre + immuno_trt,
#' data = dataCancer, ratetable = survexp.fr,
#' interval = c(0, NA, NA, max(dataCancer$obs_time_year)),
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#' summary(fit.phBS)
#' }
#'
#' @importFrom stats printCoefmat
#'
#' @export
summary.bsplines <- function(object, ...)
{
digits <- max(options()$digits - 4, 3)
cl <- try(object$call)
if (!is.null(cl)) {
cat("Call:\n")
dput(cl)
cat("\n")
}
if (!is.null(object$fail)) {
cat(" xhaz.bsplines failed.", object$fail, "\n")
return()
}
savedig <- options(digits = digits)
on.exit(options(savedig))
if (!is.null(object$add.rmap)) {
nalpha <- nlevels(object$add.rmap)
} else{
nalpha <- 0
}
if (nalpha) {
if (nalpha == 1) {
indxAlpha <- which(stringr::str_detect(names(object$coefficients),
pattern = "alpha"))
object$mycoef <- object$coefficients
names(object$mycoef)[indxAlpha] <- "alpha"
names(object$coefficients)[indxAlpha] <- "log(alpha)"
}
else{
object$mycoef <- object$coefficients
indxAlpha <-
which(stringr::str_detect(names(object$coefficients),
pattern = "alpha"))
names(object$coefficients)[c(indxAlpha)] <- paste("log(",
paste0('alpha.',
levels(object$add.rmap)),
")",
sep = "")
names(object$mycoef)[c(indxAlpha)] <-
paste0('alpha.', levels(object$add.rmap))
}
coef <- c(object$coefficients)
} else{
coef <- c((object$coefficients))
}
object$var <- object$varcov
# WARNING: works for one single alpha
if (nalpha) {
se <- sqrt((c(diag(object$var[1:(length(diag(object$var)) - 1),
1:(length(diag(object$var)) - 1)]),
(object$var[length(diag(object$var)),
length(diag(object$var))]))))
}
else{
se <- sqrt((diag(object$var)))
}
if (is.null(coef) | is.null(se))
stop("Input is not valid")
if (nalpha) {
tmp <- cbind(coef,
se,
(c((object$coef[1:length(object$coef) - 1]),
(object$coef[length(object$coef)])) -
abs(qnorm((1 - object$level) / 2
)) * sqrt((
diag(object$var)
))),
(c((object$coef[1:length(object$coef) - 1]),
(object$coef[length(object$coef)])) +
abs(qnorm((1 - object$level) / 2
)) * sqrt((
diag(object$var)
))),
coef / se,
signif(1 - pchisq((coef / se) ^ 2, 1), digits - 1))
dimnames(tmp) <- list(names(coef),
c(
"coef",
"se(coef)",
paste("lower", object$level, sep = " "),
paste("upper", object$level, sep = " "),
"z",
"Pr(>|z|)"
))
} else{
tmp <-
cbind(coef,
se,
(coef - abs(qnorm((1 - object$level) / 2
)) * se),
(coef + abs(qnorm((1 - object$level) / 2
)) * se),
coef / se,
signif(1 - pchisq((coef / se) ^ 2, 1), digits - 1))
dimnames(tmp) <- list(names(coef),
c(
"coef",
"se(coef)",
paste("lower", object$level, sep = " "),
paste("upper", object$level, sep = " "),
"z",
"Pr(>|z|)"
))
}
cat("\n")
printCoefmat(
tmp,
P.values = TRUE,
digits = digits,
signif.stars = TRUE,
na.print = "NA",
...
)
nPH <- object$nPH
nTD <- object$nTD
if (nPH > 0) {
tmp <-
exp(tmp)[(5 + 5 * nTD + 1):(5 + 5 * nTD + nPH + nalpha), c(1, 3, 4)]
if (is.null(dim(tmp))) {
tmp <- t(as.matrix(tmp))
}
dimnames(tmp) <-
list(c(names(coef[(5 + 5 * nTD + 1):(5 + 5 * nTD + nPH)]),
names(object$mycoef[(5 + 5 * nTD + nPH + 1):(5 + 5 * nTD + nPH + nalpha)])),
c(
"exp(coef)",
paste("lower", object$level, sep = " "),
paste("upper", object$level, sep = " ")
))
cat("\n")
if (object$pophaz != "classic") {
if (object$pophaz == "rescaled") {
cat(
"Excess hazard and expected hazard ratio(s) \n(proportional effect variable(s) for exess hazard ratio(s) )\n"
)
print(tmp, digits = digits + 1)
cat("\n")
cat(
"Excess hazard ratio(s)\n(proportional effect variable(s) for exess hazard ratio(s))\n"
)
lines_al <-
which(stringr::str_detect(rownames(tmp), pattern = "alpha"))
print(tmp[-c(lines_al), ], digits = digits + 1)
cat("\n")
cat("and rescaled parameter on population hazard \n")
tmp_new_alpha <- matrix(tmp[c(lines_al), ], nrow = 1)
dimnames(tmp_new_alpha)[[1]] <- "alpha"
dimnames(tmp_new_alpha)[[2]] <- dimnames(tmp)[[2]]
print(tmp_new_alpha, digits = digits + 1)
}
} else{
cat(
"Excess hazard ratio(s) \n(proportional effect variable(s) for exess hazard ratio(s) )\n"
)
print(tmp, digits = digits + 1)
}
}
cat("\n")
cat(
"number of observations:",
paste0(format(object$n), "; "),
"number of events:",
object$n.events
)
cat("\n")
cat(
"log-likelihood: ",
format(object$loglik),
" (for ",
length(object$coef),
" degree(s) of freedom)"
)
cat("\n")
if (sum(object$cov.test) > 0) {
cat("\n")
cat(
" Likelihood ratio test of PH effect for '",
gsub("\\(|\\)", "",
as.character(stringr::str_remove((
attr(object$terms, "term.labels")[object$cov.test]
),
"qbs"))),
"'=",
format(round(object$loglik.test, 2)),
",\n on ",
object$cov.df,
" degree(s) of freedom,"
)
cat(" p=",
format(1 - pchisq(object$loglik.test, object$cov.df)), sep =
"")
}
invisible()
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/summary.xhaz.bsplines.R |
#' @title A summary.constant Function used to summarize an object of class \code{xhaz.constant}
#'
#' @description This function presents the estimated coefficients of the
#' baseline excess hazard and of the covariate effects
#'
#' @param object an object of class xhaz.constant
#'
#' @param ci_type method for confidence intervals calculation
#'
#'
#' @param ... additional parameters which can be used in the \code{print}
#' function
#'
#'
#' @return Estimated parameters of the model in different scales for interpretation purposes.
#'
#'
#' @keywords summary.constant
#'
#' @seealso \code{\link{xhaz}}, \code{\link{summary.constant}}, \code{\link{print.bsplines}}
#'
#' @examples
#'
#' library("xhaz")
#' library("numDeriv")
#' data("simuData", package = "xhaz") # load the data sets 'simuData'
#'
#' # Esteve et al. model: baseline excess hazard is a piecewise function
#' # linear and proportional effects for the covariates on
#' # baseline excess hazard.
#'
#' levels(simuData$sex) <- c("male", "female")
#'
#' set.seed(1980)
#' simuData2 <- simuData[sample(nrow(simuData), size = 500), ]
#'
#' fit.estv2 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData2,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, NA, NA, NA, 6),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant", pophaz = "classic")
#'
#'
#'
#'
#'
#' summary(fit.estv2)
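#'
#' # a usage sketch: delta-method confidence intervals instead of the
#' # lognormal default
#' summary(fit.estv2, ci_type = "delta.method")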
#'
#' @importFrom stats printCoefmat
#' @export
summary.constant <-
function(object,
ci_type = "lognormal",
...)
{
digits <- max(options()$digits - 4, 3)
if (is.null(object$coefficients)) {
return(object)
}
cl <- try(object$call)
if (!is.null(cl)) {
cat("Call:\n")
dput(cl)
cat("\n")
}
if (!is.null(object$fail)) {
cat(" Esteveph failed.", object$fail, "\n")
return()
}
savedig <- options(digits = digits)
on.exit(options(savedig))
if (!is.null(object$add.rmap)) {
nalpha <- nlevels(object$add.rmap)
if (nalpha == 1) {
indxAlpha <- which(stringr::str_detect(names(object$coefficients),
pattern = "alpha"))
names(object$coef)[indxAlpha] <- "log(alpha)"
}
else{
indxAlpha <- which(stringr::str_detect(names(object$coefficients),
pattern = "alpha"))
if (object$add.rmap.cut$breakpoint == TRUE) {
initnames <- names(object$coef)[c(indxAlpha)]
names(object$coef)[c(indxAlpha)] <- paste("log(", initnames,
")",
sep = "")
} else {
names(object$coef)[c(indxAlpha)] <- paste("log(", paste0('alpha.',
levels(object$add.rmap)),
")",
sep = "")
}
nalpha <- length(indxAlpha)
}
} else{
nalpha <- 0
}
nstrata <- ifelse(is.null(attr(object$terms, "nstrata")),
1,
attr(object$terms, "nstrata"))
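  # number of regression coefficients = all coefficients minus the
  # piecewise-constant baseline parameters (one set per stratum) minus the
  # alpha correction term(s)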
nvar <-
length(object$coef) - nstrata * (length(object$interval) - 1) - nalpha
coef <- object$coef
object$var <- object$varcov
se <- numeric(length(coef))
if (nvar > 0) {
if (nvar != 1) {
se[1:nvar] <- sqrt(diag(object$var[1:nvar, 1:nvar]))
}
else{
se[1:nvar] <- sqrt(object$var[1])
}
}
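  # Two ways to transform the log-scale baseline parameters for reporting:
  # - "delta.method": se(exp(theta)) approximated via the delta method
  # - "lognormal":    lognormal moments, i.e. mean exp(theta + V/2) and
  #                   variance exp(2*theta + V) * (exp(V) - 1)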
if (ci_type == "delta.method") {
if (!is.null(object$add.rmap)) {
se[(nvar + 1):(length(object$coef) - length(indxAlpha))] <- c(sqrt(exp(object$coef[(nvar + 1):(length(object$coef) - length(indxAlpha))]) %*%
object$var[(nvar + 1):(length(object$coef) - length(indxAlpha)),
(nvar + 1):(length(object$coef) - length(indxAlpha))] %*%
exp(object$coef[(nvar + 1):(length(object$coef) - length(indxAlpha))])))
coef[(nvar + 1):(length(object$coef) - length(indxAlpha))] <-
exp(coef[(nvar + 1):(length(object$coef) - length(indxAlpha))])
if (length(indxAlpha) > 1) {
se[c(indxAlpha)] <-
sqrt(diag(object$var[c(indxAlpha), c(indxAlpha)]))
} else{
se[c(indxAlpha)] <- (sqrt(object$var[c(indxAlpha), c(indxAlpha)]))
}
} else{
se[(nvar + 1):length(object$coef)] <- sqrt(exp(object$coef[(nvar + 1):length(object$coef)]) %*%
object$var[(nvar + 1):length(object$coef),
(nvar + 1):length(object$coef)] %*%
exp(object$coef[(nvar + 1):length(object$coef)]))
coef[(nvar + 1):length(object$coef)] <-
exp(coef[(nvar + 1):length(object$coef)])
}
} else if (ci_type == "lognormal") {
#lognormal based CI
if (!is.null(object$add.rmap)) {
se[(nvar + 1):(length(object$coef) - length(indxAlpha))] <-
c(sqrt(diag((
exp(2 * object$coef[(nvar + 1):(length(object$coef) - length(indxAlpha))] +
object$var[(nvar + 1):(length(object$coef) - length(indxAlpha)),
(nvar + 1):(length(object$coef) - length(indxAlpha))])
) *
(
exp(object$var[(nvar + 1):(length(object$coef) - length(indxAlpha)),
(nvar + 1):(length(object$coef) - length(indxAlpha))]) - 1
))))
coef[(nvar + 1):(length(object$coef) - length(indxAlpha))] <-
exp(coef[(nvar + 1):(length(object$coef) - length(indxAlpha))] + 1 / 2 *
diag(object$var[(nvar + 1):(length(object$coef) - length(indxAlpha)), (nvar + 1):(length(object$coef) - length(indxAlpha))]))
if (length(indxAlpha) > 1) {
se[c(indxAlpha)] <-
(sqrt(diag(object$var[c(indxAlpha), c(indxAlpha)])))
} else{
se[c(indxAlpha)] <- (sqrt(object$var[c(indxAlpha), c(indxAlpha)]))
}
} else{
se[(nvar + 1):length(object$coef)] <- sqrt(diag(exp(2 * object$coef[(nvar + 1):length(object$coef)] + (object$var[(nvar + 1):length(object$coef),
(nvar + 1):length(object$coef)])) * (exp(object$var[(nvar + 1):length(object$coef),
(nvar + 1):length(object$coef)]) - 1)))
coef[(nvar + 1):length(object$coef)] <-
exp(coef[(nvar + 1):length(object$coef)] + 1 / 2 * diag(object$var[(nvar + 1):length(object$coef), (nvar + 1):length(object$coef)]))
}
}
  if (is.null(coef) || is.null(se))
    stop("Input is not valid")
tmp <-
cbind(
coef,
se,
coef - abs(qnorm((1 - object$level) / 2)) * se,
coef + abs(qnorm((1 - object$level) / 2)) * se,
coef / se,
signif(1 - pchisq((coef / se) ^ 2, 1), digits - 1)
)
dimnames(tmp) <-
list(names(coef),
c(
"coef",
"se(coef)",
paste("lower", object$level, sep = " "),
paste("upper", object$level, sep = " "),
"z",
"Pr(>|z|)"
))
cat("\n")
printCoefmat(
tmp,
P.values = TRUE,
digits = digits,
signif.stars = TRUE,
na.print = "NA",
...
)
if (nvar > 0) {
if (!is.null(object$add.rmap)) {
index <- c(1:nvar, indxAlpha)
nalpha <- nlevels(object$add.rmap)
if (nalpha == 1) {
names(object$coef)[indxAlpha] <- "alpha"
}
else{
if (object$add.rmap.cut$breakpoint == FALSE) {
names(object$coef)[c(indxAlpha)] <-
paste0('alpha.', levels(object$add.rmap))
} else {
names(object$coef)[c(indxAlpha)] <- initnames
}
}
}
coef <- object$coef
if (!is.null(object$add.rmap)) {
if (ci_type == "delta.method") {
se_alpha <- sapply(1:length(indxAlpha), function(i)
(matrix(exp(
object$coef[c(indxAlpha[i])]
)) %*% sqrt(object$var[indxAlpha[i], indxAlpha[i]])))
coef_alpha <- exp(coef[indxAlpha])
mlevel <- abs(qnorm((1 - object$level) / 2))
tmp_new <- cbind((exp(coef[index])),
c(exp(coef[1:nvar] - abs(qnorm((1 - object$level) / 2
)) * se[1:nvar]),
c(coef_alpha - mlevel * c(se_alpha))),
c(exp(coef[1:nvar] + abs(qnorm((1 - object$level) / 2
)) * se[1:nvar]),
coef_alpha + mlevel * c(se_alpha)))
} else {
#lognormal based
if (length(indxAlpha) > 1) {
se_alpha <- sqrt(diag(object$var[indxAlpha, indxAlpha]))
} else{
se_alpha <- sqrt((object$var[indxAlpha, indxAlpha]))
}
coef_alpha <- exp(coef[indxAlpha])
mlevel <- abs(qnorm((1 - object$level) / 2))
tmp_new <- cbind(c(exp(coef[1:nvar]), coef_alpha),
c(exp(coef[1:nvar] - abs(qnorm((1 - object$level) / 2
)) *
se[1:nvar]),
exp(c(
coef[indxAlpha] - mlevel * c(se_alpha)
))),
c(exp(coef[1:nvar] + abs(qnorm((1 - object$level) / 2
)) *
se[1:nvar]),
exp(coef[indxAlpha] + mlevel * c(se_alpha))))
}
} else{
index <- c(1:nvar)
tmp_new <- cbind(exp(coef[1:nvar]),
exp(coef[1:nvar] - abs(qnorm((1 - object$level) / 2
)) * se[1:nvar]),
exp(coef[1:nvar] + abs(qnorm((1 - object$level) / 2
)) * se[1:nvar]))
}
dimnames(tmp_new) <- list(names(coef[index]),
c(
"exp(coef)",
paste("lower", object$level, sep = " "),
paste("upper", object$level, sep = " ")
))
cat("\n")
if (object$pophaz != "classic") {
if (object$pophaz == "rescaled") {
cat("\n")
      cat(
        "Excess hazard ratio(s)\n(proportional effect variable(s) for excess hazard ratio(s))\n"
      )
lines_al <-
which(stringr::str_detect(rownames(tmp_new), pattern = "alpha"))
print(tmp_new[-c(lines_al), ], digits = digits + 1)
cat("\n")
cat("and rescaled parameter on population hazard \n")
tmp_new_alpha <- matrix(tmp_new[c(lines_al), ], nrow = 1)
dimnames(tmp_new_alpha)[[1]] <- "alpha"
dimnames(tmp_new_alpha)[[2]] <- dimnames(tmp_new)[[2]]
print(tmp_new_alpha, digits = digits + 1)
} else if (object$pophaz == "corrected" &
object$add.rmap.cut$breakpoint == FALSE) {
cat("\n")
      cat(
        "Excess hazard ratio(s)\n(proportional effect variable(s) for excess hazard ratio(s))\n"
      )
lines_al <-
which(stringr::str_detect(rownames(tmp_new), pattern = "alpha"))
print(tmp_new[-c(lines_al), ], digits = digits + 1)
cat("\n")
cat("and corrected scale parameters on population hazard \n")
print(tmp_new[c(lines_al), ], digits = digits + 1)
} else if (object$pophaz == "corrected" &
object$add.rmap.cut$breakpoint == TRUE) {
cat("\n")
      cat(
        "Excess hazard ratio(s)\n(proportional effect variable(s) for excess hazard ratio(s))\n"
      )
lines_al <-
which(stringr::str_detect(rownames(tmp_new), pattern = "alpha"))
print(tmp_new[-c(lines_al), ], digits = digits + 1)
cat("\n")
cat(
"and corrected scale parameters on population hazard \n (non proportional correction using breakpoint approach)\n"
)
cat("\n")
print(tmp_new[c(lines_al), ], digits = digits + 1)
if (!is.na(object$add.rmap.cut$cut[1])) {
n_break <- length(object$add.rmap.cut$cut)
n_int <- 1 + n_break
}
cat("\n")
}
} else{
cat("\n")
    cat(
      "Excess hazard ratio(s)\n(proportional effect variable(s) for excess hazard ratio(s))\n"
    )
print(tmp_new, digits = digits + 1)
}
logtest <- (-2 * (object$loglik[1] - object$loglik[2]))
df <- length(object$coef)
cat("\n")
cat("number of observations:",
paste0(format(object$n), "; "),
"number of events:",
object$n.events)
cat("\n")
cat(
"Likelihood ratio test: ",
format(round(logtest, 2)),
" on ",
df,
" degree(s) of freedom,",
" p=",
format(1 - pchisq(logtest, df)),
sep = ""
)
cat("\n")
}
if (sum(object$cov.test) > 0) {
cat("\n")
cat("Results of tests for '",
names(coef)[grep("T", as.character(object$cov.test))], "' equal to 0")
cat("\n")
cat(
" Likelihood ratio test=",
format(round(object$loglik.test, 2)),
" on ",
object$cov.df,
" degree(s) of freedom,",
" p=",
format(1 - pchisq(object$loglik.test, object$cov.df)),
sep = ""
)
cat("\n")
cat(
" Wald test=",
format(round(object$wald.test, 2)),
" on ",
object$cov.df,
" degree(s) of freedom,",
" p=",
format(1 - pchisq(object$wald.test, object$cov.df)),
sep = ""
)
cat("\n")
cat(
" Score test=",
format(round(object$score.test, 2)),
" on ",
object$cov.df,
" degree(s) of freedom,",
" p=",
format(1 - pchisq(object$score.test, object$cov.df)),
sep = ""
)
cat("\n")
cat("number of observations:",
format(object$n),
"number of events:",
object$n.events)
cat("\n")
}
  lower_col <- paste("lower", object$level, sep = " ")
  if (any(tmp_new[, lower_col] < 0)) {
    warning("\nlower ", object$level, " CI approximation may be incorrect")
  }
invisible()
}
# ---- end of file: R/summary.xhaz.constant.R ----
#' @import survival
survSplit2 <-
function(formula, data, subset, na.action = na.pass, cut, start = "tstart",
id, zero = 0, episode, end = "tstop",
event = "event") {
Call <- match.call()
if (missing(formula) || is.data.frame(formula)) {
if (missing(data)) {
if (!missing(formula)) {
names(Call)[[2]] <- "data"
data <- formula
}
else stop("a data frame is required")
}
if (missing(end) || missing(event))
stop("either a formula or the end and event arguments are required")
if (!(is.character(event) && length(event) == 1 && event %in%
names(data)))
stop("'event' must be a variable name in the data set")
if (!(is.character(end) && length(end) == 1 && end %in%
names(data)))
stop("'end' must be a variable name in the data set")
if (!(is.character(start) && length(start) == 1))
stop("'start' must be a variable name")
if (start %in% names(data))
temp <- paste(start, end, event, sep = ",")
else temp <- paste(end, event, sep = ",")
formula <- as.formula(paste("Surv(", temp, ")~ ."))
}
else if (missing(formula))
stop("either a formula or the end and event arguments are required")
indx <- match(c("data", "weights", "subset"),
names(Call), nomatch = 0)
temp <- Call[c(1L, indx)]
temp$formula <- formula
temp$na.action <- na.action
temp[[1L]] <- quote(stats::model.frame)
mf <- eval.parent(temp)
Y <- model.response(mf)
states <- attr(Y, "states")
if (!is.Surv(Y))
stop("the model must have a Surv object as the response")
if (!(attr(Y, "type") %in% c("right", "mright",
"counting", "mcounting")))
stop(paste("not valid for", attr(Y, "type"),
"censored survival data"))
nY <- ncol(Y)
ymiss <- is.na(Y)
if (nY == 2) {
if (any(Y[!ymiss, 1] <= zero))
stop("'zero' parameter must be less than any observed times")
Y <- cbind(zero, Y)
}
temp <- (Y[!ymiss, 1] >= Y[!ymiss, 2])
if (any(temp))
stop("start time must be < stop time")
if (!is.numeric(cut) || any(!is.finite(cut)))
stop("cut must be a vector of finite numbers")
cut <- unique(sort(cut))
ntimes <- length(cut)
n <- nrow(data)
if (!missing(id)) {
if (!is.character(id))
stop("id must be a variable name")
if (id %in% names(mf))
stop("the suggested id name is already present")
id <- make.names(id)
if (id %in% names(mf))
stop("the suggested id name is already present")
mf[[id]] <- 1:nrow(mf)
}
storage.mode(Y) <- "double"
Csurvsplit <- Csurvsplit2()
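  # the C routine splits each row's interval, which runs on the attained-age
  # scale from entry age (mf$tage) to exit age (Y[, 2]), at the supplied cuts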
index <- .Call(Csurvsplit, mf$tage, Y[, 2], as.double(cut))
newdata <- mf[index$row, -1, drop = FALSE]
row.names(newdata) <- NULL
attr(newdata, "terms") <- NULL
status <- Y[index$row, 3]
status[index$censor] <- 0
if (!is.null(states))
status <- factor(status, labels = c("censor", states))
if (inherits(formula[[2]], "call") && formula[[2]][[1]] ==
as.name("Surv")) {
temp <- match.call(Surv, formula[[2]])
if (nY == 2) {
if (missing(end) && !is.null(temp[["time"]]) &&
is.name(temp[["time"]]))
end <- as.character(temp[["time"]])
if (missing(event) && !is.null(temp$time2) && is.name(temp$time2))
event <- as.character(temp$time2)
if (missing(event) && !is.null(temp$event) && is.name(temp$event))
event <- as.character(temp$event)
}
else {
      if (missing(start) && !is.null(temp[["time"]]) &&
          is.name(temp[["time"]]))
        start <- as.character(temp[["time"]])
if (missing(end) && !is.null(temp$time2) && is.name(temp$time2))
end <- as.character(temp$time2)
if (missing(event) && !is.null(temp$event) && is.name(temp$event))
event <- as.character(temp$event)
if (missing(start) && !is.null(temp$time) && is.name(temp$time))
start <- as.character(temp$time)
}
newdata[[start]] <- index$start
newdata[[end]] <- index$end
newdata[[event]] <- status
}
else {
    if (!inherits(formula[[2]], "name"))
      stop("left hand side not recognized")
temp <- as.character(formula[[2]])
newdata[temp] <- Surv(index$start, index$end, status)
}
if (!missing(episode)) {
if (!is.character(episode))
stop("episode must be a character string")
newdata[[make.names(episode)]] <- index$interval + 1
}
newdata
}
# ---- end of file: R/survSplit2.R ----
testM <- function(X,
Y,
ehazard,
ehazardInt,
int,
covtest,
bsplines = bsplines,
init,
control, event, Terms, strats,
add.rmap, add.rmap.cut, ageDiag, ageDC,
optim, trace, speedy, data) {
  if (min(add.rmap.cut$cut) < min(c(data$age, data$age + data$time))) {
    if (max(add.rmap.cut$cut) <= max(c(data$age, data$age + data$time))) {
      stop("Breakpoint(s) below the minimum observed age")
    } else
      stop(
        "Breakpoint(s) both below the minimum and above the maximum observed age"
      )
  } else{
    if (max(add.rmap.cut$cut) > max(c(data$age, data$age + data$time)))
      stop("Breakpoint(s) above the maximum observed age")
  }
fitter <- get("esteve.ph.fit")
res <- try(fitter(X,
Y,
ehazard,
ehazardInt,
int = int,
covtest,
bsplines = bsplines,
init,
control, event, Terms, strats,
add.rmap, add.rmap.cut, ageDiag, ageDC,
optim, trace, speedy), TRUE)
  if (!inherits(res, "try-error")) {
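    # model-selection criteria from the full-model log-likelihood (second
    # element of 'loglik'): AIC = 2k - 2*logL, BIC = k*log(n) - 2*logL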
res$AIC <- (2 * length(res$coefficients) - 2 * (res$loglik))[2]
res$BIC <- (log(length(Y)) * length(res$coefficients) - 2 * (res$loglik))[2]
}
res
}
# ---- end of file: R/testM.R ----
#' @import survival
tosplit <- function(formula = formula,
add.rmap.cut = add.rmap.cut,
data = data, na.action, rmap, interval, subset) {
Call <- match.call()
m <- match.call(expand.dots = FALSE)
# indx <- match(c("formula", "data", "subset", "na.action"),
# names(Call), nomatch = 0)
indx <- match(c("formula", "data", "na.action"),
names(Call), nomatch = 0)
if (indx[1] == 0)
stop("A formula argument is required")
temp <- Call[c(1, indx)]
temp[[1L]] <- as.name("model.frame")
special <- c("strata")
Terms <- if (missing(data)) {
terms(formula, special)
}
else{
terms(formula, special, data = data)
}
temp$formula <- Terms
m <- eval(temp, sys.parent())
if (missing(na.action)) {
na.action <- NULL
} else if (length(attr(m, "na.action"))) {
temp$na.action <- na.pass
m <- eval(temp, sys.parent())
}
if (missing(data)) {
stop("Missing data data frame in which to interpret
the variables named in the formula.")
} else{
if (is.na(match(rmap$age, names(data))))
stop("Must have informations for age on the data set.")
if (is.na(match(rmap$sex, names(data))))
stop("Must have informations for sex on the data set.")
if (is.na(match(rmap$year, names(data))))
stop("Must have informations for date on the data set.")
}
myvarnames <- colnames(model.matrix(Terms, m)[,-1, drop = FALSE])
Y <- model.extract(m, "response")
if (!inherits(Y, "Surv"))
stop("Response must be a survival object.")
attr(Terms, "intercept") <- 1
type <- attr(Y, "type")
if (ncol(Y) == 2) {
time <- Y[, 1]
event <- Y[, 2]
} else{
time <- Y[, 2] - Y[, 1]
event <- Y[, 3]
}
event[time > max(interval, na.rm = TRUE)] <- 0
time[time > max(interval, na.rm = TRUE)] <- max(interval, na.rm = TRUE)
data$tage2 <- data$tage <- data$ageDiag <- ageDiag <- data[, rmap$age]
data$tageDC <- data$ageDC <- ageDC <- ageDiag + time
data2 <- data
data2$id <- 1:nrow(data2)
data2$time_2 <- data2$time_old <- data2$time <- time
data2$tageDC <- data2$ageDC <- data2$age + data2$time
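  # split each individual's follow-up at the attained-age breakpoint(s):
  # survSplit2() cuts on the age scale (tage2 + time_2) and indexes the
  # resulting age bands in 'break_interval'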
tdata2 <- survSplit2(Surv(tage2 + time_2, event == 1) ~ ., data2,
cut = add.rmap.cut$cut,
episode = "break_interval")
tdata2$time <- with(tdata2, c(tstop - tstart))
colnames(tdata2)[which(colnames(tdata2) == "time")] <- toString(Terms[[2]][[2]])
colnames(tdata2)[which(colnames(tdata2) == "event")] <- toString(Terms[[2]][[3]])
return(list(tdata2 = tdata2, Call = Call))
}
# ---- end of file: R/tosplit.R ----
without_breakpoint_without_cut <- function(formula = formula,
data = data,
ratetable = ratetable,
rmap = rmap,
baseline = baseline,
pophaz = pophaz,
only_ehazard = only_ehazard,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
interval = interval,
splitting = splitting,
ratedata = ratedata,
subset = subset,
na.action = na.action,
init = init,
control = control,
optim = optim,
scale = scale,
trace = trace,
speedy = speedy,
nghq = nghq,
m_int = m_int,
rcall = rcall,
...) {
time_elapsed0 <- as.numeric(base::proc.time()[3])
Call <- match.call()
m <- match.call(expand.dots = FALSE)
indx <- match(c("formula", "data"),
names(Call),
nomatch = 0)
if (indx[1] == 0)
stop("A formula argument is required")
temp <- Call[c(1, indx)]
temp[[1L]] <- as.name("model.frame")
special <- c("strata")
Terms <- if (missing(data)) {
terms(formula, special)
}
else{
terms(formula, special, data = data)
}
temp$formula <- Terms
m <- eval(temp, sys.parent())
if (missing(na.action)) {
na.action <- NULL
} else if (length(attr(m, "na.action"))) {
temp$na.action <- na.pass
m <- eval(temp, sys.parent())
}
ehazardInt <- NULL
# controls on data & ratetable parameters
if (missing(ratedata) & missing(ratetable)) {
stop("Missing rate table from general population.")
}
if (missing(data)) {
stop("Missing data data frame in which to interpret
the variables named in the formula.")
} else{
if (is.na(match(rmap$age, names(data))))
stop("Must have informations for age on the data set.")
if (is.na(match(rmap$sex, names(data))))
stop("Must have informations for sex on the data set.")
if (is.na(match(rmap$year, names(data))))
stop("Must have informations for date on the data set.")
}
if (!missing(ratetable)) {
if (is.ratetable(ratetable)) {
varlist <- attr(ratetable, "dimid")
if (is.null(varlist)) {
varlist <- names(attr(ratetable, "dimnames"))
}
if (is.null(attributes(ratetable)$dimid)) {
attributes(ratetable)$dimid <- varlist
}
}
else{
stop("Invalid rate table")
}
varsexID <- try(which(varlist == 'sex'))
conditionVsex <- attr(ratetable, which = "dimnames")[[varsexID]]
if (any(!conditionVsex %in% c('male', 'female'))) {
conditionVsex <-
c('male', 'female')[c(which(conditionVsex %in% c('male', 'female')))]
}
if (!missing(rmap)) {
condition2 <-
add.rmap.cut$breakpoint == TRUE &
is.na(add.rmap.cut$cut[1]) & !is.null(add.rmap.cut$probs)
if ((!splitting & missing(rcall)) | (condition2)) {
rcall <- substitute(rmap)
} else if (!splitting & !missing(rcall)) {
rmap <- eval(rmap)
}
if (!is.call(rcall) || rcall[[1]] != as.name("list"))
stop("Invalid rcall argument")
}
else
rcall <- NULL
temp01 <- match(names(rcall)[-1], varlist)
if (any(is.na(temp01)))
stop("Variable not found in the ratetable:",
(names(rcall))[is.na(temp01)])
temp02 <- match(as.vector(unlist(rmap)), names(data))
if (any(is.na(temp02))) {
stop("Variable not found in the data set:",
(names(rcall))[is.na(temp02)])
}
}
if (pophaz == "corrected") {
if (is.null(add.rmap.cut$breakpoint)) {
stop("Missing breakpoint information")
} else {
if (add.rmap.cut$breakpoint == TRUE) {
if (!is.na(add.rmap.cut$cut[1])) {
          if (min(add.rmap.cut$cut) < min(c(data$age, data$age + data$time))) {
            if (max(add.rmap.cut$cut) <= max(c(data$age, data$age + data$time))) {
              stop("Breakpoint(s) below the minimum observed age")
            } else
              stop(
                "Breakpoint(s) both below the minimum and above the maximum observed age"
              )
          } else{
            if (max(add.rmap.cut$cut) > max(c(data$age, data$age + data$time)))
              stop("Breakpoint(s) above the maximum observed age")
          }
}
}
}
}
if (control$iter.max < 0)
stop("Invalid value for iterations.")
if (control$eps <= 0)
stop("Invalid convergence criteria.")
if (control$level < 0 | control$level > 1)
stop("Invalid value for the level of confidence interval.")
if (missing(init))
init <- NULL
if (missing(interval))
stop("Missing cutpoints definition for intervals.")
if (!is.numeric(interval))
stop("Wrong values for intervals. Must be numeric.")
if (min(interval, na.rm = TRUE) != 0)
stop("First interval must start at 0.")
if (sum((interval < 0) * 1, na.rm = TRUE) > 0)
stop("Negative value is not allowed for interval.")
myvarnames <- colnames(model.matrix(Terms, m)[, -1, drop = FALSE])
qbs_id <- which(stringr::str_detect(c(myvarnames),
pattern = "qbs"))
if (length(qbs_id) > 0) {
if (length(interval) > 4)
stop(
"Interval must have 4 values using bsplines
(2 internal knots plus '0' and the end of the study)."
)
} else{
if (baseline == "bsplines") {
if (length(interval) > 4)
stop(
"Interval must have 4 values using bsplines
(2 internal knots plus '0' and the end of the study)."
)
}
}
Y <- model.extract(m, "response")
if (!inherits(Y, "Surv"))
stop("Response must be a survival object.")
strats <- attr(Terms, "specials")$strata
dropx <- NULL
if (length(strats)) {
if (length(qbs_id) > 0)
stop("Strata function is not yet implemented for the B-splines model.")
temp <- untangle.specials(Terms, "strata", 1)
dropx <- c(dropx, temp$terms)
if (length(temp$vars) == 1)
strata.keep <- m[[temp$vars]]
else
strata.keep <- strata(m[, temp$vars], shortlabel = TRUE)
strats <- as.numeric(strata.keep)
attr(Terms, "nstrata") <- max(strats)
}
attr(Terms, "intercept") <- 1
if (length(dropx)) {
X <- model.matrix(Terms[-dropx], m)[,-1, drop = FALSE]
} else{
X <- model.matrix(Terms, m)[,-1, drop = FALSE]
}
if (length(qbs_id) > 0) {
z_bsplines <-
as.data.frame(model.matrix(Terms, m)[, -1, drop = FALSE][, c(qbs_id)])
z_bsplines_names <- stringr::str_remove(myvarnames[c(qbs_id)],
"qbs")
colnames(z_bsplines) <- gsub("\\(|\\)",
"",
as.character(z_bsplines_names))
colnames(X)[c(qbs_id)] <- colnames(z_bsplines)
z_bsplines <- as.matrix(z_bsplines)
z_bsplines_vect <- rep(TRUE, ncol(z_bsplines))
z_X_vect <- rep(FALSE, ncol(X))
z_X_vect[c(qbs_id)] <- z_bsplines_vect
covtest <- z_X_vect
}else{
covtest <- rep(FALSE, ncol(X))
}
type <- attr(Y, "type")
###If there is a time-dependent covariate
if (ncol(Y) == 2) {
time <- Y[, 1]
event <- Y[, 2]
} else{
time <- Y[, 2] - Y[, 1]
event <- Y[, 3]
}
event[time > max(interval, na.rm = TRUE)] <- 0
time[time > max(interval, na.rm = TRUE)] <-
max(interval, na.rm = TRUE)
if (length(qbs_id) > 0) {
Y[, 1] <- time
}
if (is.null(data$break_interval)) {
ageDiag <- data[, rmap$age]
ageDC <- ageDiag + time
} else{
ageDiag <- data$tstart
ageDC <- data$tstop
}
pophaz <- match.arg(pophaz, c("classic", "rescaled", "corrected"))
if (pophaz == "corrected") {
if (!is.null(add.rmap)) {
add.rmap.var <- add.rmap
add.rmap <- data[, add.rmap]
} else{
stop("Additional demographic variable must be specified")
}
} else{
if (pophaz == "rescaled") {
if (!is.null(add.rmap)) {
stop("Additional demographic variable is not required")
} else{
add.rmap <- as.factor(rep(1, nrow(data)))
}
}
if (pophaz == "classic") {
if (!is.null(add.rmap)) {
stop("Additional demographic variable is not required")
}
}
}
if (only_ehazard == TRUE & pophaz != "classic") {
stop("cumulative expected hazard if also required for this type of model")
}
#
condition0 <- add.rmap.cut$breakpoint == FALSE
condition1 <-
add.rmap.cut$breakpoint == TRUE &
!is.na(add.rmap.cut$cut[1]) & is.null(add.rmap.cut$probs)
condition2 <-
add.rmap.cut$breakpoint == TRUE &
is.na(add.rmap.cut$cut[1]) & !is.null(add.rmap.cut$probs)
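  # individual expected (population) hazards at the end of follow-up and
  # their cumulative values over follow-up are extracted from the life
  # table; both quantities enter the excess hazard likelihood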
if (!is.null(data$break_interval)) {
if (missing(ratetable)) {
exphaz <- exphaz_years(
ageDiag = data$tstart,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = pophaz,
add.rmap = add.rmap,
only_ehazard = only_ehazard
)
ehazard <- exphaz$ehazard
ehazardInt <- try(exphaz$ehazardInt, TRUE)
} else{
exphaz <- exphaz_years(
ageDiag = data$tstart,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = pophaz,
only_ehazard = only_ehazard
)
ehazard <- exphaz$ehazard
ehazardInt <- exphaz$ehazardInt
dateDiag <- exphaz$dateDiag
}
} else {
if (missing(ratetable)) {
exphaz <- exphaz_years(
ageDiag = ageDiag,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
ratedata = ratedata,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = pophaz,
add.rmap = add.rmap,
only_ehazard = only_ehazard
)
ehazard <- exphaz$ehazard
ehazardInt <- try(exphaz$ehazardInt, TRUE)
} else{
exphaz <- exphaz_years(
ageDiag = ageDiag,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = pophaz,
only_ehazard = only_ehazard
)
ehazard <- exphaz$ehazard
ehazardInt <- exphaz$ehazardInt
dateDiag <- exphaz$dateDiag
}
}
if (sum(is.na(interval)) > 0) {
n.cut <- sum(is.na(interval))
q.values <- cumsum(rep(1 / (n.cut + 1), n.cut))
if (baseline == "bsplines" & (n.cut != 2)) {
if (n.cut != 3) {
q.values <- c(0.05, 0.95)
}
else {
stop("Must have 2 internal knots using bsplines.")
}
}
l.cut <- quantile(time[which(event %in% 1)], q.values)
names(l.cut) <- NULL
interval <- c(min(interval, na.rm = TRUE),
l.cut,
max(interval, na.rm = TRUE))
}
if ((length(interval) - 1) != sum(sapply(1:(length(interval) - 1),
function(i, interval)
(interval[i + 1] > interval[i]),
interval = interval)))
stop("Interval values are not in ascending order.")
if ((sum(covtest) == ncol(X)) && (length(qbs_id) == 0))
stop(
"Do not use 'covtest' for this hypothesis.
\nLikelihood ratio test of the full versus null model
\nis always provided."
)
if (length(covtest) != ncol(X))
    stop(
      "Number of arguments of 'qbs' must be the same
      \nas the number of fitted binary covariates or
      \nas the number of levels if data type is factor."
    )
if (length(qbs_id) > 0) {
if ((length(z_X_vect) != ncol(X)) ||
(!is.logical(z_X_vect)) || (sum(is.na(z_X_vect)) > 0))
stop(
"Invalid values for 'qbs()':
\nmust be well specified for covariable(s) used in the formula."
)
if (ncol(Y) > 2)
stop(
"Time-dependent covariate not yet implemented for
\nnon-proportional hazards situation."
)
for (i in 1:length(z_X_vect))
if ((z_X_vect[i] == FALSE) && (covtest[i] == TRUE) == TRUE)
stop("You mustn't test a PH effect (covtest=TRUE) for
\na PH covariate (z_X_vect=FALSE)!")
} else{
z_X_vect <- covtest <- rep(FALSE, ncol(X))
if ((length(z_X_vect) != ncol(X)) ||
(!is.logical(z_X_vect)) || (sum(is.na(z_X_vect)) > 0))
      stop(
        "Invalid values for 'bsplines':
        \nmust be a vector of logical values with the same number of elements
        \nas for covariable used in the formula."
      )
if (ncol(Y) > 2)
stop(
"Time-dependent covariate not yet implemented for
\nnon-proportional hazards situation."
)
for (i in 1:length(z_X_vect))
if ((z_X_vect[i] == FALSE) && (covtest[i] == TRUE) == TRUE)
stop("You mustn't test a PH effect (covtest=TRUE) for
\na PH covariate (z_X_vect=FALSE)!")
}
baseline <- match.arg(baseline, c("constant", "bsplines"))
if (baseline == "constant") {
    if (add.rmap.cut$breakpoint == FALSE ||
        (add.rmap.cut$breakpoint == TRUE &&
         !is.na(add.rmap.cut$cut[1]) &&
         is.null(add.rmap.cut$probs))) {
      # the fit is identical whether no breakpoint is requested or the
      # breakpoint position(s) are supplied by the user
      fitter <- get("esteve.ph.fit")
      fit <- fitter(
        X,
        Y,
        ehazard,
        ehazardInt,
        int = interval,
        covtest,
        bsplines = z_X_vect,
        init,
        control,
        event,
        Terms,
        strats,
        add.rmap,
        add.rmap.cut,
        ageDiag,
        ageDC,
        optim,
        trace,
        speedy
      )
    } else if (add.rmap.cut$breakpoint == TRUE &&
               is.na(add.rmap.cut$cut[1]) &&
               !is.null(add.rmap.cut$probs)) {
fitter <- get("esteve.ph.fit")
if (splitting) {
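        # grid search over breakpoint positions: candidates are the
        # quantiles ('probs') of age at death; every ordered combination of
        # 'nbreak' candidates is fitted and the best model is kept below by
        # AIC or BIC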
nbreak <- length(add.rmap.cut$cut)
allpos_break <-
with(data, quantile(ageDC[event == 1], probs = c(add.rmap.cut$probs)))
cuted <- gtools::permutations(n = length(allpos_break),
r = nbreak,
v = allpos_break)
if (nbreak > 1) {
cut2 <- unique(t(sapply(1:nrow(cuted), function(i)
sort(cuted[i, ]))))
} else{
cut2 <-
unique(matrix(sapply(1:nrow(cuted), function(i)
sort(cuted[i, ])),
ncol = 1))
}
nmodels <- nrow(cut2)
tofit <- lapply(1:nmodels, function(i) {
add.rmap.cut$cut <- cut2[i,]
newdata2 <- tosplit(
formula = formula,
add.rmap.cut = add.rmap.cut,
data = data,
rmap = rmap,
interval = interval,
subset
)
data <- newdata2$tdata2
if (is.null(data$break_interval)) {
ageDiag <- data[, rmap$age]
ageDC <- ageDiag + time
} else if (!is.null(data$break_interval)) {
ageDiag <- data$tstart
ageDC <- data$tstop
time <- with(data, c(tstop - tstart))
add.rmap <- data[, add.rmap.var]
}
if (!survival::is.ratetable(ratetable)) {
exphaz2 <- exphaz_years(
ageDiag = ageDiag,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = pophaz,
add.rmap = add.rmap,
only_ehazard = only_ehazard
)
ehazard2 <- exphaz2$ehazard
ehazardInt2 <- try(exphaz2$ehazardInt, TRUE)
} else{
exphaz2 <- exphaz_years(
ageDiag = ageDiag,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = pophaz,
only_ehazard = only_ehazard
)
ehazard2 <- data$ehazard2 <- exphaz2$ehazard
ehazardInt2 <- data$ehazardInt2 <- exphaz2$ehazardInt
dateDiag2 <- data$dateDiag2 <- exphaz2$dateDiag
}
newfit <- xhaz_split(
formula = formula,
data = data,
ratetable = ratetable,
rmap = rmap,
baseline = baseline,
pophaz = pophaz,
only_ehazard = only_ehazard,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
splitting = splitting,
interval = interval,
covtest = covtest,
init = init,
control = control,
optim = optim,
scale = scale ,
trace = trace,
speedy = speedy,
nghq,
rcall = rcall,
...
)
X <- newfit$X
Y <- newfit$Y
event <- newfit$event
ageDC <- newfit$ageDC
ageDiag <- newfit$ageDiag
testM(
X,
Y,
ehazard = ehazard2,
ehazardInt = ehazardInt2,
int = interval,
covtest,
bsplines = z_X_vect,
init,
control,
event,
Terms,
strats,
add.rmap,
add.rmap.cut,
ageDiag = ageDiag,
ageDC = ageDC,
optim,
trace,
speedy,
data
)
})
        if (!is.null(add.rmap.cut$print_stepwise)) {
if (add.rmap.cut$print_stepwise) {
sapply(1:length(tofit),
function(i) {
cat("Model:", i, "\n")
print(tofit[[i]])
cat("\n")
})
cat("\n")
}
}
allAIC <-
suppressWarnings(sapply(1:length(tofit), function(i)
as.numeric(try(tofit[[i]]$AIC, TRUE)
)))
allBIC <-
suppressWarnings(sapply(1:length(tofit), function(i)
as.numeric(try(tofit[[i]]$BIC, TRUE)
)))
if (add.rmap.cut$criterion == "AIC") {
fit <- tofit[[which.min(allAIC)]]
fit$add.rmap.cut$cut <- c(cut2[which.min(allAIC), ])
} else if (add.rmap.cut$criterion == "BIC") {
fit <- tofit[[which.min(allBIC)]]
fit$add.rmap.cut$cut <- c(cut2[which.min(allBIC), ])
}
fit$data <- data
} else{
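      # same breakpoint grid search as above, but without splitting the
      # data set: each candidate model is fitted directly through testM()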
nbreak <- length(add.rmap.cut$cut)
age_time <- ageDiag + time
allpos_break <-
with(data, quantile(age_time[event == 1], probs = c(add.rmap.cut$probs)))
cuted <- gtools::permutations(n = length(allpos_break),
r = nbreak,
v = allpos_break)
if (nbreak > 1) {
cut2 <- unique(t(sapply(1:nrow(cuted), function(i)
sort(cuted[i, ]))))
} else{
cut2 <-
unique(matrix(sapply(1:nrow(cuted), function(i)
sort(cuted[i, ])),
ncol = 1))
}
nmodels <- nrow(cut2)
tofit <- lapply(1:nmodels, function(i) {
add.rmap.cut$cut <- cut2[i,]
testM(
X,
Y,
ehazard,
ehazardInt,
int = interval,
covtest,
bsplines = z_X_vect,
init,
control,
event,
Terms,
strats,
add.rmap,
add.rmap.cut,
ageDiag,
ageDC,
optim,
trace,
speedy,
data
)
})
        if (!is.null(add.rmap.cut$print_stepwise)) {
if (add.rmap.cut$print_stepwise) {
sapply(1:length(tofit),
function(i) {
cat("Model:", i, "\n")
print(tofit[[i]])
cat("\n")
})
cat("\n")
}
}
allAIC <-
suppressWarnings(sapply(1:length(tofit), function(i)
as.numeric(try(tofit[[i]]$AIC, TRUE)
)))
allBIC <-
suppressWarnings(sapply(1:length(tofit), function(i)
as.numeric(try(tofit[[i]]$BIC, TRUE)
)))
        if (length(which.min(allAIC)) == 0) {
          stop("no convergence with the proposed breakpoints")
        }
if (add.rmap.cut$criterion == "AIC") {
fit <- tofit[[which.min(allAIC)]]
fit$add.rmap.cut$cut <- c(cut2[which.min(allAIC), ])
} else if (add.rmap.cut$criterion == "BIC") {
fit <- tofit[[which.min(allBIC)]]
fit$add.rmap.cut$cut <- c(cut2[which.min(allBIC), ])
}
}
}
oldClass(fit) <- "constant"
}
else {
fitter <- get("giorgi.tdph.fit")
fit <- fitter(
X,
Y,
ehazard,
ehazardInt,
int = interval,
covtest,
bsplines = z_X_vect,
init,
control,
event,
Terms,
strats,
add.rmap,
add.rmap.cut,
ageDiag,
ageDC,
optim,
trace,
speedy,
nghq
)
oldClass(fit) <- "bsplines"
fit$z_bsplines <- z_X_vect
}
time_elapsed1 <- as.numeric(base::proc.time()[3])
if (add.rmap.cut$breakpoint == TRUE &
!is.na(add.rmap.cut$cut[1])) {
fit$break.levels <-
levels(cut(ageDC, breaks = c(
min(ageDC), add.rmap.cut$cut, max(ageDC)
)))
} else if (add.rmap.cut$breakpoint == TRUE &
is.na(add.rmap.cut$cut[1])) {
fit$break.levels <-
levels(cut(ageDC, breaks = c(
min(ageDC), fit$add.rmap.cut$cut, max(ageDC)
)))
}
fit$level <- control$level
fit$interval <- interval
fit$na.action <- na.action
fit$n <- nrow(Y)
fit$n.events <- sum(event, na.rm = TRUE)
fit$formula <- as.vector(attr(Terms, "formula"))
fit$call <- m_int
fit$varcov <- fit$var
fit[["var"]] <- NULL
fit$pophaz <- pophaz
fit$baseline <- baseline
fit$add.rmap <- add.rmap
fit$ehazard <- ehazard
fit$ehazardInt <- ehazardInt
fit$add.rmap.cut <- add.rmap.cut
fit$time_elapsed <- time_elapsed1 - time_elapsed0
if (!splitting) {
fit$data <- data
fit$terms <- Terms
fit$assign <- attr(X, "assign")
}
return(fit)
}
# ---- end of file: R/without_breakpoint_without_cut.R ----
#' @title Excess Hazard Modelling Considering Inappropriate Mortality Rates
#'
#' @aliases xhaz-package
#'
#' @description Contains functions to fit excess hazard models, with or without
#' proportional population hazards assumption. The baseline excess hazard can be a
#' piecewise constant function or a B-splines function. When B-splines are chosen for
#' the baseline excess hazard, the user can specify some covariates which have
#' a time-dependent effect (using "bsplines") on the baseline excess hazard.
#' The user can also specify if the framework corresponds to the classical
#' excess hazard modeling, i.e. assuming that the expected mortality of studied
#' individuals is appropriate. Two other frameworks can also be considered: first,
#' the expected mortality available in the life table is not accurate and
#' requires taking into account an additional variable in the life table by allowing the
#' latter to act on the general population mortality with a proportional effect.
#' This approach is presented by Touraine et al. (2020) <doi:10.1177/0962280218823234>.
#' The user can also fit a model that relaxes the proportional expected hazards assumption
#' considered in the latter excess hazard model. This extension, proposed by
#' Mba et al. (2020) <doi:10.1186/s12874-020-01139-z>, allows a non-proportional
#' effect of the additional variable on the general population mortality;
#' second, there is a non-comparability source of bias in terms of expected mortality
#' of selected individuals in non-population-based studies such as clinical trials.
#' The related excess hazard model correcting this source of bias is presented in
#' Goungounga et al. (2019) <doi:10.1186/s12874-019-0747-3>. The optimization process
#' in these models uses the maximum likelihood method through the routine
#' \code{optim} or an internal function of the \code{xhaz-package}.
#'
#' @details
#' \tabular{ll}{
#' Package: \tab xhaz\cr
#' Type: \tab Package\cr
#' Version: \tab 2.0.1\cr
#' Date: \tab 2022-09-12\cr
#' License: \tab GPL-3\cr
#' }
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R;
#' CENSUR working survival group. Correcting for misclassification
#' and selection effects in estimating net survival in clinical trials.
#' BMC Med Res Methodol. 2019 May 16;19(1):104.
#' doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID: PMC6524224.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group.
#' More accurate cancer-related excess mortality through correcting
#' background mortality for extra variables.
#' Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#' Mba RD, Goungounga JA, Grafféo N, Giorgi R; CENSUR working survival group.
#' Correcting inaccurate background mortality in excess hazard models
#' through breakpoints. BMC Med Res Methodol. 2020 Oct 29;20(1):268.
#' doi: 10.1186/s12874-020-01139-z. PMID: 33121436; PMCID: PMC7596976.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/33121436/}{PubMed})
#'
#'
#' @examples
#'
#' \donttest{
#' library("numDeriv")
#' library("survexp.fr")
#' library("splines")
#' data("simuData", "dataCancer", package = "xhaz")
#' # load the data sets 'simuData' and 'dataCancer'.
#'
#' #define the levels of variable sex
#' levels(simuData$sex) <- c("male", "female")
#'
#'# Esteve et al. model: baseline excess hazard is a piecewise constant
#'# function with linear and proportional effects for the covariates on
#'# the baseline excess hazard.
#'
#'
#' fit.estv1 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, NA, NA, NA, max(simuData$time_year)),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant",
#' pophaz = "classic")
#'
#'
#' fit.estv1
#'
#'
#' # Touraine et al. model: baseline excess hazard is a piecewise constant
#' # function with linear and proportional effects for the
#' # covariates on the baseline excess hazard.
#' # An additional covariate (here race) missing in the life table is
#' # considered by the model.
#'
#'
#' fit.corrected1 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, NA, NA, NA,
#' max(simuData$time_year)),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant", pophaz = "corrected",
#' add.rmap = "race")
#'
#'
#'
#' fit.corrected1
#'
#'
#'
#' # An additional covariate (here race) missing in the life table is
#' # considered by the model with a breakpoint at 75 years
#'
#' fit.corrected2 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData, ratetable = survexp.us,
#' interval = c(0, NA, NA, NA, NA, 6),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant", pophaz = "corrected",
#' add.rmap = "race",
#' add.rmap.cut = list(breakpoint = TRUE, cut = 75))
#'
#'
#' fit.corrected2
#'
#' #Giorgi et al model: baseline excess hazard is a quadratic Bsplines
#' # function with two interior knots and allows here
#' # linear and proportional effects for the covariates on
#' # the baseline excess hazard.
#'
#'
#' fitphBS <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData, baseline = "bsplines",
#' pophaz = "classic", ratetable = survexp.us,
#' interval = c(0, NA, NA, max(simuData$time_year)),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'))
#'
#' fitphBS
#'
#'
#'
#' }
#'
#'
#' @keywords internal
"_PACKAGE"
# ---- end of file: R/xhaz-package.R ----
#' @title xhaz function
#'
#' @description Fits the excess hazard models proposed by Esteve et al. (1990) <doi:10.1002/sim.4780090506>,
#' with the possibility to account for time dependent covariates. Fits also the
#' non-proportional excess hazard model proposed by Giorgi et al. (2005) <doi:10.1002/sim.2400>.
#' In addition, fits excess hazard models with the possibility to rescale
#' (Goungounga et al. (2019) <doi:10.1186/s12874-019-0747-3>) or to correct the background mortality with a
#' proportional (Touraine et al. (2020) <doi:10.1177/0962280218823234>) or non-proportional (Mba et al. (2020) <doi:10.1186/s12874-020-01139-z>)
#' effect.
#'
#' @param formula a formula object of the function with the response on the left
#' of a \code{~} operator and the terms on the right. The response must be a
#' survival object as returned by the \code{Surv} function (time in first and status
#' in second).
#'
#' @note `time` is OBLIGATORY in YEARS.
#'
#'
#' @param data a data frame in which to interpret the variables named in the
#' formula
#'
#' @param ratetable a rate table stratified by age, sex, year (if missing,
#' `ratedata` is used)
#'
#' @param rmap a list that maps data set names to the ratetable names.
#'
#' @param baseline an argument to specify the baseline hazard: if it follows a
#' piecewise constant, \code{baseline = "constant"} is used and corresponds to
#' the baseline in Esteve et al. model; if the baseline follows a quadratic b-splines,
#' \code{baseline = "bsplines"} is used, corresponding to the baseline excess
#' hazard in Giorgi et al model.
#'
#' @param pophaz one of three possible character arguments: "classic",
#' "rescaled" or "corrected". If \code{pophaz = "classic"} is chosen, fits the models
#' that require neither rescaling nor correcting of the background mortality (i.e. the Esteve
#' et al. model or Giorgi et al. model); if \code{pophaz = "rescaled"} or
#' \code{pophaz = "corrected"} is chosen, fits the models that rescale
#' or correct the background mortality.
#'
#' @param only_ehazard a boolean argument (by default, \code{only_ehazard=FALSE}).
#' If \code{only_ehazard = TRUE}, \code{pophaz = "classic"} must be provided and
#' the total value of the log-likelihood will not account for the cumulative population hazard.
#'
#' @param add.rmap a character string giving the name of the additional
#' demographic variable from `data` to be used for correction of the life table,
#' in particular when one is in the presence of an insufficiently stratified life
#' table (see Touraine et al. model). This argument is not used if
#' \code{pophaz = "classic"} or \code{pophaz = "rescaled"}.
#'
#'
#' @param add.rmap.cut a list containing arguments to specify the modeling
#' strategy for breakpoint positions, which allows a non-proportional effect of
#' the correction term acting on the background mortality. By default
#' \code{list(breakpoint = FALSE)}, i.e. a proportional effect of the correction
#' term acting on the background mortality is assumed; in this case, the other
#' arguments of the list are ignored;
#'
#' if \code{list(breakpoint = TRUE, cut = c(70))}, the breakpoint(s) are fixed
#' at the supplied numeric value(s). If \code{list(breakpoint = TRUE, cut = NA)},
#' there is the same number of breakpoints as the number of NA, with their possible
#' positions specified as here by \code{probs},
#' i.e. \code{list(breakpoint = TRUE, cut = NA, probs = seq(0, 1, 0.25))}.
#' That corresponds to a numeric vector of probabilities with values between 0 and 1
#' as in \code{quantile} function.
#' \code{criterion} is used to choose the best model, using the AIC or the
#' BIC (the default criterion). If needed, all the fitted models are printed
#' by the user by adding in the list \code{print_stepwise = FALSE}.
#'
#'
#' @param interval a vector indicating either the location of the year-scale time
#' intervals for models with piecewise constant function, or the location of the
#' knots for models with B-splines functions for their baseline hazard (see the
#' appropriate specification in \code{baseline} argument). The first component of
#' the vector is 0, and the last one corresponds to the maximum follow-up time of the study.
#'
#' @param ratedata a data frame of the mortality hazards in the general population.
#'
#' @param subset an expression indicating which subset of the data should be used
#' in the modeling. All observations are included by default
#'
#' @param na.action as in the \code{coxph} function, a missing-data filter function.
#'
#'
#' @param init a list of initial values for the parameters to estimate. For each
#' element of the list, give the name of the covariate followed by the vector
#' of fixed initial values
#' @param control a list of control values used to control the optimization
#' process. In this list, `eps` is the convergence criterion (by default, \code{eps = 10^-4}),
#' `iter.max` is the maximum number of iterations (by default, \code{iter.max = 800}),
#' and \code{level} is the level used for the confidence intervals (by default, \code{level = 0.95}).
#'
#'
#' @param optim a Boolean argument (by default, \code{optim = TRUE}).
#' If \code{optim = TRUE}, the maximization algorithm uses the \code{optim} function
#'
#' @param scale a numeric argument to specify whether the life table contains death rates
#' per day (default \code{scale = 365.2425}) or death rates per year (\code{scale = 1}).
#'
#' @param trace a Boolean argument; if \code{trace = TRUE}, tracing information
#' on the progress of the optimization is produced
#'
#'
#' @param speedy a Boolean argument; if \code{speedy = TRUE}, optimization is done in
#' parallel mode
#'
#'
#' @param nghq number of nodes and weights for Gaussian quadrature
#'
#' @param ... other parameters used with the \code{xhaz} function
#'
#'
#' @details Use the \code{Surv(time_start, time_stop, status)} notation for time
#' dependent covariate with the appropriate organization of the data set (see
#' the help page of the \code{Surv} function)
#'
#' Only two interior knots are possible for the model with B-splines functions
#' to fit the baseline (excess) hazard. Determination of the intervals may be
#' user-defined or automatically computed according to the quantiles of the
#' distribution of deaths. Use NA for an automatic determination (for example,
#' \code{interval = c(0, NA, NA, 5)}).
#'
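#' As a minimal sketch of the counting-process notation for a time-dependent
#' covariate (assuming a data set \code{df} with hypothetical columns
#' \code{tstart}, \code{tstop}, \code{status} and a covariate \code{trt}):
#' \preformatted{
#' xhaz(Surv(tstart, tstop, status) ~ trt, data = df,
#'      ratetable = survexp.us, interval = c(0, NA, NA, NA, 5),
#'      rmap = list(age = "age", sex = "sex", year = "date"),
#'      baseline = "constant", pophaz = "classic")
#' }
#'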
#'
#' @keywords xhaz
#'
#'
#' @return An object of class \code{xhaz.constant} or \code{xhaz.bsplines},
#' according to the type of functions chosen to fit the baseline hazard of
#' model (see details for argument \code{baseline}). This object is a list containing
#' the following components:
#'
#'
#' \item{coefficients}{estimates found for the model}
#'
#' \item{varcov}{the variance-covariance matrix}
#'
#' \item{loglik}{for the Estève et al. model: the log-likelihood of the null
#' model, i.e without covariate, and the log-likelihood of the full model,
#' i.e with all the covariates declared in the formula; for the Giorgi et al.
#' model: the log-likelihood of the full model}
#'
#' \item{cov.test}{a logical vector indicating, for each covariate, whether a
#' test of its effect being equal to 0 was requested; when any test is
#' requested, the likelihood ratio, Wald and score test statistics are
#' reported by \code{summary}}
#'
#' \item{message}{a character string returned by the optimizer;
#' see details in the \code{optim} help page}
#'
#' \item{convergence}{an integer code as in \code{optim} when `"L-BFGS-B"` method
#' is used.}
#'
#' \item{n}{the number of individuals in the dataset}
#'
#' \item{n.events}{the number of events in the dataset. Events are considered
#' as deaths whatever the cause}
#'
#' \item{level}{the confidence level used}
#'
#' \item{interval}{the intervals used to split time for the piecewise baseline excess
#' hazard, or the knot positions for the B-splines baseline}
#'
#' \item{terms}{the representation of the terms in the model}
#'
#' \item{call}{the matched call}
#'
#' \item{pophaz}{the assumption considered for the life table used in the
#' excess hazard model}
#'
#' \item{add.rmap}{the additional variable for which the life table is not
#' stratified}
#'
#' \item{ehazardInt}{the cumulative expected hazard of each individual calculated from
#' the ratetable used in the model}
#'
#' \item{ehazard}{the individual expected hazard values from the ratetable
#' used to fit the model}
#'
#' \item{data}{the dataset used to run the model}
#'
#' \item{time_elapsed}{the time to run the model}
#'
#'
#'
#'
#' @author Juste Goungounga, Darlin Robert Mba, Nathalie Graffeo, Roch Giorgi
#'
#' @references Goungounga JA, Touraine C, Grafféo N, Giorgi R; CENSUR working
#' survival group. Correcting for misclassification and selection effects in
#' estimating net survival in clinical trials. BMC Med Res Methodol. 2019 May
#' 16;19(1):104. doi: 10.1186/s12874-019-0747-3. PMID: 31096911; PMCID:
#' PMC6524224. (\href{https://pubmed.ncbi.nlm.nih.gov/31096911/}{PubMed})
#'
#' Touraine C, Grafféo N, Giorgi R; CENSUR working survival group. More
#' accurate cancer-related excess mortality through correcting background
#' mortality for extra variables. Stat Methods Med Res. 2020 Jan;29(1):122-136.
#' doi: 10.1177/0962280218823234. Epub 2019 Jan 23. PMID: 30674229.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/30674229/}{PubMed})
#'
#' Mba RD, Goungounga JA, Grafféo N, Giorgi R; CENSUR working survival group.
#' Correcting inaccurate background mortality in excess hazard models through
#' breakpoints. BMC Med Res Methodol. 2020 Oct 29;20(1):268. doi:
#' 10.1186/s12874-020-01139-z. PMID: 33121436; PMCID: PMC7596976.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/33121436/}{PubMed})
#'
#' Giorgi R, Abrahamowicz M, Quantin C, Bolard P, Esteve J, Gouvernet J, Faivre
#' J. A relative survival regression model using B-spline functions to model
#' non-proportional hazards. Statistics in Medicine 2003; 22: 2767-84.
#' (\href{https://pubmed.ncbi.nlm.nih.gov/12939785/}{PubMed})
#'
#'
#' @examples
#'\donttest{
#' library("numDeriv")
#' library("survexp.fr")
#' library("splines")
#' library("statmod")
#' data("simuData","rescaledData", "dataCancer")
#' # load the data sets 'simuData', 'rescaledData' and 'dataCancer'.
#'
#'# Esteve et al. model: baseline excess hazard is a piecewise constant
#'# function with linear and proportional effects for the covariates on
#'# the baseline excess hazard.
#'
#' levels(simuData$sex) <- c("male", "female")
#'
#' fit.estv1 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, NA, NA, NA, 6),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant", pophaz = "classic")
#'
#'
#' fit.estv1
#'
#'
#' # Touraine et al. model: baseline excess hazard is a piecewise constant
#' # function with linear and proportional effects for the
#' # covariates on the baseline excess hazard.
#' # An additional covariate (here race) missing in the life table is
#' # considered by the model.
#'
#'
#' fit.corrected1 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, NA, NA, NA, 6),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant", pophaz = "corrected",
#' add.rmap = "race")
#'
#'
#'
#' fit.corrected1
#'
#' # extension of Touraine et al model: baseline excess hazard is a piecewise
#' # constant function with a linear and proportional effects for the covariates
#' # on the baseline excess hazard.
#'
#' # An additional covariate (here race) missing in the life table is
#' # considered by the model with a breakpoint at 75 years
#'
#' fit.corrected2 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, NA, NA, NA, 6),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant", pophaz = "corrected",
#' add.rmap = "race",
#' add.rmap.cut = list(breakpoint = TRUE, cut = 75))
#'
#'
#'
#' fit.corrected2
#'
#'
#' #Giorgi et al model: baseline excess hazard is a quadratic Bsplines
#' # function with two interior knots and allows here
#' # linear and proportional effects for the covariates on
#' # the baseline excess hazard.
#'
#'
#' fitphBS <- xhaz(formula = Surv(time_year, status) ~ agec + race,
#' data = simuData,
#' ratetable = survexp.us,
#' interval = c(0, NA, NA, 6),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#' fitphBS
#'
#'
#'
#'
#'
#' # Application on `dataCancer`.
#' #Giorgi et al model: baseline excess hazard is a quadratic Bspline
#' # function with two interior knots and allows here a
#' # linear and proportional effect for the variable
#' # "immuno_trt" plus a non-proportional effect
#' # for the variable "ageCentre" on baseline excess hazard.
#'
#'
#' fittdphBS <- xhaz(formula = Surv(obs_time_year, event) ~ qbs(ageCentre) + immuno_trt,
#' data = dataCancer,
#' ratetable = survexp.fr,
#' interval = c(0, 0.5, 12, 15),
#' rmap = list(age = 'age', sex = 'sexx', year = 'year_date'),
#' baseline = "bsplines", pophaz = "classic")
#'
#' fittdphBS
#'
#'
#'
#'
#' # Application on `rescaledData`.
#' # rescaled model: baseline excess hazard is a piecewise function with a
#' # linear and proportional effects for the covariates on baseline excess hazard.
#'
#' # A scale parameter on the expected mortality of general population is
#' # considered to account for the non-comparability source of bias.
#'
#' rescaledData$timeyear <- rescaledData$time/12
#' rescaledData$agecr <- scale(rescaledData$age, TRUE, TRUE)
#'
#' fit.res <- xhaz(formula = Surv(timeyear, status) ~ agecr + hormTh,
#' data = rescaledData,
#' ratetable = survexp.fr,
#' interval = c(0, NA, NA, NA, NA, NA, max(rescaledData$timeyear)),
#' rmap = list(age = 'age', sex = 'sex', year = 'date'),
#' baseline = "constant", pophaz = "rescaled")
#'
#' fit.res
#' }
#'
#' @import survival
#' @import stats
#' @import parallel
#' @import optimParallel
#' @import statmod
#' @import splines
#' @import survexp.fr
#'
#' @export
xhaz <- function(formula = formula(data),
data = sys.parent(),
ratetable,
rmap = list(age = NULL, sex = NULL, year = NULL),
baseline = c("constant", "bsplines"),
pophaz = c("classic", "rescaled", "corrected"),
only_ehazard = FALSE,
add.rmap = NULL,
add.rmap.cut = list(
breakpoint = FALSE,
cut = NA,
probs = NULL,
criterion = "BIC",
print_stepwise = FALSE
),
interval,
ratedata = sys.parent(),
subset,
na.action,
init,
control = list(eps = 1e-4,
iter.max = 800,
level = 0.95),
optim = TRUE,
scale = 365.2425,
trace = 0,
speedy = FALSE,
nghq = 12,
...) {
m_int <- match.call(expand.dots = FALSE)
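  # dispatch on the breakpoint specification in 'add.rmap.cut':
  # - breakpoint = TRUE with user-supplied cut(s)     -> breakpoint_with_cut()
  # - breakpoint = TRUE with cut(s) searched over the
  #   quantiles given in 'probs'                      -> xhaz2() (with data splitting)
  # - breakpoint = FALSE                              -> without_breakpoint_without_cut()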
if ((
add.rmap.cut$breakpoint == TRUE &
!is.na(add.rmap.cut$cut[1]) & is.null(add.rmap.cut$probs)
)) {
if (!missing(rmap)) {
rcall <- substitute(rmap)
if (!is.call(rcall) || rcall[[1]] != as.name("list"))
stop("Invalid rcall argument")
}
else
rcall <- NULL
breakpoint_with_cut(
formula = formula,
data = data,
ratetable = ratetable,
rmap = rmap,
baseline = baseline,
pophaz = pophaz,
only_ehazard = only_ehazard,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
interval = interval,
ratedata ,
subset ,
na.action ,
init ,
control ,
optim ,
scale ,
trace ,
speedy ,
nghq,
m_int,
rcall,
...
)
} else if ((
add.rmap.cut$breakpoint == TRUE &
is.na(add.rmap.cut$cut[1]) & !is.null(add.rmap.cut$probs)
)) {
if (!missing(rmap)) {
rcall <- substitute(rmap)
if (!is.call(rcall) || rcall[[1]] != as.name("list"))
stop("Invalid rcall argument")
}
else
rcall <- NULL
xhaz2(
formula = formula,
data = data,
ratetable = ratetable,
rmap = rmap,
baseline = baseline,
pophaz = pophaz,
only_ehazard = only_ehazard,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
splitting = TRUE,
interval = interval,
ratedata = ratedata,
subset = subset,
na.action = na.action,
init = init,
control = control,
optim = optim,
scale = scale,
trace = trace,
speedy = speedy,
nghq = nghq,
m_int = m_int,
rcall = rcall,
...
)
} else if (add.rmap.cut$breakpoint == FALSE) {
if (!missing(rmap)) {
rcall <- substitute(rmap)
if (!is.call(rcall) || rcall[[1]] != as.name("list"))
stop("Invalid rcall argument")
}
else
rcall <- NULL
without_breakpoint_without_cut(
formula = formula,
data = data,
ratetable = ratetable,
rmap = rmap,
baseline = baseline,
pophaz = pophaz,
only_ehazard = only_ehazard,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
interval = interval,
splitting = FALSE,
ratedata = ratedata,
subset = subset,
na.action = na.action,
init = init,
control = control,
optim = optim,
scale = scale,
trace = trace,
speedy = speedy,
nghq = nghq,
m_int = m_int,
rcall = rcall,
...
)
}
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/xhaz.R |
xhaz2 <- function(formula = formula,
data = data,
ratetable = ratetable,
rmap = rmap,
baseline = baseline,
pophaz = pophaz,
only_ehazard = only_ehazard,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
splitting = splitting,
interval = interval,
ratedata = ratedata,
subset = subset,
na.action = na.action,
init = init,
control = control,
optim = optim,
scale = scale,
trace = trace,
speedy = speedy,
nghq = nghq,
m_int = m_int,
rcall = rcall,
...) {
time_elapsed0 <- as.numeric(base::proc.time()[3])
Call <- match.call()
m <- match.call(expand.dots = FALSE)
indx <- match(c("formula", "data"),
names(Call),
nomatch = 0)
if (indx[1] == 0)
stop("A formula argument is required")
temp <- Call[c(1, indx)]
temp[[1L]] <- as.name("model.frame")
special <- c("strata")
Terms <- if (missing(data)) {
terms(formula, special)
}
else{
terms(formula, special, data = data)
}
temp$formula <- Terms
if (missing(subset)) {
subset <- NULL
}
if (missing(na.action)) {
na.action <- "na.omit"
m <- eval(temp, sys.parent())
} else if (length(attr(m, "na.action"))) {
temp$na.action <- na.pass
m <- eval(temp, sys.parent())
}
ehazardInt <- NULL
# controls on data & ratetable parameters
if (missing(ratedata) & missing(ratetable)) {
stop("Missing rate table from general population.")
}
if (missing(data)) {
stop("Missing data data frame in which to interpret
the variables named in the formula.")
} else{
    if (is.na(match(rmap$age, names(data))))
      stop("Must have information for age in the data set.")
    if (is.na(match(rmap$sex, names(data))))
      stop("Must have information for sex in the data set.")
    if (is.na(match(rmap$year, names(data))))
      stop("Must have information for date in the data set.")
}
if (!missing(ratetable)) {
if (is.ratetable(ratetable)) {
varlist <- attr(ratetable, "dimid")
if (is.null(varlist)) {
varlist <- names(attr(ratetable, "dimnames"))
}
if (is.null(attributes(ratetable)$dimid)) {
attributes(ratetable)$dimid <- varlist
}
}
else{
stop("Invalid rate table")
}
varsexID <- try(which(varlist == 'sex'))
conditionVsex <- attr(ratetable, which = "dimnames")[[varsexID]]
if (any(!conditionVsex %in% c('male', 'female'))) {
conditionVsex <-
c('male', 'female')[c(which(conditionVsex %in% c('male', 'female')))]
}
if (!missing(rmap)) {
condition2 <-
add.rmap.cut$breakpoint == TRUE &
is.na(add.rmap.cut$cut[1]) & !is.null(add.rmap.cut$probs)
if ((!splitting & missing(rcall)) & (condition2)) {
rcall <- substitute(rmap)
} else if (!splitting & !missing(rcall)) {
rmap <- eval(rmap)
}
if (!is.call(rcall) || rcall[[1]] != as.name("list"))
stop("Invalid rcall argument")
}
else
rcall <- NULL
temp01 <- match(names(rcall)[-1], varlist)
if (any(is.na(temp01)))
stop("Variable not found in the ratetable:",
(names(rcall))[is.na(temp01)])
temp02 <- match(as.vector(unlist(rmap)), names(data))
if (any(is.na(temp02))) {
stop("Variable not found in the data set:",
(names(rcall))[is.na(temp02)])
}
}
if (pophaz == "corrected") {
if (is.null(add.rmap.cut$breakpoint)) {
stop("Missing breakpoint information")
} else {
if (add.rmap.cut$breakpoint == TRUE) {
if (!is.na(add.rmap.cut$cut[1])) {
if (min(add.rmap.cut$cut) < min(c(data$age, data$age + data$time))) {
if (max(add.rmap.cut$cut) <= max(c(data$age, data$age + data$time))) {
stop("Breakpoint(s) is (are) smaller than the minimum age")
} else
stop(
"Breakpoint(s) is (are) smaller than the minimum age and breakpoint(s) greater than the maximum age"
)
} else{
if (max(add.rmap.cut$cut) > max(c(data$age, data$age + data$time)))
stop("Breakpoint(s) is (are) greater than the maximum age")
}
}
}
}
}
if (control$iter.max < 0)
stop("Invalid value for iterations.")
if (control$eps <= 0)
stop("Invalid convergence criteria.")
if (control$level < 0 | control$level > 1)
stop("Invalid value for the level of confidence interval.")
if (missing(init))
init <- NULL
if (missing(interval))
stop("Missing cutpoints definition for intervals.")
if (!is.numeric(interval))
stop("Wrong values for intervals. Must be numeric.")
if (min(interval, na.rm = TRUE) != 0)
stop("First interval must start at 0.")
if (sum((interval < 0) * 1, na.rm = TRUE) > 0)
stop("Negative value is not allowed for interval.")
myvarnames <- colnames(model.matrix(Terms, m)[,-1, drop = FALSE])
qbs_id <- which(stringr::str_detect(c(myvarnames),
pattern = "qbs"))
if (length(qbs_id) > 0) {
if (length(interval) > 4)
stop(
"Interval must have 4 values using bsplines
(2 internal knots plus '0' and the end of the study)."
)
} else{
if (baseline == "bsplines") {
if (length(interval) > 4)
stop(
"Interval must have 4 values using bsplines
(2 internal knots plus '0' and the end of the study)."
)
}
}
Y <- model.extract(m, "response")
if (!inherits(Y, "Surv"))
stop("Response must be a survival object.")
strats <- attr(Terms, "specials")$strata
dropx <- NULL
if (length(strats)) {
if (length(qbs_id) > 0)
stop("Strata function is not yet implemented for the B-splines model.")
temp <- untangle.specials(Terms, "strata", 1)
dropx <- c(dropx, temp$terms)
if (length(temp$vars) == 1)
strata.keep <- m[[temp$vars]]
else
strata.keep <- strata(m[, temp$vars], shortlabel = TRUE)
strats <- as.numeric(strata.keep)
attr(Terms, "nstrata") <- max(strats)
}
attr(Terms, "intercept") <- 1
if (length(dropx)) {
X <- model.matrix(Terms[-dropx], m)[, -1, drop = FALSE]
} else{
X <- model.matrix(Terms, m)[, -1, drop = FALSE]
}
if (length(qbs_id) > 0) {
z_bsplines <-
as.data.frame(model.matrix(Terms, m)[, -1, drop = FALSE][, c(qbs_id)])
z_bsplines_names <- stringr::str_remove(myvarnames[c(qbs_id)],
"qbs")
colnames(z_bsplines) <- gsub("\\(|\\)",
"",
as.character(z_bsplines_names))
colnames(X)[c(qbs_id)] <- colnames(z_bsplines)
z_bsplines <- as.matrix(z_bsplines)
z_bsplines_vect <- rep(TRUE, ncol(z_bsplines))
z_X_vect <- rep(FALSE, ncol(X))
z_X_vect[c(qbs_id)] <- z_bsplines_vect
covtest <- z_X_vect
} else{
covtest <- rep(FALSE, ncol(X))
}
type <- attr(Y, "type")
###If there is a time-dependent covariate
if (ncol(Y) == 2) {
time <- Y[, 1]
event <- Y[, 2]
} else{
time <- Y[, 2] - Y[, 1]
event <- Y[, 3]
}
event[time > max(interval, na.rm = TRUE)] <- 0
time[time > max(interval, na.rm = TRUE)] <-
max(interval, na.rm = TRUE)
if (length(qbs_id) > 0) {
Y[, 1] <- time
}
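  # Age at diagnosis and age at end of follow-up; if the data were already
  # split on age (a 'break_interval' column is present), use the tstart/tstop
  # bounds of each split record instead.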
if (is.null(data$break_interval)) {
ageDiag <- data[, rmap$age]
ageDC <- ageDiag + time
} else{
ageDiag <- data$tstart
ageDC <- data$tstop
}
pophaz <- match.arg(pophaz, c("classic", "rescaled", "corrected"))
if (pophaz == "corrected") {
if (!is.null(add.rmap)) {
add.rmap.var <- add.rmap
add.rmap <- data[, add.rmap]
} else{
stop("Additional demographic variable must be specified")
}
} else{
if (pophaz == "rescaled") {
if (!is.null(add.rmap)) {
stop("Additional demographic variable is not required")
} else{
add.rmap <- as.factor(rep(1, nrow(data)))
}
}
if (pophaz == "classic") {
if (!is.null(add.rmap)) {
stop("Additional demographic variable is not required")
}
}
}
if (only_ehazard == TRUE & pophaz != "classic") {
stop("cumulative expected hazard if also required for this type of model")
}
#
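  # Three mutually exclusive situations for the life-table correction:
  # condition0: no breakpoint; condition1: user-supplied breakpoint(s);
  # condition2: breakpoint(s) searched among the quantiles given by 'probs'.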
condition0 <- add.rmap.cut$breakpoint == FALSE
condition1 <-
add.rmap.cut$breakpoint == TRUE &
!is.na(add.rmap.cut$cut[1]) & is.null(add.rmap.cut$probs)
condition2 <-
add.rmap.cut$breakpoint == TRUE &
is.na(add.rmap.cut$cut[1]) & !is.null(add.rmap.cut$probs)
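  # Expected (population) hazard at each individual's event time and its
  # cumulative value over the follow-up, looked up in the rate table.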
if (!is.null(data$break_interval)) {
if (missing(ratetable)) {
exphaz <- exphaz_years(
ageDiag = data$tstart,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = pophaz,
add.rmap = add.rmap,
only_ehazard = only_ehazard
)
ehazard <- exphaz$ehazard
ehazardInt <- try(exphaz$ehazardInt, TRUE)
} else{
exphaz <- exphaz_years(
ageDiag = data$tstart,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = pophaz,
only_ehazard = only_ehazard
)
ehazard <- exphaz$ehazard
ehazardInt <- exphaz$ehazardInt
dateDiag <- exphaz$dateDiag
}
} else {
if (missing(ratetable)) {
exphaz <- exphaz_years(
ageDiag = ageDiag,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
ratedata = ratedata,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = pophaz,
add.rmap = add.rmap,
only_ehazard = only_ehazard
)
ehazard <- exphaz$ehazard
ehazardInt <- try(exphaz$ehazardInt, TRUE)
} else{
exphaz <- exphaz_years(
ageDiag = ageDiag,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = pophaz,
only_ehazard = only_ehazard
)
ehazard <- exphaz$ehazard
ehazardInt <- exphaz$ehazardInt
dateDiag <- exphaz$dateDiag
}
}
if (sum(is.na(interval)) > 0) {
n.cut <- sum(is.na(interval))
q.values <- cumsum(rep(1 / (n.cut + 1), n.cut))
if (baseline == "bsplines" & (n.cut != 2)) {
if (n.cut != 3) {
q.values <- c(0.05, 0.95)
}
else {
stop("Must have 2 internal knots using bsplines.")
}
}
l.cut <- quantile(time[which(event %in% 1)], q.values)
names(l.cut) <- NULL
interval <- c(min(interval, na.rm = TRUE),
l.cut,
max(interval, na.rm = TRUE))
}
if ((length(interval) - 1) != sum(sapply(1:(length(interval) - 1),
function(i, interval)
(interval[i + 1] > interval[i]),
interval = interval)))
stop("Interval values are not in ascending order.")
if (!missing(covtest)) {
if ((sum(covtest) == ncol(X)) && (length(qbs_id) == 0))
stop(
"Do not use 'covtest' for this hypothesis.
\nLikelihood ratio test of the full versus null model
\nis always provided."
)
if (length(covtest) != ncol(X))
stop(
"Number of arguments of 'covtest' must be the same
\nas the number of fitted binaries covariates or
\nas the number of levels if data type is factor."
)
} else{
covtest <- c(rep(FALSE, ncol(X)))
}
if (length(qbs_id) > 0) {
if ((length(z_X_vect) != ncol(X)) ||
(!is.logical(z_X_vect)) || (sum(is.na(z_X_vect)) > 0))
stop(
"Invalid values for 'qbs()':
\nmust be well specified for covariable(s) used in the formula."
)
if (ncol(Y) > 2)
stop(
"Time-dependent covariate not yet implemented for
\nnon-proportional hazards situation."
)
for (i in 1:length(z_X_vect))
if ((z_X_vect[i] == FALSE) && (covtest[i] == TRUE) == TRUE)
stop("You mustn't test a PH effect (covtest=TRUE) for
\na PH covariate (z_X_vect=FALSE)!")
} else{
z_X_vect <- covtest <- rep(FALSE, ncol(X))
if ((length(z_X_vect) != ncol(X)) ||
(!is.logical(z_X_vect)) || (sum(is.na(z_X_vect)) > 0))
stop("You mustn't test a PH effect (covtest=TRUE) for
\na PH covariate (z_X_vect=FALSE)!")
if (ncol(Y) > 2)
stop(
"Time-dependent covariate not yet implemented for
\nnon-proportional hazards situation."
)
for (i in 1:length(z_X_vect))
if ((z_X_vect[i] == FALSE) && (covtest[i] == TRUE) == TRUE)
stop("You mustn't test a PH effect (covtest=TRUE) for
\na PH covariate (z_X_vect=FALSE)!")
}
baseline <- match.arg(baseline, c("constant", "bsplines"))
if (baseline == "constant") {
if (add.rmap.cut$breakpoint == FALSE) {
fitter <- get("esteve.ph.fit")
fit <- fitter(
X,
Y,
ehazard,
ehazardInt,
int = interval,
covtest,
bsplines = z_X_vect,
init,
control,
event,
Terms,
strats,
add.rmap,
add.rmap.cut,
ageDiag,
ageDC,
optim,
trace,
speedy
)
} else if (add.rmap.cut$breakpoint == TRUE &
!is.na(add.rmap.cut$cut[1]) &
is.null(add.rmap.cut$probs)) {
fitter <- get("esteve.ph.fit")
fit <- fitter(
X,
Y,
ehazard,
ehazardInt,
int = interval,
covtest,
bsplines = z_X_vect,
init,
control,
event,
Terms,
strats,
add.rmap,
add.rmap.cut,
ageDiag,
ageDC,
optim,
trace,
speedy
)
} else if (add.rmap.cut$breakpoint == TRUE &
is.na(add.rmap.cut$cut[1]) &
!is.null(add.rmap.cut$probs)) {
fitter <- get("esteve.ph.fit")
if (splitting) {
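        # Stepwise search: candidate breakpoints are the quantiles (given by
        # 'probs') of the age at event; each sorted combination of 'nbreak'
        # candidates defines one model, and the best fit is kept below
        # according to the AIC or BIC criterion.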
nbreak <- length(add.rmap.cut$cut)
allpos_break <-
with(data, quantile(ageDC[event == 1], probs = c(add.rmap.cut$probs)))
cuted <- gtools::permutations(n = length(allpos_break),
r = nbreak,
v = allpos_break)
if (nbreak > 1) {
cut2 <- unique(t(sapply(1:nrow(cuted), function(i)
sort(cuted[i,]))))
} else{
cut2 <-
unique(matrix(sapply(1:nrow(cuted), function(i)
sort(cuted[i,])),
ncol = 1))
}
nmodels <- nrow(cut2)
tofit <- lapply(1:nmodels, function(i) {
add.rmap.cut$cut <- cut2[i, ]
newdata2 <- tosplit(
formula = formula,
add.rmap.cut = add.rmap.cut,
data = data,
rmap = rmap,
interval = interval,
subset
)
data <- newdata2$tdata2
if (is.null(data$break_interval)) {
ageDiag <- data[, rmap$age]
ageDC <- ageDiag + time
} else if (!is.null(data$break_interval)) {
ageDiag <- data$tstart
ageDC <- data$tstop
time <- with(data, c(tstop - tstart))
add.rmap <- data[, add.rmap.var]
}
if (!survival::is.ratetable(ratetable)) {
exphaz2 <- exphaz_years(
ageDiag = ageDiag,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = pophaz,
add.rmap = add.rmap,
only_ehazard = only_ehazard
)
ehazard2 <- exphaz2$ehazard
ehazardInt2 <- try(exphaz2$ehazardInt, TRUE)
} else{
exphaz2 <- exphaz_years(
ageDiag = ageDiag,
time = time,
data = data,
rmap = rmap,
ratetable = ratetable,
varlist = varlist,
temp01 = temp01,
scale = scale,
pophaz = pophaz,
only_ehazard = only_ehazard
)
ehazard2 <- data$ehazard2 <- exphaz2$ehazard
ehazardInt2 <- data$ehazardInt2 <- exphaz2$ehazardInt
dateDiag2 <- data$dateDiag2 <- exphaz2$dateDiag
}
newfit <- xhaz_split(
formula = formula,
data = data,
ratetable = ratetable,
rmap = rmap,
baseline = baseline,
pophaz = pophaz,
only_ehazard = only_ehazard,
add.rmap = add.rmap,
add.rmap.cut = add.rmap.cut,
splitting = splitting,
interval = interval,
covtest = covtest,
init = init,
control = control,
optim = optim,
scale = scale ,
trace = trace,
speedy = speedy,
nghq,
rcall = rcall,
...
)
X <- newfit$X
Y <- newfit$Y
event <- newfit$event
ageDC <- newfit$ageDC
ageDiag <- newfit$ageDiag
testM(
X,
Y,
ehazard = ehazard2,
ehazardInt = ehazardInt2,
int = interval,
covtest,
bsplines = z_X_vect,
init,
control,
event,
Terms,
strats,
add.rmap,
add.rmap.cut,
ageDiag = ageDiag,
ageDC = ageDC,
optim,
trace,
speedy,
data
)
})
if (length(which(stringr::str_detect(
names(unlist(add.rmap.cut)), "print_stepwise"
))) > 0) {
if (add.rmap.cut$print_stepwise) {
sapply(1:length(tofit),
function(i) {
cat("Model:", i, "\n")
if (length(which(stringr::str_detect(
names(tofit[[i]]), "coefficients"
))) > 0) {
tofit[[i]]$n <- nrow(Y)
tofit[[i]]$level <- control$level
tofit[[i]]$interval <- interval
tofit[[i]]$n.events <-
sum(event, na.rm = TRUE)
tofit[[i]]$formula <-
as.vector(attr(Terms, "formula"))
tofit[[i]]$call <- m_int
tofit[[i]]$varcov <- tofit[[i]]$var
tofit[[i]][["var"]] <- NULL
tofit[[i]]$pophaz <- pophaz
tofit[[i]]$baseline <- baseline
tofit[[i]]$add.rmap <- add.rmap
tofit[[i]]$add.rmap.cut <- add.rmap.cut
if (!splitting) {
tofit[[i]]$terms <- Terms
tofit[[i]]$assign <- attr(X, "assign")
}
oldClass(tofit[[i]]) <- "constant"
}
if (length(which(stringr::str_detect(
names(tofit[[i]]), "coefficients"
))) > 0) {
# xhaz:::print.constant(tofit[[i]])
print.constant(tofit[[i]])
} else {
cat("Model", i, ": No convergence \n")
}
cat("\n")
})
cat("\n")
}
}
allAIC <-
suppressWarnings(sapply(1:length(tofit), function(i)
as.numeric(try(tofit[[i]]$AIC, TRUE))))
allBIC <-
suppressWarnings(sapply(1:length(tofit), function(i)
as.numeric(try(tofit[[i]]$BIC, TRUE))))
if (isTRUE(which(names(add.rmap.cut) %in% "criterion") > 0)) {
if (add.rmap.cut$criterion == "AIC") {
fit <- tofit[[which.min(allAIC)]]
fit$add.rmap.cut$cut <- c(cut2[which.min(allAIC),])
} else if (add.rmap.cut$criterion == "BIC") {
fit <- tofit[[which.min(allBIC)]]
fit$add.rmap.cut$cut <- c(cut2[which.min(allBIC),])
}
} else{
fit <- tofit[[which.min(allBIC)]]
fit$add.rmap.cut$cut <- c(cut2[which.min(allBIC),])
}
fit$data <- data
} else{
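        # Same stepwise breakpoint search as above, but without time-splitting
        # the data.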
nbreak <- length(add.rmap.cut$cut)
age_time <- ageDiag + time
allpos_break <-
with(data, quantile(age_time[event == 1], probs = c(add.rmap.cut$probs)))
cuted <- gtools::permutations(n = length(allpos_break),
r = nbreak,
v = allpos_break)
if (nbreak > 1) {
cut2 <- unique(t(sapply(1:nrow(cuted), function(i)
sort(cuted[i,]))))
} else{
cut2 <-
unique(matrix(sapply(1:nrow(cuted), function(i)
sort(cuted[i,])),
ncol = 1))
}
nmodels <- nrow(cut2)
tofit <- lapply(1:nmodels, function(i) {
add.rmap.cut$cut <- cut2[i, ]
testM(
X,
Y,
ehazard,
ehazardInt,
int = interval,
covtest,
bsplines = z_X_vect,
init,
control,
event,
Terms,
strats,
add.rmap,
add.rmap.cut,
ageDiag,
ageDC,
optim,
trace,
speedy,
data
)
})
if (length(which(stringr::str_detect(
names(unlist(add.rmap.cut)), "print_stepwise"
))) > 0) {
if (add.rmap.cut$print_stepwise) {
sapply(1:length(tofit),
function(i) {
cat("Model:", i, "\n")
print(tofit[[i]])
cat("\n")
})
cat("\n")
}
}
allAIC <-
suppressWarnings(sapply(1:length(tofit), function(i)
as.numeric(try(tofit[[i]]$AIC, TRUE))))
allBIC <-
suppressWarnings(sapply(1:length(tofit), function(i)
as.numeric(try(tofit[[i]]$BIC, TRUE))))
        if (length(which.min(allAIC)) < 1) {
stop("no convergence with the proposed breakpoints")
}
if (add.rmap.cut$criterion == "AIC") {
fit <- tofit[[which.min(allAIC)]]
fit$add.rmap.cut$cut <- c(cut2[which.min(allAIC),])
} else if (add.rmap.cut$criterion == "BIC") {
fit <- tofit[[which.min(allBIC)]]
fit$add.rmap.cut$cut <- c(cut2[which.min(allBIC),])
}
}
}
oldClass(fit) <- "constant"
}
else {
fitter <- get("giorgi.tdph.fit")
fit <- fitter(
X,
Y,
ehazard,
ehazardInt,
int = interval,
covtest,
bsplines = z_X_vect,
init,
control,
event,
Terms,
strats,
add.rmap,
add.rmap.cut,
ageDiag,
ageDC,
optim,
trace,
speedy,
nghq
)
oldClass(fit) <- "bsplines"
fit$z_bsplines <- z_X_vect
}
time_elapsed1 <- as.numeric(base::proc.time()[3])
if (add.rmap.cut$breakpoint == TRUE &
!is.na(add.rmap.cut$cut[1])) {
fit$break.levels <-
levels(cut(ageDC, breaks = c(
min(ageDC), add.rmap.cut$cut, max(ageDC)
)))
} else if (add.rmap.cut$breakpoint == TRUE &
is.na(add.rmap.cut$cut[1])) {
fit$break.levels <-
levels(cut(ageDC, breaks = c(
min(ageDC), fit$add.rmap.cut$cut, max(ageDC)
)))
}
fit$level <- control$level
fit$interval <- interval
fit$na.action <- na.action
fit$n <- nrow(Y)
fit$n.events <- sum(event, na.rm = TRUE)
fit$formula <- as.vector(attr(Terms, "formula"))
fit$call <- m_int
fit$varcov <- fit$var
fit[["var"]] <- NULL
fit$pophaz <- pophaz
fit$baseline <- baseline
fit$add.rmap <- add.rmap
fit$ehazard <- ehazard
fit$ehazardInt <- ehazardInt
fit$add.rmap.cut <- add.rmap.cut
fit$time_elapsed <- time_elapsed1 - time_elapsed0
if (!splitting) {
fit$data <- data
fit$terms <- Terms
fit$assign <- attr(X, "assign")
}
return(fit)
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/xhaz2.R |
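# xhaz_split: internal helper that repeats the model-frame construction and
# input checks of xhaz2() and returns the design pieces (X, Y, time, event,
# ageDC, ageDiag) needed to refit a model on time-split data.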
#' @import survival
#' @import stats
#' @import parallel
#' @import optimParallel
xhaz_split <- function(formula = formula(data),
data = sys.parent(),
ratetable, rmap = list(age = NULL, sex = NULL, year = NULL),
baseline = c("constant", "bsplines"),
pophaz = c("classic", "rescaled", "corrected"),
only_ehazard = FALSE,
add.rmap = NULL,
add.rmap.cut = list(breakpoint = FALSE,
cut = c(70),
probs = NULL,
criterion = "BIC",
print_stepwise = FALSE),
splitting = FALSE,
interval,
covtest,
ratedata = sys.parent(),
subset,
na.action,
init,
control = list(eps = 1e-4,
iter.max = 800,
level = 0.95),
optim = TRUE,
scale = 365.2425,
trace = 0,
speedy = FALSE,
nghq = 12, rcall, ...) {
time_elapsed0 <- as.numeric(base::proc.time()[3])
m <- match.call(expand.dots = FALSE)
Call <- match.call()
indx <- match(c("formula", "data", "subset", "na.action"),
names(Call),
nomatch = 0)
if (indx[1] == 0)
stop("A formula argument is required")
temp <- Call[c(1, indx)]
temp[[1L]] <- as.name("model.frame")
special <- c("strata")
Terms <- if (missing(data)) {
terms(formula, special)
}
else{
terms(formula, special, data = data)
}
temp$formula <- Terms
m <- eval(temp, sys.parent())
if (missing(na.action)) {
na.action <- NULL
} else if (length(attr(m, "na.action"))) {
temp$na.action <- na.pass
m <- eval(temp, sys.parent())
}
ehazardInt <- NULL
# controls on data & ratetable parameters
if (missing(ratedata) & missing(ratetable)) {
stop("Missing rate table from general population.")
}
if (missing(data)) {
stop("Missing data data frame in which to interpret
the variables named in the formula.")
} else{
    if (is.na(match(rmap$age, names(data))))
      stop("Must have information for age in the data set.")
    if (is.na(match(rmap$sex, names(data))))
      stop("Must have information for sex in the data set.")
    if (is.na(match(rmap$year, names(data))))
      stop("Must have information for date in the data set.")
}
if (!missing(ratetable)) {
if (is.ratetable(ratetable)) {
varlist <- attr(ratetable, "dimid")
if (is.null(varlist)) {
varlist <- names(attr(ratetable, "dimnames"))
}
if (is.null(attributes(ratetable)$dimid)) {
attributes(ratetable)$dimid <- varlist
}
}
else{
stop("Invalid rate table")
}
varsexID <- try(which(varlist == 'sex'))
conditionVsex <- attr(ratetable, which = "dimnames")[[varsexID]]
if (any(!conditionVsex %in% c('male', 'female'))) {
conditionVsex <- c('male', 'female')[c(which(conditionVsex %in% c('male', 'female')))]
}
if (!missing(rmap)) {
if (!splitting & missing(rcall)) {
rcall <- substitute(rmap)
} else if (!splitting & !missing(rcall)) {
rmap <- eval(rmap)
}
if (!is.call(rcall) || rcall[[1]] != as.name("list"))
stop("Invalid rcall argument")
}
else
rcall <- NULL
temp01 <- match(names(rcall)[-1], varlist)
if (any(is.na(temp01)))
stop("Variable not found in the ratetable:",
(names(rcall))[is.na(temp01)])
temp02 <- match(as.vector(unlist(rmap)), names(data))
if (any(is.na(temp02))) {
stop("Variable not found in the data set:",
(names(rcall))[is.na(temp02)])
}
}
if (pophaz == "corrected") {
if (is.null(add.rmap.cut$breakpoint)) {
stop("Missing breakpoint information")
} else {
if (add.rmap.cut$breakpoint == TRUE) {
if (!is.na(add.rmap.cut$cut[1])) {
if (min(add.rmap.cut$cut) < min(c(data$age, data$age + data$time))) {
if (max(add.rmap.cut$cut) <= max(c(data$age, data$age + data$time))) {
stop("Breakpoint(s) is (are) smaller than the minimum age")
} else
stop(
"Breakpoint(s) is (are) smaller than the minimum age and breakpoint(s) greater than the maximum age"
)
} else{
if (max(add.rmap.cut$cut) > max(c(data$age, data$age + data$time)))
stop("Breakpoint(s) is (are) greater than the maximum age")
}
}
}
}
}
if (control$iter.max < 0)
stop("Invalid value for iterations.")
if (control$eps <= 0)
stop("Invalid convergence criteria.")
if (control$level < 0 | control$level > 1)
stop("Invalid value for the level of confidence interval.")
if (missing(init))
init <- NULL
if (missing(interval))
stop("Missing cutpoints definition for intervals.")
if (!is.numeric(interval))
stop("Wrong values for intervals. Must be numeric.")
if (min(interval, na.rm = TRUE) != 0)
stop("First interval must start at 0.")
if (sum((interval < 0) * 1, na.rm = TRUE) > 0)
stop("Negative value is not allowed for interval.")
myvarnames <- colnames( model.matrix(Terms, m)[,-1, drop = FALSE])
qbs_id <- which(stringr::str_detect(c(myvarnames),
pattern = "qbs"))
if (length(qbs_id) > 0) {
if (length(interval) > 4)
stop(
"Interval must have 4 values using bsplines
(2 internal knots plus '0' and the end of the study)."
)
}else{
if (baseline == "bsplines") {
if (length(interval) > 4)
stop(
"Interval must have 4 values using bsplines
(2 internal knots plus '0' and the end of the study)."
)
}
}
Y <- model.extract(m, "response")
if (!inherits(Y, "Surv"))
stop("Response must be a survival object.")
strats <- attr(Terms, "specials")$strata
dropx <- NULL
if (length(strats)) {
if (length(qbs_id) > 0)
stop("Strata function is not yet implemented for the B-splines model.")
temp <- untangle.specials(Terms, "strata", 1)
dropx <- c(dropx, temp$terms)
if (length(temp$vars) == 1)
strata.keep <- m[[temp$vars]]
else
strata.keep <- strata(m[, temp$vars], shortlabel = TRUE)
strats <- as.numeric(strata.keep)
attr(Terms, "nstrata") <- max(strats)
}
attr(Terms, "intercept") <- 1
if (length(dropx)) {
X <- model.matrix(Terms[-dropx], m)[, -1, drop = FALSE]
} else{
X <- model.matrix(Terms, m)[, -1, drop = FALSE]
}
if (length(qbs_id) > 0) {
z_bsplines <- as.data.frame(
model.matrix(Terms, m)[,-1, drop = FALSE][, c(qbs_id)])
z_bsplines_names <- stringr::str_remove(myvarnames[c(qbs_id)],
"qbs")
colnames(z_bsplines) <- gsub("\\(|\\)",
"",
as.character(z_bsplines_names))
colnames(X)[c(qbs_id)] <- colnames(z_bsplines)
z_bsplines <- as.matrix(z_bsplines)
z_bsplines_vect <- rep(TRUE, ncol(z_bsplines))
z_X_vect <- rep(FALSE, ncol(X))
z_X_vect[c(qbs_id)] <- z_bsplines_vect
}
type <- attr(Y, "type")
###If there is a time-dependent covariate
if (ncol(Y) == 2) {
time <- Y[, 1]
event <- Y[, 2]
} else{
time <- Y[, 2] - Y[, 1]
event <- Y[, 3]
}
event[time > max(interval, na.rm = TRUE)] <- 0
time[time > max(interval, na.rm = TRUE)] <- max(interval, na.rm = TRUE)
if (length(qbs_id) > 0) {
Y[, 1] <- time
}
ageDiag <- data[, rmap$age]
ageDC <- ageDiag + time
return(list(X = X,
Y = Y,
time = time,
event = event,
ageDC = ageDC,
ageDiag = ageDiag))
}
| /scratch/gouwar.j/cran-all/cranData/xhaz/R/xhaz_split.R |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(xhaz)
## -----------------------------------------------------------------------------
library(xhaz)
## -----------------------------------------------------------------------------
data("simuData", package = "xhaz")
head(simuData)
dim(simuData)
levels(simuData$sex) <- c("male", "female")
interval <- c(0, 0.718, 1.351, 2.143, 3.601, 6)
fit.estv1 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
data = simuData,
ratetable = survexp.us,
interval = interval,
rmap = list(age = 'age', sex = 'sex', year = 'date'),
baseline = c("constant"),
pophaz = "classic")
fit.estv1
## -----------------------------------------------------------------------------
fit.corrected1 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
data = simuData,
ratetable = survexp.us,
interval = interval,
rmap = list(age = 'age', sex = 'sex', year = 'date'),
baseline = "constant", pophaz = "corrected",
add.rmap = "race")
fit.corrected1
## -----------------------------------------------------------------------------
# An additional covariate (here race) missing from the life table is
# taken into account by the model, with a breakpoint at 75 years
fit.corrected2 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
data = simuData,
ratetable = survexp.us,
interval = interval,
rmap = list(age = 'age', sex = 'sex', year = 'date'),
baseline = "constant", pophaz = "corrected",
add.rmap = "race",
add.rmap.cut = list(breakpoint = TRUE, cut = 75))
fit.corrected2
## -----------------------------------------------------------------------------
AIC(fit.estv1)
AIC(fit.corrected1)
BIC(fit.estv1)
BIC(fit.corrected1)
## -----------------------------------------------------------------------------
anova(fit.corrected1, fit.corrected2)
## ---- fig.width=10, fig.height=10---------------------------------------------
#only add the Surv variables (time_year and status) so that they appear in new.data;
#they are not used for the prediction
simuData[1,]
newdata1 <-
expand.grid(
race = factor("black",
levels = levels(simuData$race)),
agec = simuData[1, "agec"], #i.e age 50.5 years
time_year = 0,
status = 0
)
predict_mod1 <- predict(object = fit.estv1,
new.data = newdata1,
times.pts = c(seq(0, 6, 0.1)),
baseline = FALSE)
predict_mod2 <- predict(object = fit.corrected1,
new.data = newdata1,
times.pts = c(seq(0, 6, 0.1)),
baseline = FALSE)
predict_mod3 <- predict(object = fit.corrected2,
new.data = newdata1,
times.pts = c(seq(0, 6, 0.1)),
baseline = FALSE)
old.par <- par(no.readonly = TRUE)
par(mfrow = c(2, 1))
plot(
predict_mod1,
what = "survival",
xlab = "time since diagnosis (year)",
ylab = "net survival",
ylim = c(0, 1),
main = "Estève Model"
)
par(new = TRUE)
plot(
predict_mod2,
what = "survival",
xlab = "",
ylab = "",
ylim = c(0, 1),
lty = 2,
lwd = 2# main = "Touraine Model"
)
par(new = TRUE)
plot(
predict_mod3,
what = "survival",
xlab = "",
ylab = "",
ylim = c(0, 1),
lty = 3,
lwd = 3#main = "Mba Model"
)
legend(
"bottomleft",
legend = c("Esteve Model",
"Touraine Model",
"Mba Model"),
lty = c(1, 2 , 3),
lwd = c(1, 2 , 3)
)
plot(
predict_mod1,
what = "hazard",
xlab = "time since diagnosis (year)",
ylab = "excess hazard",
ylim = c(0, 0.30),
lty = 1
)
par(new = TRUE)
plot(
predict_mod2,
what = "hazard",
xlab = "",
ylab = "",
ylim = c(0, 0.30),
lty = 2,
lwd = 2
)
par(new = TRUE)
plot(
predict_mod3,
what = "hazard",
xlab = "",
ylab = "",
ylim = c(0, 0.30),
lty = 3,
lwd = 3
)
legend(
"topright",
legend = c("Esteve Model",
"Touraine Model",
"Mba Model"),
lty = c(1, 2 , 3),
lwd = c(1, 2 , 3)
)
par(old.par)
## ---- fig.width=10, fig.height=10---------------------------------------------
#the Surv variables (time_year and status) are not used for the prediction;
#here we predict the marginal net survival and excess hazard
#over all the individuals of simuData
predmar_mod1 <- predict(object = fit.estv1,
new.data = simuData,
times.pts = c(seq(0, 6, 0.1)),
baseline = FALSE)
predmar_mod2 <- predict(object = fit.corrected1,
new.data = simuData,
times.pts = c(seq(0, 6, 0.1)),
baseline = FALSE)
predmar_mod3 <- predict(object = fit.corrected2,
new.data = simuData,
times.pts = c(seq(0, 6, 0.1)),
baseline = FALSE)
par(mfrow = c(2, 1))
plot(
predmar_mod1,
what = "survival",
xlab = "time since diagnosis (year)",
ylab = "net survival",
ylim = c(0, 1),
main = "Estève Model"
)
par(new = TRUE)
plot(
predmar_mod2,
what = "survival",
xlab = "",
ylab = "",
ylim = c(0, 1),
lty = 2,
lwd = 2# main = "Touraine Model"
)
par(new = TRUE)
plot(
predmar_mod3,
what = "survival",
xlab = "",
ylab = "",
ylim = c(0, 1),
lty = 3,
lwd = 3#main = "Mba Model"
)
legend(
"bottomleft",
legend = c("Esteve Model",
"Touraine Model",
"Mba Model"),
lty = c(1, 2 , 3),
lwd = c(1, 2 , 3)
)
plot(
predmar_mod1,
what = "hazard",
xlab = "time since diagnosis (year)",
ylab = "excess hazard",
ylim = c(0, 0.30),
lty = 1
)
par(new = TRUE)
plot(
predmar_mod2,
what = "hazard",
xlab = "",
ylab = "",
ylim = c(0, 0.30),
lty = 2,
lwd = 2
)
par(new = TRUE)
plot(
predmar_mod3,
what = "hazard",
xlab = "",
ylab = "",
ylim = c(0, 0.30),
lty = 3,
lwd = 3
)
legend(
"topright",
legend = c("Esteve Model",
"Touraine Model",
"Mba Model"),
lty = c(1, 2 , 3),
lwd = c(1, 2 , 3)
)
par(old.par)
| /scratch/gouwar.j/cran-all/cranData/xhaz/inst/doc/introduction.R |
---
title: "Introduction to Excess Hazard Modelling Considering Inappropriate Mortality Rates"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{introduction}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(xhaz)
```
# Introduction
xhaz is an R package to fit excess hazard models, with or without the assumption of proportional population hazards.
The baseline excess hazard can be a piecewise constant function or a B-spline function.
When B-splines are chosen for the baseline excess hazard, the user can specify covariates which have
a time-dependent effect (using "bsplines") on the baseline excess hazard.
The user can also specify whether the framework corresponds to classical excess hazard modeling,
i.e. assuming that the expected mortality of the studied individuals is appropriate.
Two other frameworks can also be considered: first, the expected mortality available in the life table is
not accurate and requires taking into account an additional variable in the life table with a proportional [Touraine et al. (2020)](https://pubmed.ncbi.nlm.nih.gov/30674229/) or non-proportional [Mba et al. (2020)](https://pubmed.ncbi.nlm.nih.gov/33121436/) effect;
second, there is a non-comparability source of bias in terms of the expected mortality of individuals selected in clinical trials [Goungounga et al. (2019)](https://pubmed.ncbi.nlm.nih.gov/31096911/).
*Here's the abstract from the Touraine et al. paper:* Relative survival methods used to estimate the excess mortality of cancer patients rely on the background (or expected) mortality derived from general population life tables. These methods are based on splitting the observed mortality into the excess mortality and the background mortality. By assuming a regression model for the excess mortality, usually a Cox-type model, one may investigate the effects of certain covariates on the excess mortality. Some covariates are cancer-specific whereas others are variables that may influence the background mortality as well. The latter should be taken into account in the background mortality to avoid biases in estimating their effects on the excess mortality. Unfortunately, the available life table might not include such variables and, consequently, might provide inaccurate values of the background mortality. We propose a model that uses multiplicative parameters to correct potentially inaccurate background mortality. The model can be seen as an extension of the frequently used Esteve model because we assume a Cox-type model for the excess mortality with a piecewise constant baseline function and introduce additional parameters that multiply the background mortality. The original and the extended model are compared, first in a simulation study, then in an application to colon cancer registry data.
A related software package can be found on a GitLab webpage or at https://CRAN.R-project.org/package=xhaz.
## Installation
The most recent version of `xhaz` can be installed directly from the cran repository using
```
install.packages("xhaz")
```
`xhaz` depends on the `stats`, `survival`, `optimParallel`, `numDeriv`, `statmod`, `gtools` and `splines` packages which can be installed directly from CRAN.
It also utilizes `survexp.fr`, the R package containing the French life table. For example, to install `survexp.fr` follow the instructions available at the [RStudio page on R and survexp.fr](https://cran.r-project.org/package=survexp.fr).
First, install the R package via GitHub.
```
devtools::install_github("rstudio/survexp.fr")
```
Then, when these other packages are installed, please load the xhaz R package.
```{r}
library(xhaz)
```
### Fitting a classical excess hazard model with a piecewise constant baseline hazard
We illustrate the Esteve model using a simulated dataset from the original Touraine et al. (2020) paper. This dataset comprises 2,000 patients with information on their race, as this variable can impact the patients' background mortality. The US life table can be used for the estimation of the model parameters.
```{r}
data("simuData", package = "xhaz")
head(simuData)
dim(simuData)
levels(simuData$sex) <- c("male", "female")
interval <- c(0, 0.718, 1.351, 2.143, 3.601, 6)
fit.estv1 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
data = simuData,
ratetable = survexp.us,
interval = interval,
rmap = list(age = 'age', sex = 'sex', year = 'date'),
baseline = c("constant"),
pophaz = "classic")
fit.estv1
```
### Fitting an excess hazard model with a piecewise constant baseline hazard and background mortality corrected with a proportional effect of the race variable
The new parameter to be added to the xhaz() function is "add.rmap": it allows the user to specify the additional variable of the life table needed for the estimation of the excess hazard parameters. This model corresponds to the one proposed by Touraine et al. (2020).
```{r}
fit.corrected1 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
data = simuData,
ratetable = survexp.us,
interval = interval,
rmap = list(age = 'age', sex = 'sex', year = 'date'),
baseline = "constant", pophaz = "corrected",
add.rmap = "race")
fit.corrected1
```
### Fitting an excess hazard model with a piecewise constant baseline hazard and background mortality corrected with a non-proportional effect of the race variable
The new parameter to be added to the xhaz() function is "add.rmap.cut": it further allows the user to specify that the additional variable has a non-proportional effect on the background mortality. This excess hazard model corresponds to the one proposed by Mba et al. (2020).
```{r}
# An additional covariate (here race) missing from the life table is
# taken into account by the model, with a breakpoint at 75 years
fit.corrected2 <- xhaz(formula = Surv(time_year, status) ~ agec + race,
data = simuData,
ratetable = survexp.us,
interval = interval,
rmap = list(age = 'age', sex = 'sex', year = 'date'),
baseline = "constant", pophaz = "corrected",
add.rmap = "race",
add.rmap.cut = list(breakpoint = TRUE, cut = 75))
fit.corrected2
```
We can compare the fits of these models using the AIC or BIC criteria.
```{r}
AIC(fit.estv1)
AIC(fit.corrected1)
BIC(fit.estv1)
BIC(fit.corrected1)
```
A statistical comparison between two nested models can be performed with a likelihood ratio test (LRT) computed by the anova method implemented in xhaz.
As an example, say that we want to test whether we can drop all the complex terms in the Mba model compared to the Touraine model.
As in the survival package, we compare the two models using anova(), i.e.,
```{r}
anova(fit.corrected1, fit.corrected2)
```
Note that the user is responsible for supplying appropriately nested excess hazard models so that the LRT is valid.
The result suggests that we could use the Mba model with non-proportional population hazards, correcting the life table with additional stratification on the variable "race".
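For completeness, the third framework (a rescaled expected mortality, addressing the non-comparability bias in clinical trials studied by Goungounga et al. (2019)) is requested with `pophaz = "rescaled"`. The following sketch is adapted from the examples of `?xhaz`; it assumes the `rescaledData` dataset shipped with the package and the French life table `survexp.fr`.
```
rescaledData$timeyear <- rescaledData$time/12
rescaledData$agecr <- scale(rescaledData$age, TRUE, TRUE)
fit.res <- xhaz(formula = Surv(timeyear, status) ~ agecr + hormTh,
                data = rescaledData,
                ratetable = survexp.fr,
                interval = c(0, NA, NA, NA, NA, NA, max(rescaledData$timeyear)),
                rmap = list(age = 'age', sex = 'sex', year = 'date'),
                baseline = "constant", pophaz = "rescaled")
fit.res
```
No additional `add.rmap` variable is needed here: the model instead estimates a rescaling parameter that multiplies the expected mortality of the general population.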
### Plot of net survival and excess hazard for different models
One may be interested in the prediction of the net survival and excess hazard for an individual with the same characteristics as individual 1 of simuData (age 50.5, race black).
```{r, fig.width=10, fig.height=10}
#only add the Surv variables (time_year and status) so that they appear in new.data;
#they are not used for the prediction
simuData[1,]
newdata1 <-
expand.grid(
race = factor("black",
levels = levels(simuData$race)),
agec = simuData[1, "agec"], #i.e age 50.5 years
time_year = 0,
status = 0
)
predict_mod1 <- predict(object = fit.estv1,
new.data = newdata1,
times.pts = c(seq(0, 6, 0.1)),
baseline = FALSE)
predict_mod2 <- predict(object = fit.corrected1,
new.data = newdata1,
times.pts = c(seq(0, 6, 0.1)),
baseline = FALSE)
predict_mod3 <- predict(object = fit.corrected2,
new.data = newdata1,
times.pts = c(seq(0, 6, 0.1)),
baseline = FALSE)
old.par <- par(no.readonly = TRUE)
par(mfrow = c(2, 1))
plot(
predict_mod1,
what = "survival",
xlab = "time since diagnosis (year)",
ylab = "net survival",
ylim = c(0, 1),
main = "Estève Model"
)
par(new = TRUE)
plot(
predict_mod2,
what = "survival",
xlab = "",
ylab = "",
ylim = c(0, 1),
lty = 2,
lwd = 2# main = "Touraine Model"
)
par(new = TRUE)
plot(
predict_mod3,
what = "survival",
xlab = "",
ylab = "",
ylim = c(0, 1),
lty = 3,
lwd = 3#main = "Mba Model"
)
legend(
"bottomleft",
legend = c("Esteve Model",
"Touraine Model",
"Mba Model"),
lty = c(1, 2 , 3),
lwd = c(1, 2 , 3)
)
plot(
predict_mod1,
what = "hazard",
xlab = "time since diagnosis (year)",
ylab = "excess hazard",
ylim = c(0, 0.30),
lty = 1
)
par(new = TRUE)
plot(
predict_mod2,
what = "hazard",
xlab = "",
ylab = "",
ylim = c(0, 0.30),
lty = 2,
lwd = 2
)
par(new = TRUE)
plot(
predict_mod3,
what = "hazard",
xlab = "",
ylab = "",
ylim = c(0, 0.30),
lty = 3,
lwd = 3
)
legend(
"topright",
legend = c("Esteve Model",
"Touraine Model",
"Mba Model"),
lty = c(1, 2 , 3),
lwd = c(1, 2 , 3)
)
par(old.par)
```
One may also be interested in the prediction of the marginal net survival and marginal excess hazard for individuals with the same characteristics as those observed in simuData.
```{r, fig.width=10, fig.height=10}
#the Surv variables (time_year and status) are not used for the prediction;
#here we predict the marginal net survival and excess hazard
#over all the individuals of simuData
predmar_mod1 <- predict(object = fit.estv1,
new.data = simuData,
times.pts = c(seq(0, 6, 0.1)),
baseline = FALSE)
predmar_mod2 <- predict(object = fit.corrected1,
new.data = simuData,
times.pts = c(seq(0, 6, 0.1)),
baseline = FALSE)
predmar_mod3 <- predict(object = fit.corrected2,
new.data = simuData,
times.pts = c(seq(0, 6, 0.1)),
baseline = FALSE)
par(mfrow = c(2, 1))
plot(
predmar_mod1,
what = "survival",
xlab = "time since diagnosis (year)",
ylab = "net survival",
ylim = c(0, 1),
main = "Estève Model"
)
par(new = TRUE)
plot(
predmar_mod2,
what = "survival",
xlab = "",
ylab = "",
ylim = c(0, 1),
lty = 2,
lwd = 2# main = "Touraine Model"
)
par(new = TRUE)
plot(
predmar_mod3,
what = "survival",
xlab = "",
ylab = "",
ylim = c(0, 1),
lty = 3,
lwd = 3#main = "Mba Model"
)
legend(
"bottomleft",
legend = c("Esteve Model",
"Touraine Model",
"Mba Model"),
lty = c(1, 2 , 3),
lwd = c(1, 2 , 3)
)
plot(
predmar_mod1,
what = "hazard",
xlab = "time since diagnosis (year)",
ylab = "excess hazard",
ylim = c(0, 0.30),
lty = 1
)
par(new = TRUE)
plot(
predmar_mod2,
what = "hazard",
xlab = "",
ylab = "",
ylim = c(0, 0.30),
lty = 2,
lwd = 2
)
par(new = TRUE)
plot(
predmar_mod3,
what = "hazard",
xlab = "",
ylab = "",
ylim = c(0, 0.30),
lty = 3,
lwd = 3
)
legend(
"topright",
legend = c("Esteve Model",
"Touraine Model",
"Mba Model"),
lty = c(1, 2 , 3),
lwd = c(1, 2 , 3)
)
par(old.par)
```
## License
GPL 3.0, for academic use.
## Acknowledgments
We are grateful to the members of the CENSUR Survival Group for their helpful comments.
| /scratch/gouwar.j/cran-all/cranData/xhaz/inst/doc/introduction.Rmd |
## Emilio Torres Manzanera
## University of Oviedo
## Time-stamp: <2018-05-23 17:38 emilio on emilio-despacho>
## ============================================================
##' Creates an XKCD theme
##'
##' This function creates an XKCD theme
##'
##' @return A layer with the theme.
##' @import ggplot2
##' @import extrafont
##' @note
##' See the vignette \code{vignette("xkcd-intro")}
##' @export
##' @examples
##' \dontrun{
##' p <- ggplot() + geom_point(aes(mpg, wt), data=mtcars) +
##' theme_xkcd()
##' p
##' }
theme_xkcd <- function(){
    if( "xkcd" %in% extrafont::fonts() ) {
        textfont <- element_text(size = 16, family = "xkcd")
    } else {
        warning("xkcd fonts are not installed! See vignette(\"xkcd-intro\")")
        textfont <- element_text(size = 16)
    }
    theme(panel.grid.major = element_blank(),
          ##axis.ticks = element_blank(),
          axis.ticks = element_line(colour = "black"),
          panel.background = element_blank(),
          panel.grid.minor = element_blank(),
          legend.key = element_blank(),
          strip.background = element_blank(),
          text = textfont)
}
| /scratch/gouwar.j/cran-all/cranData/xkcd/R/theme_xkcd.R |
## Emilio Torres Manzanera
## University of Oviedo
## Time-stamp: <2018-05-23 17:37 emilio on emilio-despacho>
## ============================================================
##' Plot the axis
##'
##' This function plots the axis
##'
##' It plots the axis of the graph.
##'
##' @param xrange The range of the X axe.
##' @param yrange The range of the Y axe.
##' @param ... Other arguments.
##' @return A layer with the axis.
##' @import ggplot2
##' @import extrafont
##' @importFrom Hmisc bezier
##' @importFrom stats runif
##' @export
##' @examples
##' \dontrun{
##' xrange <- range(mtcars$mpg)
##' yrange <- range(mtcars$wt)
##' p <- ggplot() +
##' geom_point(aes(mpg, wt), data=mtcars) +
##' xkcdaxis(xrange,yrange)
##' p
##' }
xkcdaxis <- function(xrange, yrange, ...) {
if( is.null(xrange) | is.null(yrange) )
stop("Arguments are: xrange, yrange")
xjitteramount <- diff(xrange)/50
yjitteramount <- diff(yrange)/50
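    ## Jitter amounts are 2% of each axis range: they are used both to wiggle
    ## the axis segments and to pad the plotting region around the data.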
## This cause R CMD check to give the note
## 'no visible binding for global variable'
## Notes do not forbbiden the submission
## I will follow this suggestion:
## http://stackoverflow.com/questions/9439256/how-can-i-handle-r-cmd-check-no-visible-binding-for-global-variable-notes-when
##mappingsegment <- aes(x=x,y=y,xend=xend,yend=yend) ## Put it within a with!!!
## dataaxex <- data.frame(xbegin=xrange[1]-xjitteramount,
## ybegin=yrange[1]-yjitteramount,
## xend=xrange[2]+xjitteramount,
## yend=yrange[1]-yjitteramount)
## mappingsegment <- with(dataaxex, aes(xbegin=xbegin,ybegin=ybegin,xend=xend,yend=yend))
dataaxex <- data.frame(x=xrange[1]-xjitteramount,
y=yrange[1]-yjitteramount,
xend=xrange[2]+xjitteramount,
yend=yrange[1]-yjitteramount)
mappingsegment <- with(dataaxex, aes(x=x,y=y,xend=xend,yend=yend))
axex <- xkcdline(mappingsegment, dataaxex, yjitteramount = yjitteramount, mask = FALSE, ... )
dataaxey <- data.frame(x=xrange[1]-xjitteramount,
y=yrange[1]-yjitteramount,
xend=xrange[1]-xjitteramount,
yend=yrange[2]+yjitteramount)
mappingsegment <- with(dataaxey, aes(x=x,y=y,xend=xend,yend=yend))
axey <- xkcdline(mappingsegment, dataaxey, xjitteramount = xjitteramount, mask = FALSE, ... )
coordcarte <- coord_cartesian(xlim = xrange + 1.5*c(-xjitteramount,xjitteramount),
ylim = yrange + 1.5*c(-yjitteramount,yjitteramount))
list(c(axex,axey), coordcarte,theme_xkcd())
}
## Therefore, we get a data frame with the default names of the mapping
## and a new mapping with the names by default
## For instance
## mapping <- aes(x= x1 +y1, y = y1) -> mapping <- aes(x= x, y = y)
## data[ , c("x1","y1","color")] -> data[, c("x","y","x1","y1","color")]
createdefaultmappinganddata <- function(mapping, data, mandatoryarguments =c("x","y")) {
## Check the names of the aes
nm <- names(mapping)
positionswithoutname <- (1:length(nm))[nm==""]
failsthisarguments <- mandatoryarguments[ !(mandatoryarguments %in% nm) ]
if(length(failsthisarguments) != length(positionswithoutname))
stop(paste("Argumenst of aes are ", paste(mandatoryarguments, collapse=", "),".",sep=""))
names(mapping)[positionswithoutname] <- failsthisarguments
## New names
namesmapping <- names(mapping)
## Create a new data
## For each name of the mapping, evaluate it and create a new data base
## with the names of the mapping.
## dataaes <- as.data.frame(lapply(mapping, function(xnamedataxkcdveryrare.327) with(data, eval(xnamedataxkcdveryrare.327)))) # ggplot2 version <=2.2.1
dataaes <- as.data.frame(lapply(mapping,
function(xnamedataxkcdveryrare.327)
with(data,
with(data,
eval(parse(text=quo_name(xnamedataxkcdveryrare.327)))))))
## Add the rest of variables of the data base
variablestocbind <- names(data)[!(names(data) %in% namesmapping)]
dataaes[, variablestocbind] <- data[,variablestocbind]
## Now, it creates a new mapping with the default variables x=x, y=x, yend=yend, and so on.
## See the definition of the function ggplot2::aes_string
parsed <- lapply(namesmapping, function(x) parse(text = x)[[1]])
names(parsed) <- namesmapping
newmapping <- structure(parsed, class = "uneval")
list(mapping = newmapping, data = dataaes)
}
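## A minimal sketch of the transformation (hypothetical input):
##   m <- aes(x = x1 + y1, y = y1)
##   d <- data.frame(x1 = 1:2, y1 = 3:4, color = c("a", "b"))
##   createdefaultmappinganddata(m, d)
## returns $mapping as aes(x = x, y = y) and $data with the evaluated
## columns x and y plus the remaining columns x1, y1 and color.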
## Apply a FUN to each row of the DATA
## If an argument of the FUN appears both in the DATA and in the ELLIPSIS,
## only the DATA variable is used
##
## If doitalsoforoptargs = TRUE, then try to get the row of the ELLIPSIS variable
## when applying the function FUN to each row of the DATA.
## Otherwise, use the original ELLIPSIS variable
## when calling the function FUN
doforeachrow <- function(data, fun, doitalsoforoptargs, ...) {
    ## Do not pass the variables of the ELLIPSIS
    ## that are already in the DATA
argList <- list(...)
for( i in intersect(names(data), names(argList) ) )
argList[i] <- NULL
if(doitalsoforoptargs) {
        ## If an ELLIPSIS variable has length one or the same length as the
        ## data, copy it into the data and delete it from the ELLIPSIS
for( i in names(argList) ) {
            if(!is.null(argList[[i]])){
if(length(argList[[i]]) == 1
| length(argList[[i]]) == dim(data)[1] ) {
data[,i] <- argList[[i]]
argList[i] <- NULL
}
}
}
}
##print(data)
## Now, apply for each row the FUN
lapply(1:(dim(data)[1]),
function(i, data, fun, argList) {
largstopass <- as.list(data[i,,])
mylistofargs <- c(largstopass, argList)
## Arguments of the function?
fcn <- get(fun, mode = "function")
argsfcntt <- names(formals(fcn))
if( "..." %in% argsfcntt ) do.call(fun, mylistofargs)
else { ## we can only pass the arguments of the function
for( i in names(mylistofargs)[ !(names(mylistofargs) %in% argsfcntt )])
mylistofargs[i] <- NULL
do.call(fun, mylistofargs)
}
},
data = data,
fun = fun,
argList = unlist(argList)
)
}
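## A minimal sketch of a row-wise call (hypothetical data; pointssegment is
## defined below in this file):
##   d <- data.frame(x = 0, y = 0, xend = 1, yend = 1)
##   doforeachrow(d, "pointssegment", doitalsoforoptargs = FALSE, npoints = 5)
## returns a list with one data frame of interpolated points per row of d.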
## ggplot version <= 2.2.1
mappingjoin <- function(x,y) {
nm1 <- names(x)
nm2 <- names(y)
for( i in intersect(nm1,nm2)) y[[i]] <- NULL
parsed <- lapply(c(x,y), function(x) parse(text = x)[[1]])
structure(parsed, class = "uneval")
}
mappingjoin2 <- function ( y)
{
y[["xend"]] <- NULL
y[["yend"]] <- NULL
y[["diameter"]] <- NULL
y[["scale"]] <- NULL
y[["ratioxy"]] <- NULL
y[["angleofspine"]] <- NULL
y[["anglerighthumerus"]] <- NULL
y[["anglelefthumerus"]] <- NULL
y[["anglerightradius"]] <- NULL
y[["angleleftradius"]] <- NULL
y[["anglerightleg"]] <- NULL
y[["angleleftleg"]] <- NULL
y[["angleofneck"]] <- NULL
y
}
pointscircunference <- function(x =0, y=0, diameter = 1, ratioxy=1, npoints = 16, alpha= runif(1, 0, pi/2)){
##require(Hmisc) # bezier
center <- c(x,y)
r <- rep( diameter / 2, npoints )
tt <- seq(alpha,2*pi + alpha,length.out = npoints)
r <- jitter(r)
sector <- tt > alpha & tt <= ( pi/ 2 + alpha)
r[ sector ] <- r[sector] * 1.05
sector <- tt > ( 2 * pi/2 + alpha) & tt < (3* pi/ 2 +alpha)
r[ sector ] <- r[sector] * 0.95
xx <- center[1] + r * cos(tt) * ratioxy
yy <- center[2] + r * sin(tt)
##return(data.frame(x = xx, y = yy))
return(data.frame(bezier(x = xx, y =yy,evaluation=60)))
}
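## A minimal sketch (output varies with the random jitter and start angle):
##   pointscircunference(x = 0, y = 0, diameter = 2, ratioxy = 1)
## returns a data frame of bezier-smoothed points tracing a hand-drawn circle.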
pointssegment <- function(x, y, xend, yend, npoints = 10, xjitteramount= 0, yjitteramount=0, bezier = TRUE) {
##require(Hmisc) # bezier
if(npoints < 2 )
stop("npoints must be greater than 1")
## If there are no jitters, do not interpolate
if( xjitteramount == 0 & yjitteramount == 0) npoints <- 2
xbegin <- x
ybegin <- y
x <- seq(xbegin,xend,length.out = npoints)
if( (xend - xbegin) != 0 ) {
y <- (yend - ybegin) * ( x - xbegin ) / (xend - xbegin) + ybegin
} else {
y <- seq(ybegin, yend, length.out = npoints)
}
if(xjitteramount !=0) x <- jitter(x, amount=xjitteramount)
if(yjitteramount !=0) y <- jitter(y, amount=yjitteramount)
x[1] <- xbegin
y[1] <- ybegin
x[length(x)] <- xend
y[length(y)] <- yend
if(bezier & length(x)>2 & (xjitteramount != 0 | yjitteramount != 0)) {
data <- data.frame(bezier(x=x, y=y, evaluation=30))
}
else data <- data.frame(x=x,y=y)
data
}
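## A minimal sketch (output varies with the random jitter):
##   pointssegment(x = 0, y = 0, xend = 1, yend = 1, npoints = 5,
##                 xjitteramount = 0.02)
## returns a data frame of bezier-smoothed points from (0, 0) to (1, 1);
## with no jitter it returns just the two endpoints.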
| /scratch/gouwar.j/cran-all/cranData/xkcd/R/xkcdaxis.R |
## Emilio Torres Manzanera
## University of Oviedo
## Time-stamp: <2018-05-23 12:15 emilio on emilio-despacho>
## ============================================================
##' It draws a handwritten line.
##'
##' This function draws handwritten lines or circles.
##'
##' It draws a segment or a circunference in an XKCD style.
##'
##' If it is a segment, the following aesthetics are required:
##' \enumerate{
##'   \item x: x position of the starting point.
##'   \item y: y position of the starting point.
##'   \item xend: x position of the end point.
##'   \item yend: y position of the end point.
##' }
##'
##' If it is a circunference, the following aesthetics are required:
##' \enumerate{
##' \item x: x position of the center.
##' \item y: y position of the center.
##' \item diameter: diameter of the circunference.
##' }
##'
##' Additionally, you can use the aesthetics of \code{\link[ggplot2]{geom_path}}.
##'
##' @title Draw lines or circunferences
##' @param mapping Mapping between variables and aesthetics generated by \code{\link[ggplot2]{aes}}. See Details.
##' @param data Dataset used in this layer.
##' @param typexkcdline A string value. If it is \code{segment}, it draws a segment. If it is \code{circunference}, it plots a circunference.
##' @param mask Logical. If it is TRUE, it erases the pictures that are under the line.
##' @param ... Optional arguments.
##' @return A layer.
##' @seealso \code{\link[ggplot2]{aes}}, \code{\link[ggplot2]{geom_path}}
##' @keywords manip
##' @import ggplot2
##' @export
##' @examples
##' data <- data.frame(x1=c(1,2), y1=c(10,20), xend=c(2.5,0.5),
##' yend=c(20,10), model=c("low","high"))
##'
##' ggplot() + xkcdline(mapping=aes(x=x1 +y1, y=y1, xend =xend, yend= yend,
##' color = model), data=data)
##'
##' ggplot() + xkcdline(mapping=aes(x=x1 +y1, y=y1, xend =xend, yend= yend,
##' color = model), data=data) + facet_grid(. ~ model)
##'
##' ggplot() + xkcdline(mapping=aes(x=x1 +y1, y=y1, diameter =xend), data=data, type="circunference")
xkcdline <- function(mapping, data, typexkcdline="segment", mask = TRUE, ...) {
if(typexkcdline == "segment" ){
fun <- "pointssegment"
## Required variable in the aesthetics function for segment
requiredaesthetics <- c("x","y","xend","yend")
} else if(typexkcdline == "circunference" ) {
fun <- "pointscircunference"
requiredaesthetics <- c("x","y","diameter")
} else stop("typexkcdline must be segment or circle")
## We transform the data to get a default mapping
    segmentmapdat <- createdefaultmappinganddata(mapping, data, requiredaesthetics)
    data <- segmentmapdat$data
    mapping <- segmentmapdat$mapping
nsegments <- dim(data)[1]
    ## Are arguments of fun in the ellipsis?
    ## If so, try to add them to the data
datafun <- data
argList<-list(...)
fcn <- get(fun, mode = "function")
argsfcntt <- names(formals(fcn))
argsfcn <- argsfcntt[ argsfcntt != "..."]
for( i in intersect(argsfcn, names(argList))) {
        if(!is.null(argList[[i]])){
if(length(argList[[i]]) == 1 ) datafun[, i] <- unlist(rep(argList[[i]],nsegments))
if(length(argList[[i]]) == nsegments ) datafun[, i] <- argList[[i]]
}
}
## Now, calculate the interpolates for each segment
listofinterpolates <- doforeachrow(datafun, fun, FALSE, ...)
listofinterpolateswithillustrativedata <- lapply(1:nsegments,
function(i) {
dti <- listofinterpolates[[i]]
illustrativevariables <- names(datafun)[ ! names(datafun) %in% names(dti) ]
dti[, illustrativevariables] <- datafun[i, illustrativevariables]
dti}
)
##print(listofinterpolateswithillustrativedata)
## ggplot version <= 2.2.1
if(typexkcdline == "segment" ){
## the mapping is xbegin, ybegin,...
## but we need x,y [functions pointssegment returns x,y and geom_path requires x,y]
## mapping <- mappingjoin(aes(x=x,y=y), mapping) # R CMD check gives NOTES
## mapping <- with(data, mappingjoin(aes(x=x,y=y), mapping)) ## ggplot version <= 2.2.1
}
mapping <- with(data, mappingjoin2(mapping))
listofpaths <- lapply(listofinterpolateswithillustrativedata,
function(x, mapping, mask, ...) {
pathmask <- NULL
##print(mapping)
if(mask) {
                                  ## Plot a white line wider than the original line
## We must check the color, colour or size
## and change them to white and a greater width
argList<-list(...)
for(i in intersect(c("color","colour"), names(argList)))
argList[i] <- NULL
argList$mapping <- mapping
argList$data <- x
argList$colour <- "white"
                                  if(is.null(argList$size)) argList$size <- 3
                                  if(argList$size <= 3) {
                                      argList$size <- 3
                                  } else {
                                      argList$size <- argList$size * 2
                                  }
##print(argList)
##pathmask <- do.call("geom_path",argList)
pathmask <- do.call("geom_path",argList) # ggplot 2 version <= 2.2.1
##pathmask <- geom_path(mapping = mapping, data = x, colour="white",size=8)
}
## c(pathmask,
## geom_path(mapping = mapping, data = x, ...)) # ggplot2 version <= 2.2.1
c(pathmask,
geom_path(mapping = mapping, data = x, ...))
},
mapping = mapping,
mask= mask
## mask = mask,
## ... = ... ggplot2.0 does not like dots
)
listofpaths
}
| /scratch/gouwar.j/cran-all/cranData/xkcd/R/xkcdline.R |
## Emilio Torres Manzanera
## University of Oviedo
## Time-stamp: <2018-05-23 12:36 emilio on emilio-despacho>
## ============================================================
##' It draws a stick figure
##'
##' This function draws a stick figure.
##'
##' The following aesthetics are required:
##'\enumerate{
##' \item x: x position of the center of the head.
##' \item y: y position of the center of the head.
##' \item scale: scale of the man. It is the size of the man (in units of
##' the Y axis).
##' \item ratioxy: Ratio x to y of the graph (Use ratioxy <- diff(xrange) / diff(yrange))
##' \item angleofspine: angle between the spine and a horizontal line
##' that passes by the center of the head.
##' \item anglerighthumerus, anglelefthumerus: angle between the right/left humerus and a
##' horizontal line that passes by the top of the spine.
##' \item anglerightradius, angleleftradius: angle between the right/left radius and a
##' horizontal line that passes by the end of the right/left humerus.
##' \item anglerightleg, angleleftleg: angle between the right/left leg and a
##' horizontal line that passes by the end of the spine.
##' \item angleofneck: angle between the beginning of the spine and a horizontal
##' line that passes by the center of the head.
##'}
##'Angles are in radians.
##'
##'
##' Additionally, you can use the aesthetics of \code{\link[ggplot2]{geom_path}},
##' and \code{xkcdline}.
##'
##' @title Draw a stick figure
##' @param mapping Mapping between variables and aesthetics generated by \code{\link[ggplot2]{aes}}. See Details.
##' @param data Dataset used in this layer.
##' @param ... Optional arguments.
##' @return A layer.
##' @seealso \code{\link[ggplot2]{aes}}, \code{\link[ggplot2]{geom_path}}, \code{\link{xkcdline}}
##' @keywords manip
##' @import ggplot2
##' @export
##' @examples
##' datascaled <- data.frame(x=c(-3,3),y=c(-30,30))
##' p <- ggplot(data=datascaled, aes(x=x,y=y)) + geom_point()
##' xrange <- range(datascaled$x)
##' yrange <- range(datascaled$y)
##' ratioxy <- diff(xrange) / diff(yrange)
##'
##' mapping <- aes(x=x,
##' y=y,
##' scale=scale,
##' ratioxy=ratioxy,
##' angleofspine = angleofspine,
##' anglerighthumerus = anglerighthumerus,
##' anglelefthumerus = anglelefthumerus,
##' anglerightradius = anglerightradius,
##' angleleftradius = angleleftradius,
##' anglerightleg = anglerightleg,
##' angleleftleg = angleleftleg,
##' angleofneck = angleofneck,
##' color = color )
##'
##' dataman <- data.frame( x= c(-1,0,1), y=c(-10,0,10),
##' scale = c(10,7,5),
##' ratioxy = ratioxy,
##' angleofspine = seq(- pi / 2, -pi/2 + pi/8, l=3) ,
##' anglerighthumerus = -pi/6,
##' anglelefthumerus = pi + pi/6,
##' anglerightradius = 0,
##' angleleftradius = runif(3,- pi/4, pi/4),
##' angleleftleg = 3*pi/2 + pi / 12 ,
##' anglerightleg = 3*pi/2 - pi / 12,
##' angleofneck = runif(3, min = 3 * pi / 2 - pi/10 , max = 3 * pi / 2 + pi/10),
##' color=c("A","B","C"))
##'
##' p + xkcdman(mapping,dataman)
xkcdman <- function(mapping, data, ...) {
requiredaesthetics <- c("x","y",
"scale",
"ratioxy",
"angleofspine",
"anglerighthumerus",
"anglelefthumerus",
"anglerightradius",
"angleleftradius",
"anglerightleg",
"angleleftleg",
"angleofneck")
## We transform the data to get a default mapping
defaultmapdat <- createdefaultmappinganddata(mapping, data, requiredaesthetics)
data <-defaultmapdat$data
mapping <- defaultmapdat$mapping
centerofhead <- cbind(data$x,data$y)
diameterofhead <- data$scale
lengthofspine <- diameterofhead
lengthofleg <- lengthofspine * 1.2
lengthofhumerus <- lengthofspine * 0.6
lengthofradius <- lengthofspine * 0.5
beginspine <- centerofhead + (diameterofhead / 2) * cbind( cos(data$angleofneck) * data$ratioxy, sin( data$angleofneck))
endspine <- beginspine + lengthofspine * cbind( cos( data$angleofspine) * data$ratioxy , sin(data$angleofspine))
endrighthumerus <- beginspine + lengthofhumerus * cbind( cos( data$anglerighthumerus) * data$ratioxy, sin(data$anglerighthumerus))
endlefthumerus <- beginspine + lengthofhumerus * cbind( cos( data$anglelefthumerus)* data$ratioxy, sin(data$anglelefthumerus))
bone <- function(begin, distance, angle, ratioxy, mapping, data, ... ) {
end <- cbind( begin[,1] + distance * cos( angle ) * ratioxy, begin[,2] + distance * sin(angle) )
data$x <- begin[,1]
data$y <- begin[,2]
data$xend <- end[,1]
data$yend <- end[,2]
ttmapping <- unlist(mapping)
ttmapping$x <- parse(text = "x")[[1]]
ttmapping$y <- parse(text = "y")[[1]]
ttmapping$xend <- parse(text = "xend")[[1]]
ttmapping$yend <- parse(text = "yend")[[1]]
newmapping <- structure(ttmapping, class = "uneval")
xkcdline(mapping=newmapping, data=data, ...)
}
head <- function(centerofhead, diameter, ratioxy , mapping, data,...) {
data$diameter <- diameter
ttmapping <- unlist(mapping)
ttmapping$diameter <- parse(text = "diameter")[[1]]
newmapping <- structure(ttmapping, class = "uneval")
xkcdline(mapping = newmapping, data =data, typexkcdline="circunference", ...)
}
c(head(centerofhead=centerofhead, diameter = diameterofhead, ratioxy = data$ratioxy, mapping = mapping, data = data, ...),
bone(begin = beginspine, distance = lengthofspine, angle = data$angleofspine, ratioxy = data$ratioxy, mapping =mapping, data = data, ... ),
bone(begin = beginspine, distance = lengthofhumerus, angle = data$anglerighthumerus, ratioxy = data$ratioxy, mapping =mapping, data = data, ...) , # right humerus
bone(begin = endrighthumerus, distance = lengthofradius, angle = data$anglerightradius , ratioxy = data$ratioxy, mapping =mapping, data = data, ...),
bone(begin = beginspine, distance = lengthofhumerus, angle = data$anglelefthumerus, ratioxy = data$ratioxy, mapping =mapping, data = data, ...),
bone(begin = endlefthumerus, distance = lengthofradius, angle = data$angleleftradius, ratioxy = data$ratioxy, mapping =mapping, data = data, ...),
bone(begin = endspine, distance = lengthofleg, angle = data$angleleftleg, ratioxy = data$ratioxy, mapping =mapping, data = data, ...), # Leg
bone(begin = endspine, distance = lengthofleg, angle = data$anglerightleg, ratioxy= data$ratioxy, mapping =mapping, data = data, ...)
) #Leg
}
| /scratch/gouwar.j/cran-all/cranData/xkcd/R/xkcdman.R |
## Emilio Torres Manzanera
## University of Oviedo
## Time-stamp: <2018-05-23 17:40 emilio on emilio-despacho>
## ============================================================
##' It draws fuzzy rectangles.
##'
##' This function draws fuzzy rectangles.
##'
##' It plots rectangles. The following aesthetics are required:
##' \enumerate{
##' \item xmin
##' \item ymin
##' \item xmax
##' \item ymax
##' }
##' Additionally, you can use the aesthetics of \code{\link[ggplot2]{geom_path}} and \code{\link[ggplot2]{geom_rect}}.
##' @title Draw fuzzy rectangles
##' @param mapping Mapping between variables and aesthetics generated by \code{\link[ggplot2]{aes}}. See Details.
##' @param data Dataset used in this layer.
##' @param ... Optional arguments.
##' @return A layer.
##' @seealso \code{\link[ggplot2]{aes}}, \code{\link[ggplot2]{geom_path}}
##' @keywords manip
##' @import ggplot2
##' @export
##' @examples
##' volunteers <- data.frame(year=c(2007:2011),
##' number=c(56470, 56998,59686, 61783, 64251))
##' p <- ggplot() + xkcdrect(aes(xmin = year,
##' xmax= year +0.3,
##' ymin=number,
##' ymax = number + 3600),
##' volunteers,
##' fill="red", colour="black")
##' p
xkcdrect <- function(mapping, data, ...) {
requiredaesthetics <- c("xmin","ymin",
"xmax","ymax")
rect1 <- with(data,geom_rect(mapping, data, ...))
defaultmapdat <- createdefaultmappinganddata(mapping, data, requiredaesthetics)
data <-defaultmapdat$data
mapping <- defaultmapdat$mapping
xrange <- range(min(data$xmin, data$xmax), max(data$xmin, data$xmax))
yrange <- range(min(data$ymin, data$ymax), max(data$ymin, data$ymax))
borderxjitteramount <- diff(xrange)/100
borderyjitteramount <- diff(yrange)/100
bordercolour <- "white"
bordersize <- 3
argList <- list(...)
if( "colour" %in% names(argList) ) bordercolour <- argList[["colour"]]
if( "color" %in% names(argList) ) bordercolour <- argList[["color"]]
if( "size" %in% names(argList) ) bordersize <- argList[["size"]]
if( "xjitteramount" %in% names(argList) ) borderxjitteramount <- argList[["xjitteramount"]]
if( "yjitteramount" %in% names(argList) ) borderyjitteramount <- argList[["yjitteramount"]]
## ## To avoid Notes when R CMD check, use a with
## mappu <- with(data, mappingjoin(aes(x=xmin,y=ymax, xend=xmax, yend=ymax), mapping))
## mappr <- with(data, mappingjoin(aes(xbegin=xmax,ybegin=ymin, xend=xmax, yend=ymax), mapping))
## mappl <- with(data, mappingjoin(aes(xbegin=xmin,ybegin=ymin, xend=xmin, yend=ymax), mapping))
## mappb <- with(data, mappingjoin(aes(xbegin=xmin,ybegin=ymin, xend=xmax, yend=ymin), mapping))
mappu <- with(data, aes(x=xmin,y=ymax, xend=xmax, yend=ymax))
mappr <- with(data, aes(x=xmax,y=ymin, xend=xmax, yend=ymax))
mappl <- with(data, aes(x=xmin,y=ymin, xend=xmin, yend=ymax))
mappb <- with(data, aes(x=xmin,y=ymin, xend=xmax, yend=ymin))
upperline <- xkcdline(mappu,
data, colour=bordercolour,
yjitteramount=borderyjitteramount, mask = FALSE, size=bordersize, ...)
rightline <- xkcdline(mappr,
data, colour=bordercolour,
xjitteramount=borderxjitteramount, mask = FALSE, size=bordersize, ...)
leftline <- xkcdline(mappl,
data, colour=bordercolour,
xjitteramount=borderxjitteramount, mask = FALSE, size=bordersize, ...)
bottomline <- xkcdline(mappb,
data, colour=bordercolour,
yjitteramount=borderyjitteramount, mask = FALSE, size=bordersize, ...)
## upperline <- xkcdline(aes(x=xmin,y=ymax,xend=xmax,yend=ymax),
## data, colour=bordercolour,
## yjitteramount=borderyjitteramount, mask = FALSE, size=bordersize, ...)
## rightline <- xkcdline(aes(x=xmax,y=ymin,xend=xmax,yend=ymax),
## data, colour=bordercolour,
## xjitteramount=borderxjitteramount, mask = FALSE, size=bordersize, ...)
## leftline <- xkcdline(aes(x=xmin,y=ymin,xend=xmin,yend=ymax),
## data, colour=bordercolour,
## xjitteramount=borderxjitteramount, mask = FALSE, size=bordersize, ...)
## bottomline <- xkcdline(aes(x=xmin,y=ymin,xend=xmax,yend=ymin),
## data, colour=bordercolour,
## yjitteramount=borderyjitteramount, mask = FALSE, size=bordersize, ...)
list(rect1, upperline , rightline, leftline, bottomline)
}
| /scratch/gouwar.j/cran-all/cranData/xkcd/R/xkcdrect.R |
## Emilio Torres Manzanera
## University of Oviedo
## Time-stamp: <2014-04-20 dom 17:01 emilio on emilio-Satellite-P100>
## ============================================================
## Borrowed from dplyr
## Otherwise, the print options of trunc_mat fails
.onAttach <- function(libname, pkgname) {
op <- options()
op.dplyr <- list(
dplyr.show_sql = FALSE,
dplyr.explain_sql = FALSE,
dplyr.strict_sql = FALSE,
dplyr.print_min = 10L,
dplyr.print_max = 100L
)
toset <- !(names(op.dplyr) %in% names(op))
if(any(toset)) options(op.dplyr[toset])
invisible()
}
| /scratch/gouwar.j/cran-all/cranData/xkcd/R/zzz.R |
### R code from vignette source 'xkcd-intro.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: packages (eval = FALSE)
###################################################
## install.packages(c("xkcd","splancs","reshape"), dependencies=TRUE)
###################################################
### code chunk number 2: fonts (eval = FALSE)
###################################################
## library(extrafont)
## library(ggplot2)
## if( 'xkcd' %in% fonts()) {
## p <- ggplot() + geom_point(aes(x=mpg, y=wt), data=mtcars) +
## theme(text = element_text(size = 16, family = "xkcd"))
## } else {
##  warning("xkcd fonts not installed!")
## p <- ggplot() + geom_point(aes(x=mpg, y=wt), data=mtcars)
## }
## p
###################################################
### code chunk number 3: xkcd-intro.Rnw:152-157 (eval = FALSE)
###################################################
## p <- ggplot() + geom_point(aes(x=mpg, y=wt), data=mtcars)
## ggsave("grnofonts.png",p)
## p <- ggplot() + geom_point(aes(x=mpg, y=wt), data=mtcars) +
## theme(text = element_text(size = 16, family = "xkcd"))
## ggsave("grfonts.png",p)
###################################################
### code chunk number 4: xkcd-intro.Rnw:185-199 (eval = FALSE)
###################################################
## library(extrafont)
## download.file("http://simonsoftware.se/other/xkcd.ttf",
## dest="xkcd.ttf", mode="wb")
## system("mkdir ~/.fonts")
## system("cp xkcd.ttf ~/.fonts")
## font_import(pattern = "[X/x]kcd", prompt=FALSE)
## fonts()
## fonttable()
## if(.Platform$OS.type != "unix") {
## ## Register fonts for Windows bitmap output
## loadfonts(device="win")
## } else {
## loadfonts()
## }
###################################################
### code chunk number 5: xkcd-intro.Rnw:208-209 (eval = FALSE)
###################################################
## ggsave("gr1.png", p)
###################################################
### code chunk number 6: embedfonts (eval = FALSE)
###################################################
## ggsave("gr1.pdf", plot=p, width=12, height=4)
## if(.Platform$OS.type != "unix") {
## ## Needed for Windows. Make sure you have the correct path
## Sys.setenv(R_GSCMD =
## "C:\\Program Files (x86)\\gs\\gs9.06\\bin\\gswin32c.exe")
## }
## embed_fonts("gr1.pdf")
###################################################
### code chunk number 7: xkcd-intro.Rnw:236-237 (eval = FALSE)
###################################################
## install.packages("xkcd",dependencies = TRUE)
###################################################
### code chunk number 8: xkcd-intro.Rnw:241-244 (eval = FALSE)
###################################################
## help(package="xkcd")
## vignette("xkcd-intro") # It opens the PDF
## browseVignettes(package = "xkcd") # To browse the PDF, R and Rnw
###################################################
### code chunk number 9: library (eval = FALSE)
###################################################
## library(xkcd)
###################################################
### code chunk number 10: axis (eval = FALSE)
###################################################
## xrange <- range(mtcars$mpg)
## yrange <- range(mtcars$wt)
## set.seed(123) # for reproducibility
## p <- ggplot() + geom_point(aes(mpg, wt), data=mtcars) +
## xkcdaxis(xrange,yrange)
## p
###################################################
### code chunk number 11: xkcd-intro.Rnw:268-269 (eval = FALSE)
###################################################
## ggsave("graxis.png",p)
###################################################
### code chunk number 12: stickfigure (eval = FALSE)
###################################################
## ratioxy <- diff(xrange)/diff(yrange)
## mapping <- aes(x, y, scale, ratioxy, angleofspine,
## anglerighthumerus, anglelefthumerus,
## anglerightradius, angleleftradius,
## anglerightleg, angleleftleg, angleofneck,
## linetype=city)
##
## dataman <- data.frame(x= c(15,30), y=c(3, 4),
## scale = c(0.3,0.51) ,
## ratioxy = ratioxy,
## angleofspine = -pi/2 ,
## anglerighthumerus = c(pi/4, -pi/6),
## anglelefthumerus = c(pi/2 + pi/4, pi +pi/6),
## anglerightradius = c(pi/3, -pi/3),
## angleleftradius = c(pi/3, -pi/3),
## anglerightleg = 3*pi/2 - pi / 12,
## angleleftleg = 3*pi/2 + pi / 12 ,
## angleofneck = runif(1, 3*pi/2-pi/10, 3*pi/2+pi/10),
## city=c("Liliput","Brobdingnag"))
##
## p <- ggplot() + geom_point(aes(mpg, wt, colour=as.character(vs)), data=mtcars) +
## xkcdaxis(xrange,yrange) +
## xkcdman(mapping, dataman)
## p
###################################################
### code chunk number 13: xkcd-intro.Rnw:322-323 (eval = FALSE)
###################################################
## ggsave("grstickfigure.png",p)
###################################################
### code chunk number 14: xkcd-intro.Rnw:327-328 (eval = FALSE)
###################################################
## p + facet_grid(.~vs)
###################################################
### code chunk number 15: caritas (eval = FALSE)
###################################################
## volunteers <- data.frame(year=c(2007:2011),
## number=c(56470, 56998, 59686, 61783, 64251))
## xrange <- range(volunteers$year)
## yrange <- range(volunteers$number)
## ratioxy <- diff(xrange) / diff(yrange)
##
## datalines <- data.frame(xbegin=c(2008.3,2010.5),ybegin=c(63000,59600),
## xend=c(2008.5,2010.3), yend=c(63400,59000))
##
## mapping <- aes(x, y, scale, ratioxy, angleofspine,
## anglerighthumerus, anglelefthumerus,
## anglerightradius, angleleftradius,
## anglerightleg, angleleftleg, angleofneck)
##
## dataman <- data.frame( x= c(2008,2010), y=c(63000, 58850),
## scale = 1000 ,
## ratioxy = ratioxy,
## angleofspine = -pi/2 ,
## anglerighthumerus = c(-pi/6, -pi/6),
## anglelefthumerus = c(-pi/2 - pi/6, -pi/2 - pi/6),
## anglerightradius = c(pi/5, -pi/5),
## angleleftradius = c(pi/5, -pi/5),
## angleleftleg = 3*pi/2 + pi / 12 ,
## anglerightleg = 3*pi/2 - pi / 12,
## angleofneck = runif(1, 3*pi/2-pi/10, 3*pi/2+pi/10))
##
## p <- ggplot() + geom_smooth(mapping=aes(x=year, y =number),
## data =volunteers, method="loess") +
## xkcdaxis(xrange,yrange) +
## ylab("Volunteers at Caritas Spain") +
## xkcdman(mapping, dataman) +
## annotate("text", x=2008.7, y = 63700,
## label = "We Need\nVolunteers!", family="xkcd" ) +
## annotate("text", x=2010.5, y = 60000,
## label = "Sure\nI can!", family="xkcd" ) +
## xkcdline(aes(xbegin=xbegin,ybegin=ybegin,xend=xend,yend=yend),
## datalines, xjitteramount = 0.12)
## p # Figure 5.a
###################################################
### code chunk number 16: xkcd-intro.Rnw:396-397 (eval = FALSE)
###################################################
## ggsave("grcaritas.png",p)
###################################################
### code chunk number 17: barchart (eval = FALSE)
###################################################
## data <- volunteers
## data$xmin <- data$year - 0.1
## data$xmax <- data$year + 0.1
## data$ymin <- 50000
## data$ymax <- data$number
## xrange <- range(min(data$xmin)-0.1, max(data$xmax) + 0.1)
## yrange <- range(min(data$ymin)+500, max(data$ymax) + 1000)
##
## mapping <- aes(xmin=xmin,ymin=ymin,xmax=xmax,ymax=ymax)
## p <- ggplot() + xkcdrect(mapping,data) +
## xkcdaxis(xrange,yrange) +
## xlab("Year") + ylab("Volunteers at Caritas Spain")
## p # Figure 5.b
###################################################
### code chunk number 18: xkcd-intro.Rnw:419-420 (eval = FALSE)
###################################################
## ggsave("grbar.png",p)
###################################################
### code chunk number 19: help (eval = FALSE)
###################################################
## library(zoo)
## library(xkcd)
## require(splancs) #install.packages("splancs", dependencies = TRUE, repos="http://cran.es.r-project.org/")
##
## mydatar <- read.table(text="
## 6.202
## 5.965
## 5.778
## 5.693
## 5.639
## 5.273
## 4.978
## 4.833
## 4.910
## 4.696
## 4.574
## 4.645
## 4.612
## ")
##
## mydata1 <- mydatar[dim(mydatar)[1]:1,]
## z <- zooreg(mydata1, end = as.yearqtr("2013-1"), frequency = 4)
## z
##
##
## mydata <- data.frame(parados=z)
## mydata$year <- as.numeric(as.Date(as.yearqtr(rownames(mydata))))
## mydata$label <- paste(substr(rownames(mydata),3,4),substr(rownames(mydata),6,7),sep="")
##
## data <- mydata
## data$xmin <- as.numeric(data$year) -1
## data$xmax <- data$xmin + 90
## data$ymin <- 4.5
## data$ymax <- data$parados
##
## n <- 3200
## poligono <- mydata[,c("year","parados")]
## names(poligono) <- c("x","y")
## poligono <- rbind(poligono, c(max(poligono$x),4.4))
## poligono <- rbind(poligono, c(min(poligono$x),4.4))
## points <- data.frame(x=runif(n,range(poligono$x)[1],range(poligono$x)[2] ),
## y=runif(n,range(poligono$y)[1],range(poligono$y)[2] ))
## kk <- inout(points, poligono)
## points <- points[kk, ]
## points <- rbind(points,poligono)
##
## x <- points$x
## y <- points$y
## nman <- length(x)
## sizer <-runif(nman, 4, 6)
## nman
##
## xrange <- c(min(x),max(x))
## yrange <- c(min(y),max(y))
## ratioxy <- diff(xrange)/diff(yrange)
##
## n <- 2
## set.seed(123)
## twomen <- xkcdman(mapping= aes(x, y,
## scale,
## ratioxy,
## angleofspine ,
## anglerighthumerus,
## anglelefthumerus,
## anglerightradius,
## angleleftradius,
## anglerightleg,
## angleleftleg,
## angleofneck),
## data.frame(x=c(15600, 14800) ,
## y=c(5.3, 5.7),
## scale = 0.2,
## ratioxy = ratioxy,
## angleofspine = runif(n, - pi/2 - pi/10, -pi/2 + pi/10),
## anglerighthumerus = runif(n, -pi/6- pi/10, - pi/6 + pi/10),
## anglelefthumerus = runif(n, pi + pi/6 -pi/10, pi + pi/6 + pi/10),
## anglerightradius = runif(n, -pi/4, pi/4),
## angleleftradius = runif(n, pi -pi/4, pi + pi/4),
## anglerightleg = runif(n, 3* pi/2 + pi/12 , 3* pi/2 + pi/12 + pi/10),
## angleleftleg = runif(n, 3* pi/2 - pi/12 - pi/10, 3* pi/2 - pi/12 ),
## angleofneck = runif(n, -pi/2-pi/10, -pi/2 + pi/10)))
##
## p1 <- ggplot() + geom_text(aes(x,y,label="0"), data.frame(x=x,y=y),family="xkcd",alpha=0.4,size=sizer) + xkcdaxis(xrange,yrange) +
## ylab("Unemployed persons (millions)") + xlab("Date") +
## twomen +
## annotate("text", x= 15250, y=5.95,label="Help!", family="xkcd",size=7) +
## xkcdline(aes(xbegin=xbegin,ybegin=ybegin,xend=xend,yend=yend),
## data=data.frame( xbegin=15600, ybegin=5.42, xend=15250, yend=5.902 )
## , xjitteramount = 200) + theme(legend.position="none")
## #p1
## p2 <- p1 + scale_x_continuous(breaks=as.numeric(mydata$year),label=mydata$label)
## p2
##
## ggsave("grhelp.png")
##
###################################################
### code chunk number 20: homosapiens (eval = FALSE)
###################################################
## library(reshape)
##
## mydata <- read.table(header=TRUE,sep=",",text="
## year,ministerio,banco,fmi,homo
## 2013,2,1.95,1.96,1.94
## 2014,2.1,1.97,1.93,1.88
## 2015,2.2,2.05,1.90,1.87
## ")
## mydatalong <- melt(mydata, id="year", measure.vars= names(mydata)[-1])
##
## xrange <- c(2013,2015)
## yrange <- c(1.86,2.21)
## set.seed(123)
## ##p <- ggplot() + geom_smooth(aes(x=year, y=value, group=variable,linetype=variable), data=mydatalong, position = position_jitter(h=0.0001),color="black") + theme(legend.position = "none") + xkcdaxis(xrange,yrange)
## p <- ggplot() + geom_smooth(aes(x=year, y=value, group=variable,linetype=variable), data=mydatalong,color="black") + theme(legend.position = "none") + xkcdaxis(xrange,yrange)
## p2 <- p + ylab("Change in real GDP (%)") + xlab("Economic Projections of several Institutes") + scale_x_continuous(breaks=c(mydata$year), labels=c(mydata$year))
## datalabel <- data.frame(x=2014.95,
## y=t(mydata[mydata$year==2015,c(2,3,4,5)]),
## label=c("Ministry of Economy","National Bank","International Monetary Fund","Homo Sapiens Sapiens*"))
## names(datalabel) <- c("x","y","label")
##
## p3 <- p2 + geom_text(aes(x=x,y=y,label=label), data=datalabel, hjust=1, vjust=1,family="xkcd",size=7) +
## annotate("text", x=2013.4, y=1.852, label="*Homo Sapiens Sapiens = Doubly Wise Man",family="xkcd",size=3.5)
## ggsave("grhomosapiens.png",p3)
###################################################
### code chunk number 21: sevan (eval = FALSE)
###################################################
## resumen <-
## structure(list(tonombre = structure(c(1L, 2L, 3L, 11L, 4L, 5L,
## 8L, 6L, 7L, 9L, 10L, 14L, 12L, 13L, 15L), .Label = c("Andalucía",
## "Aragón", "Asturias", "Canarias", "Cantabria", "C-LaMancha",
## "CyLeón", "Cataluña", "Extremadura", "Galicia", "Baleares",
## "Madrid", "Murcia", "La Rioja", "Valencia"), class = "factor"),
## persons = c(2743706L, 515772L, 364410L, 399963L, 699410L,
## 212737L, 2847377L, 717874L, 894946L, 371502L, 942277L, 119341L,
## 2561918L, 493833L, 1661613L), frompersons = c(14266L, 3910L,
## 3214L, 3283L, 4371L, 1593L, 10912L, 8931L, 9566L, 3231L,
## 5407L, 940L, 21289L, 3202L, 9939L), topersons = c(10341L,
## 3805L, 2523L, 4039L, 3911L, 1524L, 12826L, 10897L, 7108L,
## 2312L, 4522L, 1066L, 26464L, 3529L, 9187L), llegan = c(0.38,
## 0.74, 0.69, 1.01, 0.56, 0.72, 0.45, 1.52, 0.79, 0.62, 0.48,
## 0.89, 1.03, 0.71, 0.55), sevan = c(0.52, 0.76, 0.88, 0.82,
## 0.62, 0.75, 0.38, 1.24, 1.07, 0.87, 0.57, 0.79, 0.83, 0.65,
## 0.6)), .Names = c("tonombre", "persons", "frompersons", "topersons",
## "llegan", "sevan"), row.names = c(NA, -15L), class = "data.frame")
##
##
## resumenlargo <- melt(resumen[,c("tonombre","llegan","sevan")])
##
## oo <- order(resumen$llegan)
## nombreordenados <- (resumen$tonombre)[oo]
## nombreordenados
##
## resumenlargo$tonombre <- factor( resumenlargo$tonombre, levels=nombreordenados, ordered=TRUE)
##
##
## set.seed(130613)
## kk <- ggplot() +
## geom_bar( aes(y= value, x=tonombre,fill=variable ), data=resumenlargo[resumenlargo$variable=="llegan", ], stat="identity") +
## geom_bar(aes(y= (-1)* value, x=tonombre,fill=variable ), data=resumenlargo[resumenlargo$variable=="sevan", ], stat="identity") +
## scale_y_continuous(breaks=seq(-1.2,1.5,0.3),labels=abs(seq(-1.2,1.5,0.3))) +
## ylab("Movilidad de los asalariados (% sobre asalariados residentes)") +
## coord_flip() +
## theme_xkcd() + xlab("") + theme(axis.ticks.y = element_blank(), axis.text.y = element_blank())
##
##
##
## kk2 <- kk +
## geom_text(aes(x=(tonombre),y=0,label=tonombre), data=resumenlargo,family="xkcd")
##
##
## lleganespana <- sum(resumen$topersons)*100/ sum(resumen$persons)
## sevanespana <- sum(resumen$frompersons)*100/ sum(resumen$persons)
##
##
## lineaespana1 <- xkcdline(mapping=aes(xbegin=1-0.5,ybegin=lleganespana,xend=15+0.5, yend=lleganespana, yjitteramount=0.051), data= resumenlargo, linetype=2,mask=FALSE)
## lineaespana2 <- xkcdline(mapping=aes(xbegin=1-0.5,ybegin=-lleganespana,xend=15+0.5, yend=-lleganespana), yjitteramount=0.051, data= resumenlargo,linetype=2,mask=FALSE)
##
##
## kk3 <- kk2 + xkcdline(mapping=aes(xbegin=as.numeric(tonombre)-0.5,ybegin=-1.24,xend=as.numeric(tonombre)-0.5, yend=1.52, xjitteramount=0.151), data= resumenlargo, size=3,color="white") + lineaespana1 + lineaespana2
##
##
## kk4 <- kk3 + annotate("text",x=1, y=c(lleganespana,-lleganespana),label="Media de España", hjust=c(-0.11,-0.11), vjust=c(-0.1,0.1),family="xkcd",angle=90)
##
##
## kk5 <- kk4 + scale_fill_discrete(name="",
## breaks=c("llegan", "sevan"),
## labels=c("Llegan", "Se van")) + theme(legend.justification=c(0,0), legend.position=c(0,0))
##
##
##
##
## xrange <- c(1,15)
## yrange <- c(-1.3,1.6)
## ratioxy <- diff(xrange)/diff(yrange)
## x <- 7
## y <- 1.5
## scale <- 0.35
## mapman <- aes(x, y,
## scale,
## ratioxy,
## angleofspine ,
## anglerighthumerus,
## anglelefthumerus,
## anglerightradius,
## angleleftradius,
## anglerightleg,
## angleleftleg,
## angleofneck)
## n <- 1
## set.seed(130613)
## datamanflip <- data.frame( x= x,
## y= y,
## scale = scale ,
## ratioxy = ratioxy,
## angleofspine = runif(n, -pi/2 -pi/2 - pi/10,-pi/2 -pi/2 + pi/10),
## ##angleofspine = runif(n, -0 - pi/10,-0 + pi/10),
## anglerighthumerus = runif(n, -pi/2-pi/6-pi/10, -pi/2 -pi/6+pi/10),
## anglelefthumerus = runif(n, -pi/2-pi/2 - pi/10, -pi/2 -pi/2 + pi/10),
## anglerightradius = runif(n, -pi/2-pi/5 - pi/10, -pi/2-pi/5 + pi/10),
## angleleftradius = runif(n, -pi/2-pi/5 - pi/10, -pi/2-pi/5 + pi/10),
## angleleftleg = runif(n, -pi/2 + 3*pi/2 + pi / 12 -pi/20,-pi/2 +3*pi/2 + pi / 12 +pi/20) ,
## anglerightleg = runif(n, -pi/2 + 3*pi/2 - pi / 12 -pi/20, -pi/2+ 3*pi/2 - pi / 12 +pi/20) ,
## angleofneck = runif(n, -pi/2+3*pi/2-pi/10, -pi/2+3*pi/2+pi/10))
## p1 <- xkcdman(mapman , datamanflip)
##
## kk6 <- kk5 + p1
##
##
## kk7 <- kk6 + annotate("text", x=9.3, y = 1.3, label="Unos vienen, otros se van",family="xkcd" ) +
## xkcdline(aes(xbegin=xbegin,xend=xend,yend=yend,ybegin=ybegin), yjitteramount=0.135,data=data.frame(xbegin=9.0, xend=7.2, ybegin=1.2, yend=1.3))
##
##
## ggsave(kk7,filename="grsevan.png")
##
###################################################
### code chunk number 22: motherday (eval = FALSE)
###################################################
## mommy <- read.table(sep=" ",text ="
## 8 100
## 9 0
## 10 0
## 11 0
## 12 0
## 13 0
## 14 100
## 15 100
## 16 500
## 17 420
## 18 75
## 19 50
## 20 100
## 21 40
## 22 0
## ")
## names(mommy) <- c("hour","number")
## data <- mommy
## data$xmin <- data$hour - 0.25
## data$xmax <- data$xmin + 1
## data$ymin <- 0
## data$ymax <- data$number
## xrange <- range(8, 24)
## yrange <- range(min(data$ymin) + 10 , max(data$ymax) + 200)
## ratioxy <- diff(xrange)/diff(yrange)
## timelabel <- function(text,x,y) {
## te1 <- annotate("text", x=x, y = y + 65, label=text, size = 6,family ="xkcd")
## list(te1,
## xkcdline(aes(xbegin=xbegin, ybegin= ybegin, xend=xend,yend=yend),
## data.frame(xbegin=x,ybegin= y + 50, xend=x,yend=y), xjitteramount = 0.5))
## }
## n <- 1800
## set.seed(123)
## x <- runif(n, xrange[1],xrange[2] )
## y <- runif(n, yrange[1],yrange[2] )
## inside <- unlist(lapply(1:n, function(i) any(data$xmin <= x[i] & x[i] < data$xmax &
## data$ymin <= y[i] & y[i] < data$ymax)))
## x <- x[inside]
## y <- y[inside]
## nman <- length(x)
## sizer <- round(runif(nman, 1, 10),0)
## angler <- runif(nman, -10,10)
##
## p <- ggplot() +
## geom_text(aes(x,y,label="Mummy",angle=angler,hjust=0, vjust=0),
## family="xkcd",size=sizer,alpha=0.3) +
## xkcdaxis(xrange,yrange) +
## annotate("text", x=16, y = 650,
## label="Happy Mother's day", size = 16,family ="xkcd") +
## xlab("daily schedule") +
## ylab("Number of times mothers are called on by their children") +
## timelabel("Wake up", 9, 125) + timelabel("School", 12.5, 90) +
## timelabel("Lunch", 15, 130) +
## timelabel("Homework", 18, 525) +
## timelabel("Bath", 21, 110) +
## timelabel("zzz", 23.5, 60)
##
##
## p
## ggsave("grmotherday.png",p)
###################################################
### code chunk number 23: PDF (eval = FALSE)
###################################################
## require(tools)
## texi2dvi("xkcd-intro.tex", pdf = TRUE)
| /scratch/gouwar.j/cran-all/cranData/xkcd/inst/doc/xkcd-intro.R |
xcolors<-function(max_rank=-1) {
if (max_rank>0 && max_rank<NROW(.color_data))
.color_data$color_name[seq_len(max_rank)]
else
.color_data$color_name
}
name2color<-function(name,exact=TRUE,hex_only=TRUE,n=-1){
if(exact){
d<-.color_data[match(name,.color_data$color_name),]
} else {
d<-.color_data[grep(name,.color_data$color_name),]
if (n>0 && nrow(d)>n) d<-d[seq_len(n),]
}
if (hex_only) {
d$hex
} else{
d
}
}
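## Usage sketch for the helpers above (assumes the package's internal
## .color_data table is loaded):
##   head(xcolors(10))
##   name2color("cerulean")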
nearest_named<-function(color, hex_only=FALSE,max_rank=-1,Lab=TRUE){
if (NCOL(color)==3){
rgbcol<-color
} else if (is.character(color)){
rgbcol<-t(col2rgb(color))
} else if (is.factor(color)){#sigh
rgbcol<-t(col2rgb(as.character(color)))
}
  if (max_rank > 0 && max_rank < nrow(.color_data))
ranks<-seq_len(max_rank)
else
ranks<-1:nrow(.color_data)
if (Lab){
labcol<-convertColor(rgbcol/255,from="sRGB",to="Lab")
nearest<-knnx.index(as.matrix(.color_data[ranks,c("L","a","b")]),query=labcol,k=1)
} else {
nearest<-knnx.index(as.matrix(.color_data[ranks,c("red","green","blue")]),query=rgbcol,k=1)
}
if (hex_only)
.color_data[nearest,"hex"]
else
.color_data[nearest,]
}
 | /scratch/gouwar.j/cran-all/cranData/xkcdcolors/R/xkcdcol.R
detect_with_markers <- function(x, marker_open, marker_close) {
!is.na(x) & startsWith(x, marker_open) & endsWith(x, marker_close)
}
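# A minimal sketch of the expected behaviour (marker strings are arbitrary):
#   detect_with_markers(c("{{name}}", "plain", NA), "{{", "}}")
#   #> [1]  TRUE FALSE FALSE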
| /scratch/gouwar.j/cran-all/cranData/xlcutter/R/markers.R |
#' Validate an xlsx template file to use in [xlsx_cutter()]
#'
#' @inheritParams xlsx_cutter
#' @param minimal Logical (defaults to `FALSE`) saying whether the template
#' should contain only variables delimited by markers and nothing else, or
#' if extra text can be included (and ignored)
#' @param error Logical (defaults to `TRUE`) saying whether failed validations
#' should result in an error (`TRUE`) or a warning (`FALSE`)
#'
#' @returns `TRUE` if the template is valid, `FALSE` otherwise
#'
#' @export
#'
#' @examples
#' # Valid template
#' validate_xltemplate(
#' system.file("example", "timesheet_template.xlsx", package = "xlcutter")
#' )
#'
#' # Invalid templates
#' validate_xltemplate(
#' system.file("example", "template_duped_vars.xlsx", package = "xlcutter")
#' )
#'
#' validate_xltemplate(
#' system.file("example", "template_fluff.xlsx", package = "xlcutter"),
#' minimal = TRUE
#' )
validate_xltemplate <- function(
template_file,
template_sheet = 1,
marker_open = "{{", marker_close = "}}",
minimal = FALSE,
error = FALSE
) {
cnd_msg <- NULL
template <- tidyxl::xlsx_cells(template_file, template_sheet)
template_minimal <- template[
detect_with_markers(template$character, marker_open, marker_close),
]
has_fluff <- nrow(template_minimal) < nrow(template)
if (has_fluff && minimal) {
cnd_msg <- c(
cnd_msg,
sprintf(
ngettext(
nrow(template) - nrow(template_minimal),
"%s and includes %d field not defining any variable",
"%s and includes %d fields not defining any variable"
),
"The provided template is not minimal",
nrow(template) - nrow(template_minimal)
)
)
}
noms <- trimws(
substr(
template_minimal$character,
nchar(marker_open) + 1,
nchar(template_minimal$character) - nchar(marker_close)
)
)
has_dups <- anyDuplicated(noms) > 0
  if (has_dups) {
noms_duplicated <- unique(noms[duplicated(noms)])
cnd_msg <- c(
cnd_msg,
sprintf(
ngettext(
length(noms_duplicated),
"%s variable is duplicated in template: %s",
"%s variables are duplicated in template: %s"
),
length(noms_duplicated),
toString(noms_duplicated)
)
)
}
  if (length(cnd_msg) > 0) {
    if (error) {
      stop(
        "This template is not valid:\n",
        paste(sprintf("- %s", cnd_msg), collapse = "\n"),
        call. = FALSE
      )
    }
    lapply(cnd_msg, warning, call. = FALSE)
  }
# This is an unnecessary copy for now but may be useful as we add more checks
valid <- !has_dups && (!has_fluff || !minimal)
return(valid)
}
| /scratch/gouwar.j/cran-all/cranData/xlcutter/R/validate_xltemplate.R |
#' Create a data.frame from a folder of non-rectangular excel files
#'
#' Create a data.frame from a folder of non-rectangular excel files based on a
#' defined custom template
#'
#' @param data_files vector of paths to the xlsx files to parse
#' @param template_file path to the template file to use as a model to parse the
#' xlsx files in `data_folder`
#' @param data_sheet sheet id to extract from the xlsx files
#' @param template_sheet sheet id of the template file to use as a model to
#' parse the xlsx files in `data_folder`
#' @param marker_open,marker_close character marker to mark the variables to
#' extract in the `template_file`
#'
#' @returns A rectangular `data.frame` with columns as defined in the template.
#' Column types are determined automatically by `type.convert()`
#'
#' @importFrom stats setNames
#' @importFrom utils type.convert
#'
#' @export
#'
#' @examples
#'
#' data_files <- list.files(
#' system.file("example", "timesheet", package = "xlcutter"),
#' pattern = "\\.xlsx$",
#' full.names = TRUE
#' )
#'
#' template_file <- system.file(
#' "example", "timesheet_template.xlsx",
#' package = "xlcutter"
#' )
#'
#' xlsx_cutter(
#' data_files,
#' template_file
#' )
#'
xlsx_cutter <- function(
data_files, template_file,
data_sheet = 1, template_sheet = 1,
marker_open = "{{", marker_close = "}}"
) {
template <- tidyxl::xlsx_cells(
template_file,
template_sheet,
include_blank_cells = FALSE
)
template <- template[
detect_with_markers(template$character, marker_open, marker_close),
]
coords <- template[, c("row", "col")]
# We used to have a dedicated remove_markers() function which specifically
# removed the markers with a regex.
# BUT, since we already extracted strings with the markers, we can more simply
# and more efficiently remove the markers based on nchar
noms <- trimws(
substr(
template$character,
nchar(marker_open) + 1,
nchar(template$character) - nchar(marker_close)
)
)
res <- lapply(
data_files,
single_xlsx_cutter,
template_file, data_sheet, coords, noms
)
res <- as.data.frame(do.call(rbind, res))
type.convert(res, as.is = TRUE)
}
single_xlsx_cutter <- function(
data_file, template_file, data_sheet,
coords, noms
) {
d <- tidyxl::xlsx_cells(
data_file,
sheets = data_sheet
)
# FIXME: this is not ideal because we'd rather not read blank cells at all
# by setting `include_blank_cells = FALSE` in `tidyxl::xlsx_cells()`. But
# this is currently failing in the case where we have blank cells with
# comments: https://github.com/nacnudus/tidyxl/issues/91
d <- d[!d$is_blank, ]
d <- merge(coords, d, all = FALSE, all.x = TRUE)
d <- d[order(d$row, d$col), ]
# Present in template but not in file. Introduced by merge(all.x = TRUE)
d$data_type[is.na(d$data_type)] <- "missing"
d$res[d$data_type %in% c("error", "missing")] <- NA_character_
d$res[d$data_type == "logical"] <- d$logical[d$data_type == "logical"]
d$res[d$data_type == "numeric"] <- d$numeric[d$data_type == "numeric"]
d$res[d$data_type == "date"] <- format(d$date[d$data_type == "date"])
d$res[d$data_type == "character"] <- d$character[d$data_type == "character"]
setNames(
d$res,
noms
)
}
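## A minimal sketch (hypothetical inputs; called internally by xlsx_cutter):
##   coords <- data.frame(row = 2, col = 3)
##   single_xlsx_cutter("file.xlsx", "template.xlsx", 1, coords, "name")
## returns a named character vector with one entry per template variable.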
| /scratch/gouwar.j/cran-all/cranData/xlcutter/R/xlsx_cutter.R |
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
| /scratch/gouwar.j/cran-all/cranData/xlcutter/inst/doc/design.R |
---
title: "Design decisions"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Design decisions}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
This vignette discusses some design decisions made in this package and the rationale behind them.
## Why do I need to pass a list of files to parse rather than an entire folder?
A very early prototype of this package used this approach of taking the path of an entire folder. But we quickly noticed it severely lacked flexibility. For example, some users may have subfolders in their main folder, and may or may not want to parse files within these subfolders. Alternatively, they may want to exclude some files in the folder based on a certain pattern in the file name. Handling this in `{xlcutter}` would force us to add extra arguments (such as `recursive`, `pattern`, etc.) that we would then pass to `list.files()`. Or users may even want to pass individual files scattered across many folders.
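Concretely, a user can assemble exactly the file list they need up front and hand it to `xlsx_cutter()` (a sketch; the folder and template paths are hypothetical):
```r
data_files <- list.files(
  "my_data_folder",
  pattern = "\\.xlsx$",
  recursive = TRUE,
  full.names = TRUE
)
xlsx_cutter(data_files, "my_template.xlsx")
```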
Passing a list of files rather than an entire folder sacrifices a tiny amount of ease of use for a large increase in flexibility.
| /scratch/gouwar.j/cran-all/cranData/xlcutter/inst/doc/design.Rmd |
---
title: "Design decisions"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Design decisions}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
This vignette discusses some design decisions made in this package and the rationale behind them.
## Why do I need to pass a list of files to parse rather than an entire folder?
A very early prototype of this package used this approach of taking the path of an entire folder. But we quickly noticed it severely lacked flexibility. For example, some users may have subfolders in their main folder, and may or may not want to parse files within these subfolders. Alternatively, they may want to exclude some files in the folder based on a certain pattern in the file name. Handling this in `{xlcutter}` would force us to add extra arguments (such as `recursive`, `pattern`, etc.) that we would then pass to `list.files()`. Or users may even want to pass individual files scattered across many folders.
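Concretely, a user can assemble exactly the file list they need up front and hand it to `xlsx_cutter()` (a sketch; the folder and template paths are hypothetical):
```r
data_files <- list.files(
  "my_data_folder",
  pattern = "\\.xlsx$",
  recursive = TRUE,
  full.names = TRUE
)
xlsx_cutter(data_files, "my_template.xlsx")
```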
Passing a list of files rather than an entire folder sacrifices a tiny amount of ease of use for a large increase in flexibility.
| /scratch/gouwar.j/cran-all/cranData/xlcutter/vignettes/design.Rmd |
#' Select SNP by MAF.
#'
#' \code{MAF} returns the SNP name and its minor allele frequency (MAF) when
#' the MAF is at least the given threshold.
#'
#' @param snp SNP name.
#' @param gender Gender variable name.
#' @param male Male indicator in the gender variable.
#' @param MAF_v Minimum MAF threshold.
#' @param data A dataset.
#' @return A character vector of the SNP name and its estimated MAF if the MAF
#'   is at least \code{MAF_v}; otherwise \code{NULL}.
MAF <- function(snp, gender, male, MAF_v, data) {
data <- stats::na.omit(data[, which(colnames(data) %in% c(snp, gender))])
snp_v <- data[,which(colnames(data) %in% snp)]
n <- length(snp_v)
ind <- (data[,which(colnames(data) %in% gender)] == male)
MAF_R <- (2 * sum(snp_v[ind] != 0) + 2 * sum(snp_v[!ind] == 2) + 1 * sum(snp_v[!ind] == 1)) /(2 * n)
MAF_R <- round(MAF_R,4)
if (MAF_R >= MAF_v) {
return(c(snp, MAF_R))
} else {
return(NULL)
}
}
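## A usage sketch with the bundled simulated data (assuming males are coded
## as 1 in the gender variable; see ?Rdata):
##   data(Rdata)
##   MAF(snp = "snp_1", gender = "gender", male = 1, MAF_v = 0.05, data = Rdata)
## returns c("snp_1", <MAF>) if the MAF is at least 0.05, NULL otherwise.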
| /scratch/gouwar.j/cran-all/cranData/xlink/R/MAF.R |
#' Simulation data for Genetic association models for X-chromosome SNPS
#'
#' A simulated dataset containing 400 observations. The variables list as follows:
#'
#' \itemize{
#' \item ID. Identification number.
#' \item OS. Survival indicator, 1 for death, 0 for censoring.
#' \item OS_time. Duration time of survival.
#' \item gender. Binary value 0,1 with P(x=1)=0.5 and hazard ratio is 1.
#' \item Age. Uniform distribution in [20,80] and hazard ratio is 1.02.
#' \item Smoking. Binary value 0,1 with P(x=1)=0.3 and hazard ratio is 1.2.
#' \item Treatment. Binary value 0,1 with P(X=1)=0.3 and hazard ratio is 1.2.
#' \item snp_1. True type in coxph model is 'XCI', minor allele frequency is 0.2, hazard ratio is 1.5.
#' \item snp_2. True type in coxph model is 'XCI-E', minor allele frequency is 0.3, hazard ratio is 1.5.
#' \item snp_3. True type in coxph model is 'XCI-S', minor allele frequency is 0.4, hazard ratio is 1.5.
#' \item snp_4. True type in coxph model is 'XCI', minor allele frequency is 0.3, hazard ratio is 1.
#' \item snp_5. True type in coxph model is 'XCI-E', minor allele frequency is 0.1, hazard ratio is 1.
#' \item snp_6. True type in coxph model is 'XCI', minor allele frequency is 0.2, hazard ratio is 1.
#' \item snp_7. True type in coxph model is 'XCI', minor allele frequency is 0.15, hazard ratio is 1.
#' \item snp_8. True type in coxph model is 'XCI-E', minor allele frequency is 0.1, hazard ratio is 1.
#' \item snp_9. True type in coxph model is 'XCI', minor allele frequency is 0.2, hazard ratio is 1.
#' \item snp_10. True type in coxph model is 'XCI', minor allele frequency is 0.15, hazard ratio is 1.
#' }
#'
#' @docType data
#' @keywords datasets
#' @name Rdata
#' @usage data(Rdata)
#' @format A data frame with 400 rows and 17 variables.
NULL
| /scratch/gouwar.j/cran-all/cranData/xlink/R/Rdata.R |
#' Model fitting results for each SNP considering as XCI-E type
#'
#' \code{fit_XCI_E_model} returns model fitting results for each SNP understanding as XCI-E type.
#'
#' @param resp Response variable for continuous or binary model fitting.
#' @param os Survival indicator, 1 for death, 0 for censoring.
#' @param ostime Duration time of survival.
#' @param snp Single SNP name.
#' @param gender Gender variable.
#' @param male Male indicator in gender variable.
#' @param female Female indicator in gender variable.
#' @param covars Covariates list.
#' @param model Fitting model type. For 'linear', fitting linear model. For 'binary', fitting logistic regression model. For 'survival', fitting survival model.
#' @param data Data set.
#' @return It returns estimated parameters, confidence intervals and P values for each variable, together with the maximum log-likelihoods of the baseline and full models.
#' @seealso \code{\link[stats]{lm}} for linear model, \code{\link[stats]{glm}} for logistic regression model, and \code{\link[survival]{coxph}} for survival model.
#' @import survival
#' @import stats
fit_XCI_E_model <- function(resp, os, ostime, snp, gender, male, female, covars, model, data) {
MAF_value <- MAF(snp = snp, gender = gender, male = male, MAF_v = 0, data = data)[2]
if (model == "survival") {
var_list <- c(os, ostime, snp, gender, covars)
var_n <- length(var_list)
data <- data[, var_list]
snp_var <- data[, snp]
n_na <- (!is.na(snp_var))
data <- data[n_na, ]
male_snp <- data[data[, 4] == male, 3]
snp_type <- sum(levels(as.factor(male_snp)) %in% c("2"))
if (snp_type == 1) {
new_col <- data[, 3]
new_col[((data[, 4] == male) & (data[, 3] == 2))] <- 1
data <- cbind(data, new_col)
snp_new <- paste(snp, "XCI_E", sep = "_")
colnames(data)[var_n + 1] <- snp_new
formula <- paste("Surv", "(", ostime, ",", os, ")", "~", snp_new, "+", gender)
} else {
formula <- paste("Surv", "(", ostime, ",", os, ")", "~", snp, "+", gender)
}
formula_bl <- paste("Surv", "(", ostime, ",", os, ")", "~", gender)
if (length(covars) != 0) {
covar_formula <- paste(covars, collapse = "+")
formula_bl <- paste(formula_bl, covar_formula, sep = "+")
formula <- paste(formula, covar_formula, sep = "+")
}
        Model_bl <- survival::coxph(stats::as.formula(formula_bl), data = data)
        Model <- survival::coxph(stats::as.formula(formula), data = data)
LR_AIC <- Model$loglik[2] - Model_bl$loglik[2]
infor <- infor_table(x = summary(Model)$coefficients, snp = snp, covar_n = rownames(summary(Model)$coefficients), MAF_value = MAF_value, model)
loglik_infor <- t(c(Model_bl$loglik[2], Model$loglik[2], LR_AIC))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist <- list(coefficients = infor, loglik = loglik_infor)
return(mylist)
}
if (model == "binary") {
var_list <- c(resp, snp, gender, covars)
var_n <- length(var_list)
data <- data[, var_list]
snp_var <- data[, snp]
n_na <- (!is.na(snp_var))
data <- data[n_na, ]
male_snp <- data[data[, 3] == male, 2]
snp_type <- sum(levels(as.factor(male_snp)) %in% c("2"))
if (snp_type == 1) {
new_col <- data[, 2]
new_col[((data[, 3] == male) & (data[, 2] == 2))] <- 1
data <- cbind(data, new_col)
snp_new <- paste(snp, "XCI_E", sep = "_")
colnames(data)[var_n + 1] <- snp_new
formula <- paste(resp, "~", snp_new, "+", gender)
} else {
formula <- paste(resp, "~", snp, "+", gender)
}
formula_bl <- paste(resp, "~", gender)
if (length(covars) != 0) {
covar_formula <- paste(covars, collapse = "+")
formula_bl <- paste(formula_bl, covar_formula, sep = "+")
formula <- paste(formula, covar_formula, sep = "+")
}
        Model_bl <- stats::glm(stats::as.formula(formula_bl), data = data, family = binomial(link = "logit"))
        Model <- stats::glm(stats::as.formula(formula), data = data, family = binomial(link = "logit"))
LR_AIC <- stats::logLik(Model) - stats::logLik(Model_bl)
infor <- infor_table(x = summary(Model)$coefficients, snp = snp, covar_n = rownames(summary(Model)$coefficients), MAF_value = MAF_value, model)
loglik_infor <- t(c(stats::logLik(Model_bl), stats::logLik(Model), LR_AIC))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist <- list(coefficients = infor, loglik = loglik_infor)
return(mylist)
}
if (model == "linear") {
var_list <- c(resp, snp, gender, covars)
var_n <- length(var_list)
data <- data[, var_list]
snp_var <- data[, snp]
n_na <- (!is.na(snp_var))
data <- data[n_na, ]
male_snp <- data[data[, 3] == male, 2]
snp_type <- sum(levels(as.factor(male_snp)) %in% c("2"))
if (snp_type == 1) {
new_col <- data[, 2]
new_col[((data[, 3] == male) & (data[, 2] == 2))] <- 1
data <- cbind(data, new_col)
snp_new <- paste(snp, "XCI_E", sep = "_")
colnames(data)[var_n + 1] <- snp_new
formula <- paste(resp, "~", snp_new, "+", gender)
} else {
formula <- paste(resp, "~", snp, "+", gender)
}
formula_bl <- paste(resp, "~", gender)
if (length(covars) != 0) {
covar_formula <- paste(covars, collapse = "+")
formula_bl <- paste(formula_bl, covar_formula, sep = "+")
formula <- paste(formula, covar_formula, sep = "+")
}
        Model_bl <- stats::lm(stats::as.formula(formula_bl), data = data)
        Model <- stats::lm(stats::as.formula(formula), data = data)
LR_AIC <- stats::logLik(Model) - stats::logLik(Model_bl)
infor <- infor_table(x = summary(Model)$coefficient, snp = snp, covar_n = rownames(summary(Model)$coefficient), MAF_value = MAF_value, model)
loglik_infor <- t(c(stats::logLik(Model_bl), stats::logLik(Model), LR_AIC))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist <- list(coefficients = infor, loglik = loglik_infor)
return(mylist)
}
}
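
# A minimal usage sketch (commented out; fit_XCI_E_model() is internal and is
# normally reached through xlink_fit(option = list(type = "XCI-E"))). Column
# names below are those of the bundled Rdata set:
# xlink:::fit_XCI_E_model(resp = c(), os = "OS", ostime = "OS_time", snp = "snp_1",
#     gender = "gender", male = 1, female = 0, covars = c("Age"),
#     model = "survival", data = Rdata)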
| /scratch/gouwar.j/cran-all/cranData/xlink/R/fit_XCI_E_model.R |
#' Model fitting results for each SNP considered as XCI type
#'
#' \code{fit_XCI_model} returns model fitting results for each SNP treated as XCI type.
#'
#' @param resp Response variable for continuous or binary model fitting.
#' @param os Survival indicator, 1 for death, 0 for censoring.
#' @param ostime Duration time of survival.
#' @param snp Single SNP name.
#' @param gender Gender variable.
#' @param male Male indicator in gender variable.
#' @param female Female indicator in gender variable.
#' @param covars Covariates list.
#' @param model Fitting model type. For 'linear', a linear model is fitted; for 'binary', a logistic regression model; for 'survival', a Cox proportional hazards model.
#' @param data Data set.
#' @return It returns estimated parameters, confidence intervals and P values for each variable. Maximum log-likelihoods of the baseline and full models are also provided.
#' @seealso \code{\link[stats]{lm}} for linear model, \code{\link[stats]{glm}} for logistic regression model, and \code{\link[survival]{coxph}} for survival model.
#' @import survival
#' @import stats
fit_XCI_model <- function(resp, os, ostime, snp, gender, male, female, covars, model, data) {
MAF_value <- MAF(snp = snp, gender = gender, male = male, MAF_v = 0, data = data)[2]
if (model == "survival") {
var_list <- c(os, ostime, snp, gender, covars)
var_n <- length(var_list)
data <- data[, var_list]
snp_var <- data[, snp]
n_na <- (!is.na(snp_var))
data <- data[n_na, ]
male_snp <- data[data[, 4] == male, 3]
snp_type <- sum(levels(as.factor(male_snp)) %in% c("2"))
if (snp_type == 0) {
new_col <- data[, 3]
new_col[((data[, 4] == male) & (data[, 3] == 1))] <- 2
data <- cbind(data, new_col)
snp_new <- paste(snp, "XCI", sep = "_")
colnames(data)[var_n + 1] <- snp_new
formula <- paste("Surv", "(", ostime, ",", os, ")", "~", snp_new, "+", gender)
} else {
formula <- paste("Surv", "(", ostime, ",", os, ")", "~", snp, "+", gender)
}
formula_bl <- paste("Surv", "(", ostime, ",", os, ")", "~", gender)
if (length(covars) != 0) {
covar_formula <- paste(covars, collapse = "+")
formula_bl <- paste(formula_bl, covar_formula, sep = "+")
formula <- paste(formula, covar_formula, sep = "+")
}
        Model_bl <- survival::coxph(stats::as.formula(formula_bl), data = data)
        Model <- survival::coxph(stats::as.formula(formula), data = data)
        LR_AIC <- Model$loglik[2] - Model_bl$loglik[2]
        infor <- infor_table(x = summary(Model)$coefficients, snp = snp, covar_n = rownames(summary(Model)$coefficients), MAF_value = MAF_value, model)
        loglik_infor <- t(c(Model_bl$loglik[2], Model$loglik[2], LR_AIC))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist <- list(coefficients = infor, loglik = loglik_infor)
return(mylist)
}
if (model == "binary") {
var_list <- c(resp, snp, gender, covars)
var_n <- length(var_list)
data <- data[, var_list]
snp_var <- data[, snp]
n_na <- (!is.na(snp_var))
data <- data[n_na, ]
male_snp <- data[data[, 3] == male, 2]
snp_type <- sum(levels(as.factor(male_snp)) %in% c("2"))
if (snp_type == 0) {
new_col <- data[, 2]
new_col[((data[, 3] == male) & (data[, 2] == 1))] <- 2
data <- cbind(data, new_col)
snp_new <- paste(snp, "XCI", sep = "_")
colnames(data)[var_n + 1] <- snp_new
formula <- paste(resp, "~", snp_new, "+", gender)
} else {
formula <- paste(resp, "~", snp, "+", gender)
}
formula_bl <- paste(resp, "~", gender)
if (length(covars) != 0) {
covar_formula <- paste(covars, collapse = "+")
formula_bl <- paste(formula_bl, covar_formula, sep = "+")
formula <- paste(formula, covar_formula, sep = "+")
}
        Model_bl <- stats::glm(stats::as.formula(formula_bl), data = data, family = binomial(link = "logit"))
        Model <- stats::glm(stats::as.formula(formula), data = data, family = binomial(link = "logit"))
LR_AIC <- stats::logLik(Model) - stats::logLik(Model_bl)
infor <- infor_table(x = summary(Model)$coefficients, snp = snp, covar_n = rownames(summary(Model)$coefficients), MAF_value = MAF_value, model)
loglik_infor <- t(c(stats::logLik(Model_bl), stats::logLik(Model), LR_AIC))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist <- list(coefficients = infor, loglik = loglik_infor)
return(mylist)
}
if (model == "linear") {
var_list <- c(resp, snp, gender, covars)
var_n <- length(var_list)
data <- data[, var_list]
snp_var <- data[, snp]
n_na <- (!is.na(snp_var))
data <- data[n_na, ]
male_snp <- data[data[, 3] == male, 2]
snp_type <- sum(levels(as.factor(male_snp)) %in% c("2"))
if (snp_type == 0) {
new_col <- data[, 2]
new_col[((data[, 3] == male) & (data[, 2] == 1))] <- 2
data <- cbind(data, new_col)
snp_new <- paste(snp, "XCI", sep = "_")
colnames(data)[var_n + 1] <- snp_new
formula <- paste(resp, "~", snp_new, "+", gender)
} else {
formula <- paste(resp, "~", snp, "+", gender)
}
formula_bl <- paste(resp, "~", gender)
if (length(covars) != 0) {
covar_formula <- paste(covars, collapse = "+")
formula_bl <- paste(formula_bl, covar_formula, sep = "+")
formula <- paste(formula, covar_formula, sep = "+")
}
        Model_bl <- stats::lm(stats::as.formula(formula_bl), data = data)
        Model <- stats::lm(stats::as.formula(formula), data = data)
LR_AIC <- stats::logLik(Model) - stats::logLik(Model_bl)
infor <- infor_table(x = summary(Model)$coefficient, snp = snp, covar_n = rownames(summary(Model)$coefficient), MAF_value = MAF_value, model)
loglik_infor <- t(c(stats::logLik(Model_bl), stats::logLik(Model), LR_AIC))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist <- list(coefficients = infor, loglik = loglik_infor)
return(mylist)
}
}
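
# A minimal usage sketch (commented out; fit_XCI_model() is internal and is
# normally reached through xlink_fit(option = list(type = "XCI"))). For
# illustration the 0/1 survival indicator OS in the bundled Rdata set stands
# in for a binary response:
# xlink:::fit_XCI_model(resp = "OS", os = c(), ostime = c(), snp = "snp_1",
#     gender = "gender", male = 1, female = 0, covars = c("Age", "Smoking"),
#     model = "binary", data = Rdata)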
| /scratch/gouwar.j/cran-all/cranData/xlink/R/fit_XCI_model.R |
#' Model fitting results for each SNP considered as XCI, XCI-E and XCI-S types
#'
#' \code{fit_all_models} returns model fitting results for each SNP treated as XCI, XCI-E and XCI-S type in turn. Model comparison results are provided using AIC as the criterion.
#'
#' @param resp Response variable for continuous or binary model fitting.
#' @param os Survival indicator, 1 for death, 0 for censoring.
#' @param ostime Duration time of survival.
#' @param snp Single SNP name.
#' @param gender Gender variable.
#' @param male Male indicator in gender variable.
#' @param female Female indicator in gender variable.
#' @param covars Covariates list.
#' @param model Fitting model type. For 'linear', a linear model is fitted; for 'binary', a logistic regression model; for 'survival', a Cox proportional hazards model.
#' @param data Data set.
#' @return It returns estimated parameters, confidence intervals and P values for each variable. Maximum log-likelihoods of the baseline and full models are also provided.
#' @seealso \code{\link[stats]{lm}} for linear model, \code{\link[stats]{glm}} for logistic regression model, and \code{\link[survival]{coxph}} for survival model.
#' @import survival
#' @import stats
fit_all_models <- function(resp, os, ostime, snp, gender, male, female, covars, model, data) {
MAF_value <- MAF(snp = snp, gender = gender, male = male, MAF_v = 0, data = data)[2]
if (model == "survival") {
var_list <- c(os, ostime, snp, gender, covars)
var_n <- length(var_list)
data <- data[, var_list]
snp_var <- data[, snp]
n_na <- (!is.na(snp_var))
data <- data[n_na, ]
ind <- !((data[, 3] == 1) & (data[, 4] == female))
male_snp <- data[data[, 4] == male, 3]
snp_type <- sum(levels(as.factor(male_snp)) %in% c("2"))
if (snp_type == 1) {
new_col <- data[, 3]
new_col[((data[, 4] == male) & (data[, 3] == 2))] <- 1
snp_xci <- data[, 3]
} else {
new_col <- data[, 3]
new_col[((data[, 4] == male) & (data[, 3] == 1))] <- 2
snp_xci <- new_col
}
model_list <- c("XCI", "XCI-E")
model_list <- c(model_list[2 - snp_type], model_list[1 + snp_type])
data <- cbind(data, new_col)
snp_new <- paste(snp, c("XCI_E", "XCI")[2 - snp_type], sep = "_")
colnames(data)[var_n + 1] <- snp_new
formula_bl <- paste("Surv", "(", ostime, ",", os, ")", "~", gender)
formula_g <- paste("Surv", "(", ostime, ",", os, ")", "~", snp, "+", gender)
formula_a <- paste("Surv", "(", ostime, ",", os, ")", "~", snp_new, "+", gender)
formula_sk <- paste("Surv", "(", ostime, ",", os, ")", "~", "snp_sk", "+", gender)
if (length(covars) != 0) {
covar_formula <- paste(covars, collapse = "+")
formula_bl <- paste(formula_bl, covar_formula, sep = "+")
formula_g <- paste(formula_g, covar_formula, sep = "+")
formula_a <- paste(formula_a, covar_formula, sep = "+")
formula_sk <- paste(formula_sk, covar_formula, sep = "+")
}
        Model_bl <- survival::coxph(stats::as.formula(formula_bl), data = data)
        Model_g <- survival::coxph(stats::as.formula(formula_g), data = data)
        Model_a <- survival::coxph(stats::as.formula(formula_a), data = data)
coef_g <- Model_g$coefficients
coef_a <- Model_a$coefficients
LR_g <- Model_g$loglik[2] - Model_bl$loglik[2]
LR_a <- Model_a$loglik[2] - Model_bl$loglik[2]
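        # XCI-S re-codes heterozygous females (genotype 1) to an unknown dosage
        # gamma in [0, 2]; gamma is profiled out below by maximizing the partial
        # likelihood over this interval (the binary and linear branches repeat
        # the same construction).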
snp_skew <- function(x) {
value <- ind * snp_xci + (1 - ind) * x
return(value)
}
        formula_s <- paste(formula_bl, "+snp_skew(x)", sep = "")
goal_fun <- function(x) {
max_loglik <- survival::coxph(stats::as.formula(formula_s), data = cbind(data, snp_skew(x)))$loglik[2]
return(-max_loglik)
}
result <- stats::optim(par = 1, fn = goal_fun, lower = 0, upper = 2, method = "L-BFGS-B")
gamma <- result$par
LR_s <- -result$value - Model_bl$loglik[2]
snp_sk <- ind * snp_xci + (1 - ind) * gamma
Model_s <- survival::coxph(stats::as.formula(formula_sk), data = cbind(data, snp_sk))
Coef_AIC <- Model_s$coefficients
        if (is.na(Coef_AIC[1])) {
gamma <- NA
}
model_ch <- c("XCI-S")
        model_ind <- ((LR_a - 2) > (LR_s - 3) && (LR_a > LR_g)) + 2 * ((LR_g - 2) > (LR_s - 3) && (LR_g > LR_a)) + 3 * ((LR_a == LR_g) && (LR_g - 2) > (LR_s - 3))
if (model_ind == 1) {
model_ch <- model_list[2]
}
if (model_ind == 2) {
model_ch <- model_list[1]
}
if (model_ind == 3) {
model_ch <- c("XCI")
}
infor_g <- infor_table(x = summary(Model_g)$coefficients, snp = snp, covar_n = rownames(summary(Model_g)$coefficients), MAF_value = MAF_value, model)
loglik_infor <- t(c(Model_bl$loglik[2], Model_g$loglik[2], LR_g))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist_g <- list(coefficients = infor_g, loglik = loglik_infor)
infor_a <- infor_table(x = summary(Model_a)$coefficients, snp = snp, covar_n = rownames(summary(Model_a)$coefficients), MAF_value = MAF_value, model)
loglik_infor <- t(c(Model_bl$loglik[2], Model_a$loglik[2], LR_a))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist_a <- list(coefficients = infor_a, loglik = loglik_infor)
infor_s <- infor_table(x = summary(Model_s)$coefficients, snp = snp, covar_n = rownames(summary(Model_s)$coefficients), MAF_value = MAF_value, model)
loglik_infor <- t(c(Model_bl$loglik[2], Model_s$loglik[2], LR_s))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist_s <- list(coefficients = infor_s, loglik = loglik_infor, Gamma = gamma)
mylist <- list(mylist_g, mylist_a, mylist_s, model_ch)
names(mylist) <- c(model_list, "XCI-S", "Best model by AIC")
return(mylist)
}
if (model == "binary") {
var_list <- c(resp, snp, gender, covars)
var_n <- length(var_list)
data <- data[, var_list]
snp_var <- data[, snp]
n_na <- (!is.na(snp_var))
data <- data[n_na, ]
ind <- !((data[, 2] == 1) & (data[, 3] == female))
male_snp <- data[data[, 3] == male, 2]
snp_type <- sum(levels(as.factor(male_snp)) %in% c("2"))
if (snp_type == 1) {
new_col <- data[, 2]
new_col[((data[, 3] == male) & (data[, 2] == 2))] <- 1
snp_xci <- data[, 2]
} else {
new_col <- data[, 2]
new_col[((data[, 3] == male) & (data[, 2] == 1))] <- 2
snp_xci <- new_col
}
model_list <- c("XCI", "XCI-E")
model_list <- c(model_list[2 - snp_type], model_list[1 + snp_type])
data <- cbind(data, new_col)
snp_new <- paste(snp, c("XCI_E", "XCI")[2 - snp_type], sep = "_")
colnames(data)[var_n + 1] <- snp_new
formula_bl <- paste(resp, "~", gender)
formula_g <- paste(resp, "~", snp, "+", gender)
formula_a <- paste(resp, "~", snp_new, "+", gender)
formula_sk <- paste(resp, "~", "snp_sk", "+", gender)
if (length(covars) != 0) {
covar_formula <- paste(covars, collapse = "+")
formula_bl <- paste(formula_bl, covar_formula, sep = "+")
formula_g <- paste(formula_g, covar_formula, sep = "+")
formula_a <- paste(formula_a, covar_formula, sep = "+")
formula_sk <- paste(formula_sk, covar_formula, sep = "+")
}
        Model_bl <- stats::glm(stats::as.formula(formula_bl), data = data, family = binomial(link = "logit"))
        Model_g <- stats::glm(stats::as.formula(formula_g), data = data, family = binomial(link = "logit"))
        Model_a <- stats::glm(stats::as.formula(formula_a), data = data, family = binomial(link = "logit"))
coef_g <- Model_g$coefficients
coef_a <- Model_a$coefficients
LR_g <- stats::logLik(Model_g) - stats::logLik(Model_bl)
LR_a <- stats::logLik(Model_a) - stats::logLik(Model_bl)
snp_skew <- function(x) {
value <- ind * snp_xci + (1 - ind) * x
return(value)
}
        formula_s <- paste(formula_bl, "+snp_skew(x)", sep = "")
goal_fun <- function(x) {
max_loglik <- stats::logLik(stats::glm(stats::as.formula(formula_s), data = cbind(data, snp_skew(x)), family = binomial(link = "logit")))
return(-max_loglik)
}
result <- stats::optim(par = 1, fn = goal_fun, lower = 0, upper = 2, method = "L-BFGS-B")
gamma <- result$par
LR_s <- -result$value - stats::logLik(Model_bl)
snp_sk <- ind * snp_xci + (1 - ind) * gamma
Model_s <- stats::glm(stats::as.formula(formula_sk), data = cbind(data, snp_sk), family = binomial(link = "logit"))
Coef_AIC <- Model_s$coefficients
        if (is.na(Coef_AIC[1])) {
gamma <- NA
}
model_ch <- c("XCI-S")
        model_ind <- ((LR_a - 2) > (LR_s - 3) && (LR_a > LR_g)) + 2 * ((LR_g - 2) > (LR_s - 3) && (LR_g > LR_a)) + 3 * ((LR_a == LR_g) && (LR_g - 2) > (LR_s - 3))
if (model_ind == 1) {
model_ch <- model_list[2]
}
if (model_ind == 2) {
model_ch <- model_list[1]
}
if (model_ind == 3) {
model_ch <- c("XCI")
}
infor_g <- infor_table(x = summary(Model_g)$coefficients, snp = snp, covar_n = rownames(summary(Model_g)$coefficients), MAF_value = MAF_value, model)
loglik_infor <- t(c(stats::logLik(Model_bl), stats::logLik(Model_g), LR_g))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist_g <- list(coefficients = infor_g, loglik = loglik_infor)
infor_a <- infor_table(x = summary(Model_a)$coefficients, snp = snp, covar_n = rownames(summary(Model_a)$coefficients), MAF_value = MAF_value, model)
loglik_infor <- t(c(stats::logLik(Model_bl), stats::logLik(Model_a), LR_a))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist_a <- list(coefficients = infor_a, loglik = loglik_infor)
infor_s <- infor_table(x = summary(Model_s)$coefficients, snp = snp, covar_n = rownames(summary(Model_s)$coefficients), MAF_value = MAF_value, model)
loglik_infor <- t(c(stats::logLik(Model_bl), stats::logLik(Model_s), LR_s))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist_s <- list(coefficients = infor_s, loglik = loglik_infor, Gamma = gamma)
mylist <- list(mylist_g, mylist_a, mylist_s, model_ch)
names(mylist) <- c(model_list, "XCI-S", "Best model by AIC")
return(mylist)
}
if (model == "linear") {
var_list <- c(resp, snp, gender, covars)
var_n <- length(var_list)
data <- data[, var_list]
snp_var <- data[, snp]
n_na <- (!is.na(snp_var))
data <- data[n_na, ]
ind <- !((data[, 2] == 1) & (data[, 3] == female))
male_snp <- data[data[, 3] == male, 2]
snp_type <- sum(levels(as.factor(male_snp)) %in% c("2"))
if (snp_type == 1) {
new_col <- data[, 2]
new_col[((data[, 3] == male) & (data[, 2] == 2))] <- 1
snp_xci <- data[, 2]
} else {
new_col <- data[, 2]
new_col[((data[, 3] == male) & (data[, 2] == 1))] <- 2
snp_xci <- new_col
}
model_list <- c("XCI", "XCI-E")
model_list <- c(model_list[2 - snp_type], model_list[1 + snp_type])
data <- cbind(data, new_col)
snp_new <- paste(snp, c("XCI_E", "XCI")[2 - snp_type], sep = "_")
colnames(data)[var_n + 1] <- snp_new
formula_bl <- paste(resp, "~", gender)
formula_g <- paste(resp, "~", snp, "+", gender)
formula_a <- paste(resp, "~", snp_new, "+", gender)
formula_sk <- paste(resp, "~", "snp_sk", "+", gender)
if (length(covars) != 0) {
covar_formula <- paste(covars, collapse = "+")
formula_bl <- paste(formula_bl, covar_formula, sep = "+")
formula_g <- paste(formula_g, covar_formula, sep = "+")
formula_a <- paste(formula_a, covar_formula, sep = "+")
formula_sk <- paste(formula_sk, covar_formula, sep = "+")
}
        Model_bl <- stats::lm(stats::as.formula(formula_bl), data = data)
        Model_g <- stats::lm(stats::as.formula(formula_g), data = data)
        Model_a <- stats::lm(stats::as.formula(formula_a), data = data)
coef_g <- Model_g$coefficient
coef_a <- Model_a$coefficient
LR_g <- stats::logLik(Model_g) - stats::logLik(Model_bl)
LR_a <- stats::logLik(Model_a) - stats::logLik(Model_bl)
snp_skew <- function(x) {
value <- ind * snp_xci + (1 - ind) * x
return(value)
}
        formula_s <- paste(formula_bl, "+snp_skew(x)", sep = "")
goal_fun <- function(x) {
max_loglik <- stats::logLik(stats::lm(stats::as.formula(formula_s), data = cbind(data, snp_skew(x))))
return(-max_loglik)
}
result <- stats::optim(par = 1, fn = goal_fun, lower = 0, upper = 2, method = "L-BFGS-B")
gamma <- result$par
LR_s <- -result$value - stats::logLik(Model_bl)
snp_sk <- ind * snp_xci + (1 - ind) * gamma
Model_s <- stats::lm(stats::as.formula(formula_sk), data = cbind(data, snp_sk))
Coef_AIC <- Model_s$coefficient
        if (is.na(Coef_AIC[1])) {
gamma <- NA
}
model_ch <- c("XCI-S")
        model_ind <- ((LR_a - 2) > (LR_s - 3) && (LR_a > LR_g)) + 2 * ((LR_g - 2) > (LR_s - 3) && (LR_g > LR_a)) + 3 * ((LR_a == LR_g) && (LR_g - 2) > (LR_s - 3))
if (model_ind == 1) {
model_ch <- model_list[2]
}
if (model_ind == 2) {
model_ch <- model_list[1]
}
if (model_ind == 3) {
model_ch <- c("XCI")
}
infor_g <- infor_table(x = summary(Model_g)$coefficient, snp = snp, covar_n = rownames(summary(Model_g)$coefficient), MAF_value = MAF_value, model)
loglik_infor <- t(c(stats::logLik(Model_bl), stats::logLik(Model_g), LR_g))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist_g <- list(coefficients = infor_g, loglik = loglik_infor)
infor_a <- infor_table(x = summary(Model_a)$coefficient, snp = snp, covar_n = rownames(summary(Model_a)$coefficient), MAF_value = MAF_value, model)
loglik_infor <- t(c(stats::logLik(Model_bl), stats::logLik(Model_a), LR_a))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist_a <- list(coefficients = infor_a, loglik = loglik_infor)
infor_s <- infor_table(x = summary(Model_s)$coefficient, snp = snp, covar_n = rownames(summary(Model_s)$coefficient), MAF_value = MAF_value, model)
loglik_infor <- t(c(stats::logLik(Model_bl), stats::logLik(Model_s), LR_s))
colnames(loglik_infor) <- c("Baseline", "Full model", "Loglik ratio")
mylist_s <- list(coefficients = infor_s, loglik = loglik_infor, Gamma = gamma)
mylist <- list(mylist_g, mylist_a, mylist_s, model_ch)
names(mylist) <- c(model_list, "XCI-S", "Best model by AIC")
return(mylist)
}
}
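
# A minimal usage sketch (commented out; fit_all_models() is internal and is
# dispatched to by xlink_fit() under the default option type = "all"):
# xlink:::fit_all_models(resp = c(), os = "OS", ostime = "OS_time", snp = "snp_1",
#     gender = "gender", male = 1, female = 0, covars = c("Age", "Smoking", "Treatment"),
#     model = "survival", data = Rdata)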
| /scratch/gouwar.j/cran-all/cranData/xlink/R/fit_all_models.R |
#' Information table.
#'
#' \code{infor_table} returns an information table of estimated coefficients/hazard ratios, confidence intervals and P values.
#'
#' @param x An output from continuous/binary/survival model.
#' @param snp Single SNP name.
#' @param covar_n Covariate names.
#' @param MAF_value Minor allele frequency of the SNP.
#' @param model Model type.
#' @return Information table. If the linear or binary model is chosen, it returns estimated coefficients,
#' confidence intervals and P values. If the survival model is chosen, it returns hazard ratios, confidence intervals and P values.
infor_table <- function(x, snp, covar_n, MAF_value, model) {
if (model == "survival") {
covar_n[1] <- snp
CI_up <- round(exp(x[, 1] + 1.96 * x[, 3]), 4)
CI_low <- round(exp(x[, 1] - 1.96 * x[, 3]), 4)
hz <- round(x[, 2], 4)
pv <- x[, 5]
MAF_c <- c(MAF_value, rep("NA", length(covar_n) - 1))
CI <- paste("[", CI_low, ",", CI_up, "]", sep = "")
Table <- cbind(hz, as.data.frame(CI), pv, MAF_c)
rownames(Table) <- covar_n
colnames(Table) <- c("Hazard Ratio", "Confidence Interval (95%)", "P Value", "MAF")
return(Table)
}
if (model %in% c("binary", "linear")) {
covar_n[2] <- snp
CI_up <- round((x[, 1] + 1.96 * x[, 2]), 4)
CI_low <- round((x[, 1] - 1.96 * x[, 2]), 4)
es <- round(x[, 1], 4)
pv <- x[, 4]
MAF_c <- c("NA", MAF_value, rep("NA", length(covar_n) - 2))
CI <- paste("[", CI_low, ",", CI_up, "]", sep = "")
Table <- cbind(es, as.data.frame(CI), pv, MAF_c)
rownames(Table) <- covar_n
colnames(Table) <- c("Estimate", "Confidence Interval (95%)", "P Value", "MAF")
return(Table)
}
}
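
# A minimal usage sketch (commented out; the fit and the MAF value 0.2 are
# hypothetical, for illustration only):
# fit <- survival::coxph(survival::Surv(OS_time, OS) ~ snp_1 + gender, data = Rdata)
# xlink:::infor_table(x = summary(fit)$coefficients, snp = "snp_1",
#     covar_n = rownames(summary(fit)$coefficients), MAF_value = 0.2,
#     model = "survival")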
| /scratch/gouwar.j/cran-all/cranData/xlink/R/infor_table.R |
#' Selected results table by P value
#'
#' \code{select_output} returns information for SNPs selected by a P value threshold.
#'
#' @param input Input results from xlink fit.
#' @param pv_thold P value threshold for output.
#' @return It returns estimated parameters, confidence intervals, P values, MAF and best-model information.
#' @examples
#' Covars<-c("Age","Smoking","Treatment")
#' SNPs<-c("snp_1","snp_2","snp_3")
#' result<-xlink_fit(os="OS",ostime ="OS_time",snps=SNPs,gender ="gender",covars=Covars,
#' option =list(type="all",MAF_v=0.05), model="survival", data = Rdata)
#' select_output(input=result,pv_thold=10^-5)
#' @seealso \code{\link{xlink_fit}} for input results.
#' @export
select_output <- function(input, pv_thold = 1) {
var_name <- rownames(input[1][[1]][[1]])
if (length(var_name) != 0) {
model_def <- ("(Intercept)" %in% var_name) * 1
snp_num <- length(input)
if (model_def == 0) {
            p_value_output <- function(x) {
R <- input[x][[1]][[1]][1, 3]
return(R)
}
snp_name <- function(x) {
R <- rownames(input[x][[1]][[1]][1, ])
return(R)
}
            all_P_vec <- unlist(lapply(1:snp_num, p_value_output))
            if (sum(all_P_vec <= pv_thold) != 0) {
select_snp_vec <- (which(all_P_vec <= pv_thold) - 1) * length(var_name) + 1
snp_name_vec <- unlist(lapply(1:snp_num, snp_name))
} else {
return("No SNP's P value satisfies pv_thold requirement.")
}
} else {
            p_value_output <- function(x) {
R <- input[x][[1]][[1]][2, 3]
return(R)
}
snp_name <- function(x) {
R <- rownames(input[x][[1]][[1]][2, ])
return(R)
}
            all_P_vec <- unlist(lapply(1:snp_num, p_value_output))
            if (sum(all_P_vec <= pv_thold) != 0) {
select_snp_vec <- (which(all_P_vec <= pv_thold) - 1) * length(var_name) + 2
snp_name_vec <- unlist(lapply(1:snp_num, snp_name))
} else {
return("No SNP's P value satisfies pv_thold requirement.")
}
}
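        # do.call(Map, c(rbind, input)) row-binds the per-SNP coefficient tables
        # (element 1) and log-likelihood tables (element 2) across all SNPs; the
        # stacked coefficient table is then subset to the rows passing pv_thold.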
table_out <- do.call(Map, c(rbind, input))[1][[1]][select_snp_vec, ]
        rownames(table_out) <- snp_name_vec[which(all_P_vec <= pv_thold)]
results <- table_out
} else {
var_name <- rownames(input[1][[1]][[1]][[1]])
model_def <- ("(Intercept)" %in% var_name) * 1
snp_num <- length(input)
if (model_def == 0) {
best_model_vec <- function(x) {
R1 <- input[x][[1]][[4]][[1]]
R2 <- which((names(input[1][[1]]) == R1))
P_v_b <- input[x][[1]][[R2]][[1]][1, 3]
if (P_v_b <= pv_thold) {
if (R2 == 3) {
gamma <- input[x][[1]][[3]][[3]]
} else {
gamma <- NA
}
                    R5 <- cbind(input[x][[1]][[R2]][[1]][1, ], R1, gamma)
return(R5)
} else {
return(NULL)
}
}
all_best_model_vec <- lapply(1:snp_num, best_model_vec)
            table_out <- as.data.frame(all_best_model_vec[1])
for (i in 2:length(all_best_model_vec)) {
table_out <- rbind(table_out, as.data.frame(all_best_model_vec[i]))
}
table_out <- cbind(rownames(table_out), table_out)
            if (dim(table_out)[1] != 0) {
colnames(table_out) <- c("SNP", "Hazard Ratio", "Confidence Interval (95%)", "P Value", "MAF", "Best model", "Gamma")
rownames(table_out) <- c()
results <- table_out
} else {
return("No SNP's P value satisfies pv_thold requirement.")
}
} else {
best_model_vec <- function(x) {
R1 <- input[x][[1]][[4]][[1]]
R2 <- which((names(input[1][[1]]) == R1))
P_v_b <- input[x][[1]][[R2]][[1]][2, 3]
if (P_v_b <= pv_thold) {
if (R2 == 3) {
gamma <- input[x][[1]][[3]][[3]]
} else {
gamma <- NA
}
                    R5 <- cbind(input[x][[1]][[R2]][[1]][2, ], R1, gamma)
return(R5)
} else {
return(NULL)
}
}
all_best_model_vec <- lapply(1:snp_num, best_model_vec)
            table_out <- as.data.frame(all_best_model_vec[1])
for (i in 2:length(all_best_model_vec)) {
table_out <- rbind(table_out, as.data.frame(all_best_model_vec[i]))
}
table_out <- cbind(rownames(table_out), table_out)
            if (dim(table_out)[1] != 0) {
colnames(table_out) <- c("SNP", "Estimate", "Confidence Interval (95%)", "P Value", "MAF", "Best model", "Gamma")
rownames(table_out) <- c()
results <- table_out
} else {
return("No SNP's P value satisfies pv_thold requirement.")
}
}
}
return(results)
}
| /scratch/gouwar.j/cran-all/cranData/xlink/R/select_output.R |
#' xlink: A package for genetic association models for X-chromosome SNPS on continuous, binary and survival outcomes.
#'
#' The expression of X-chromosome undergoes three possible biological processes: X-chromosome inactivation (XCI),
#' escape of the X-chromosome inactivation (XCI-E),and skewed X-chromosome inactivation (XCI-S).
#' To analyze the X-linked genetic association for phenotype such as continuous, binary, and time-to-event outcomes with
#' the actual process unknown, we propose a unified approach of maximizing the likelihood or partial likelihood over
#' all of the potential biological processes.
#'
#'
#' @section xlink functions:
#' xlink_fit, select_output.
#'
#' @docType package
#' @name xlink
#' @references Xu, Wei, and Meiling Hao. 'A unified partial likelihood approach for X-chromosome association on time-to-event outcomes.' Genetic epidemiology 42.1 (2018): 80-94.
NULL
| /scratch/gouwar.j/cran-all/cranData/xlink/R/xlink.R |
#' Genetic association models for X-chromosome SNPs on continuous, binary and survival outcomes
#'
#' \code{xlink_fit} returns model fitting results for each SNP with the covariates.
#'
#' @param resp Response variable for continuous or binary model fitting.
#' @param os Survival indicator, 1 for death, 0 for censoring.
#' @param ostime Duration time of survival.
#' @param snps SNP name list for model fitting.
#' @param gender Gender information must be included in the data. Default setting is male=1 and female=0. If not as default setting, please provide male and female information in the option.
#' @param covars Covariates list if needed.
#' @param option A list with three optional elements. First, 'type' defaults to 'all', which fits each SNP under the 'XCI', 'XCI-E' and 'XCI-S' types respectively; if 'type' is 'XCI' or 'XCI-E', all SNPs are treated as that type in the corresponding model. Second, if gender is not coded as the default (male=1, female=0), the male and female codes should be supplied here. Third, 'MAF_v' is the lower bound on the minor allele frequency; SNPs with MAF below this value are not used in xlink_fit.
#' @param model Fitting model. For 'linear', a linear model is fitted; for 'binary', a logistic regression model; for 'survival', a Cox proportional hazards model.
#' @param data Data set.
#' @return It returns estimated parameters, confidence intervals and P values for each variable in the chosen model. Maximum log-likelihoods of the baseline and full models are provided. If type is 'all', the best model choice is reported using AIC as the benchmark.
#' @examples
#' Covars<-c("Age","Smoking","Treatment")
#' SNPs<-c("snp_1","snp_2","snp_3")
#' xlink_fit(os="OS",ostime="OS_time",snps=SNPs,gender="gender",covars=Covars,
#' option =list(MAF_v=0.05),model="survival",data = Rdata)
#' xlink_fit(resp="OS_time",snps=SNPs,gender="gender",option =list(type="XCI",MAF_v=0.05),
#' model="linear",data = Rdata)
#' @seealso \code{\link[stats]{lm}} for linear model, \code{\link[stats]{glm}} for logistic regression model, and \code{\link[survival]{coxph}} for survival model.
#' @references Xu, Wei, and Meiling Hao. 'A unified partial likelihood approach for X-chromosome association on time-to-event outcomes.' Genetic epidemiology 42.1 (2018): 80-94.
#' @references Han, D., Hao, M., Qu, L., & Xu, W. (2019). A novel model for the X-chromosome inactivation association on survival data. Statistical Methods in Medical Research.
#' @export
#' @import survival
#' @import stats
xlink_fit <- function(resp = c(), os = c(), ostime = c(), snps = c(), gender = c(), covars = c(), option = c(type = c(), male = c(), female = c(), MAF_v = 0),
model = c(), data) {
requireNamespace("survival")
if (length(model) == 0) {
stop("Model type needed.")
} else if (model == "survival") {
if (length(os) == 0 || length(ostime) == 0) {
stop("Survival information needed.")
}
if (length(option$type) != 0) {
modeltype <- option$type
} else {
modeltype <- "all"
}
} else if (model %in% c("binary", "linear")) {
if (length(resp) == 0) {
stop("Response variable needed.")
}
if (length(option$type) != 0) {
modeltype <- option$type
} else {
modeltype <- "all"
}
} else {
stop("Model type incorrect.")
}
if (length(gender) == 0) {
stop("Gender information needed.")
} else {
gender_Lv <- levels(as.factor(data[, gender]))
}
if (length(option$male) != 0) {
male <- option$male
female <- option$female
if (sum(c(male, female) %in% gender_Lv) != 2) {
stop("Male and female information incorrect.")
}
} else {
if (sum(c("0", "1") %in% gender_Lv) != 2) {
stop("Male and female information needed.")
}
male <- 1
female <- 0
}
    MAF_select <- function(x) {
        res <- MAF(snp = x, gender = gender, male = male, MAF_v = option$MAF_v, data = data)
        return(res)
    }
    infor_all <- function(x) {
        res <- fit_all_models(resp = resp, os = os, ostime = ostime, snp = x, gender = gender, male = male, female = female, covars = covars, model = model, data = data)
        return(res)
    }
    infor_XCI <- function(x) {
        res <- fit_XCI_model(resp = resp, os = os, ostime = ostime, snp = x, gender = gender, male = male, female = female, covars = covars, model = model, data = data)
        return(res)
    }
    infor_XCI_E <- function(x) {
        res <- fit_XCI_E_model(resp = resp, os = os, ostime = ostime, snp = x, gender = gender, male = male, female = female, covars = covars, model = model, data = data)
        return(res)
    }
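    # MAF() returns a (SNP name, MAF) pair for each SNP passing the MAF_v
    # filter, so after unlist() the odd entries are the retained SNP names and
    # the even entries their minor allele frequencies.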
infor_snp <- unlist(lapply(snps, MAF_select))
snp_select <- infor_snp[(1:length(infor_snp)%%2 == 1)]
snp_select_MAF <- as.numeric(infor_snp[(1:length(infor_snp)%%2 == 0)])
if (modeltype == "all") {
results <- base::lapply(snp_select, infor_all)
}
if (modeltype == "XCI") {
results <- base::lapply(snp_select, infor_XCI)
}
if (modeltype == "XCI-E") {
results <- base::lapply(snp_select, infor_XCI_E)
}
names(results) <- snp_select
return(results)
}
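
# A further usage sketch for the binary model (commented out; illustrative
# only, with the 0/1 survival indicator OS standing in for a binary response):
# xlink_fit(resp = "OS", snps = c("snp_1", "snp_2"), gender = "gender",
#     covars = c("Age"), option = list(type = "XCI-E", MAF_v = 0.05),
#     model = "binary", data = Rdata)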
| /scratch/gouwar.j/cran-all/cranData/xlink/R/xlink_fit.R |
## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----eval=FALSE----------------------------------------------------------
# library("devtools")
# install_github("qiuanzhu/xlink")
#
## ----eval=FALSE----------------------------------------------------------
# library("xlink")
# head(Rdata)
#
## ---- echo=FALSE, results='asis'-----------------------------------------
library(xlink)
knitr::kable(head(Rdata))
## ----eval=FALSE----------------------------------------------------------
# Covars<-c("Age","Smoking","Treatment")
# SNPs<-c("snp_1","snp_2")
# output<-xlink_fit(os="OS",ostime="OS_time",snps=SNPs,gender="gender",covars=Covars, option =list(type="XCI",MAF_v=0.05),model="survival",data = Rdata)
#
## ----echo=FALSE, results='asis'------------------------------------------
Covars<-c("Age","Smoking","Treatment")
SNPs<-c("snp_1","snp_2")
output<-xlink_fit(os="OS",ostime="OS_time",snps=SNPs,gender="gender",covars=Covars, option =list(type="XCI",MAF_v=0.05),model="survival",data = Rdata)
## ----echo=FALSE, results='asis'------------------------------------------
knitr::kable(output[1]$snp_1$coefficients)
knitr::kable(output[1]$snp_1$loglik)
## ----eval=FALSE----------------------------------------------------------
# Covars<-c("Age","Smoking","Treatment")
# SNPs<-c("snp_1","snp_2")
# output<-xlink_fit(os="OS",ostime="OS_time",snps=SNPs,gender="gender",covars=Covars, option =list(type="all",MAF_v=0.05),model="survival",data = Rdata)
#
## ----echo=FALSE, results='asis'------------------------------------------
output<-xlink_fit(os="OS",ostime="OS_time",snps=SNPs,gender="gender",covars=Covars, option =list(MAF_v=0.1),model="survival",data = Rdata)
knitr::kable(output$snp_1$`XCI-E`$coefficients)
knitr::kable(output$snp_1$`XCI-E`$loglik)
## ----echo=FALSE, results='asis'------------------------------------------
knitr::kable(output$snp_1$`XCI`$coefficients)
knitr::kable(output$snp_1$`XCI`$loglik)
## ----echo=FALSE, results='asis'------------------------------------------
knitr::kable(output$snp_1$`XCI-S`$coefficients)
knitr::kable(output$snp_1$`XCI-S`$loglik)
knitr::kable(output$snp_1$`XCI-S`$Gamma , col.names ="Gamma")
## ----echo=FALSE, results='asis'------------------------------------------
knitr::kable(output$snp_1$`Best model by AIC`, col.names = "Best model by AIC" )
## ----eval=FALSE----------------------------------------------------------
# Covars<-c("Age","Smoking","Treatment")
# SNPs<-c("snp_1","snp_2","snp_3")
# result<-xlink_fit(os="OS",ostime ="OS_time",snps=SNPs,gender ="gender",covars=Covars,
# option =list(type="all",MAF_v=0.05), model="survival", data = Rdata)
# select_output(input=result,pv_thold=10^-5)
#
## ----echo=FALSE, results='asis'------------------------------------------
Covars<-c("Age","Smoking","Treatment")
SNPs<-c("snp_1","snp_2","snp_3")
result<-xlink_fit(os="OS",ostime ="OS_time",snps=SNPs,gender ="gender",covars=Covars,
option =list(type="all",MAF_v=0.05), model="survival", data = Rdata)
knitr::kable( select_output(input=result,pv_thold=10^-5) )
| /scratch/gouwar.j/cran-all/cranData/xlink/inst/doc/xlink.R |