# THIS FILE CONTAINS ALL THE FUNCTIONS FOR ESTIMATING ALLELE FREQUENCIES
#
# =====================================================================================
#' Estimate allele frequencies
#'
#' @param genotype_matrix matrix of genotypes
#' @param ploidy_level level of ploidy
#'
#' @return matrix of allele frequencies
#'
#' @examples
#' data("APIS_offspring")
#' allele_frequencies = get_allele_frequencies(genotype_matrix = APIS_offspring,
#' ploidy_level = 2)
#'
#' @keywords internal
#' @noRd
#'
get_allele_frequencies <- function(genotype_matrix, ploidy_level = 2) {
# DESCRIPTION
# Estimate allele frequencies based on genotype matrix
#
# INPUTS
# genotype_matrix : genotype matrix in the APIS format
# ploidy_level : ploidy level of the individuals of the genotype_matrix
#
# OUTPUTS
# result : dataframe with the count of each allele and their frequency
genotype_matrix_two_columns <- matrix(NA, nrow = nrow(genotype_matrix), ncol = ploidy_level*ncol(genotype_matrix))
import_column <- seq(1,ncol(genotype_matrix_two_columns),ploidy_level)
# Divide each genotype (coded A/A) into 2 columns
shift <- ploidy_level - 1
for (i in c(1:ncol(genotype_matrix))) {
tmp <- strsplit(genotype_matrix[,i], split = '/', fixed = TRUE)
M <- t(mapply(FUN = function(X) {X}, tmp))
genotype_matrix_two_columns[,(import_column[i]:(import_column[i]+shift))] <- M
}
# List of the different alleles
variant <- sort(unique(as.vector(genotype_matrix_two_columns)))
# Create the results matrix
result_matrix <- matrix(0, nrow = ncol(genotype_matrix), ncol = length(variant))
rownames(result_matrix) <- colnames(genotype_matrix)
colnames(result_matrix) <- variant
for (n in 1:nrow(result_matrix)) {
tmp <- table(genotype_matrix_two_columns[,(import_column[n]:(import_column[n]+shift))])
result_matrix[n,match(names(tmp), colnames(result_matrix))] <- tmp
}
# Estimate the allele frequencies
frequency_matrix <- result_matrix / (rowSums(result_matrix[,which(colnames(result_matrix) != 'NA')]))
colnames(frequency_matrix) <- paste0('Freq_', colnames(result_matrix))
# Merge the results
result <- cbind(result_matrix, tot = rowSums(result_matrix), frequency_matrix)
# Return the result
return(result)
}
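# Illustrative sketch of the output format (hypothetical 2-marker diploid input):
# geno <- matrix(c("A/A", "A/B", "B/B", "A/B"), nrow = 2,
#                dimnames = list(c("id1", "id2"), c("mk1", "mk2")))
# get_allele_frequencies(geno, ploidy_level = 2)
# #>     A B tot Freq_A Freq_B
# #> mk1 3 1   4   0.75   0.25
# #> mk2 1 3   4   0.25   0.75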
#' Create variant dataframe
#'
#' @param offspring_genotype matrix of the offspring genotypes
#' @param sire_genotype matrix of the sire genotypes
#' @param dam_genotype matrix of the dam genotypes
#'
#' @return dataframe with the correspondence between input allele and recoded allele
#'
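#' @examples
#' # illustrative sketch: each allele gets an integer recode, "NA" is coded 0
#' offspring <- matrix("A/B", nrow = 1, ncol = 1)
#' parent <- matrix("A/A", nrow = 1, ncol = 1)
#' get_variant_dataframe(offspring, parent, parent)
#' # variant recode: A -> 1, B -> 2, NA -> 0
#'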
#' @keywords internal
#' @noRd
get_variant_dataframe <- function(offspring_genotype, sire_genotype, dam_genotype) {
# DESCRIPTION
# Get all the alleles for all the markers in the population
#
# INPUTS
# offspring_genotype : genotype matrix of the offspring in the APIS format
# sire_genotype : genotype matrix of the sires in the APIS format
# dam_genotype : genotype matrix of the dams in the APIS format
#
# OUTPUTS
# variant_dataframe : dataframe with all possible alleles for all the markers and their new code
variant <- unique(unlist(strsplit(as.vector(rbind(offspring_genotype, sire_genotype, dam_genotype)), '/')))
NA_variant <- which(variant == "NA")
if (length(NA_variant) > 0) {
variant <- variant[-NA_variant]
}
variant_dataframe <- data.frame(variant = as.character(variant),
recode = c(1:length(variant)))
variant_dataframe$variant <- as.character(variant_dataframe$variant)
variant_dataframe <- rbind(variant_dataframe, c(as.character("NA"), 0))
return(variant_dataframe)
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/functions_allele_frequency.R |
# THIS FILE CONTAINS ALL THE FUNCTIONS FOR ASSIGNMENT POWER
#
# =====================================================================================
#' Assignment power
#'
#' @param sire_genotype matrix of the sire genotypes
#' @param dam_genotype matrix of the dam genotypes
#' @param ploidy_level ploidy level of the parents
#' @param verbose if TRUE, display information on the screen
#'
#' @return the theoretical assignment power calculated with the formula proposed in Vandeputte (2012)
#'
#' @examples
#' data("APIS_sire")
#' data("APIS_dam")
#'
#' P = assignment_power(sire_genotype = APIS_sire, dam_genotype = APIS_dam)
#'
#' @export
assignment_power <- function(sire_genotype, dam_genotype,
ploidy_level = 2,
verbose = FALSE) {
# DESCRIPTION
# This function calculates the theoretical assignment power as proposed in Vandeputte, M (2012)
#
# INPUTS
# sire_genotype : genotype matrix of the sires in the APIS format
# dam_genotype : genotype matrix of the dams in the APIS format
# ploidy_level : ploidy level of the parental population
# verbose : display information on the screen
#
# OUTPUTS
# P : the theoretical assignment power
parent_population <- rbind(sire_genotype, dam_genotype)
# Call get_allele_frequencies() to calculate the parental allele frequencies
allele_frequency <- get_allele_frequencies(as.matrix(parent_population),
ploidy_level = ploidy_level)
column <- which(colnames(allele_frequency) == 'tot')
calculate_frequency <- as.data.frame(allele_frequency[,((column+1):ncol(allele_frequency))])
# look up the "Freq_NA" column within calculate_frequency itself so the index
# is valid when removing it
test_NA <- which(colnames(calculate_frequency) == "Freq_NA")
if (length(test_NA) != 0) {
calculate_frequency <- calculate_frequency[,-test_NA]
}
total_column <- ncol(calculate_frequency)
# Calculate Q1 and Q3 for each marker
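# (Per Vandeputte (2012), Q1i is presumably the per-marker probability of
# excluding a parent pair that shares one true parent, and Q3i of excluding a
# pair that shares none; both are combined across markers below.)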
calculate_frequency$Q1i <- 1 - 2*rowSums(calculate_frequency[,1:total_column]^2) +
rowSums(calculate_frequency[,1:total_column]^3) + 2*rowSums(calculate_frequency[,1:total_column]^4) -
2*rowSums(calculate_frequency[,1:total_column]^2)^2 - 3*rowSums(calculate_frequency[,1:total_column]^5) +
3*rowSums(calculate_frequency[,1:total_column]^3)*rowSums(calculate_frequency[,1:total_column]^2)
calculate_frequency$Q3i <- 1 + 4*rowSums(calculate_frequency[,1:total_column]^4) -
4*rowSums(calculate_frequency[,1:total_column]^5) - 3*rowSums(calculate_frequency[,1:total_column]^6) -
8*rowSums(calculate_frequency[,1:total_column]^2)^2 + 2*rowSums(calculate_frequency[,1:total_column]^3)^2 +
8*rowSums(calculate_frequency[,1:total_column]^3)*rowSums(calculate_frequency[,1:total_column]^2)
# Calculate the global Q1 and Q3
Q1 <- 1 - prod(1-calculate_frequency$Q1i)
Q3 <- 1 - prod(1-calculate_frequency$Q3i)
# Calculate the assignment power
P <- Q1^(nrow(dam_genotype)+nrow(sire_genotype)-2)*Q3^((nrow(dam_genotype)-1)*(nrow(sire_genotype)-1))
if (verbose) {
P_char = substr(P, 1, 5)
cat(paste0("theoretical assignment power : ", P_char))
cat("\n")
cat('---------------------------------------------------', sep = '\n')
}
# Return the result
return(P)
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/functions_assignment_power.R |
# THIS FILE CONTAINS ALL THE FUNCTIONS FOR ASSIGNMENT RATE
#
# =====================================================================================
#' Assignment rate
#'
#' @param pedigree pedigree file (from the APIS_2n() or APIS_3n() function)
#'
#' @return assignment rate
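#'
#' @examples
#' # minimal sketch with a hypothetical pedigree: 2 of 3 offspring assigned
#' ped <- data.frame(offspring = c("o1", "o2", "o3"),
#'                   sire = c("s1", NA, "s2"),
#'                   dam = c("d1", NA, "d2"))
#' get_assignment_rate(ped) # 2/3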
#' @keywords internal
#' @noRd
#'
get_assignment_rate <- function(pedigree) {
# DESCRIPTION
# compute the assignment rate (as a proportion in [0, 1])
#
# INPUTS
# pedigree : the pedigree file
#
# OUTPUTS
# assignment_rate : the assignment rate
individual_assigned = length(which(!is.na(pedigree$sire)))
assignment_rate = individual_assigned / nrow(pedigree)
return(assignment_rate)
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/functions_assignment_rate.R |
# THIS FILE CONTAINS ALL THE FUNCTIONS FOR CHECKING INPUTS
#
# =====================================================================================
# GENERAL PART
# ======================================================================================
#' Check matrix format
#'
#' @param genotype_matrix genotype matrix
#' @param verbose if TRUE, display information on the screen
#'
#' @importFrom methods is
#'
#' @keywords internal
#' @noRd
check_matrices_format <- function(genotype_matrix, verbose = FALSE) {
# DESCRIPTION
# This function checks the matrices format
#
# INPUTS
# genotype_matrix : genotype matrix in the APIS format
# verbose : display information on the screen
#
# OUTPUTS
# stop if an error is detected
# Check if genotype_matrix is a matrix
if (!is(genotype_matrix, 'matrix')) {
stop("The genotype matrices should be a 'matrix' object : use the function as.matrix() ")
}
# Check if genotype_matrix is a character matrix
if (!is.character(genotype_matrix)) {
stop("The genotype matrices should be filled with 'character' values : check the genotype format and use the function storage.mode() ")
}
if (verbose) {
cat(paste0(deparse(substitute(genotype_matrix)), " matrix format : OK"))
cat("\n")
}
}
#' Check genotypes
#'
#' @param offspring_genotype offspring genotype matrix
#' @param sire_genotype sire genotype matrix
#' @param dam_genotype dam genotype matrix
#' @param verbose if TRUE, display information on the screen
#'
#' @return list of all genotypes matrices
#'
#' @keywords internal
#' @noRd
check_genotypes <- function(offspring_genotype, sire_genotype, dam_genotype,
offspring_ploidy_level = 2,
parental_ploidy_level = 2,
verbose = FALSE) {
# DESCRIPTION
# Check different genotype format errors
#
# INPUTS
# offspring_genotype : genotype matrix of the offspring in the APIS format
# sire_genotype : genotype matrix of the sires in the APIS format
# dam_genotype : genotype matrix of the dams in the APIS format
# offspring_ploidy_level : ploidy level of the offspring
# parental_ploidy_level : ploidy level of the parental population
# verbose : display information on the screen
#
# OUTPUTS
# list of the three matrices after quality control
# Check if all matrices have the same number of markers
if (ncol(offspring_genotype) != ncol(sire_genotype) || ncol(offspring_genotype) != ncol(dam_genotype)) {
stop("All genotype matrices (offspring_genotype, sire_genotype and dam_genotype) should have the same numbers of markers")
}
# Check for offspring genotype marker that is not genotyped
offspring_ploidy_NA = paste0(rep("NA", times = offspring_ploidy_level), collapse = "/")
check_NA_offspring <- function(X) {
isTRUE(all(X == offspring_ploidy_NA))
}
parental_ploidy_NA = paste0(rep("NA", times = parental_ploidy_level), collapse = "/")
check_NA_parent <- function(X) {
isTRUE(all(X == parental_ploidy_NA))
}
offspring_marker_genotype <- apply(offspring_genotype, 2, check_NA_offspring)
sire_marker_genotype <- apply(sire_genotype, 2, check_NA_parent)
dam_marker_genotype <- apply(dam_genotype, 2, check_NA_parent)
marker_non_genotyped <- which(offspring_marker_genotype | sire_marker_genotype | dam_marker_genotype)
if (length(marker_non_genotyped) == 0) {
# all markers are genotyped
} else {
id_marker <- colnames(offspring_genotype)[marker_non_genotyped]
offspring_genotype <- offspring_genotype[, -marker_non_genotyped]
sire_genotype <- sire_genotype[, -marker_non_genotyped]
dam_genotype <- dam_genotype[, -marker_non_genotyped]
if (verbose) {
message(paste0("marker(s) : ", paste0(id_marker, collapse = " / "), " with no genotypes were removed"))
}
}
return(list(offspring_genotype, sire_genotype, dam_genotype))
}
#' Check parameters
#'
#' @param exclusion_threshold the exclusion threshold
#' @param number_marker number of markers genotyped
#' @param error maximum error rate tolerated in the pedigree
#' @param verbose if TRUE, display information on the screen
#'
#' @importFrom methods is
#'
#' @keywords internal
#' @noRd
check_input_parameters <- function(exclusion_threshold, number_marker, error,
verbose = FALSE) {
# DESCRIPTION
# Check the input parameters : exclusion threshold and maximum error accepted
#
# INPUTS
# exclusion_threshold : exclusion threshold
# number_marker : number of markers
# error : maximum error rate allowed with the "mendel" method (in [0, 1])
# verbose : display information on the screen
#
# OUTPUTS
# stop if errors are detected
# Check if exclusion_threshold is a numeric variable
if (!is(exclusion_threshold, 'numeric')) {
if (! is.null(exclusion_threshold)){
stop("exclusion_threshold parameter should be a 'numeric variable' or NULL (default) : use the function as.numeric() ")
}
} else {
# Check if the number of mismatches allowed is lower than the number of markers and positive
if ((exclusion_threshold >= 0) && (exclusion_threshold <= number_marker)) {
# OK
} else {
stop("The exclusion threshold is not in the correct range : should be in the range [0, number of markers]")
}
}
# Check if the error parameter is a numeric variable
if (!is(error, 'numeric')) {
stop("error parameter should be a 'numeric variable' : use the function as.numeric() ")
}
# Check if the error in the range [0, 1]
if ((error < 0) | (error > 1)) {
stop("The error is not in the correct range : should be in the range [0, 1]")
}
if (verbose) {
cat('exclusion_threshold parameter : OK', sep = "\n")
cat('error parameter : OK', sep = "\n")
cat('---------------------------------------------------', sep = '\n')
}
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/functions_check.R |
#' Find delimiter
#'
#' @param string a string in which to detect the field delimiter (for example during file import)
#'
#' @return the delimiter
#'
#' @keywords internal
#' @noRd
#'
#' @examples
#' string = c("What;is;my;delimiter?")
#' delim = find_delim(string)
#' delim
#'
find_delim <- function(string) {
non_alnum <- gsub("[[:alnum:]]", "", string)
non_alnum_count <- table(strsplit(non_alnum, "")[[1]])
if (length(non_alnum_count)>0){
df <- data.frame(Charac = names(non_alnum_count), Occu = as.numeric(non_alnum_count), stringsAsFactors = FALSE)
poss_charac = c(" ",";",",","\t"," ")
df=df[df$Charac %in% poss_charac,]
if (nrow(df)>0){
del=df$Charac[which(df$Occu==max(df$Occu))]
return(del)
} else {
return("") #default for read.table
}
} else {
return("") #default for read.table
}
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/functions_find_delim.R |
# THIS FILE CONTAINS ALL THE FUNCTIONS FOR IMPORTING INPUTS
#
# =====================================================================================
# =================================================================================
#
# IMPORT FROM OTHER SOFTWARE FUNCTIONS
#
#' Import from Plink .ped
#'
#' @param ped_file name of the ped file (from Plink)
#' @param no_fid if "no_fid" parameter was used in plink (default : FALSE)
#' @param no_parents if "no_parents" parameter was used in plink (default : FALSE)
#' @param no_sex if "no_sex" parameter was used in plink (default : FALSE)
#' @param no_pheno if "no_pheno" parameter was used in plink (default : FALSE)
#' @param marker_names list of marker names (default : NULL)
#'
#' @return matrix of genotypes for APIS
#'
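#' @examples
#' # illustrative sketch; "genotypes.ped" is a hypothetical file path
#' \dontrun{
#' geno <- import_from_ped("genotypes.ped", no_fid = TRUE, no_sex = TRUE)
#' geno[1:2, 1:2] # genotypes coded like "A/C"; missing genotypes as "NA/NA"
#' }
#'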
#' @importFrom data.table fread
#'
#' @export
import_from_ped <- function(ped_file,
no_fid = FALSE,
no_parents = FALSE,
no_sex = FALSE,
no_pheno = FALSE,
marker_names = NULL) {
# DESCRIPTION
# import .ped file in the APIS format
#
# INPUTS
# ped_file : path to the .ped file
# no_fid : "no_fid" argument from plink
# no_parents : "no_parents" argument from plink
# no_sex : "no_sex" argument from plink
# no_pheno : "no_pheno" argument from plink
# marker_names : names of the markers
#
# OUTPUTS
# ped_file : genotype matrix in the APIS format
ped_file = as.data.frame(data.table::fread(ped_file, sep = "\t"))
plink_param = c(!no_parents, !no_parents, !no_sex, !no_pheno)
if (!no_fid) {
ped_file = ped_file[, -1]
}
rownames(ped_file) = ped_file[, 1] ; ped_file = ped_file[, -1]
col_remove = sum(plink_param)
if (col_remove > 0) {
ped_file = ped_file[, -c(1:col_remove)]
}
if (is.null(marker_names)) {
colnames(ped_file) = paste0('marker_', c(1:ncol(ped_file)))
} else {
colnames(ped_file) = marker_names
}
ped_file = as.matrix(ped_file)
storage.mode(ped_file) = "character"
ped_file = gsub(x = ped_file, pattern = " ", replacement = "/")
ped_file[which(ped_file == "0/0")] = "NA/NA"
return(ped_file)
}
#' Import from .vcf
#'
#' @param vcf_file name of the vcf file
#'
#' @return matrix of genotypes for APIS
#'
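#' @examples
#' # illustrative sketch; "variants.vcf" is a hypothetical file path
#' \dontrun{
#' geno <- import_from_vcf("variants.vcf")
#' geno[1:2, 1:2] # genotypes coded like "0/1"; missing genotypes as "NA/NA"
#' }
#'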
#' @importFrom data.table fread
#' @export
import_from_vcf <- function(vcf_file) {
# DESCRIPTION
# import .vcf file in the APIS format
#
# INPUTS
# vcf_file : path to the .vcf file
#
# OUTPUTS
# apis_file : genotype matrix in the APIS format
vcf_file = as.data.frame(data.table::fread(vcf_file))
vcf_file = vcf_file[, -c(1:2, 4:9)]
rownames(vcf_file) = vcf_file[, 1] ; vcf_file = vcf_file[, -1]
apis_file <- t(vcf_file)
if (!is(apis_file, 'matrix')) {
apis_file <- as.matrix(apis_file)
}
storage.mode(apis_file) <- "character"
apis_file = gsub(x = apis_file, pattern = "\\|", replacement = "/")
apis_file = apply(apis_file, 2, function(X) {substr(X, start = 1, stop = 3)})
apis_file[which(apis_file == "./.")] <- "NA/NA"
return(apis_file)
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/functions_import.R |
# THIS FILE CONTAINS ALL THE FUNCTIONS FOR CREATING PEDIGREE
#
# =====================================================================================
#' Create pedigree for delta method
#'
#' @param log_file log file
#' @param threshold_delta threshold for delta
#'
#' @return pedigree using the delta method
#'
#' @keywords internal
#' @noRd
create_pedigree_delta <- function(log_file, threshold_delta) {
# DESCRIPTION
# create the pedigree file for the "delta" method
#
# INPUTS
# log_file : the log file
# threshold_delta : the threshold value for deltas
#
# OUTPUTS
# pedigree : the pedigree
pedigree = log_file[,1:3]
colnames(pedigree) = c('offspring', 'sire', 'dam')
individual_not_assigned = which(log_file$delta_1_2 < threshold_delta)
if(length(individual_not_assigned) > 0) {
pedigree[individual_not_assigned, 2:3] = c(NA, NA)
}
return(pedigree)
}
#' Create pedigree for exclusion method
#'
#' @param log_file log file
#' @param threshold_exclusion threshold for mismatches
#'
#' @return pedigree using the exclusion method
#'
#' @keywords internal
#' @noRd
create_pedigree_exclusion <- function(log_file, threshold_exclusion) {
# DESCRIPTION
# create the pedigree file for "exclusion" method
#
# INPUTS
# log_file : the log file
# threshold_exclusion : the threshold value for mismatches
#
# OUTPUTS
# pedigree : the pedigree
pedigree = log_file[,1:3]
colnames(pedigree) = c('offspring', 'sire', 'dam')
individual_not_assigned = which((log_file$mismatch_1 > threshold_exclusion) |
(log_file$mismatch_1 == log_file$mismatch_2))
if(length(individual_not_assigned) > 0) {
pedigree[individual_not_assigned, 2:3] = c(NA, NA)
}
return(pedigree)
}
#' Create pedigree for probability method
#'
#' @param log_file log file
#' @param threshold_probability threshold for probabilities
#'
#' @return pedigree using the probability method
#'
#' @keywords internal
#' @noRd
create_pedigree_probability <- function(log_file, threshold_probability) {
# DESCRIPTION
# create the pedigree file for "probability" method
#
# INPUTS
# log_file : the log file
# threshold_probability : the threshold value for probabilities
#
# OUTPUTS
# pedigree : the pedigree
pedigree = log_file[,1:3]
colnames(pedigree) = c('offspring', 'sire', 'dam')
individual_not_assigned = which(log_file$probability_1 < threshold_probability)
if(length(individual_not_assigned) > 0) {
pedigree[individual_not_assigned, 2:3] = c(NA, NA)
}
return(pedigree)
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/functions_pedigree.R |
# THIS FILE CONTAINS ALL THE FUNCTIONS FOR PERSONAL THRESHOLD
#
# =====================================================================================
#' Personal threshold
#'
#' @param log_file log file from the APIS_2n() or APIS_3n() function
#' @param method method : "delta", "probability" or "exclusion"
#' @param threshold the new threshold value to apply
#' @param verbose if TRUE, display information on the screen
#'
#' @return pedigree file
#'
#' @importFrom cowplot plot_grid
#'
#' @keywords internal
#' @noRd
#'
personal_threshold <- function(log_file, method, threshold,
verbose = FALSE) {
# DESCRIPTION
# process APIS outputs with new threshold value
#
# INPUTS
# log_file : the log file
# method : method of assignment
# threshold : the new threshold value
# verbose : display information on screen
#
# OUTPUTS
# pedigree : the pedigree
if(method == 'delta') {
pedigree = create_pedigree_delta(log_file = log_file, threshold_delta = threshold)
# Plot graphs
p1 = plot_delta(log_file = log_file, threshold = threshold)
p2 = plot_probabilities(log_file = log_file, threshold = NULL)
p3 = plot_mismatches(log_file = log_file, threshold = NULL)
} else if(method == 'probability') {
pedigree = create_pedigree_probability(log_file = log_file, threshold_probability = threshold)
# Plot graphs
p1 = plot_delta(log_file = log_file, threshold = NULL)
p2 = plot_probabilities(log_file = log_file, threshold = threshold)
p3 = plot_mismatches(log_file = log_file, threshold = NULL)
} else if(method == 'exclusion') {
pedigree = create_pedigree_exclusion(log_file = log_file, threshold_exclusion = threshold)
# Plot graphs
p1 = plot_delta(log_file = log_file, threshold = NULL)
p2 = plot_probabilities(log_file = log_file, threshold = NULL)
p3 = plot_mismatches(log_file = log_file, threshold = threshold)
} else {
stop("Invalid method : must be 'delta', 'probability' or 'exclusion'")
}
print(cowplot::plot_grid(p1, p2, p3, nrow = 3))
if (verbose) {
print_summary(theoretical_assignment_power = NULL,
assignment_rate = get_assignment_rate(pedigree))
}
return(pedigree)
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/functions_personal_threshold.R |
# THIS FILE CONTAINS ALL THE FUNCTIONS FOR PLOTTING
#
# =====================================================================================
#' Plot probabilities
#'
#' @param log_file log file from the APIS_2n() or APIS_3n() function
#' @param threshold optional threshold value drawn as a vertical line
#' @param simulated_individuals names of the simulated individuals
#'
#' @return plot of the distribution of probabilities
#'
#' @import ggplot2
#'
#' @export
plot_probabilities <- function(log_file, threshold = NULL, simulated_individuals = NULL) {
# DESCRIPTION
# plot probabilities histogram
#
# INPUTS
# log_file : the log file
# threshold : the threshold value
# simulated_individuals : vector of simulated individual names
#
# OUTPUTS
# p : the graph
if (!is.null(simulated_individuals)) {
log_file$simulated = ifelse(test = log_file$offspring %in% simulated_individuals, yes = 1, no = 0)
data_plot = data.frame(values = c(log_file$probability_1, log_file$probability_2),
P = rep(c('P1', 'P2'), each = nrow(log_file)),
simulated = log_file$simulated)
} else {
data_plot = data.frame(values = c(log_file$probability_1, log_file$probability_2),
P = rep(c('P1', 'P2'), each = nrow(log_file)),
simulated = 0)
}
p = ggplot2::ggplot(data = data_plot, aes(x = .data$values)) +
ggplot2::geom_histogram(data = subset(data_plot, data_plot$simulated == 1 & data_plot$P == "P2"),
alpha = 0.25, bins = 30, fill = "blue") +
ggplot2::geom_histogram(data = subset(data_plot, data_plot$simulated == 1 & data_plot$P == "P1"),
alpha = 0.25, bins = 30, fill = "red") +
ggplot2::geom_histogram(data = subset(data_plot, data_plot$simulated == 0 & data_plot$P == "P1"),
alpha = 0.3, bins = 30, fill = "red") +
ggplot2::geom_histogram(data = subset(data_plot, data_plot$simulated == 0 & data_plot$P == "P2"),
alpha = 0.3, bins = 30, fill = "blue") +
ggplot2::xlab('mendelian transmission probability') +
ggplot2::theme(axis.title.y = element_blank())
if (!is.null(threshold)) {
p = p + ggplot2::geom_vline(xintercept = threshold)
}
return(p)
}
#' Plot deltas
#'
#' @param log_file log file from the APIS_2n() or APIS_3n() function
#' @param threshold optional threshold value drawn as a vertical line
#' @param simulated_individuals names of the simulated individuals
#'
#' @return plot of the distribution of delta
#'
#' @import ggplot2
#'
#' @export
plot_delta <- function(log_file, threshold = NULL, simulated_individuals = NULL) {
# DESCRIPTION
# plot delta histogram
#
# INPUTS
# log_file : the log file
# threshold : the threshold value
# simulated_individuals : vector of simulated individual names
#
# OUTPUTS
# p : the graph
if (!is.null(simulated_individuals)) {
log_file$simulated = ifelse(test = log_file$offspring %in% simulated_individuals, yes = 1, no = 0)
data_plot = data.frame(values = c(log_file$delta_1_2, log_file$delta_2_3),
P = rep(c('delta_1_2', 'delta_2_3'), each = nrow(log_file)),
simulated = log_file$simulated)
} else {
data_plot = data.frame(values = c(log_file$delta_1_2, log_file$delta_2_3),
P = rep(c('delta_1_2', 'delta_2_3'), each = nrow(log_file)),
simulated = 0)
}
p = ggplot2::ggplot(data = data_plot, aes(x = .data$values)) +
ggplot2::geom_histogram(data = subset(data_plot, data_plot$simulated == 1 & data_plot$P == "delta_2_3"),
alpha = 0.25, bins = 30, fill = "blue") +
ggplot2::geom_histogram(data = subset(data_plot, data_plot$simulated == 1 & data_plot$P == "delta_1_2"),
alpha = 0.25, bins = 30, fill = "red") +
ggplot2::geom_histogram(data = subset(data_plot, data_plot$simulated == 0 & data_plot$P == "delta_1_2"),
alpha = 0.3, bins = 30, fill = "red") +
ggplot2::geom_histogram(data = subset(data_plot, data_plot$simulated == 0 & data_plot$P == "delta_2_3"),
alpha = 0.3, bins = 30, fill = "blue") +
ggplot2::xlab('delta') +
ggplot2::theme(axis.title.y = element_blank())
if (!is.null(threshold)) {
p = p + ggplot2::geom_vline(xintercept = threshold)
}
return(p)
}
#' Plot mismatches
#'
#' @param log_file log file from the APIS_2n() or APIS_3n() function
#' @param threshold optional threshold value drawn as a vertical line
#' @param simulated_individuals names of the simulated individuals
#'
#' @return plot of the distribution of mismatches
#'
#' @import ggplot2
#'
#' @export
plot_mismatches <- function(log_file, threshold = NULL, simulated_individuals = NULL) {
# DESCRIPTION
# plot mismatches histogram
#
# INPUTS
# log_file : the log file
# threshold : the threshold value
# simulated_individuals : vector of simulated individual names
#
# OUTPUTS
# p : the graph
if (!is.null(simulated_individuals)) {
log_file$simulated = ifelse(test = log_file$offspring %in% simulated_individuals, yes = 1, no = 0)
data_plot = data.frame(values = c(log_file$mismatch_1, log_file$mismatch_2),
P = rep(c('mismatch_1', 'mismatch_2'), each = nrow(log_file)),
simulated = log_file$simulated)
} else {
data_plot = data.frame(values = c(log_file$mismatch_1, log_file$mismatch_2),
P = rep(c('mismatch_1', 'mismatch_2'), each = nrow(log_file)),
simulated = 0)
}
p = ggplot2::ggplot(data = data_plot, aes(x = .data$values)) +
ggplot2::geom_histogram(data = subset(data_plot, data_plot$simulated == 1 & data_plot$P == "mismatch_2"),
alpha = 0.25, bins = 30, fill = "blue") +
ggplot2::geom_histogram(data = subset(data_plot, data_plot$simulated == 1 & data_plot$P == "mismatch_1"),
alpha = 0.25, bins = 30, fill = "red") +
ggplot2::geom_histogram(data = subset(data_plot, data_plot$simulated == 0 & data_plot$P == "mismatch_1"),
alpha = 0.3, bins = 30, fill = "red") +
ggplot2::geom_histogram(data = subset(data_plot, data_plot$simulated == 0 & data_plot$P == "mismatch_2"),
alpha = 0.3, bins = 30, fill = "blue") +
ggplot2::xlab('number of mismatches') +
ggplot2::theme(axis.title.y = element_blank())
if (!is.null(threshold)) {
p = p + ggplot2::geom_vline(xintercept = threshold)
}
return(p)
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/functions_plot.R |
# THIS FILE CONTAINS ALL THE FUNCTIONS FOR RECODING FOR C FUNCTION
#
# =====================================================================================
#' Genotypes recoding
#'
#' @param genotype_matrix genotype matrix
#' @param variant_dataframe output of the get_variant_dataframe() function
#' @param number_cores number of cores for parallel functions
#'
#' @return recoded genotype matrix
#'
#' @importFrom parallel makeCluster stopCluster
#' @importFrom doParallel registerDoParallel
#' @importFrom foreach %dopar% foreach
#'
#' @keywords internal
#' @noRd
recode_genotypes_for_assignment_2n <- function(genotype_matrix, variant_dataframe, number_cores) {
# DESCRIPTION
# Recode a genotype matrix according to the variant coding for diploid
#
# INPUTS
# genotype_matrix : genotype matrix in the APIS format
# variant_dataframe : the variant dataframe (output of get_variant_dataframe())
# number_cores : number of cores for parallel programming
#
# OUTPUTS
# recode_genotype : recoded genotype matrix
# Recode for C subroutine
recode_genotype_for_subroutine <- function(marker_genotype, marker_list) {
tmp <- unlist(strsplit(marker_genotype, '/'))
all1 <- marker_list[which(marker_list[,1] == tmp[1]),2]
all2 <- marker_list[which(marker_list[,1] == tmp[2]),2]
return(sort(c(all1, all2), decreasing = FALSE))
}
# Initialize variables
i <- NULL
# Create parallel nodes
cl = parallel::makeCluster(number_cores)
doParallel::registerDoParallel(cl)
# Recode offspring genotypes
recode_genotype <- foreach(i = 1:nrow(genotype_matrix), .combine = rbind) %dopar% {
tmp <- as.numeric(as.vector(sapply(genotype_matrix[i,, drop = FALSE], recode_genotype_for_subroutine, marker_list = variant_dataframe)))
}
parallel::stopCluster(cl)
if (!is(recode_genotype, "matrix")) {
recode_genotype <- t(as.matrix(recode_genotype))
}
rownames(recode_genotype) <- rownames(genotype_matrix)
return(recode_genotype)
}
#' Genotypes recoding
#'
#' @param genotype_matrix genotype matrix
#' @param variant_dataframe output of the get_variant_dataframe() function
#' @param number_cores number of cores for parallel functions
#'
#' @return recoded genotype matrix
#'
#' @importFrom parallel makeCluster stopCluster
#' @importFrom doParallel registerDoParallel
#' @importFrom foreach %dopar% foreach
#'
#' @keywords internal
#' @noRd
recode_genotypes_for_assignment_3n <- function(genotype_matrix, variant_dataframe, number_cores) {
# DESCRIPTION
# Recode a genotype matrix according to the variant coding for triploid
#
# INPUTS
# genotype_matrix : genotype matrix in the APIS format
# variant_dataframe : the variant dataframe (output of get_variant_dataframe())
# number_cores : number of cores for parallel programming
#
# OUTPUTS
# recode_genotype : recoded genotype matrix
# Recode for C subroutine
recode_genotype_3n_for_subroutine <- function(marker_genotype, marker_list) {
tmp <- unlist(strsplit(marker_genotype, '/'))
all1 <- marker_list[which(marker_list[,1] == tmp[1]),2]
all2 <- marker_list[which(marker_list[,1] == tmp[2]),2]
all3 <- marker_list[which(marker_list[,1] == tmp[3]),2]
new_genotype = c(all1, all2, all3)
return(sort(new_genotype, decreasing = FALSE))
}
# Initialize variables
i <- NULL
# Create parallel nodes
cl = parallel::makeCluster(number_cores)
doParallel::registerDoParallel(cl)
# Recode offspring genotypes
recode_genotype <- foreach(i = 1:nrow(genotype_matrix), .combine = rbind) %dopar% {
tmp <- as.numeric(as.vector(sapply(genotype_matrix[i,, drop = FALSE], recode_genotype_3n_for_subroutine, marker_list = variant_dataframe)))
}
parallel::stopCluster(cl)
if (!is(recode_genotype, "matrix")) {
recode_genotype <- t(as.matrix(recode_genotype))
}
rownames(recode_genotype) <- rownames(genotype_matrix)
return(recode_genotype)
}
#' Recode allele frequencies
#'
#' @param allele_frequency allele frequencies from the "get_allele_frequencies_2n()" function
#' @param variant_dataframe output of the get_variant_dataframe() function
#'
#' @return recoded allele frequencies
#'
#' @keywords internal
#' @noRd
recode_allele_frequencies_for_assignment <- function(allele_frequency, variant_dataframe) {
# DESCRIPTION
# Recode allele frequencies to match the variant coding
#
# INPUTS
# allele_frequency : dataframe of allele frequencies (output of the get_allele_frequencies())
# variant_dataframe : the variant dataframe (output of get_variant_dataframe())
#
# OUTPUTS
# allele_frequency : recoded allele frequencies dataframe
# Recode function for each marker
recode_allele_frequencies <- function(column_name, marker_list) {
tmp <- unlist(strsplit(column_name, '_'))[2]
return(marker_list[which(marker_list[,1] == tmp), 2])
}
# Remove frequency of "NA" variant
has_NA <- which(colnames(allele_frequency) == "Freq_NA")
if (length(has_NA) > 0) {
allele_frequency <- allele_frequency[,-has_NA]
}
# Keep only "Freq" columns
allele_frequency <- allele_frequency[, c((floor(ncol(allele_frequency)/2)+2):ncol(allele_frequency))]
# Recode columns with the new variant coding system
colnames(allele_frequency) <- sapply(colnames(allele_frequency),
recode_allele_frequencies,
marker_list = variant_dataframe)
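# Temporarily stack the recoded variant ids as a first row so that missing
# variants can be appended with zero frequency and the columns sorted by id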
allele_frequency <- rbind(colnames(allele_frequency), allele_frequency)
# Add allele frequencies of missing alleles
add_missing_allele_frequency <- as.numeric(variant_dataframe$recode[-which(variant_dataframe$recode %in% allele_frequency[1,])])
add_missing_allele_frequency <- add_missing_allele_frequency[-which(add_missing_allele_frequency == 0)]
add_missing_allele_frequency_matrix <- matrix(data = c(add_missing_allele_frequency, rep(0, times = length(add_missing_allele_frequency) * (nrow(allele_frequency) - 1))),
ncol = length(add_missing_allele_frequency), nrow = nrow(allele_frequency), byrow = TRUE)
allele_frequency <- apply(allele_frequency, 2, as.numeric)
allele_frequency <- cbind(allele_frequency, add_missing_allele_frequency_matrix)
allele_frequency <- allele_frequency[, order(allele_frequency[1,])]
allele_frequency <- allele_frequency[-1,]
return(allele_frequency)
}
#' Prepare data for assignment
#'
#' @param offspring_genotype offspring genotype matrix
#' @param sire_genotype sire genotype matrix
#' @param dam_genotype dam genotype matrix
#' @param ploidy_level ploidy level
#' @param number_cores number of cores for parallel functions
#' @param verbose if TRUE, display information on the screen
#'
#' @return recoded datasets
#'
#' @keywords internal
#' @noRd
prepare_for_assignment <- function(offspring_genotype, sire_genotype, dam_genotype,
ploidy_level,
number_cores,
verbose = FALSE) {
# DESCRIPTION
# Recode all genotype matrices for assignment routine
#
# INPUTS
# offspring_genotype : genotype matrix of the offspring in the APIS format
# sire_genotype : genotype matrix of the sires in the APIS format
# dam_genotype : genotype matrix of the dams in the APIS format
# ploidy_level : level of ploidy of the offspring
# number_cores : number of cores for parallel programming
# verbose : display information of the screen
#
# OUTPUTS
# list of 4 elements : recoded matrix of genotypes (offspring, sire, dam) and recoded allele frequencies
# Estimation of allele frequencies
allele_frequency = get_allele_frequencies(genotype_matrix = offspring_genotype,
ploidy_level = ploidy_level)
# Create the variant recoding dataframe
variant_recode_dataframe = get_variant_dataframe(offspring_genotype = offspring_genotype,
sire_genotype = sire_genotype,
dam_genotype = dam_genotype)
# Recode genotyping matrices
if (ploidy_level == 2) {
if (verbose) {
cat("recoding offspring genotype matrix", sep = "\n")
}
offspring_genotypes_recoded = recode_genotypes_for_assignment_2n(offspring_genotype,
variant_dataframe = variant_recode_dataframe,
number_cores = number_cores)
if (verbose) {
cat("recoding sire genotype matrix", sep = "\n")
}
sire_genotypes_recoded = recode_genotypes_for_assignment_2n(sire_genotype,
variant_dataframe = variant_recode_dataframe,
number_cores = number_cores)
if (verbose) {
cat("recoding dam genotype matrix", sep = "\n")
}
dam_genotypes_recoded = recode_genotypes_for_assignment_2n(dam_genotype,
variant_dataframe = variant_recode_dataframe,
number_cores = number_cores)
} else if (ploidy_level == 3) {
if (verbose) {
cat("recoding offspring genotype matrix", sep = "\n")
}
offspring_genotypes_recoded = recode_genotypes_for_assignment_3n(offspring_genotype,
variant_dataframe = variant_recode_dataframe,
number_cores = number_cores)
if (verbose) {
cat("recoding sire genotype matrix", sep = "\n")
}
sire_genotypes_recoded = recode_genotypes_for_assignment_2n(sire_genotype,
variant_dataframe = variant_recode_dataframe,
number_cores = number_cores)
if (verbose) {
cat("recoding dam genotype matrix", sep = "\n")
}
dam_genotypes_recoded = recode_genotypes_for_assignment_2n(dam_genotype,
variant_dataframe = variant_recode_dataframe,
number_cores = number_cores)
} else {
stop("ploidy_level must be either 2 or 3")
}
# Recoded allele frequencies
if (verbose) {
cat("recoding allele frequencies", sep = "\n")
cat('---------------------------------------------------', sep = '\n')
}
allele_frequency_recoded = recode_allele_frequencies_for_assignment(allele_frequency = allele_frequency,
variant_dataframe = variant_recode_dataframe)
# Return
return(list(offspring_genotypes_recoded, sire_genotypes_recoded, dam_genotypes_recoded, allele_frequency_recoded))
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/functions_prepare_for_assignment.R |
# THIS FILE CONTAINS ALL THE FUNCTIONS FOR PRINTING
#
# =====================================================================================
#' Print title of APIS
#'
#' @keywords internal
#' @noRd
print_title <- function() {
# DESCRIPTION
# Print the title
#
# INPUTS
#
# OUTPUTS
# display the title on the screen
cat('===================================================', sep = '\n')
cat(' ___ _____ _ _____ ', sep = '\n')
cat(' / | | _ \\ | | / ___/ ', sep = '\n')
cat(' / /| | | |_| | | | | |___ ', sep = '\n')
cat(' / / | | | ___/ | | \\ __ \\ ', sep = '\n')
cat(' / / | | | | | | ___| | ', sep = '\n')
cat(' /_/ |_| |_| |_| /_____/ ', sep = '\n')
cat('\n')
cat('---------------------------------------------------', sep = '\n')
cat('AUTO-ADAPTIVE PARENTAGE INFERENCE SOFTWARE', sep = '\n')
cat('---------------------------------------------------', sep = '\n')
}
#' Print the summary of the assignment
#'
#' @keywords internal
#' @noRd
print_summary <- function(theoretical_assignment_power,
assignment_rate) {
# DESCRIPTION
# Print the assignment summary
#
# INPUTS
# theoretical_assignment_power : theoretical assignment power
# assignment_rate : assignment rate
#
# OUTPUTS
# display the summary on the screen
cat('--------------------------------------', sep = '\n')
cat(' APIS SUMMARY', sep = '\n')
cat('--------------------------------------', sep = '\n')
if (!is.null(theoretical_assignment_power)) {
P = substr(theoretical_assignment_power, 1, 5)
cat("theoretical assignment power : ", P)
cat("\n")
}
AR = assignment_rate * 100
cat("realized assignment rate : ", AR, "%")
cat("\n")
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/functions_print.R |
# THIS FILE CONTAINS ALL THE FUNCTIONS FOR ESTIMATING PROBABILITIES
#
# =====================================================================================
#' Estimate probabilities
#'
#' @param offspring_genotype matrix of the offspring genotypes
#' @param sire_genotype matrix of the sire genotypes
#' @param dam_genotype matrix of the dam genotypes
#' @param allele_frequencies allele frequencies (from get_allele_frequencies() function)
#' @param ploidy_level ploidy level of the offspring
#'
#' @return list of 2 matrices : matrix of mendelian transmission probabilities and matrix of mismatches
#'
#' @useDynLib APIS, .registration = TRUE
#'
#' @keywords internal
#' @noRd
get_individual_probability_2n <- function(offspring_genotype, sire_genotype, dam_genotype,
allele_frequencies,
ploidy_level) {
# DESCRIPTION
# Get the mendelian transmission probabilities and mismatches for all the parent pairs for one offspring
#
# INPUTS
# offspring_genotype : genotype matrix of one offspring after recoding
# sire_genotype : genotype matrix of the sires after recoding
# dam_genotype : genotype matrix of the dams after recoding
# allele_frequencies : allele frequency matrix after recoding
# ploidy_level : ploidy level of the offspring
#
# OUTPUTS
# list of 2 elements : mendelian transmission probabilities and mismatches for all the parental pairs
# Prepare for Call
offspring_genotypes_for_C = as.vector(t(offspring_genotype))
sire_genotypes_for_C = as.vector(t(sire_genotype))
dam_genotypes_for_C = as.vector(t(dam_genotype))
allele_frequency_for_C = as.vector(t(allele_frequencies))
number_sire = nrow(sire_genotype)
number_dam = nrow(dam_genotype)
number_marker = ncol(offspring_genotype) / ploidy_level
number_variant = ncol(allele_frequencies)
all_probabilities = vector(mode = "numeric", length = number_sire * number_dam)
all_mismatches = vector(mode = "integer", length = number_sire * number_dam)
output_C = .C('get_individual_mendelian_probability_2n',
as.integer(offspring_genotypes_for_C),
as.integer(sire_genotypes_for_C),
as.integer(dam_genotypes_for_C),
as.integer(number_sire),
as.integer(number_dam),
as.integer(number_marker),
as.integer(number_variant),
as.double(allele_frequency_for_C),
as.double(all_probabilities),
as.integer(all_mismatches))
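# output_C[[9]] holds the mendelian transmission probabilities and
# output_C[[10]] the mismatch counts, for all sire x dam pairs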
return(list(output_C[[9]], output_C[[10]]))
}
#' Estimate probabilities
#'
#' @param offspring_genotype offspring genotype matrix
#' @param sire_genotype sire genotype matrix
#' @param dam_genotype dam genotype matrix
#' @param allele_frequencies allele frequencies
#' @param t_recom recombination rate
#'
#' @return list of 2 matrices : matrix of mendelian transmission probabilities and matrix of mismatches
#'
#' @useDynLib APIS, .registration = TRUE
#'
#' @keywords internal
#' @noRd
get_individual_probability_3n <- function(offspring_genotype, sire_genotype, dam_genotype,
allele_frequencies,
ploidy_level,t_recom) {
# DESCRIPTION
# get probability matrix and mismatch matrix using C routine for one triploid individual
#
# INPUTS
# offspring_genotype : genotype matrix of one offspring after recoding
# sire_genotype : genotype matrix of the sires after recoding
# dam_genotype : genotype matrix of the dams after recoding
# allele_frequencies : allele frequencies after recoding
# ploidy_level : ploidy level of the offspring
# t_recom : recombination rate
#
# OUTPUTS
# list of 2 elements : matrix of probabilities and matrix of mismatches
# Prepare for Call
offspring_genotypes_for_C = as.vector(t(offspring_genotype))
sire_genotypes_for_C = as.vector(t(sire_genotype))
dam_genotypes_for_C = as.vector(t(dam_genotype))
allele_frequency_for_C = as.vector(t(allele_frequencies))
number_sire = nrow(sire_genotype)
number_dam = nrow(dam_genotype)
number_marker = ncol(offspring_genotype) / ploidy_level
number_variant = ncol(allele_frequencies)
all_probabilities = vector(mode = "numeric", length = number_sire * number_dam)
all_mismatches = vector(mode = "integer", length = number_sire * number_dam)
output_C = .C('get_individual_mendelian_probability_3n',
as.integer(offspring_genotypes_for_C),
as.integer(sire_genotypes_for_C),
as.integer(dam_genotypes_for_C),
as.integer(number_sire),
as.integer(number_dam),
as.integer(number_marker),
as.integer(number_variant),
as.double(allele_frequency_for_C),
as.double(all_probabilities),
as.integer(all_mismatches),
as.double(t_recom)) # added by JR -- 23/02/2023
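# output_C[[9]] holds the mendelian transmission probabilities and
# output_C[[10]] the mismatch counts, for all sire x dam pairs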
return(list(output_C[[9]], output_C[[10]]))
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/functions_probabilities.R |
.onAttach <- function(libname, pkgname) {
package_citation <- "Griot et al. (2020). APIS: An auto-adaptive parentage inference software that tolerates missing parents"
doi = "https://doi.org/10.1111/1755-0998.13103"
# message("Thank you for using APIS!")
# message("To acknowledge our work, please cite the package:")
# message(package_citation)
# message(doi)
packageStartupMessage("Thank you for using APIS!")
packageStartupMessage("To acknowledge our work, please cite the package:")
packageStartupMessage(package_citation)
packageStartupMessage(doi)
}
| /scratch/gouwar.j/cran-all/cranData/APIS/R/zzz.R |
appe.glm <-
function(mdl, dat.train, dat.test, method="uLSIF", sigma=NULL, lambda=NULL, kernel_num=NULL, fold=5, stabilize=TRUE, qstb=0.025, reps=2000, conf.level=0.95) {
n0 = nrow(dat.train)
n1 = nrow(dat.test)
on = as.character(formula(mdl$call)[[2]])
## observed & predicted response values
Y1 = dat.test[,on]
scr1 = predict(mdl, newdata=dat.test)
scr0 = predict(mdl, newdata=dat.train)
## weight calculation via package 'densratio'
xtrain = update(mdl, data=dat.train, x=TRUE)$x[,-1,drop=FALSE]
xtest = update(mdl, data=dat.test, x=TRUE)$x[,-1,drop=FALSE]
wgt1 = densratio.appe(scr0, scr1, method, sigma, lambda, kernel_num, fold, stabilize, qstb)
wgt2 = densratio.appe(xtrain, xtest, method, sigma, lambda, kernel_num, fold, stabilize, qstb)
## predictive performance measure
Cv = cvalest.bin(Y1, scr1)
Cvw1 = cvalest.bin(Y1, scr1, wgt1)
Cvw2 = cvalest.bin(Y1, scr1, wgt2)
message("\nPoint estimates:")
result = data.frame(c(Cv, Cvw1, Cvw2))
names(result) = 'Est'
row.names(result) = c('Cstat','C adjusted by score','C adjusted by predictors')
print(round(result, 3))
## bootstrap
if (reps > 0) {
Cvb = Cvw1b = Cvw2b = rep(NA, reps)
for (b in 1:reps) {
f.train = sample(1:n0, replace=TRUE)
f.test = sample(1:n1, replace=TRUE)
Y1b = Y1[f.test]
mdlb = update(mdl, data=dat.train[f.train,])
scr1b = predict(mdlb, newdata=dat.test[f.test,])
scr0b = mdlb$fitted.values
xtrainb = xtrain[f.train,,drop=FALSE]
xtestb = xtest[f.test,,drop=FALSE]
wgt1b = densratio.appe(scr0b, scr1b, method, sigma, lambda, kernel_num, fold, stabilize, qstb)
wgt2b = densratio.appe(xtrainb, xtestb, method, sigma, lambda, kernel_num, fold, stabilize, qstb)
Cvb[b] = cvalest.bin(Y1b, scr1b)
Cvw1b[b] = cvalest.bin(Y1b, scr1b, wgt1b)
Cvw2b[b] = cvalest.bin(Y1b, scr1b, wgt2b)
}
## se
Cvse = sd(Cvb, na.rm=TRUE)
Cvw1se = sd(Cvw1b, na.rm=TRUE)
Cvw2se = sd(Cvw2b, na.rm=TRUE)
## percentile ci
cl = c((1-conf.level)/2, 1 - (1-conf.level)/2)
Cvci = quantile(Cvb, cl, na.rm=TRUE)
Cvw1ci = quantile(Cvw1b, cl, na.rm=TRUE)
Cvw2ci = quantile(Cvw2b, cl, na.rm=TRUE)
## approx ci
Cvcia = Cv + Cvse * qnorm(cl)
Cvw1cia = Cvw1 + Cvw1se * qnorm(cl)
Cvw2cia = Cvw2 + Cvw2se * qnorm(cl)
## output
message("\nPoint & Interval estimates:")
result = cbind(result,
c(Cvse, Cvw1se, Cvw2se),
rbind(Cvci, Cvw1ci, Cvw2ci),
rbind(Cvcia, Cvw1cia, Cvw2cia))
names(result) = c('Est', 'SE', 'Percentile.l', 'Percentile.u', 'Approx.l', 'Approx.u')
print(round(result, 3))
}
invisible(result)
}
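## Illustrative sketch (not run); 'dat.train' and 'dat.test' are hypothetical
## data frames sharing a binary outcome 'y' and predictors 'x1', 'x2':
## fit <- glm(y ~ x1 + x2, family = binomial, data = dat.train)
## appe.glm(fit, dat.train, dat.test, method = "uLSIF", reps = 200)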
| /scratch/gouwar.j/cran-all/cranData/APPEstimation/R/appe.glm.R |
appe.lm <-
function(mdl, dat.train, dat.test, method="uLSIF", sigma=NULL, lambda=NULL, kernel_num=NULL, fold=5, stabilize=TRUE, qstb=0.025, reps=2000, conf.level=0.95) {
n0 = nrow(dat.train)
n1 = nrow(dat.test)
on = as.character(formula(mdl$call)[[2]])
## observed & predicted response values
Y1 = dat.test[,on]
scr1 = predict(mdl, newdata=dat.test)
scr0 = predict(mdl, newdata=dat.train)
## weight calculation via package 'densratio'
xtrain = update(mdl, data=dat.train, x=TRUE)$x[,-1,drop=FALSE]
xtest = update(mdl, data=dat.test, x=TRUE)$x[,-1,drop=FALSE]
wgt1 = densratio.appe(scr0, scr1, method, sigma, lambda, kernel_num, fold, stabilize, qstb)
wgt2 = densratio.appe(xtrain, xtest, method, sigma, lambda, kernel_num, fold, stabilize, qstb)
## predictive performance measure
L1 = mean(abs(Y1 - scr1))
L1w1 = weighted.mean(abs(Y1 - scr1), w=wgt1)
L1w2 = weighted.mean(abs(Y1 - scr1), w=wgt2)
L2 = mean((Y1 - scr1)^2)
L2w1 = weighted.mean((Y1 - scr1)^2, w=wgt1)
L2w2 = weighted.mean((Y1 - scr1)^2, w=wgt2)
message("\nPoint estimates:")
result = data.frame(c(L1, L1w1, L1w2, L2, L2w1, L2w2))
names(result) = 'Estimate'
row.names(result) = c('L1','L1 adjusted by score','L1 adjusted by predictors','L2','L2 adjusted by score','L2 adjusted by predictors')
print(round(result, 3))
## bootstrap
if (reps > 0) {
L1b = L1w1b = L1w2b = L2b = L2w1b = L2w2b = rep(NA, reps)
for (b in 1:reps) {
f.train = sample(1:n0, replace=TRUE)
f.test = sample(1:n1, replace=TRUE)
Y1b = Y1[f.test]
mdlb = update(mdl, data=dat.train[f.train,])
scr1b = predict(mdlb, newdata=dat.test[f.test,])
scr0b = mdlb$fitted.values
xtrainb = xtrain[f.train,,drop=FALSE]
xtestb = xtest[f.test,,drop=FALSE]
wgt1b = densratio.appe(scr0b, scr1b, method, sigma, lambda, kernel_num, fold, stabilize, qstb)
wgt2b = densratio.appe(xtrainb, xtestb, method, sigma, lambda, kernel_num, fold, stabilize, qstb)
L1b[b] = mean(abs(Y1b - scr1b))
L1w1b[b] = weighted.mean(abs(Y1b - scr1b), w=wgt1b)
L1w2b[b] = weighted.mean(abs(Y1b - scr1b), w=wgt2b)
L2b[b] = mean((Y1b - scr1b)^2)
L2w1b[b] = weighted.mean((Y1b - scr1b)^2, w=wgt1b)
L2w2b[b] = weighted.mean((Y1b - scr1b)^2, w=wgt2b)
}
## se
L1se = sd(L1b, na.rm=TRUE)
L1w1se = sd(L1w1b, na.rm=TRUE)
L1w2se = sd(L1w2b, na.rm=TRUE)
L2se = sd(L2b, na.rm=TRUE)
L2w1se = sd(L2w1b, na.rm=TRUE)
L2w2se = sd(L2w2b, na.rm=TRUE)
## percentile ci
cl = c((1-conf.level)/2, 1 - (1-conf.level)/2)
L1ci = quantile(L1b, cl, na.rm=TRUE)
L1w1ci = quantile(L1w1b, cl, na.rm=TRUE)
L1w2ci = quantile(L1w2b, cl, na.rm=TRUE)
L2ci = quantile(L2b, cl, na.rm=TRUE)
L2w1ci = quantile(L2w1b, cl, na.rm=TRUE)
L2w2ci = quantile(L2w2b, cl, na.rm=TRUE)
## approx ci
L1cia = L1 + L1se * qnorm(cl)
L1w1cia = L1w1 + L1w1se * qnorm(cl)
L1w2cia = L1w2 + L1w2se * qnorm(cl)
L2cia = L2 + L2se * qnorm(cl)
L2w1cia = L2w1 + L2w1se * qnorm(cl)
L2w2cia = L2w2 + L2w2se * qnorm(cl)
## output
message("\nPoint & Interval estimates:")
result = cbind(result,
c(L1se, L1w1se, L1w2se, L2se, L2w1se, L2w2se),
rbind(L1ci, L1w1ci, L1w2ci, L2ci, L2w1ci, L2w2ci),
rbind(L1cia, L1w1cia, L1w2cia, L2cia, L2w1cia, L2w2cia))
names(result) = c('Estimate', 'Std.Error', 'Percentile.l', 'Percentile.u', 'Approx.l', 'Approx.u')
print(round(result, 3))
}
invisible(result)
}
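## Illustrative sketch (not run); 'dat.train' and 'dat.test' are hypothetical
## data frames sharing a continuous outcome 'y' and predictors 'x1', 'x2':
## fit <- lm(y ~ x1 + x2, data = dat.train)
## appe.lm(fit, dat.train, dat.test, reps = 200)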
| /scratch/gouwar.j/cran-all/cranData/APPEstimation/R/appe.lm.R |
cvalest.bin <-
function (Y, scr, wgt=NULL) {
if (is.null(wgt)) wgt = rep(1, length(Y))
obj = data.frame(Y, scr, wgt)
obj1 = obj[obj$Y==1,]
obj0 = obj[obj$Y==0,]
cnt = 0
for (i in 1:nrow(obj1)) {
flg = obj1$scr[i] > obj0$scr
cnt = cnt + obj1$wgt[i] * sum(obj0[flg,]$wgt)
flg = obj1$scr[i] == obj0$scr
cnt = cnt + obj1$wgt[i] * sum(obj0[flg,]$wgt) / 2
}
nrm = sum(obj1$wgt) * sum(obj0$wgt)
return(cnt / nrm)
}
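# Minimal sketch (not run): perfectly separated scores give C = 1, and ties
# between the two classes count as half-concordant:
# cvalest.bin(Y = c(0, 0, 1, 1), scr = c(0.1, 0.2, 0.8, 0.9)) # 1
# cvalest.bin(Y = c(0, 1), scr = c(0.5, 0.5)) # 0.5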
| /scratch/gouwar.j/cran-all/cranData/APPEstimation/R/cvalest.bin.R |
densratio.appe <-
function(xtrain, xtest, method="uLSIF", sigma=NULL, lambda=NULL, kernel_num=NULL, fold=5, stabilize=TRUE, qstb=0.025) {
xtrain = as.matrix(xtrain)
xtest = as.matrix(xtest)
if (is.null(kernel_num)) kernel_num = 100
if (is.null(sigma)) {
center = matrix(xtest[sample(1:nrow(xtest), kernel_num),], kernel_num, ncol(xtest))
sigma = as.array(quantile((dist(center))))
sigma = unique(sigma[ sigma>0.001 ])
}
if (is.null(lambda)) lambda = "auto"
if (method == "uLSIF" || method == "KLIEP") {
wgt = densratio(xtrain, xtest, method, sigma, lambda, kernel_num, fold, verbose=FALSE)$compute_density_ratio(xtest)
# } else if (method == "gam") {
# wgt = densratio.gam(xtrain, xtest, stabilize)
} else {
# stop("\n\nmethod should be either in ('uLSIF', 'KLIEP', 'gam').\n\n")
stop("\n\nmethod should be either in ('uLSIF', 'KLIEP').\n\n")
}
## tail-weight stabilization
if (stabilize) {
vl = quantile(wgt, qstb)
wgt[ wgt < vl ] = vl
vl = quantile(wgt, 1-qstb)
wgt[ wgt > vl ] = vl
}
return(wgt)
}
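# Illustrative sketch (not run): when both samples come from the same
# distribution, the estimated density-ratio weights should be close to 1.
# set.seed(1)
# w <- densratio.appe(matrix(rnorm(200)), matrix(rnorm(200)))
# summary(w)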
| /scratch/gouwar.j/cran-all/cranData/APPEstimation/R/densratio.appe.R |
#' @title Various variations of visualizations of clonal expansion post-RunAPOTC
#'
#' @description
#' `r lifecycle::badge("stable")`
#'
#' Given a seurat object with an 'apotc' (APackOfTheClones) object
#' from running [RunAPOTC], this function will read the information and return
#' a customizable ggplot2 object of the clonal expansion with a circle size
#' legend. If the user is unhappy about certain aspects of the plot, many
#' parameters can be adjusted with the [AdjustAPOTC] function.
#'
#' The specific APackOfTheClones run to be plotted can be identified in two
#' ways: either by inputting the `run_id` associated with the run that was
#' either defined / auto-generated during [RunAPOTC], or by inputting the
#' `reduction_base`, `clonecall`, `extra_filter` and any other keyword arguments
#' that corresponded to the run. Its heavily recommended to use the `run_id`.
#' If none of these parameters are inputted, the function defaults to returning
#' the plot of the latest run.
#'
#' @inheritParams RunAPOTC
#'
#' @param seurat_obj A seurat object that has been integrated with clonotype
#' data and has had a valid run of [RunAPOTC].
#' @param res The number of points on the generated path per full circle. From
#' plot viewers, if circles seem slightly too pixelated, it is recommended to
#' first try to export the plot as an `.svg` before increasing `res` due to
#' increased plotting times from [ggforce::geom_circle].
#' @param linetype The type of outline each circle should have. Defaults to
#' `"blank"`, meaning no outline. More information is in the function
#' documentation of `ggforce::geom_circle`.
#' @param use_default_theme logical that defaults to `TRUE`. If `TRUE`,
#' the resulting plot will have the same theme as the seurat reference reduction
#' plot. Else, the plot will simply have a blank background.
#' @param retain_axis_scales If `TRUE`, approximately maintains the axis scales
#' of the original reduction plot. However, it will only attempt to extend the
#' axes and never shorten. This is recommended to be set to `TRUE` especially if
#' working with subsetted versions of the clonal data.
#' @param show_labels If `TRUE`, will label each circle cluster at the centroid,
#' defaulting to "C0, C1, ...".
#' @param label_size The text size of labels if shown. Defaults to 5.
#' @param add_size_legend If `TRUE`, adds a legend to the plot visualizing the
#' relative sizes of clones. Note that it is simply an overlay and not a real
#' ggplot2 legend.
#' @param legend_sizes numeric vector. Indicates the circle sizes to be
#' displayed on the legend, and will always be sorted from smallest to greatest.
#' Defaults to `"auto"` which estimate a reasonable range of sizes to display.
#' @param legend_position character or numeric. Can be set to one of
#' `"top_left"`, `"top_right"`, `"bottom_left"`, or `"bottom_right"` to place the
#' legend roughly in the corresponding position. Otherwise, can be a numeric
#' vector of length 2 indicating the x and y position of the *topmost (smallest)
#' circle* of the legend.
#' @param legend_buffer numeric. Indicates how much to "push" the legend towards
#' the center of the plot from the selected corner. If negative, will push away
#' @param legend_color character. Indicates the hex color of the circles
#' displayed on the legend. Defaults to the hex code for a gray tone
#' @param legend_spacing numeric. Indicates the horizontal distance between each
#' stacked circle on the size legend. Defaults to `"auto"` which will use an
#' estimated value depending on plot size
#' @param legend_label character. The title of the legend, which defaults to
#' `"Clone sizes"`.
#' @param legend_text_size numeric. The text size of the letters and numbers on
#' the legend
#' @param add_legend_background logical. If `TRUE`, will add a border around the
#' legend and fill the background to be white, overlaying anything else.
#'
#' @return A ggplot object of the APackOfTheClones clonal expansion plot of the
#' seurat object
#'
#' @seealso [AdjustAPOTC]
#'
#' @export
#'
#' @examples
#' data("combined_pbmc")
#'
#' combined_pbmc <- RunAPOTC(
#' combined_pbmc, run_id = "run1", verbose = FALSE
#' )
#'
#' # plotting with default arguments will plot the latest "run1"
#' clonal_packing_plot <- APOTCPlot(combined_pbmc)
#'
APOTCPlot <- function(
seurat_obj,
reduction_base = NULL,
clonecall = NULL,
...,
extra_filter = NULL,
run_id = NULL,
res = 360L,
linetype = "blank",
use_default_theme = TRUE,
retain_axis_scales = FALSE,
#alpha = 1,
show_labels = FALSE,
label_size = 5,
add_size_legend = TRUE,
legend_sizes = "auto",
legend_position = "auto",
legend_buffer = 0.2,
legend_color = "#808080",
legend_spacing = "auto",
legend_label = "Clone sizes",
legend_text_size = 5,
add_legend_background = TRUE
) {
varargs_list <- list(...)
args <- hash::hash(as.list(environment()))
args$run_id <- infer_object_id_if_needed(args, varargs_list = varargs_list)
APOTCPlot_error_handler(args)
apotc_obj <- getApotcData(seurat_obj, args$run_id)
result_plot <- plot_clusters(
clusters = get_plottable_df_with_color(apotc_obj),
n = res,
linetype = linetype#,
#alpha=alpha
)
#set theme
if (use_default_theme) {
result_plot <- add_default_theme(
plt = result_plot,
reduction = get_reduction_base(apotc_obj)
)
} else {
result_plot <- result_plot + ggplot2::theme_void()
}
# get current plot dimensions
result_plot_dimensions <- get_plot_dims(result_plot)
# retain axis scales on the resulting plot.
if (retain_axis_scales) {
result_plot_dimensions <- get_retain_scale_dims(
seurat_obj,
reduction = get_reduction_base(apotc_obj),
ball_pack_plt = result_plot,
plot_dims = result_plot_dimensions
)
result_plot <- result_plot + ggplot2::expand_limits(
x = get_xr(result_plot_dimensions),
y = get_yr(result_plot_dimensions)
)
}
# TODO clonal link computation here
if (show_labels) {
result_plot <- insert_labels(result_plot, apotc_obj, label_size)
}
if (add_size_legend) {
result_plot <- insert_legend(
plt = result_plot,
plt_dims = result_plot_dimensions,
apotc_obj = apotc_obj,
sizes = legend_sizes,
pos = legend_position,
buffer = legend_buffer,
color = legend_color,
n = res,
spacing = legend_spacing,
legend_label = legend_label,
legend_textsize = legend_text_size,
do_add_legend_border = add_legend_background
)
}
result_plot
}
APOTCPlot_error_handler <- function(args) {
check_apotc_identifiers(args)
if (!is_an_integer(args$res)) {
stop(call. = FALSE, "`res` must be an integer value of length 1.")
}
if (!is_a_character(args$linetype)) {
stop(call. = FALSE, "`linetype` must be a character of length 1.")
}
if (!is_a_logical(args$use_default_theme)) {
stop(call. = FALSE,
"`use_default_theme` must be a logical value of length 1."
)
}
if (!is_a_logical(args$retain_axis_scales)) {
stop(call. = FALSE,
"`retain_axis_scales` must be a logical value of length 1."
)
}
if (!is_a_logical(args$show_labels)) {
stop(call. = FALSE,
"`show_labels` must be a logical value of length 1."
)
}
if (!is_a_numeric(args$label_size)) {
stop(call. = FALSE, "`label_size` must be a numeric value of length 1.")
}
if (!is_a_logical(args$add_size_legend)) {
stop(call. = FALSE,
"`add_size_legend` must be a logical value of length 1."
)
}
# check object_id validity
if (!containsApotcRun(args$seurat_obj, args$run_id)) {
stop(call. = FALSE, paste(
"APackOfTheClones object with id", args$run_id,
"does not exist in the seurat object"
))
}
# TODO: Add more specific checks for other parameters
check_filtering_conditions(args)
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/APOTCPlot.R |
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
#' @importFrom dplyr %>%
#' @importFrom Rcpp sourceCpp
#' @useDynLib APackOfTheClones, .registration = TRUE
## usethis namespace: end
NULL
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/APackOfTheClones-package.R |
#' @title
#' Adjust the parameters of the APackOfTheClones reduction in a seurat
#' object
#'
#' @description
#' `r lifecycle::badge("experimental")`
#'
#' If the user is unsatisfied with the clonal expansion plot that
#' was generated from `RunAPOTC` and `APOTCPlot`, this function has a range of
#' arguments to modify the data and/or parameters of the visualization. Note
#' that some of the arguments may conflict with each other.
#'
#' @inheritParams RunAPOTC
#'
#' @param seurat_obj The seurat object to be adjusted.
#' @param new_clone_scale_factor a single numeric in (0, 1]. Changes the
#' `clone_scale_factor`.
#' @param new_rad_scale_factor a single numeric in (0, 1]. Changes the
#' radius scaling factor of all circles.
#' @param relocate_cluster numeric of arbitrary length. Indicates which
#' cluster(s) to relocate to new coordinates
#' @param relocation_coord numeric of length two, or a list of numerics of
#' length two with the same length as `relocate_cluster`. If it is a list, it
#' indicates each coordinate that the clusters in `relocate_cluster` should
#' move to. If it is just a numeric, all clusters in `relocate_cluster` will be
#' relocated to the same input coordinate, which is likely not desired
#' behavior, so this should only be used as convenience syntax when
#' `relocate_cluster` has length 1.
#' @param nudge_cluster numeric of arbitrary length. Indicates which
#' cluster(s) to "nudge"/translate their coordinate(s) by.
#' @param nudge_vector numeric of length two, or a list of numerics of length
#' two with the same length as `nudge_cluster`. If it is a list, it indicates
#' each translation vector (in other words, x-y coordinates) that the clusters
#' in `nudge_cluster` should be translated by. If it is just a numeric, all
#' clusters in `nudge_cluster` will be translated by the same input - which is
#' mostly syntactic sugar for translating a single cluster when
#' `nudge_cluster` has length 1.
#' @param recolor_cluster numeric of arbitrary length. Indicates which
#' cluster(s) should have their color changed.
#' @param new_color character of arbitrary length. Indicates the corresponding
#' new colors that selected clusters in `recolor_cluster` should be changed to.
#'
#' @return The adjusted `seurat_obj`
#' @export
#'
#' @examples
#' # do an APackOfTheClones run
#' pbmc <- RunAPOTC(get(data("combined_pbmc")), verbose = FALSE)
#'
#' # adjust the rad_scale_factor, and nudge cluster 1 by (1, 1)
#' pbmc <- AdjustAPOTC(
#' pbmc,
#' new_rad_scale_factor = 0.9,
#' nudge_cluster = 1,
#' nudge_vector = c(1, 1),
#' verbose = FALSE
#' )
#'
#' # plot the result
#' APOTCPlot(pbmc)
#'
#' # perhaps multiple clusters need to be relocated and repulsed
#' pbmc <- AdjustAPOTC(
#' pbmc,
#' relocate_cluster = c(5, 10),
#' relocation_coord = list(c(2, 3.5), c(0, 5)),
#' repulse = TRUE,
#' verbose = FALSE
#' )
#'
#' # plot again to check results
#' APOTCPlot(pbmc)
#'
AdjustAPOTC <- function(
seurat_obj,
reduction_base = NULL,
clonecall = NULL,
...,
extra_filter = NULL,
run_id = NULL,
new_rad_scale_factor = NULL,
new_clone_scale_factor = NULL,
repulse = FALSE,
repulsion_threshold = 1,
repulsion_strength = 1,
max_repulsion_iter = 10L,
relocate_cluster = NULL, # can also be a vector
relocation_coord = NULL, # vector or list of vectors
nudge_cluster = NULL, # same as above
nudge_vector = NULL,
recolor_cluster = NULL, #same as above
new_color = NULL,
#interactive = FALSE,
verbose = TRUE
) {
varargs_list <- list(...)
args <- hash::hash(as.list(environment()))
AdjustAPOTC_error_handler(args)
object_id <- infer_object_id_if_needed(args, varargs_list = varargs_list)
apotc_obj <- getApotcData(seurat_obj, object_id)
args <- hash::hash(as.list(environment()))
# # TODO
# if (interactive) {
# return(runShinyAdjustAPOTC(args))
# }
if (should_change(new_clone_scale_factor)) {
apotc_obj <- change_clone_scale(seurat_obj, args)
}
if (should_change(new_rad_scale_factor)) {
apotc_obj <- change_rad_scale(apotc_obj, new_rad_scale_factor)
}
if (should_change(recolor_cluster)) {
apotc_obj <- recolor_clusters(apotc_obj, recolor_cluster, new_color)
}
if (should_change(relocate_cluster)) {
apotc_obj <- relocate_clusters(
apotc_obj, relocate_cluster, relocation_coord
)
}
if (should_change(nudge_cluster)) {
apotc_obj <- nudge_clusters(apotc_obj, nudge_cluster, nudge_vector)
}
if (repulse) {
apotc_obj <- repulseClusters(
apotc_obj, repulsion_threshold, repulsion_strength,
max_repulsion_iter, verbose
)
}
seurat_obj <- setApotcData(seurat_obj, object_id, apotc_obj)
seurat_obj
}
AdjustAPOTC_error_handler <- function(args) {
# TODO - type check
# TODO rest of errors
check_apotc_identifiers(args)
if (should_change(args$new_rad_scale_factor)) {
        if (args$new_rad_scale_factor <= 0) {
stop(call. = FALSE, "new_rad_scale_factor must be a positive number")
}
}
if (should_change(args$new_clone_scale_factor)) {
        if (args$new_clone_scale_factor <= 0) {
            stop(call. = FALSE, "new_clone_scale_factor must be a positive number")
}
}
check_repulsion_params(args)
# if (!is.null(args$relocation_coord) && (length(args$relocate_cluster) != length(args$relocation_coord))) {
# return("length of relocate_cluster must be the same as the length of relocation_coord")
# }
# if (!is.null(args$nudge_vector) && (length(args$nudge_cluster) != length(args$nudge_vector))) {
# return("length of nudge_cluster must be the same as the length of nudge_vector")
# }
# if ((args$relocate_cluster != -1) && (args$nudge_cluster != -1)) {
# if (!is.null(intersect(args$relocation_cluster, args$nudge_cluster))) {
# return("There are repeated elements in relocate_cluster and/or nudge_cluster")
# }
# }
}
# TODO it should be more efficient + safer to mathematically transform all vals
change_clone_scale <- function(seurat_obj, args) {
if (args$verbose) {
message("Repacking all clusters with new clone scale factor")
}
past_params <- find_seurat_command(
seurat_obj = seurat_obj,
func_name = "RunAPOTC",
id = args$object_id
)@params
args$apotc_obj@clone_scale_factor <- args$new_clone_scale_factor
circlepackClones(
apotc_obj = args$apotc_obj,
ORDER = past_params$order_clones,
try_place = past_params$try_place,
verbose = args$verbose
)
}
change_rad_scale <- function(apotc_obj, new_factor) {
old_factor <- get_rad_scale_factor(apotc_obj)
conversion_num <- get_clone_scale_factor(apotc_obj) *
(new_factor - old_factor)
for (i in seq_len(get_num_clusters(apotc_obj))) {
curr <- apotc_obj@clusters[[i]]
if (isnt_empty(curr)) {
apotc_obj@clusters[[i]]$rad <- curr$rad + conversion_num
}
}
apotc_obj@rad_scale_factor <- new_factor
apotc_obj
}
recolor_clusters <- function(apotc_obj, recolor_cluster, new_color) {
for (i in seq_along(recolor_cluster)) {
apotc_obj@cluster_colors[recolor_cluster[i]] <- new_color[[i]]
}
apotc_obj
}
#FIXME (again :/)
relocate_clusters <- function(apotc_obj, relocate_cluster, relocation_coord) {
if (is.numeric(relocation_coord)) {
relocation_coord <- init_list(length(relocate_cluster), relocation_coord)
}
new_clusterlists <- get_clusterlists(apotc_obj)
for (i in seq_along(relocate_cluster)) {
cl_ind <- relocate_cluster[i]
new_clusterlists[[cl_ind]] <- move_cluster(
cluster = new_clusterlists[[cl_ind]],
new_coord = relocation_coord[[i]]
)
}
setModifiedClusterlists(apotc_obj, new_clusterlists)
}
nudge_clusters <- function(apotc_obj, nudge_cluster, nudge_vector) {
if (is.numeric(nudge_vector)) {
nudge_vector <- init_list(length(nudge_cluster), nudge_vector)
}
relocate_clusters(
apotc_obj,
relocate_cluster = nudge_cluster,
relocation_coord = operate_on_same_length_lists(
func = add,
l1 = nudge_vector,
l2 = get_centroids(apotc_obj)[nudge_cluster]
)
)
}
# TODO label movement
# need functions for readjusting the apotc reduction for better visuals
# also possible to boot up a shiny window in the future?
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/AdjustAPOTC.R |
#' @title
#' The ApotcData class
#'
#' @description
#' `r lifecycle::badge("experimental")`
#'
#' An S4 class for storing information about T/B cell clonal expansion to be
#' used by various [APackOfTheClones] functions. Instances of this object type
#' are stored by [RunAPOTC] in a seurat object's `@misc` slot under a list named
#' `"APackOfTheClones"`. This class is not meant to be directly instantiated,
#' accessed, nor modified by the user. Attributes should only be accessed by the
#' associated getters so it stays independent of the implementation.
#'
#' @slot reduction_base character indicating the reduction the plotting was
#' based off of.
#' @slot clonecall character indicating the column name of the seurat object's
#' metadata that contains the clone information.
#' @slot metadata_filter_string character indicating the metadata filter string
#' used to subset the seurat object before running APOTC.
#' @slot clusters Clustered clones: an R list of lists, each of length 5,
#' where the first 3 elements are numeric vectors
#' containing the x and y coordinates and radii of each clone in the cluster.
#' The fourth element is a numeric vector of length 2 indicating the centroid of
#' the cluster, and the fifth is the estimated cluster radius.
#' @slot centroids Centroids of the clusters, which default to the same
#' centroids as the clusterlists. This means the clusters have to be iterated
#' over every time the plotting function is called, though in practice this is
#' not a performance concern.
#' @slot clone_sizes the original unscaled clone sizes for the samples.
#' @slot clone_scale_factor scale factor to multiply `clone_sizes` by when
#' running the clonal expansion plotting algorithms
#' @slot rad_scale_factor scale factor to multiply the radii in clusterlists by
#' after they have all been computed, to increase spacing between circles. A
#' possible future improvement is to store a number to subtract instead.
#' @slot cluster_colors character vector indicating the color of each cluster.
#' @slot num_clusters numeric indicating the total number of seurat clusters.
#' @slot labels character vector of the label text for each cluster.
#' @slot label_coords list of numeric vectors of length two indicating the (x,y)
#' coordinates of each label if plotted
#'
#' @keywords internal
#' @noRd
#'
methods::setClass(
Class = "ApotcData",
slots = c(
reduction_base = 'character',
clonecall = 'character',
metadata_filter_string = 'character',
clusters = 'list',
centroids = 'list',
clone_sizes = 'list',
num_clusters = 'numeric',
        clone_scale_factor = 'numeric', # TODO: consider making these overridable in APOTCPlot
rad_scale_factor = 'numeric',
cluster_colors = 'character',
labels = 'character',
label_coords = 'list'
)
)
ApotcData <- function(
seurat_obj, metadata_filter_condition, clonecall, reduction_base,
clone_scale_factor, rad_scale_factor
) {
if (identical(metadata_filter_condition, "")) {
return(initializeApotcData(
seurat_obj, clonecall, reduction_base, clone_scale_factor, rad_scale_factor
))
}
initializeSubsetApotcData(
seurat_obj, metadata_filter_condition, clonecall, reduction_base,
clone_scale_factor, rad_scale_factor
)
}
initializeApotcData <- function(
seurat_obj, clonecall, reduction_base, clone_scale_factor, rad_scale_factor
) {
num_clusters <- get_num_total_clusters(seurat_obj) # may need to redo, will leave empty elements for invalid clusters
initial_centroids <- get_cluster_centroids(seurat_obj, reduction_base)
raw_all_clone_sizes <- count_raw_clone_sizes(
seurat_obj = seurat_obj, num_clusters = num_clusters, clonecall = clonecall
)
methods::new(
Class = "ApotcData",
reduction_base = reduction_base,
clonecall = clonecall,
metadata_filter_string = "",
clusters = list(),
centroids = initial_centroids,
clone_sizes = raw_all_clone_sizes,
num_clusters = num_clusters,
clone_scale_factor = clone_scale_factor,
rad_scale_factor = rad_scale_factor,
cluster_colors = gg_color_hue(num_clusters),
labels = gen_labels(num_clusters),
label_coords = initial_centroids
)
}
# create subset / new obj based on new conditions, assuming valid!
# based on an initialized (not necessarily packed) apotc obj
# assumes metadata_filter_condition cannot be null
initializeSubsetApotcData <- function(
seurat_obj, metadata_filter_condition, clonecall, reduction_base,
clone_scale_factor, rad_scale_factor
) {
seurat_obj <- subsetSeuratMetaData(seurat_obj, metadata_filter_condition)
apotc_obj <- initializeApotcData(
seurat_obj, clonecall, reduction_base, clone_scale_factor, rad_scale_factor
)
apotc_obj@metadata_filter_string <- metadata_filter_condition
apotc_obj
}
# pack the clones assuming centroids are present
circlepackClones <- function(apotc_obj, ORDER, try_place, verbose) {
apotc_obj@clusters <- pack_into_clusterlists(
sizes = get_processed_clone_sizes(apotc_obj),
centroids = get_centroids(apotc_obj),
num_clusters = get_num_clusters(apotc_obj),
rad_decrease = get_rad_decrease(apotc_obj),
ORDER = ORDER,
scramble = !ORDER,
try_place = try_place,
verbose = verbose
)
    # see which elements of sizes are empty and set corresponding elements empty
for (i in seq_len(get_num_clusters(apotc_obj))) {
if (isnt_empty(apotc_obj@clusters[[i]])) {
next
}
apotc_obj@centroids[[i]] <- list()
apotc_obj@label_coords[[i]] <- list()
# colors ignored, should have some way to match with seurat colors
}
apotc_obj
}
# function to do repulsion for both cases
repulseClusters <- function(
apotc_obj, repulsion_threshold, repulsion_strength, max_repulsion_iter,
verbose
) {
repulsed_clusters <- get_repulsed_clusterlists(
packed_clusters = get_clusterlists(apotc_obj),
initial_centroids = get_centroids(apotc_obj),
num_clusters = get_num_clusters(apotc_obj),
repulsion_threshold = repulsion_threshold,
repulsion_strength = repulsion_strength,
max_repulsion_iter = max_repulsion_iter,
verbose = verbose
)
setModifiedClusterlists(
apotc_obj, modified_clusterlists = repulsed_clusters
)
}
# function to modify the apotc_obj's relevant slots when modified clusterlists
# are introduced e.g. for cluster repulsion or relocation. This cannot be used
# for completely new irrelevant clusterlists, as the centroids and label_coords
# are modified correspondingly to the original clusters.
setModifiedClusterlists <- function(apotc_obj, modified_clusterlists) {
modified_centroids <- read_centroids(modified_clusterlists)
apotc_obj@label_coords <- move_coord_list_by_same_amount(
coord_list = get_label_coords(apotc_obj),
original_coord_list = get_centroids(apotc_obj),
new_coord_list = modified_centroids
)
apotc_obj@clusters <- modified_clusterlists
apotc_obj@centroids <- modified_centroids
apotc_obj
}
convert_to_rad_decrease <- function(clone_scale_factor, rad_scale_factor) {
clone_scale_factor * (1 - rad_scale_factor)
}
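# e.g. with a (hypothetical) clone_scale_factor of 0.3 and the default
# rad_scale_factor of 0.95, convert_to_rad_decrease(0.3, 0.95) returns
# 0.3 * (1 - 0.95) = 0.015, the absolute radius decrease applied to each circle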
# getters
get_reduction_base <- function(apotc_obj) {
apotc_obj@reduction_base
}
get_clonecall <- function(apotc_obj) {
apotc_obj@clonecall
}
get_metadata_filter_string <- function(apotc_obj) {
apotc_obj@metadata_filter_string
}
get_clusterlists <- function(apotc_obj) {
apotc_obj@clusters
}
get_centroids <- function(apotc_obj) {
apotc_obj@centroids
}
get_raw_clone_sizes <- function(apotc_obj) {
apotc_obj@clone_sizes
}
get_processed_clone_sizes <- function(apotc_obj) {
raw_tabled_clone_sizes <- get_raw_clone_sizes(apotc_obj)
processed_sizes <- init_list(get_num_clusters(apotc_obj), list())
for (i in seq_len(get_num_clusters(apotc_obj))) {
if (!is_empty_table(raw_tabled_clone_sizes[[i]])) {
processed_sizes[[i]] <- apotc_obj@clone_scale_factor *
sqrt(as.numeric(raw_tabled_clone_sizes[[i]][[1]]))
}
}
processed_sizes
}
get_num_clones <- function(apotc_obj) { # TODO test
sum(unlist(get_raw_clone_sizes(apotc_obj)))
}
get_num_clusters <- function(apotc_obj) {
apotc_obj@num_clusters
}
get_valid_num_clusters <- function(apotc_obj) {
n <- 0
for (cluster in apotc_obj@clusters) {
if (isnt_empty(cluster)) {
n <- n + 1
}
}
n
}
get_clone_scale_factor <- function(apotc_obj) {
apotc_obj@clone_scale_factor
}
get_rad_scale_factor <- function(apotc_obj) {
apotc_obj@rad_scale_factor
}
get_rad_decrease <- function(apotc_obj) {
convert_to_rad_decrease(
clone_scale_factor = get_clone_scale_factor(apotc_obj),
rad_scale_factor = get_rad_scale_factor(apotc_obj)
)
}
get_cluster_colors <- function(apotc_obj) {
apotc_obj@cluster_colors
}
get_labels <- function(apotc_obj) {
apotc_obj@labels
}
get_label_coords <- function(apotc_obj) {
apotc_obj@label_coords
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/ApotcData.R |
# script to manage the interface for accessing the apotc data
# all functions assume arguments are correct
# from the input of RunAPOTC, convert the condition to a call to be put in
# @meta.data %>% dplyr::filter(eval(parse(text = "output of this func")))
# assume that metadata_filter is a valid ADDITIONAL filter condition.
# assume that varargs_list is a valid named list where each name is a column
# and element is a string vector of which factors to INCLUDE
#
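# For instance (an illustrative input, not from a real dataset), the call
#     parse_to_metadata_filter_str(
#         metadata_filter = "nchar(CTaa) > 10",
#         varargs_list = list(seurat_clusters = c(1, 2))
#     )
# should yield the single filter string:
#     "(seurat_clusters==1|seurat_clusters==2)&(nchar(CTaa)>10)"
#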
parse_to_metadata_filter_str <- function(metadata_filter, varargs_list) {
if (!is_valid_args(varargs_list)) {
if (is_valid_filter_str(metadata_filter)) {
return(strip_unquoted_spaces(metadata_filter))
}
return("")
}
filter_strings <- vector("character", length(varargs_list))
colnames <- names(varargs_list)
for (i in seq_along(varargs_list)) {
filter_strings[i] <- col_cond_vec_to_filter_str(
condition_vector = sort(unique(varargs_list[[i]])), colnames[i]
)
}
filter_string <- sort_and_join_conds_by_and(filter_strings)
if (is_valid_filter_str(metadata_filter)) {
filter_string <- paste(
"(", filter_string, ")&(", metadata_filter, ")", sep = ""
)
}
strip_unquoted_spaces(filter_string)
}
is_valid_args <- function(varargs_list) isnt_empty(varargs_list)
is_valid_filter_str <- function(metadata_string) {
if (is.null(metadata_string)) return(FALSE)
if (identical(strip_spaces(metadata_string), "")) return(FALSE)
return(TRUE)
}
col_cond_vec_to_filter_str <- function(condition_vector, colname) {
UseMethod("col_cond_vec_to_filter_str")
}
col_cond_vec_to_filter_str.character <- function(
condition_vector, colname
) {
col_conds_to_str_w_insert(
condition_vector = condition_vector, colname = colname,
insert_char = "'"
)
}
col_cond_vec_to_filter_str.default <- function(
condition_vector, colname
) {
col_conds_to_str_w_insert(
condition_vector = condition_vector, colname = colname,
insert_char = ""
)
}
col_conds_to_str_w_insert <- function(
condition_vector, colname, insert_char
) {
filter_str <- ""
for (i in seq_along(condition_vector)) {
filter_str <- paste(
filter_str,
colname, "==", insert_char, condition_vector[i], insert_char,
"|", sep = ""
)
}
substr(filter_str, 1, nchar(filter_str) - 1)
}
sort_and_join_conds_by_and <- function(filter_strings) {
if (length(filter_strings) == 1) return(filter_strings[1])
paste(
"(", paste(sort(filter_strings), collapse = ")&("), ")", sep = ""
)
}
# functions for converting args of RunAPOTC to the apotc data sample id
# stored under under @misc[["APackOfTheClones"]][[id]]
infer_object_id_if_needed <- function(args, varargs_list) {
if (!should_compute(args$run_id)) return(args$run_id)
if (
is.null(args$reduction_base) &&
is.null(args$clonecall) &&
is.null(args$extra_filter) &&
is_empty(varargs_list)
) {
latest_id <- getLastApotcDataId(args$seurat_obj)
# verbosity?
message(paste(
"* using the latest APackOfTheClones Run Data with object id:",
latest_id
))
return(latest_id)
}
parse_to_object_id(
reduction_base = attempt_correction(
args$seurat_obj, args$reduction_base
),
        clonecall = .theCall(args$seurat_obj@meta.data, args$clonecall),
varargs_list = varargs_list,
metadata_filter = args$extra_filter
)
}
utils::globalVariables(c(".idSepStr", ".idNullStr"))
.idSepStr = ";"
.idNullStr = "_"
parse_to_object_id <- function(
reduction_base, clonecall, varargs_list, metadata_filter
) {
object_id <- paste(reduction_base, .idSepStr, clonecall, .idSepStr, sep = "")
if (!is_valid_args(varargs_list)) {
object_id <- paste(object_id, .idNullStr, .idSepStr, sep = "")
} else {
object_id <- paste(
object_id, varargs_list_to_id_segment(varargs_list),
.idSepStr, sep = ""
)
}
if (!is_valid_filter_str(metadata_filter)) {
return(paste(object_id, .idNullStr, sep = ""))
}
paste(object_id, gsub(" ", "", metadata_filter), sep = "")
}
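# e.g. with no subsetting varargs and no extra filter,
# parse_to_object_id("umap", "CTstrict", list(), NULL) evaluates to
# "umap;CTstrict;_;_" - the same id format seen in the getApotcDataIds examples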
get_default_apotc_id <- function(reduction_base, clonecall) {
parse_to_object_id(
reduction_base = reduction_base, clonecall = clonecall,
varargs_list = list(), metadata_filter = NULL
)
}
varargs_list_to_id_segment <- function(varargs_list) {
segments <- vector("character", length(varargs_list))
colnames <- names(varargs_list)
for (i in seq_along(varargs_list)) {
segments[i] <- paste(
colnames[i], "=",
repr_as_string(sort(unique(varargs_list[[i]]))),
sep = ""
)
}
if (length(segments) == 1) {
return(segments)
}
paste(sort(segments), collapse = ",")
}
# getting and setting related functions
getApotcData <- function(seurat_obj, obj_id) {
seurat_obj@misc[["APackOfTheClones"]][[obj_id]]
}
getLastApotcData <- function(seurat_obj) {
getApotcData(seurat_obj, getLastApotcDataId(seurat_obj))
}
setApotcData <- function(seurat_obj, obj_id, apotc_obj) {
seurat_obj@misc[["APackOfTheClones"]][[obj_id]] <- apotc_obj
seurat_obj
}
containsAnyApotcData <- function(seurat_obj) {
!is.null(getApotcDataIds(seurat_obj))
}
#' @title
#' Check for the existence of an APackOfTheClones run with its run id
#'
#' @description
#' `r lifecycle::badge("stable")`
#'
#' A convenience function to check for the existence of an APackOfTheClones
#' run with its run id, regardless of whether any run has been made.
#'
#' @param seurat_obj a seurat object
#' @param run_id character. The id of the associated ApotcRun.
#'
#' @return A logical indicating whether the run exists.
#' @export
#'
#' @examples
#' pbmc <- RunAPOTC(
#' seurat_obj = get(data("combined_pbmc")),
#' reduction_base = "umap",
#' clonecall = "strict",
#' run_id = "run1",
#' verbose = FALSE
#' )
#'
#' containsApotcRun(pbmc, "run1")
#' #> [1] TRUE
#'
#' containsApotcRun(pbmc, "run2")
#' #> [1] FALSE
#'
containsApotcRun <- function(seurat_obj, run_id) {
if (!is_seurat_object(seurat_obj)) stop("input must be a seurat object")
if (length(run_id) != 1) stop("the `run_id` argument must be of length 1")
if (!containsAnyApotcData(seurat_obj)) {
return(FALSE)
}
any(getApotcDataIds(seurat_obj) == run_id)
}
#' @title
#' Delete the results of an APackOfTheClones run
#'
#' @description
#' `r lifecycle::badge("stable")`
#'
#' A convenience function to erase all data associated with a particular run,
#' including the ApotcData and the logged command in `seurat_obj@commands`. The
#' run_id would no longer be accessible afterwards.
#'
#' @param seurat_obj a seurat object that has had [RunAPOTC] run on it
#' beforehand.
#' @param run_id character. The id of the associated ApotcRun.
#'
#' @return The modified input seurat object
#' @export
#'
#' @examples
#' pbmc <- RunAPOTC(
#' seurat_obj = get(data("combined_pbmc")),
#' reduction_base = "umap",
#' clonecall = "strict",
#' run_id = "run1",
#' verbose = FALSE
#' )
#'
#' getApotcDataIds(pbmc)
#' #> [1] "run1"
#'
#' # delete the data
#' pbmc <- deleteApotcData(pbmc, "run1")
#'
#' getApotcDataIds(pbmc)
#' #> NULL
#'
deleteApotcData <- function(seurat_obj, run_id) {
if (!is_seurat_object(seurat_obj)) stop("input must be a seurat object")
if (length(run_id) != 1) stop("the `run_id` argument must be of length 1")
if (!containsApotcRun(seurat_obj, run_id)) {
stop(paste("no run with id:", run_id, "is present"))
}
seurat_obj <- setApotcData(seurat_obj, run_id, NULL)
seurat_obj@commands[[get_command_name("RunAPOTC", run_id)]] <- NULL
seurat_obj
}
#' @title
#' Get all run ids of previous RunAPOTC runs on a seurat object
#'
#' @description
#' `r lifecycle::badge("stable")`
#'
#' A convenience function to get the ids of all previous RunAPOTC runs.
#'
#' @param seurat_obj a seurat object that has had [RunAPOTC] run on it
#' beforehand.
#'
#' @return a character vector of all run ids of previous RunAPOTC runs, in
#' the order they were run. If there are no runs on the object, it returns
#' `NULL`.
#' @export
#'
#' @examples
#' pbmc <- RunAPOTC(
#' seurat_obj = get(data("combined_pbmc")),
#' reduction_base = "umap",
#' clonecall = "strict",
#' verbose = FALSE
#' )
#'
#' getApotcDataIds(pbmc)
#' #> [1] "umap;CTstrict;_;_"
#'
#' pbmc <- RunAPOTC(
#' seurat_obj = pbmc,
#' reduction_base = "umap",
#' clonecall = "gene",
#' verbose = FALSE
#' )
#'
#' getApotcDataIds(pbmc)
#' #> [1] "umap;CTstrict;_;_" "umap;CTgene;_;_"
#'
getApotcDataIds <- function(seurat_obj) {
if (!is_seurat_object(seurat_obj)) stop("input must be a seurat object")
ids <- names(seurat_obj@misc[["APackOfTheClones"]])
if (identical(ids, character(0)) || is.null(ids)) return(NULL)
ids
}
#' @title
#' Get the object id of the most recent RunAPOTC run on a seurat object
#'
#' @description
#' `r lifecycle::badge("stable")`
#'
#' A convenience function to get the object id of the most recent valid
#' [RunAPOTC] run, to be used by [APOTCPlot] and [AdjustAPOTC]
#'
#' @param seurat_obj a seurat object that has had [RunAPOTC] run on it
#' beforehand.
#'
#' @return a character of the object id of the last [RunAPOTC] call
#' @export
#'
#' @examples
#' # first run
#' pbmc <- RunAPOTC(
#' seurat_obj = get(data("combined_pbmc")),
#' reduction_base = "umap",
#' clonecall = "strict",
#' verbose = FALSE
#' )
#'
#' getLastApotcDataId(pbmc)
#' #> [1] "umap;CTstrict;_;_"
#'
#' # second run with a different clonecall
#' pbmc <- RunAPOTC(
#' seurat_obj = pbmc,
#' reduction_base = "umap",
#' clonecall = "gene",
#' verbose = FALSE
#' )
#'
#' getLastApotcDataId(pbmc)
#' #> [1] "umap;CTgene;_;_"
#'
getLastApotcDataId <- function(seurat_obj) {
getlast(getApotcDataIds(seurat_obj))
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/ApotcIndexing.R |
# vectorized circle_layout - outputs list of clusterlists
pack_into_clusterlists <- function(
sizes, centroids, num_clusters, rad_decrease = 0,
ORDER = TRUE, scramble = FALSE, try_place = FALSE, verbose = TRUE
){
output_list <- init_list(num_elements = num_clusters, init_val = list())
# initialize progress bar stats
if (verbose) {
packed_clone_count <- 0
total_clone_count <- sum(sapply(sizes, length))
start_progress_bar()
}
for(i in 1:num_clusters){
input_rad_vec <- sizes[[i]]
if (!isnt_empty(input_rad_vec) || is.null(input_rad_vec)) {
next
}
output_list[[i]] <- cpp_circle_layout(
input_rad_vec = process_rad_vec(input_rad_vec, ORDER, scramble),
centroid = centroids[[i]],
rad_decrease = rad_decrease,
try_place = try_place,
verbose = FALSE
)
if (verbose) {
packed_clone_count <- packed_clone_count + length(input_rad_vec)
progress_bar(packed_clone_count, total_clone_count)
}
}
if (verbose) message("")
output_list
}
process_rad_vec <- function(input_rad_vec, ORDER, scramble) {
if (ORDER) {
return(sort(input_rad_vec, decreasing = TRUE))
}
if (scramble) {
# user should set seed themselves
return(sample(input_rad_vec))
}
input_rad_vec
}
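# e.g. process_rad_vec(c(2, 5, 1), ORDER = TRUE, scramble = FALSE) returns
# c(5, 2, 1), so the largest clones get packed first, nearest the centroid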
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/Main_algo.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
cpp_circle_layout <- function(input_rad_vec, centroid, rad_decrease = 0, try_place = FALSE, verbose = TRUE) {
.Call(`_APackOfTheClones_cpp_circle_layout`, input_rad_vec, centroid, rad_decrease, try_place, verbose)
}
rcppFilterReductionCoords <- function(seuratBarcodes, reductionCoords) {
.Call(`_APackOfTheClones_rcppFilterReductionCoords`, seuratBarcodes, reductionCoords)
}
get_average_vector <- function(vec_list) {
.Call(`_APackOfTheClones_get_average_vector`, vec_list)
}
get_component_repulsion_vector <- function(inp, i, j, G) {
.Call(`_APackOfTheClones_get_component_repulsion_vector`, inp, i, j, G)
}
do_cluster_intersect <- function(Cn_centroid, Cn_clRad, Cm_centroid, Cm_clRad, thr) {
.Call(`_APackOfTheClones_do_cluster_intersect`, Cn_centroid, Cn_clRad, Cm_centroid, Cm_clRad, thr)
}
calculate_transformation_vectors <- function(transformation_vectors, overall_repulsion_vec, num_clusters) {
.Call(`_APackOfTheClones_calculate_transformation_vectors`, transformation_vectors, overall_repulsion_vec, num_clusters)
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/RcppExports.R |
# Functions defined in src/repulsion.cpp
# - get_average_vector(vec_list)
# avg vector of a list of vectors
# - get_component_repulsion_vector(inp,i,j,G):
# repulsion vec of cluster i, j in inp
# - calculate_transformation_vectors(t,v,n):
# get average vectors from a list of list of vecs
# returns `list()` if all zeros
# - do_cluster_intersect(cn_c,cn_r,cm_c,cm_r,thr):
# check if two clusterlists overlap in c++
# convenience function for more code conciseness
# repulses clusters and returns the new modified list of clusterlists
get_repulsed_clusterlists <- function(
packed_clusters, initial_centroids, num_clusters, repulsion_threshold,
repulsion_strength, max_repulsion_iter, verbose
) {
if (verbose) {
message(paste(
"\nrepulsing all clusters | max iterations =", max_repulsion_iter
))
}
packed_clusters <- repulse_cluster(
packed_clusters, repulsion_threshold, repulsion_strength,
max_repulsion_iter, verbose
)
packed_clusters
}
# Alias to initialize direction vectors in a list
initialize_direction_vectors <- function(num_clusters) {
direction_vectors <- vector("list", num_clusters)
for (i in 1:num_clusters) {
direction_vectors[[i]] <- c(0, 0)
}
direction_vectors
}
# Alias to initialize the overall repulsion vec
initialize_list_of_transformation_vectors <- function(
blank_vectors, num_clusters
) {
output <- vector("list", num_clusters)
for (i in 1:num_clusters) {
output[[i]] <- blank_vectors
}
output
}
# check in current iteration if two clusters are worth repulsing
do_proceed <- function(inp, i, j, thr) {
if ((i == j) || (!(isnt_empty(inp[[i]]) && isnt_empty(inp[[j]])))) {
return(FALSE)
}
do_cl_intersect(inp[[i]], inp[[j]], thr)
}
do_cl_intersect <- function(Cn, Cm, thr = 1) {
do_cluster_intersect(
Cn[[4]], Cn[[5]], Cm[[4]], Cm[[5]], thr
)
}
# O(N^2) operation to calculate all repulsion vectors for each cluster
calculate_repulsion_vectors <- function(
overall_repulsion_vec, inp,
num_clusters, G = 1, thr = 0
) {
for (i in 1:num_clusters) {
for (j in 1:num_clusters) {
if (!do_proceed(inp,i,j,thr)) {
overall_repulsion_vec[[i]][[j]] <- c(0, 0)
next
}
overall_repulsion_vec[[i]][[j]] <- get_component_repulsion_vector(
inp, i, j, G
)
}
}
overall_repulsion_vec
}
# iterative repulsion. inp is a list of clusterlists.
# missing members of clusterlists are NA right now
# returns the modified clusterlist
repulse_cluster <- function(
inp, thr = 1, G = 1, max_iter = 20, verbose = TRUE
) {
start_progress_bar(verbose)
    # init variables - could use a class
    num_clusters <- length(inp)
    # the net transformation applied to each cluster at the end of each
    # iteration
    transformation_vectors <- initialize_direction_vectors(num_clusters)
    # stores all pairwise repulsion vectors of each iteration, which are yet to
    # be averaged into the transformation vectors
    overall_repulsion_vec <- initialize_list_of_transformation_vectors(
        transformation_vectors, num_clusters
    )
for(curr_iteration in 1:max_iter){
overall_repulsion_vec <- calculate_repulsion_vectors(
overall_repulsion_vec, inp, num_clusters, G, thr
)
transformation_vectors <- calculate_transformation_vectors(
transformation_vectors, overall_repulsion_vec, num_clusters
)
#transformation vectors is an empty list() if everything was c(0,0)
if (!isnt_empty(transformation_vectors)) {
end_progress_bar(verbose)
return(inp)
}
# with the transformation vectors established, each cluster is moved
for (i in 1:num_clusters) {
if (isnt_empty(inp[[i]])) {
inp[[i]] <- trans_coord(inp[[i]], transformation_vectors[[i]])
}
}
if (verbose) progress_bar(curr_iteration, max_iter)
}
end_progress_bar(verbose)
inp
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/Repulsion.R |
#' @title
#' Run the APackOfTheClones method on a combined Seurat object for
#' downstream visualization of clonal expansion
#'
#' @description
#' `r lifecycle::badge("stable")`
#'
#' Computes necessary information for an APackOfTheClones
#' clonal expansion plot ([APOTCPlot]) and stores it in the seurat object.
#' Gets sizes of unique clones and utilizes a circle-packing algorithm to
#' pack circles representing individual clones in approximately the same
#' dimensional reduction (`reduction_base`) coordinates.
#'
#' The parameter `extra_filter` along with an unlimited number of additional
#' keyword arguments can be used to filter the cells by certain conditions
#' in the metadata, and new results will be stored in addition to other runs
#' the users may have done.
#'
#' Each APackOfTheClones run is uniquely identified by the parameters
#' `reduction_base`, `clonecall`, `extra_filter`, and any additional keywords
#' passed to filter the metadata. Each distinct run result is stored in the
#' seurat object and has an associated Id generated from the aforementioned
#' parameters. To view the id of the latest run, call [getLastApotcDataId].
#' To view all the ids of previous runs, call [getApotcDataIds]. To work further
#' with a specific run (most importantly, plotting), the user can use this id
#' in the arguments with is slightly more convenient than passing in the
#' original RunAPOTC parameters again but both ways work.
#'
#' If the user wishes to manually customize/fix the expansion plot
#' generated, the circular packing information can be modified
#' with the [AdjustAPOTC] function.
#'
#' @param seurat_obj Seurat object with one or more dimension reductions and
#' already have been integrated with a TCR/BCR library with
#' `scRepertoire::combineExpression`.
#' @param reduction_base character. The seurat reduction to base the clonal
#' expansion plotting on. Defaults to `'umap'` but can be any reduction present
#' within the reductions slot of the input seurat object, including custom ones.
#' If `'pca'`, the cluster coordinates will be based on PC1 and PC2.
#' However, generally APackOfTheClones is used for displaying UMAP and
#' occasionally t-SNE versions to intuitively highlight clonal expansion.
#' @param clonecall character. The column name in the seurat object metadata to
#' use. See `scRepertoire` documentation for more information about this
#' parameter that is central to both packages.
#' @param ... additional "subsetting" keyword arguments indicating the rows
#' corresponding to elements in the seurat object metadata that should be
#' filtered by. E.g., `seurat_clusters = c(1, 9, 10)` will filter the cells to
#' those in the `seurat_clusters` column with any of the values 1, 9, and 10.
#' Unfortunately, column names in the seurat object metadata cannot
#' conflict with the keyword arguments. ***MAJOR NOTE*** if any subsetting
#' keyword arguments are a *prefix* of any preceding argument names (e.g. a
#' column named `reduction` is a prefix of the `reduction_base` argument)
#' R will interpret it as the same argument unless *both* arguments
#' are named. Additionally, this means any subsequent arguments *must* be named.
#' @param extra_filter character. An additional string that should be formatted
#' *exactly* like a statement one would pass into [dplyr::filter] that does
#' *additional* filtering to cells in the seurat object - on top of the other
#' keyword arguments - based on the metadata. This means that it will be
#' logically AND'ed with any keyword argument filters. This is a more flexible
#' alternative / addition to the filtering keyword arguments. For example, if
#' one wanted to filter by the length of the amino acid sequence of TCRs, one
#' could pass in something like `extra_filter = "nchar(CTaa) - 1 > 10"`. When
#' involving characters, ensure to enclose with single quotes.
#' @param run_id character. This will be the ID associated with the data of a
#' run, and will be used by other important functions like [APOTCPlot] and
#' [AdjustAPOTC]. Defaults to `NULL`, in which case the ID will be generated
#' in the following format:
#'
#' `reduction_base;clonecall;keyword_arguments;extra_filter`
#'
#' where `keyword_arguments` and `extra_filter` are underscore characters if
#' there was no input for the `...` and `extra_filter` parameters.
#' @param clone_scale_factor Dictates how much (between 0 and 1) to scale each
#' circle's radius when converting from clonotype counts into circles that
#' represent
#' individual clonotypes. The argument defaults to the character `"auto"`, and
#' if so, the most visually pleasing factor will be estimated.
#' @param rad_scale_factor numeric between 0 and 1. This value decreases the
#' radius of the smallest clones by this scale factor. And the absolute value
#' of this decrease will be applied to all packed circles, effectively shrinking
#' all circles on the spot, and introduce more constant spacing in between.
#' @param order_clones logical. Decides if the largest clone circles should be
#' near cluster centroids. This is highly recommended to be set to TRUE for
#' increased intuitiveness of the visualization, as resulting plots tend to
#' give an improved impression of the proportion of expanded clones. If
#' `FALSE`, will randomly scramble the positions of each circle. For the sake
#' of being replicable, a random seed is recommended to be set with [set.seed].
#' @param try_place If `TRUE`, always minimizes distance from a newly placed
#' circle to the origin in the circle packing algorithm.
#' @param repulse If `TRUE`, will attempt to push overlapping clusters away from
#' each other.
#' @param repulsion_threshold numeric. The radius that clonal circle clusters
#' overlap is acceptable when repulsing.
#' @param repulsion_strength numeric. The smaller the value the less the
#' clusters repulse each other per iteration, and vice versa.
#' @param max_repulsion_iter integer. The number of repulsion iterations.
#' @param override logical. If `TRUE`, will override any existing
#' APackOfTheClones run data with the same `run_id`.
#' @param verbose logical. Decides if visual cues are displayed to the R console
#' of the progress.
#'
#' @details
#' All APackOfTheClones run data is stored in the Seurat object under
#' `seurat_object@misc$APackOfTheClones`, which is a list of S4 objects of the
#' type "ApotcData", with each element corresponding to a unique run. The id of
#' each run is the name of each element in the list. The user
#' ***really shouldn't*** manually modify anything in the list as it may cause
#' unexpected behavior with many other functions.
#'
#' Additionally, it also logs a seurat command associated with the run in the
#' `seurat_object@commands` slot as a "SeuratCommand" object (from Seurat),
#' where the name of the object in the list is formatted as `RunAPOTC.run_id`.
#' Again, the user should not modify anything in these objects as they are used
#' by some related functions, mainly [AdjustAPOTC].
#'
#' @return A modified version of the input seurat object, which harbors data
#' necessary for visualizing the clonal expansion of the cells with [APOTCPlot]
#' and has a friendly user interface to modify certain attributes with
#' [AdjustAPOTC].
#'
#' @seealso [APOTCPlot], [AdjustAPOTC], [getApotcDataIds]
#'
#' @export
#'
#' @examples
#' data("combined_pbmc")
#'
#' # this is the recommended approach to use a custom run_id with default params
#' combined_pbmc <- RunAPOTC(combined_pbmc, run_id = "default", verbose = FALSE)
#'
#' # here's a separate run with some filters to the metadata, where
#' # `orig.ident` is a custom column in the example data. Notice that it is not
#' # a `RunAPOTC` parameter but a user keyword argument
#' combined_pbmc <- RunAPOTC(
#' combined_pbmc, run_id = "sample17", orig.ident = c("P17B", "P17L"),
#' verbose = FALSE
#' )
#'
#' # the exact same thing can be achieved with the `extra_filter` parameter
#' combined_pbmc <- RunAPOTC(
#' combined_pbmc,
#' run_id = "sample17",
#' extra_filter = "substr(orig.ident, 2, 3) == '17'",
#' override = TRUE,
#' verbose = FALSE
#' )
#'
RunAPOTC <- function(
seurat_obj,
reduction_base = "umap",
clonecall = "strict",
...,
extra_filter = NULL,
run_id = NULL,
clone_scale_factor = "auto",
rad_scale_factor = 0.95,
order_clones = TRUE,
try_place = FALSE,
repulse = TRUE,
repulsion_threshold = 1,
repulsion_strength = 1,
max_repulsion_iter = 20L,
override = FALSE,
verbose = TRUE
) {
# setup and check inputs
call_time <- Sys.time()
varargs_list <- list(...)
RunAPOTC_partial_arg_checker(hash::hash(as.list(environment())))
if (verbose) message("Initializing APOTC run...")
# compute inputs
reduction_base <- attempt_correction(seurat_obj, reduction_base)
    clonecall <- .theCall(seurat_obj@meta.data, clonecall)
if (should_estimate(clone_scale_factor)) {
clone_scale_factor <- estimate_clone_scale_factor(seurat_obj, clonecall)
if (verbose) message(paste(
"* Setting `clone_scale_factor` to",
round(clone_scale_factor, digits = 3)
))
}
metadata_filter_string <- parse_to_metadata_filter_str(
metadata_filter = extra_filter, varargs_list = varargs_list
)
obj_id <- infer_object_id_if_needed(
args = hash::hash(as.list(environment())), varargs_list = varargs_list
)
RunAPOTC_parameter_checker(hash::hash(as.list(environment())))
if (verbose) {
message(paste("* id for this run:", obj_id))
if (override && containsApotcRun(seurat_obj, obj_id)) {
message("* overriding results of the previous run")
}
}
# run the packing algos
apotc_obj <- ApotcData(
seurat_obj,
metadata_filter_string,
clonecall,
reduction_base,
clone_scale_factor,
rad_scale_factor
)
if (verbose) message("\nPacking clones into clusters")
apotc_obj <- circlepackClones(
apotc_obj,
ORDER = order_clones,
try_place = try_place,
verbose = verbose
)
if (repulse) {
apotc_obj <- repulseClusters(
apotc_obj,
repulsion_threshold,
repulsion_strength,
max_repulsion_iter,
verbose
)
}
# store the apotc object in the correct slot with the correct id
seurat_obj <- setApotcData(seurat_obj, obj_id, apotc_obj)
# TODO this could be simplified, something with looking at the grandparent
# environment frame with n = 2
command_obj <- make_apotc_command(call_time)
seurat_obj <- log_seurat_command(
seurat_obj = seurat_obj,
command_obj = command_obj,
id = obj_id
)
if (verbose) print_completion_time(call_time, newline = TRUE)
seurat_obj
}
RunAPOTC_partial_arg_checker <- function(args) {
check_apotc_identifiers(args)
check_filtering_conditions(args)
# Check if clone_scale_factor is numeric of length 1
if (
!is_a_numeric(args$clone_scale_factor)
&& !should_estimate(args$clone_scale_factor)
) {
stop(call. = FALSE,
"`clone_scale_factor` must be a numeric value of length 1."
)
}
# Check if rad_scale_factor is numeric of length 1
if (!is_a_numeric(args[["rad_scale_factor"]])) {
stop(call. = FALSE,
"`rad_scale_factor` must be a numeric value of length 1."
)
}
# Check if try_place is logical of length 1
if (!is_a_logical(args[["try_place"]])) {
stop(call. = FALSE, "`try_place` must be a logical value of length 1.")
}
check_repulsion_params(args)
# Check if override is logical of length 1
if (!is_a_logical(args[["override"]])) {
stop(call. = FALSE, "`override` must be a logical value of length 1.")
}
# Check if verbose is logical of length 1
if (!is_a_logical(args[["verbose"]])) {
stop(call. = FALSE, "`verbose` must be a logical value of length 1.")
}
}
check_apotc_identifiers <- function(args) {
if (!is_seurat_object(args[["seurat_obj"]])) {
stop(call. = FALSE, "`seurat_obj` must be a Seurat object.")
}
if (!is.null(args$reduction_base) && !is_a_character(args$reduction_base)) {
stop(call. = FALSE, "`reduction_base` must be a character of length 1.")
}
if (!is.null(args$clonecall) && !is_a_character(args$clonecall)) {
stop(call. = FALSE, "`clonecall` must be a character of length 1.")
}
if (!is.null(args$extra_filter) && !is_a_character(args$extra_filter)) {
stop(call. = FALSE, "`extra_filter` must be a character or NULL of length 1.")
}
}
check_filtering_conditions <- function(args, frame_level = 2) {
if (is_empty(args$varargs_list)) return()
    metadata_cols <- names(args$seurat_obj@meta.data)
all_formals <- get_processed_argnames(frame_level)
for (argname in names(args$varargs_list)) {
if (argname %in% metadata_cols) next
stop(call. = FALSE,
"should `", argname, "` be a function argument? ",
"If so, did you mean `", closest_word(argname, all_formals), "`? ",
"Otherwise, should `", argname, "` be a subsetting argument? ",
"If so, did you mean `", closest_word(argname, metadata_cols), "`?"
)
}
}
check_repulsion_params <- function(args) {
# Check if repulse is logical of length 1
if (!is_a_logical(args[["repulse"]])) {
stop(call. = FALSE, "`repulse` must be a logical value of length 1.")
}
    if (!args$repulse) return()
# Check if repulsion_threshold is numeric of length 1
if (!is_a_numeric(args[["repulsion_threshold"]])) {
stop(call. = FALSE,
"`repulsion_threshold` must be a numeric value of length 1."
)
}
if (args[["repulsion_threshold"]] <= 0) {
stop(call. = FALSE, "`repulsion_threshold` has to be a positive number")
}
# Check if repulsion_strength is numeric of length 1
if (!is_a_numeric(args[["repulsion_strength"]])) {
stop(call. = FALSE,
"`repulsion_strength` must be a numeric value of length 1."
)
}
if (args[["repulsion_strength"]] <= 0) {
stop(call. = FALSE, "`repulsion_strength` has to be a positive number")
}
# Check if max_repulsion_iter is an integer of length 1
if (!is_an_integer(args[["max_repulsion_iter"]])) {
stop(call. = FALSE,
"`max_repulsion_iter` must be an integer value of length 1."
)
}
if (args[["max_repulsion_iter"]] <= 0) {
stop(call. = FALSE, "`max_repulsion_iter` has to be a positive number")
}
}
RunAPOTC_parameter_checker <- function(args) {
if (args[["clone_scale_factor"]] <= 0) {
stop(call. = FALSE,
"`clone_scale_factor` has to be a positive real number"
)
}
if (args[["rad_scale_factor"]] <= 0 || args[["rad_scale_factor"]] > 1) {
stop(call. = FALSE,
"`rad_scale_factor` has to be a positive real number in (0, 1]"
)
}
if (!args$override && containsApotcRun(args$seurat_obj, args$obj_id)) {
stop(call. = FALSE, paste(
"An APackOfTheClones run with the the parameters", args$obj_id,
"appears to already have been ran. If this is intended,",
"set the `override` argument to `TRUE` and re-run."
))
}
# TODO more checks
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/RunAPOTC.R |
# the following code is copied and modified from ggplot2 under the R/aes.R file
# (under MIT license). Implements the soft deprecated `aes_string()` function as
# `apotc_aes_string()`
apotc_rename_aes <- function(x) {
names(x) <- ggplot2::standardise_aes_names(names(x))
duplicated_names <- names(x)[duplicated(names(x))]
if (length(duplicated_names) > 0L) {
return(NULL)
}
x
}
apotc_aes_string <- function(x, y, ...) {
mapping <- list(...)
if (!missing(x)) mapping["x"] <- list(x)
if (!missing(y)) mapping["y"] <- list(y)
    mapping <- lapply(mapping, function(x) {
        if (is.character(x)) {
            x <- rlang::parse_expr(x)
        }
        x
    })
structure(apotc_rename_aes(mapping), class = "uneval")
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/aes_string.R |
gen_labels <- function(num_clusters) {
label_vec <- character(num_clusters)
for (i in seq_len(num_clusters)) {
label_vec[i] <- paste("C", i, sep = "")
}
label_vec
}
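# e.g. gen_labels(3) returns c("C1", "C2", "C3")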
# show the labels on the plot
insert_labels <- function(plt, apotc_obj, size) {
for (i in 1:apotc_obj@num_clusters) {
if (!isnt_empty(apotc_obj@clusters[[i]])) {
next
}
plt <- plt + ggplot2::annotate(
"text",
x = apotc_obj@label_coords[[i]][1],
y = apotc_obj@label_coords[[i]][2],
label = apotc_obj@labels[i],
size = size
)
}
plt
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/cluster_labels.R |
#script for functions to deal with centroids and cluster coords
# All clusters of circles are referred to as a "clusterlist" in code comments
# A clusterlist just refers to a simple R list of length five with:
# [["x"]] numeric vector of x coordinates of all circles
# [["y"]] numeric vector of y coordinates of all circles
# [["rad"]] numeric vector of radii of all circles
# [["centroid"]] numeric vector of the cluster centroid x and y coordinate
# [["clRad"]] approximate radius of the cluster
# centroid finder for a matrix of [x, y, cluster]
find_centroids <- function(df, total_clusters) {
cll <- split(df, factor(df[, 3])) #the last cluster column becomes redundant
l <- length(cll)
nameset <- rep(c(""), times = l)
xset <- rep(c(0), times = l)
yset <- xset
for (i in 1:l){
nameset[i] <- cll[[i]][,3][1]
xset[i] <- sum(cll[[i]][, 1]) / length(cll[[i]][, 1])
yset[i] <- sum(cll[[i]][, 2]) / length(cll[[i]][, 2])
}
list_output <- init_list(num_elements = total_clusters, init_val = list())
for (i in 1:l) {
list_output[[as.integer(nameset[i])]] <- c(xset[i], yset[i])
}
return(list_output)
}
# get reduction centroids from seurat obj, where the barcodes in the reduction
# cell embeddings will be filtered to be exactly the same as those left in the
# metadata in case it was additionally filtered.
get_cluster_centroids <- function(
seurat_obj, reduction = "umap", passed_in_reduc_obj = FALSE
) {
if (passed_in_reduc_obj) {
        reduc_coords <- seurat_obj@cell.embeddings[, 1:2]
} else {
reduc_coords <- get_2d_embedding(seurat_obj, reduction)
}
find_centroids(
df = data.frame(
rcppFilterReductionCoords(
                seuratBarcodes = rownames(seurat_obj@meta.data),
reductionCoords = reduc_coords
),
            seurat_obj@meta.data[["seurat_clusters"]]
),
total_clusters = get_num_total_clusters(seurat_obj)
)
}
# TRANSFORM coordinates of a clusterlist from c(0, 0) to its own new centroid,
# or MOVE to new coord from current
trans_coord <- function(cluster, new_coord = NULL) {
if (!is.null(new_coord)) {
dx <- new_coord[1]
dy <- new_coord[2]
cluster[[4]] <- cluster[[4]] + c(dx, dy)
} else {
dx <- cluster[[4]][1]
dy <- cluster[[4]][2]
}
cluster[[1]] <- cluster[[1]] + dx
cluster[[2]] <- cluster[[2]] + dy
cluster
}
# MOVE clusterlist to a new centroid, irrespective of previous centroid
move_cluster <- function(cluster, new_coord) {
dx <- cluster[[4]][1] - new_coord[1]
dy <- cluster[[4]][2] - new_coord[2]
cluster[[1]] <- cluster[[1]] - dx
cluster[[2]] <- cluster[[2]] - dy
cluster[[4]] <- new_coord
cluster
}
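# e.g. with the illustrative two-circle clusterlist above, moving it to
# c(3, 4) shifts both circle coordinates by the same amount, sets the centroid
# to c(3, 4), and leaves the radii and cluster radius untouched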
# function to GET a list of centroids from a list of clusterlists
read_centroids <- function(list_of_clusterlists) {
output_centroids <- init_list(length(list_of_clusterlists), list())
for (i in seq_along(list_of_clusterlists)) {
curr <- list_of_clusterlists[[i]]
if (is.null(get_centroid(curr)) || is_empty(get_centroid(curr))) {
next
}
output_centroids[[i]] <- get_centroid(curr)
}
output_centroids
}
# getters for a single clusterlist
get_x_coords <- function(l) l[[1]]
get_y_coords <- function(l) l[[2]]
get_radii <- function(l) l[[3]]
get_centroid <- function(l) l$centroid
get_cluster_radius <- function(l) l[[5]]
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/clusters.R |
# wrapper to get the number of identified clusters:
count_umap_clusters <- function(seurat_obj) {
    length(levels(seurat_obj@meta.data[["seurat_clusters"]]))
}
# get the ggplot colors - important function for 'apotc'
gg_color_hue <- function(n) {
hues <- seq(15, 375, length = n + 1)
grDevices::hcl(h = hues, l = 65, c = 100)[1:n]
}
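# e.g. gg_color_hue(3) should reproduce ggplot2's default 3-color discrete
# palette: "#F8766D" "#00BA38" "#619CFF"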
# generate a hashmap of cluster to color
gen_cluster_color_hashmap <- function(num_clusters) {
color_vec <- gg_color_hue(num_clusters)
output <- hash::hash()
for (i in 1:num_clusters) {
cluster_str <- paste("cluster", as.character(i-1))
output[cluster_str] <- color_vec[i]
}
output
}
#' inserts a list of colors into a column in the cluster df by label with the
#' v0.1.2 version of the packing algos
#' @noRd
insert_colors <- function(cluster_dataframe, num_clusters) {
color_hashmap <- gen_cluster_color_hashmap(num_clusters)
color_vec <- cluster_dataframe[[1]] # 1 is label
for (i in seq_along(color_vec)) {
color_vec[i] <- color_hashmap[[color_vec[i]]]
}
return(cluster_dataframe %>% dplyr::mutate("color" = color_vec))
}
# new version that simply takes the readily existing colors in a seurat
# object, and adds the colors to the dataframe
# pair colors to hashmap
pair_colors_to_hash <- function(apotc_obj) {
color_vec <- apotc_obj@cluster_colors
output <- hash::hash()
for (i in 1:apotc_obj@num_clusters) {
cluster_str <- paste("cluster", as.character(i - 1))
output[cluster_str] <- color_vec[i]
}
output
}
extract_and_add_colors <- function(apotc_obj, plot_df) {
color_hashmap <- pair_colors_to_hash(apotc_obj)
color_vec <- plot_df[[1]] # 1 is label
for (i in seq_along(color_vec)) {
color_vec[i] <- color_hashmap[[color_vec[i]]]
}
plot_df %>% dplyr::mutate("color" = color_vec)
}
# in the future should probably make a fake ggplot cluster legend on the right
# side by inserting scatterplot points (like in the seurat UMAP plot). But the
# problem is that it becomes inconsistent with the clone size legend.
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/colors.R |
#' @title
#' Artificially generated Seurat object
#'
#' @description
#' A generated 'SeuratObject' of a small single-sample sc-RNAseq experiment.
#' Has a corresponding T-cell receptor library generated from
#' single cell immune profiling, named `"mini_clonotype_data"`
#'
#' @usage data("mini_seurat_obj")
#'
#' @format A Seurat object with the following slots filled
#' \describe{
#' \item{assays}{Currently only contains one assay ("RNA" - scRNA-seq
#' expression data)
#' \itemize{
#' \item{counts - Raw expression data}
#' \item{data - Normalized expression data}
#' \item{scale.data - Scaled expression data}
#' \item{var.features - names of the current features selected as variable}
#' \item{meta.features - Assay level metadata such as mean and variance}
#' }}
#' \item{meta.data}{Cell level metadata}
#' \item{active.assay}{Current default assay}
#' \item{active.ident}{Current default idents}
#' \item{graphs}{Neighbor graphs computed, currently stores the SNN}
#' \item{reductions}{Dimensional reductions: PCA, UMAP, and tSNE}
#' \item{version}{Seurat version used to create the object}
#' \item{commands}{Command history}
#' }
#'
#' @seealso [mini_clonotype_data()]
"mini_seurat_obj"
#' @title
#' Artificially generated T cell receptor library
#'
#' @keywords internal
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#'
#' A generated dataframe of a T-cell receptor (TCR) library generated from
#' single cell immune profiling. It is a subset the full dataframe
#' which would usually have up to 18 columns containing different data,
#' because the intended purpose of this object is to test various functions
#' in 'APackOfTheClones'. The dataframe compliments `mini_seurat_obj` and
#' can be integrated into it with `integrate_tcr`.
#'
#' @usage data("mini_clonotype_data")
#'
#' @format `data.frame`
#' A data frame with 80 rows and 2 columns:
#' \describe{
#' \item{barcode}{barcodes corresponding to each sequenced cell}
#' \item{raw_clonotype_id}{clonotype information for each cell}
#' }
#'
#' @details Note that the clonotypes in the `raw_clonotype_id` column
#' actually do not contain all of clonotype`1`...clonotype`n`
#'
#' @seealso [mini_seurat_obj()]
"mini_clonotype_data"
#' @title
#' Example Multi-sampled T-cell seurat object with integrated TCR library
#'
#' @description
#' `r lifecycle::badge("experimental")`
#'
#' Generated with `scRepertoire::combineExpression`, more specifically, with:
#'
#' `r get(data(combined_pbmc))@commands[["combineExpression"]]@call.string`
#'
#' @usage data("combined_pbmc")
#'
#' @format A Seurat object with the following slots filled
#' \describe{
#' \item{assays}{Currently only contains one assay ("RNA" - scRNA-seq expression data)
#' \itemize{
#' \item{counts - Raw expression data}
#' \item{data - Normalized expression data}
#' \item{scale.data - Scaled expression data}
#' \item{var.features - names of the current features selected as variable}
#' \item{meta.features - Assay level metadata such as mean and variance}
#' }}
#' \item{meta.data}{Cell level metadata with a combined TCR contig list from `scRepertoire`}
#' \item{active.assay}{Current default assay}
#' \item{active.ident}{Current default idents}
#' \item{graphs}{Neighbor graphs computed, currently stores the SNN}
#' \item{reductions}{Dimensional reductions: UMAP}
#' \item{version}{Seurat version used to create the object}
#' \item{commands}{Command history, including the one used to create this object "combineExpression"}
#' }
#'
"combined_pbmc"
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/data.R |
md_deprecation_docstring <- function() {
paste(
'***ALL v0.1.x functions are deprecated***, and the workflow has been ',
'completely revamped - now depending on the scRepertoire v2 package - ',
'which allows for the processing of multi-sampled single cell data. ',
    'Please read the vignettes with `browseVignettes("APackOfTheClones")` ',
'or visit https://qile0317.github.io/APackOfTheClones/',
sep = ""
)
}
deprecation_docstring <- function() {
gsub("*", "", md_deprecation_docstring(), fixed = TRUE)
}
#' DEPRECATED: Integrate a single TCR library into Seurat object metadata
#'
#' @description
#' `r lifecycle::badge('deprecated')`
#'
#' `r md_deprecation_docstring()`
#'
#' Modifies a `Seurat` object's metadata by taking all the columns
#' of the `all_contig_annotations.csv`, and adding new elements to
#' `[email protected]`, corresponding to each cells' barcodes and handling
#' duplicates (since the sequencing of TRA and TRB genes creates multiple data
#' points for the same cell).
#'
#' @details
#' Columns from cells (barcodes) that had duplicates in another row are
#' concatenated into strings, separated by `__` in the metadata element.
#' Barcodes from the TCR library that had no matches to barcodes in the
#' `seurat_obj` will add `NA`s for all elements of the same index.
#'
#' @param seurat_obj Seurat object
#' @param tcr_file `data.frame` of the T cell library generated by Cell Ranger.
#' It is very important that the row with cell barcodes is strictly named
#' `"barcode"`, which is the default name of barcodes in 10X's
#' `all_contig_annotations.csv` file.
#' @param verbose if `TRUE`, will display a progress bar to the R console.
#'
#' @return Returns a new Seurat object with new elements in the metadata
#'
#' @export
#' @importFrom data.table .GRP
#' @importFrom data.table .SD
#'
#' @keywords internal
#'
#' @examples
#' library(Seurat)
#' library(APackOfTheClones)
#' data("mini_clonotype_data","mini_seurat_obj")
#'
#' # integrate the TCR data into new seurat object
#' integrated_seurat_object <- integrate_tcr(
#' mini_seurat_obj, mini_clonotype_data, verbose = FALSE
#' )
#'
#' integrated_seurat_object
#'
#' @references
#' atakanekiz (2019) Tutorial:Integrating VDJ sequencing data with Seurat.
#' `https://www.biostars.org/p/384640/`
#'
integrate_tcr <- function(seurat_obj, tcr_file, verbose = TRUE) {
time_called <- Sys.time()
lifecycle::deprecate_warn(
when = "1.0.0",
what = I("integrate_tcr")
)
seurat_obj <- dev_integrate_tcr(seurat_obj, tcr_file, verbose)
seurat_obj@commands[["integrate_tcr"]] <- make_apotc_command(time_called)
seurat_obj
}
dev_integrate_tcr <- function(seurat_obj, tcr_file, verbose) {
tcr <- data.table::as.data.table(tcr_file)
# Prepare a progress bar to monitor progress (helpful for large aggregations)
if (verbose) {
message("integrating TCR library into seurat object")
grpn <- data.table::uniqueN(tcr$barcode)
pb <- utils::txtProgressBar(min = 0, max = grpn, style = 3)
}
# Generate a function that will concatenate unique data entries and collapse
# duplicate rows. first factorize the data and then get factor levels as
# unique data points. Then data points are pasted together separated with
# "__" to access later on if needed
data_concater <- function(x){
x <- levels(factor(x)) # not sure if na.omit is needed on x
paste(x, collapse = "__")
}
# This code applies data_concater function per barcodes to create a
# concatenated string with the information we want to keep
if (verbose) {
tcr_collapsed <- tcr[, {utils::setTxtProgressBar(pb, .GRP);
lapply(.SD, data_concater)},
by = "barcode"
]
} else {
tcr_collapsed <- tcr[, lapply(.SD, data_concater), by = "barcode"]
}
# assign rownames for integration and add metadata
rownames(tcr_collapsed) <- tcr_collapsed$barcode
# remove NA
tcr_collapsed <- stats::na.omit(tcr_collapsed)
seurat_obj <- Seurat::AddMetaData(
seurat_obj,
metadata = tcr_collapsed
)
if (verbose) {
percent_integrated <- 100 - percent_na(seurat_obj)
message(paste(
"\nPercent of unique barcodes:",
as.character(round(percent_integrated)),
"%"
))
}
return(seurat_obj)
}
#' @title
#' DEPRECATED: count the number of clonotype sizes per cell cluster in a seurat
#' object integrated with a TCR library
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#'
#' `r md_deprecation_docstring()`
#'
#' @details
#' The function is no longer needed as the `scRepertoire` workflow takes care
#' of this step.
#'
#' @param integrated_seurat_obj Seurat object that has been integrated with a
#' T-cell receptor library with the deprecated function
#' \code{\link{integrate_tcr}}. More specifically, in the metadata, there must
#' at least be the elements `seurat_clusters` and `raw_clonotype_id`
#'
#' @return A list of `table` objects, where each element is tabled
#' clonotype frequencies for the seurat cluster corresponding to the same index
#' - 1. For example, the 5th element is a tabled frequency of counts that
#' corresponds to the 4th seurat cluster (as seurat clusters are 0-indexed).
#' If an element is `NULL`, it indicates that there were no corresponding T-cell
#' receptor barcodes for the cells in the cluster.
#'
#' @export
#'
#' @examples
#' library(Seurat)
#' library(APackOfTheClones)
#' data("mini_clonotype_data","mini_seurat_obj")
#'
#' # produce an integrated seurat_object
#' integrated_seurat_object <- integrate_tcr(
#' mini_seurat_obj, mini_clonotype_data, verbose = FALSE
#' )
#'
#' clonotype_counts <- count_clone_sizes(integrated_seurat_object)
#' clonotype_counts
#'
count_clone_sizes <- function(integrated_seurat_obj) {
lifecycle::deprecate_warn(
when = "1.0.0",
what = I("`count_clone_sizes`"),
with = I("countCloneSizes")
)
if (is.null([email protected][["seurat_clusters"]])) {
stop("No seurat clusters detected on the seurat object")
}
if (is.null([email protected][["raw_clonotype_id"]])) {
stop(paste(
"Seurat object is not integrated with a T-cell receptor,",
"library or has no metadata `raw_clonotype_id`"
))
}
df <- data.frame(
"clusters" = [email protected][["seurat_clusters"]],
"clonotype_ids" = [email protected][["raw_clonotype_id"]]
)
freq_df <- stats::aggregate(
clonotype_ids ~ clusters, data = df, function(x) table(x)
)
num_clusters <- length(levels(
[email protected][["seurat_clusters"]]
))
freq <- vector("list", num_clusters)
for (i in seq_len(nrow(freq_df))) {
freq[[freq_df[[1]][i]]] <- table(as.numeric(freq_df[[2]][[i]]))
}
freq
}
#' @title
#' DEFUNCT: Visualize T cell clonal expansion with a ball-packing plot.
#'
#' @description
#' `r lifecycle::badge("defunct")`
#'
#' `r md_deprecation_docstring()`
#'
#' @param ... arbitrary arguments
#'
#' @keywords internal
#'
#' @return error message
#' @export
#'
clonal_expansion_plot <- function(...) {
lifecycle::deprecate_stop(
when = "1.0.0",
what = I(
"visualizing clonal expansion with `clonal_expansion_plot`"
),
with = I("vizAPOTC")
)
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/deprecated_functions.R |
#' @title count the number of clonotype sizes per cell cluster in a seurat
#' object combined with a VDJ library
#'
#' @description
#' `r lifecycle::badge("stable")`
#'
#' Get clonotype frequencies from a seurat object's meta.data slot.
#'
#' @inheritParams RunAPOTC
#' @param seurat_obj a seurat object combined with a VDJ library via the
#' `scRepertoire` package.
#'
#' @return A list of table objects, with the table at each index corresponding
#' to each cluster index. Each table's names are the clonotype name indicated
#' by `clonecall` after filtering, while the values are the actual clone sizes.
#' @export
#'
#' @examples
#' data("combined_pbmc")
#'
#' countCloneSizes(combined_pbmc)
#' countCloneSizes(combined_pbmc, "aa")
#' countCloneSizes(combined_pbmc, "nt", orig.ident = c("P17B", "P17L"))
#'
countCloneSizes <- function(
seurat_obj, clonecall = "strict", extra_filter = NULL, ...
) {
# check inputs
if (!is_seurat_object(seurat_obj))
stop("`seurat_obj` must be a Seurat object.")
if (!is_a_character(clonecall))
stop("`clonecall` must be a character of length 1.")
if (!is.null(extra_filter) && !is_a_character(extra_filter))
stop("`extra_filter` must be a character of length 1.")
check_filtering_conditions(as.list(environment()), frame_level = 1)
clonecall <- .theCall([email protected], clonecall)
filter_string <- parse_to_metadata_filter_str(
metadata_filter = extra_filter, varargs_list = list(...)
)
if (is_valid_filter_str(filter_string)) {
seurat_obj <- subsetSeuratMetaData(seurat_obj, filter_string)
}
count_raw_clone_sizes(
seurat_obj = seurat_obj,
num_clusters = get_num_total_clusters(seurat_obj),
clonecall = clonecall
)
}
# count the raw clone from the integrated seurat object from the METADATA
# TODO should make another function with a user wrapper AND as a getter
count_raw_clone_sizes <- function(
seurat_obj, num_clusters, clonecall
) {
# aggregate the raw counts
freq_df <- stats::aggregate(
stats::as.formula(paste(clonecall, "~ seurat_clusters")),
[email protected],
function(x) table(x)
)
# compile the tabled counts, purposefully not modifying them
cluster_indicies <- as.numeric(freq_df[[1]]) # converts to one based indexing!
num_valid_clusters <- length(cluster_indicies)
index <- 1
clone_sizes <- init_list(num_clusters, table(NULL))
for (i in 1:num_clusters) {
if (index > num_valid_clusters) {
break
}
if (i != cluster_indicies[index]) {
next
}
clone_sizes[[i]] <- freq_df[[2]][index]
index <- index + 1
}
clone_sizes
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/get_clone_sizes.R |
# script to make a custom circle size legend overlay
# FIXME something's wrong when running vizAPOTC(sce): 46 appears twice in the
# legend but one of the circles is smaller
insert_legend <- function(
plt,
plt_dims,
apotc_obj,
sizes,
pos,
buffer,
color = "#808080",
n = 360,
spacing = "auto",
legend_label = "Clone sizes",
legend_textsize = 5,
do_add_legend_border = FALSE
) {
# setup relevant variables
rad_decrease <- get_rad_decrease(apotc_obj)
sizes <- get_processed_legend_sizes(apotc_obj, sizes)
pos <- correct_legend_coord_if_str(pos)
spacing <- process_legend_spacing(spacing, plt_dims, rad_decrease)
# calculate relevant legend plotting data
unpositioned_legend_df <- gen_unpositioned_legend_df(
legend_sizes = sizes,
spacing = spacing,
circ_scale_factor = apotc_obj@clone_scale_factor,
rad_decrease = rad_decrease,
color = color
)
legend_dims <- get_legend_dims(unpositioned_legend_df)
if (should_estimate(pos)) {
legend_df <- estimate_best_legend_df(
plt_dims, apotc_obj, unpositioned_legend_df, legend_dims, buffer
)
} else {
legend_df <- generate_legend_df(
pos, unpositioned_legend_df, legend_dims, plt_dims, buffer
)
}
label_coord <- get_legend_title_coord(legend_df, legend_dims, spacing)
# plotting
# add the legend label on top
plt <- plt + ggplot2::annotate(
"text", x = label_coord[1], y = label_coord[2],
label = legend_label, size = legend_textsize
)
# add the background
if (do_add_legend_border) {
plt <- add_legend_backing(
plt = plt, plt_dims = plt_dims, legend_df = legend_df
)
}
# add the side number labels
plt <- plt + ggplot2::annotate(
"text", x = legend_df[, "label_x"], y = legend_df[, "y"],
label = legend_df[, "labels"], size = legend_textsize
)
# add the circles and return
plt + ggforce::geom_circle(
data = legend_df,
mapping = apotc_aes_string(
x0 = "circle_x",
y0 = "y",
r = "rad",
fill = "color"
),
linetype = "blank",
n = n
)
}
get_processed_legend_sizes <- function(apotc_obj, s) {
if (is.numeric(s)) return(sort(unique(c(1, s))))
if (should_estimate(s)) return(estimate_legend_sizes(apotc_obj))
s
}
estimate_legend_sizes <- function(apotc_obj) {
sizes <- unlist(apotc_obj@clone_sizes)
sort(unique(round(c(
1,
stats::median(sizes), mean(sizes), mean(unique(sizes)),
max(sizes)
))))
}
correct_legend_coord_if_str <- function(pos) {
if (is.numeric(pos)) {
return(pos)
}
pos <- strip_and_lower(pos)
pos <- switch(
pos,
"auto" = "auto",
"topleft" = "top_left",
"topright" = "top_right",
"bottomleft" = "bottom_left",
"bottomright" = "bottom_right",
pos
)
if (should_estimate(pos)) return(pos)
user_attempt_correction(
s = pos,
strset = c("top_left", "top_right", "bottom_left", "bottom_right"),
stop_msg_start = "invalid legend coordinate string"
)
}
process_legend_spacing <- function(spacing, plt_dims, rad_decrease) {
if (should_estimate(spacing)) {
return(calculate_legend_spacing(spacing, plt_dims, rad_decrease))
}
spacing
}
calculate_legend_spacing <- function(
spacing, plt_dims, rad_decrease, portion = 0.05
) {
(abs(get_xr(plt_dims)[1]) * portion) - (2 * rad_decrease)
}
# given the circle placements, estimate the legend dataframe with the least
# *number* of circles that overlap
estimate_best_legend_df <- function(
plt_dims, apotc_obj, unpositioned_legend_df, legend_dims, buffer
) {
min_num_circles_covered <- Inf
best_legend_df <- data.frame()
for (pos in c("top_left", "top_right", "bottom_left", "bottom_right")) {
curr_legend_df <- generate_legend_df(
pos, unpositioned_legend_df, legend_dims, plt_dims, buffer
)
curr_num_circles_covered <- num_circles_covered_by_legend(
apotc_obj,
minmax_dims = get_legend_backing_minmax_dims(curr_legend_df)
)
if (curr_num_circles_covered == 0) {
return(curr_legend_df)
}
if (curr_num_circles_covered < min_num_circles_covered) {
min_num_circles_covered <- curr_num_circles_covered
best_legend_df <- curr_legend_df
}
}
best_legend_df
}
num_circles_covered_by_legend <- function(apotc_obj, minmax_dims) {
num_circles_covered <- 0
for (cluster in apotc_obj@clusters) {
for (i in seq_along(cluster$x)) {
does_overlap_x <- is_bound_between(
cluster$x[i],
lowerbound = minmax_dims["xmin"] + cluster$rad[i],
upperbound = minmax_dims["xmax"] - cluster$rad[i]
)
does_overlap_y <- is_bound_between(
cluster$y[i],
lowerbound = minmax_dims["ymin"] + cluster$rad[i],
upperbound = minmax_dims["ymax"] - cluster$rad[i]
)
if (does_overlap_x && does_overlap_y) {
num_circles_covered <- num_circles_covered + 1
}
}
}
num_circles_covered
}
gen_unpositioned_legend_df <- function(
legend_sizes, spacing, circ_scale_factor, rad_decrease, color
) {
radii <- (sqrt(legend_sizes) * circ_scale_factor) - rad_decrease
num_radii <- length(radii)
label_x <- spacing + 1 - rad_decrease +
(sqrt(radii[num_radii]) * circ_scale_factor)
data.frame(
"circle_x" = rep(0, num_radii),
"label_x" = rep(label_x, num_radii),
"y" = get_unpositioned_y_coords(radii, spacing),
"rad" = radii,
"labels" = as.character(legend_sizes),
"color" = rep(color, num_radii)
)
}
get_unpositioned_y_coords <- function(radii, spacing) {
y_coords <- numeric(length(radii))
curr_y <- -spacing
r <- 0
for (i in seq_along(radii)) {
prev_radius <- r
r <- radii[i]
curr_y <- curr_y - prev_radius - r - spacing
y_coords[i] <- curr_y
}
y_coords
}
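# e.g. radii = c(1, 2) with spacing = 0.5 gives y = c(-2, -5.5): each circle is
# stacked downwards, separated from the previous circle's edge by `spacing`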
# get the width and height of the circles and labels, NOT accounting for buffer
get_legend_dims <- function(unpositioned_legend_df) {
c(
"x" = max_rad(unpositioned_legend_df) +
get_label_x(unpositioned_legend_df),
"y" = min_rad(unpositioned_legend_df) +
abs(max_y(unpositioned_legend_df)) -
abs(min_y(unpositioned_legend_df)) +
max_rad(unpositioned_legend_df)
)
}
generate_legend_df <- function(
pos, unpositioned_legend_df, legend_dims, plt_dims, buffer
) {
if (!is.numeric(pos)) {
pos <- estimate_top_left_circ_coord(
unpositioned_legend_df = unpositioned_legend_df,
legend_dims = legend_dims,
destination_str = pos,
plt_dims = plt_dims,
buffer = buffer
)
}
move_unpositioned_legend_df(
unpositioned_legend_df,
to_top_left_destination_coord = pos
)
}
min_y <- function(legend_df) legend_df[1, "y"]
max_y <- function(legend_df) legend_df[nrow(legend_df), "y"]
min_rad <- function(legend_df) legend_df[1, "rad"]
max_rad <- function(legend_df) legend_df[nrow(legend_df), "rad"]
get_circle_x <- function(legend_df) legend_df[1, "circle_x"]
get_label_x <- function(legend_df) legend_df[1, "label_x"]
# get the starting coordinate for the top left center of the *first circle*
estimate_top_left_circ_coord <- function(
unpositioned_legend_df, legend_dims, destination_str, plt_dims, buffer
) {
xr <- get_xr(plt_dims)
yr <- get_yr(plt_dims)
if (identical(destination_str, "top_left")) {
return(c(xr[1] + max_rad(unpositioned_legend_df) + buffer, yr[2]))
}
if (identical(destination_str, "top_right")) {
return(c(xr[2] - unpositioned_legend_df[1, "label_x"] - buffer, yr[2]))
}
if (identical(destination_str, "bottom_left")) {
return(c(
xr[1] + max_rad(unpositioned_legend_df) + buffer,
yr[1] + legend_dims[2]
))
}
# bottom right
return(c(
xr[2] - unpositioned_legend_df[1, "label_x"] - buffer,
yr[1] + legend_dims[2]
))
}
move_unpositioned_legend_df <- function(
unpositioned_df, to_top_left_destination_coord
) {
dx <- to_top_left_destination_coord[1] - unpositioned_df[1, "circle_x"]
dy <- to_top_left_destination_coord[2] - unpositioned_df[1, "y"]
unpositioned_df[, "circle_x"] <- unpositioned_df[, "circle_x"] + dx
unpositioned_df[, "label_x"] <- unpositioned_df[, "label_x"] + dx
unpositioned_df[, "y"] <- unpositioned_df[, "y"] + dy
unpositioned_df
}
# get the coordinate in the middle of the top of the legend
get_legend_title_coord <- function(legend_df, legend_dims, spacing) {
c("x" = sum(get_legend_backing_minmax_dims(legend_df)[1:2]) * 0.5,
"y" = min_y(legend_df) + min_rad(legend_df) + (1.5 * spacing))
}
add_legend_backing <- function(plt, plt_dims, legend_df) {
linewidth <- get_linewidth(plt_dims)
dims <- get_legend_backing_minmax_dims(legend_df)
# add the back border rectangle
plt <- plt + ggplot2::geom_rect(ggplot2::aes(
xmin = dims["xmin"] - linewidth, xmax = dims["xmax"] + linewidth,
ymin = dims["ymin"] - linewidth, ymax = dims["ymax"] + linewidth,
fill = "black"
))
# add the white inside
plt + ggplot2::geom_rect(ggplot2::aes(
xmin = dims["xmin"], xmax = dims["xmax"],
ymin = dims["ymin"], ymax = dims["ymax"],
fill = "white",
linetype = "blank"
)) +
ggplot2::theme(legend.position = "none")
}
get_linewidth <- function(plt) {
xr <- get_xr(plt)
bound_num(
abs(xr[2] - xr[1]) * 0.002,
lowerbound = 0.001,
upperbound = 0.1
)
}
get_legend_backing_minmax_dims <- function(legend_df) {
spacing <- 0.15
max_radius <- max_rad(legend_df)
xmin <- get_circle_x(legend_df) - max_radius - spacing
xmax <- get_label_x(legend_df) + max_radius + spacing
ymin <- max_y(legend_df) - max_radius - spacing
ymax <- min_y(legend_df) + min_rad(legend_df) + spacing
c("xmin" = xmin, "xmax" = xmax, "ymin" = ymin, "ymax" = ymax)
}
# could put the ggplot color legend by sticking some points under something
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/insert_legend.R |
# # a super simple regression of manually determined clone scale based on cell
# # count. a much more complicated model could use other facts about the seurat
# # obj to improve how visually pleasant it is. Some overlap factor could
# # probably be estimated with the raw clone counts
# cell_count <- c(80, 365, 2500)
# desirable_factor <- c(1, 0.3, 0.2)
# plot(cell_count, desirable_factor)
estimate_clone_scale_factor <- function(seurat_obj, clonecall) {
num_clones <- count_clones(seurat_obj, clonecall)
if (num_clones <= 365) {
approx_clone_scale_factor <- (-0.002456 * num_clones) + 1.196491
} else {
approx_clone_scale_factor <- (-4.684e-05 * num_clones) + 3.171e-01
}
bound_num(approx_clone_scale_factor, lowerbound = 0.05, upperbound = 1)
}
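# rough sanity check of the piecewise fit against the manual data points in the
# commented block above (values approximate):
#   num_clones = 80   -> -0.002456 * 80    + 1.196491 ~ 1.00 (upper bound of 1)
#   num_clones = 365  -> -0.002456 * 365   + 1.196491 ~ 0.30
#   num_clones = 2500 -> -4.684e-05 * 2500 + 3.171e-01 ~ 0.20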
# test ggobject for label addition
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/parameter_estimators.R |
get_plottable_df_with_color <- function(apotc_data) {
extract_and_add_colors(
apotc_obj = apotc_data,
plot_df = df_full_join(get_clusterlists(apotc_data))
)
}
# full join a list of lists of (x,y,r) vectors into a dataframe with
# generated labels.
df_full_join <- function(clstr_list) {
df <- data.frame(
'label' = character(0),
'x' = numeric(0),
'y' = numeric(0),
'r' = numeric(0)
)
seurat_cluster_index <- 0 # zero indexed :P
for (i in seq_along(clstr_list)) {
if (!isnt_empty_nor_na(clstr_list[[i]])) {
seurat_cluster_index <- seurat_cluster_index + 1
next
}
df <- dplyr::full_join(
df,
data.frame(
"label" = rep( # TODO this could be customized or add new col
paste("cluster", seurat_cluster_index),
length(clstr_list[[i]][["x"]])
),
"x" = clstr_list[[i]][["x"]],
"y" = clstr_list[[i]][["y"]],
"r" = clstr_list[[i]][["rad"]]
),
by = dplyr::join_by("label", "x", "y", "r")
)
seurat_cluster_index <- seurat_cluster_index + 1
}
df
}
# result plotting function. `clusters` is a dataframe of circle data produced
# by transforming a list of clusterlists (each containing x, y, rad, centroid,
# clRad) with df_full_join above.
plot_clusters <- function(
clusters,
n = 360,
linetype = "blank"#,
#alpha = 1
) {
ggplot2::ggplot(data = clusters) +
ggforce::geom_circle(
mapping = apotc_aes_string(
x0 = "x",
y0 = "y",
r = "r",
fill = "color"#,
#alpha = as.character(alpha)
),
n = n,
linetype = linetype
) +
ggplot2::scale_fill_identity() +
ggplot2::coord_fixed() +
ggplot2::theme(legend.position = "none")
}
# TODO not quite the same
add_default_theme <- function(plt, reduction) {
label_hashmap <- hash::hash(
c("umap", "tsne", "pca"), c("UMAP", "tSNE", "PC")
)
label <- label_hashmap[[reduction]]
plt +
ggplot2::theme_classic() +
ggplot2::xlab(paste(label, 1, sep = "_")) +
ggplot2::ylab(paste(label, 2, sep = "_"))
}
get_retain_scale_dims <- function(
seurat_obj, reduction, ball_pack_plt, plot_dims
) {
reduction_dims <- get_plot_dims(
Seurat::DimPlot(object = seurat_obj, reduction = reduction)
)
reduction_xr <- get_xr(reduction_dims)
reduction_yr <- get_yr(reduction_dims)
ball_pack_xr <- get_xr(plot_dims)
ball_pack_yr <- get_yr(plot_dims)
# set new ranges
min_xr <- min(ball_pack_xr[1], reduction_xr[1])
max_xr <- max(ball_pack_xr[2], reduction_xr[2])
min_yr <- min(ball_pack_yr[1], reduction_yr[1])
max_yr <- max(ball_pack_yr[2], reduction_yr[2])
# return dims in same output format as get_plot_dims
list("xr" = c(min_xr, max_xr), "yr" = c(min_yr, max_yr))
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/plot_API.R |
# A variety of scRepertoire v2 functions copied during beta testing,
# all permissions granted by the owner + both authors
.theCall <- function(df, x, check.df = TRUE) {
x <- .convertClonecall(x)
if(check.df) {
if(inherits(df, "list") & !any(colnames(df[[1]]) %in% x)) {
stop(
"Check the clonal variabe (cloneCall) being used in the function, it does not appear in the data provided.",
call. = FALSE
)
} else if (inherits(df, "data.frame") & !any(colnames(df) %in% x)) {
stop(
"Check the clonal variabe (cloneCall) being used in the function, it does not appear in the data provided.",
call. = FALSE
)
}
}
return(x)
}
.convertClonecall <- function(x) {
clonecall_dictionary <- hash::hash(
"gene" = "CTgene",
"genes" = "CTgene",
"ctgene" = "CTgene",
"nt" = "CTnt",
"nucleotide" = "CTnt",
"nucleotides" = "CTnt",
"ctnt" = "CTnt",
"aa" = "CTaa",
"amino" = "CTaa",
"ctaa" = "CTaa",
"gene+nt" = "CTstrict",
"strict" = "CTstrict",
"ctstrict" = "CTstrict"
)
possible_clonecall <- clonecall_dictionary[[tolower(strip_unquoted_spaces(x))]]
if (!is.null(possible_clonecall)) return(possible_clonecall)
x
}
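# e.g. .convertClonecall("Amino") returns "CTaa", while a string not in the
# dictionary (such as a custom metadata column name) is passed through unchanged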
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/scRepertoire_v2_functions.R |
# script for the functions that add the APOTC command records to seurat objects.
# It is copied from the SeuratObject R/command.R script, under the MIT license
log_seurat_command <- function(
seurat_obj, command_obj, id = NULL
) {
seurat_obj@commands[[get_command_name(command_obj, id)]] <- command_obj
seurat_obj
}
find_seurat_command <- function(seurat_obj, func_name, id = NULL) {
seurat_obj@commands[[get_command_name(func_name, id)]]
}
utils::globalVariables(c(".commandIdSepStr"))
.commandIdSepStr <- "."
get_command_name <- function(command, id = NULL) {
command_name <- ifelse(
test = is.character(command),
yes = command,
no = command@name
)
ifelse(
test = is.null(id),
yes = command_name,
no = paste(command_name, id, sep = .commandIdSepStr)
)
}
# function to create the seurat command
# almost identical to seurat version except only data.frame's names are saved in
# params and assay.used is only "RNA"
#
# NOTE: the clone_scale_factor will always be its actual value even if it was
# originally "auto" during the input.
#
make_apotc_command <- function(call_time, assay = "RNA") {
if (as.character(x = sys.calls()[[1]])[1] == "do.call") {
call_string <- deparse(expr = sys.calls()[[1]])
command_name <- as.character(x = sys.calls()[[1]])[2]
} else {
command_name <- as.character(
x = deparse(expr = sys.calls()[[sys.nframe() - 1]])
)
command_name <- gsub(
pattern = "\\.Seurat",
replacement = "",
x = command_name
)
call_string <- command_name
command_name <- seurat_extractfield(
string = command_name,
field = 1,
delim = "\\("
)
}
# return the command object
methods::new(
Class = 'SeuratCommand',
name = command_name,
params = get_parent_params(),
time.stamp = call_time,
call.string = call_string,
assay.used = assay
)
}
seurat_extractfield <- function(string, field = 1, delim = "_") {
fields <- as.numeric(
x = unlist(x = strsplit(x = as.character(x = field), split = ","))
)
if (length(x = fields) == 1) {
return(strsplit(x = string, split = delim)[[1]][field])
}
paste(
strsplit(x = string, split = delim)[[1]][fields], collapse = delim
)
}
# function to be used within another parent function, extracting the arguments
# to the parent function and returning it as a named list, while also allowing
# filtering out of certain object types to save memory
get_processed_argnames <- function(n = 2) {
process_argnames(
names(formals(fun = sys.function(which = sys.parent(n = n + 1))))
)
}
process_argnames <- function(argnames) {
argnames <- grep(
pattern = "object",
x = argnames,
invert = TRUE,
value = TRUE
)
argnames <- grep(
pattern = "anchorset",
x = argnames,
invert = TRUE,
value = TRUE
)
argnames <- grep(
pattern = "\\.\\.\\.",
x = argnames,
invert = TRUE,
value = TRUE
)
argnames
}
get_parent_params <- function(
n = 2,
excluded_types = c("Seurat"),
only_named_types = c("data.frame", "data.table", "list")
) {
argnames <- get_processed_argnames(n)
params <- list()
p.env <- parent.frame(n = n)
argnames <- intersect(x = argnames, y = ls(name = p.env))
for (arg in argnames) {
param_value <- get(x = arg, envir = p.env)
is_excluded_type <- FALSE
for (obj_type in excluded_types) {
if (inherits(param_value, obj_type)) {
is_excluded_type <- TRUE
break
}
}
if (is_excluded_type) {next}
for (obj_type in only_named_types) {
if (inherits(param_value, obj_type)) {
param_value <- names(param_value)
break
}
}
params[[arg]] <- param_value
}
params
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/seurat_command.R |
# progress bar functions
progress_bar <- function (x = 0, max = 100) {
percent <- 100 * (x / max)
cat(sprintf(
'\r[%-50s] %d%%',
paste(rep('=', percent * 0.5), collapse = ''),
floor(percent)
))
}
start_progress_bar <- function(verbose = TRUE) {
if (verbose) {
progress_bar(0, 1)
}
}
end_progress_bar <- function(verbose = TRUE) {
if (verbose) {
progress_bar(1, 1)
}
}
print_completion_time <- function(start_time, digits = 3, newline = FALSE) {
end_time <- Sys.time()
if (newline) cat("\n")
message(paste(
"\nCompleted successfully, time elapsed:",
round(as.numeric(end_time - start_time), digits),
"seconds\n"
))
}
# readability functions
create_empty_table <- function() {
structure(
integer(0),
dim = 0L,
dimnames = structure(list(NULL), names = ""),
class = "table"
)
}
is_empty <- function(inp) identical(inp, list())
isnt_empty <- function(inp) !identical(inp, list())
isnt_na <- function(inp) !any(is.na(inp))
isnt_empty_nor_na <- function(inp) isnt_empty(inp) && isnt_na(inp)
is_empty_table <- function(inp) identical(inp, table(NULL))
is_int <- function(num) all(num == as.integer(num))
should_estimate <- function(obj, auto_str = "auto") identical(obj, auto_str)
should_assume <- should_estimate
should_change <- function(obj) !is.null(obj)
should_compute <- function(x) is.null(x)
# plotting related utils
#' @title Get the xmin, xmax, ymin, ymax of a ggplot object
#' @return list(xr = c(xmin, xmax), yr = c(ymin, ymax))
#' @noRd
get_plot_dims <- function(plt) {
built_plt_layout <- ggplot2::ggplot_build(plt)$layout
list(
xr = built_plt_layout$panel_scales_x[[1]]$range$range,
yr = built_plt_layout$panel_scales_y[[1]]$range$range
)
}
get_xr <- function(p) {
if (ggplot2::is.ggplot(p)) {
return(ggplot2::ggplot_build(p)$layout$panel_scales_x[[1]]$range$range)
}
p[[1]]
}
get_yr <- function(p) {
if (ggplot2::is.ggplot(p)) {
return(ggplot2::ggplot_build(p)$layout$panel_scales_y[[1]]$range$range)
}
p[[2]]
}
is_seurat_object <- function(obj) inherits(obj, "Seurat")
is_a_character <- function(x) {
if (length(x) != 1) return(FALSE)
is.character(x)
}
is_an_integer <- function(x) {
if (length(x) != 1) return(FALSE)
as.numeric(x) == as.numeric(as.integer(x))
}
is_a_numeric <- function(x) {
if (length(x) != 1) return(FALSE)
is.numeric(x)
}
is_a_logical <- function(x) {
if (length(x) != 1) return(FALSE)
is.logical(x)
}
# math utils
bound_num <- function(num, lowerbound, upperbound) {
min(max(num, lowerbound), upperbound)
}
is_bound_between <- function(num, lowerbound, upperbound) {
num >= lowerbound && num <= upperbound
}
add <- function(x, y) x + y
subtract <- function(x, y) x - y
is_even <- function(x) x %% 2 == 0
is_odd <- function(x) x %% 2 == 1
get_unique_pairs_up_to <- function(x) {
if (x <= 1) return(list())
all_unique_pairs <- init_list(choose(x, x - 2))
index <- 1
for (i in 1:(x - 1)) {
for (j in (i + 1):x) {
all_unique_pairs[[index]] <- c(i, j)
index <- index + 1
}
}
all_unique_pairs
}
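# e.g. get_unique_pairs_up_to(3) gives list(c(1, 2), c(1, 3), c(2, 3))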
# spelling related functions
strip_spaces <- function(s) gsub(" ", "", s)
strip_and_lower <- function(s) strip_spaces(tolower(s))
strip_unquoted_spaces <- function(input_str) {
all_parts <- strsplit(input_str, "'")
for (i in seq_along(input_str)) {
parts <- all_parts[[i]]
for (j in seq_along(parts)) {
if (is_odd(j)) parts[j] <- strip_spaces(parts[j])
}
input_str[i] <- Reduce(function(...) paste(..., sep = "'"), parts)
if (is_even(length(parts))) {
input_str[i] <- paste(input_str[i], "'", sep = "")
}
}
input_str
}
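# e.g. strip_unquoted_spaces("a = 'b c'") gives "a='b c'" - spaces within
# single-quoted substrings are preserved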
user_attempt_correction <- function(
s,
strset,
stop_msg_start,
modifiers = list(tolower, trimws, strip_unquoted_spaces, strip_spaces)
) {
  # word modifiers for increasing similarity - order matters! the default
  # `modifiers` argument above is used unless the caller overrides it
# check if the string is already present in strset and if yes return
match_indicies <- which(s == strset)
if (length(match_indicies) == 1) return(s)
get_only_similar_word_or_null <- function(modifier) {
match_indicies <- which(modifier(s) == modifier(strset))
if (length(match_indicies) != 1) return(NULL)
message(paste(
"* assuming `", s, "` corresponds to `",
strset[match_indicies], "`", sep = ""
))
strset[match_indicies]
}
for (modifier in append(identity, modifiers)) {
potential_unique_similar_word <- get_only_similar_word_or_null(modifier)
if (!is.null(potential_unique_similar_word)) {
return(potential_unique_similar_word)
}
}
for (ij in get_unique_pairs_up_to(length(modifiers))) {
potential_unique_similar_word <- get_only_similar_word_or_null(
modifier = function(x) modifiers[[ij[1]]](modifiers[[ij[2]]](x))
)
if (!is.null(potential_unique_similar_word)) {
return(potential_unique_similar_word)
}
}
stop(
stop_msg_start, " `", s, "`, did you mean: `",
closest_word(s, strset), "`?",
call. = FALSE
)
}
closest_word <- function(s, strset) {
strset <- unique(strset)
if (length(strset) == 1) return(strset)
strset_lowercase <- tolower(strset)
s <- tolower(s)
closest_w <- strset_lowercase[1]
closest_dist <- utils::adist(s, closest_w)
for(i in 2:length(strset_lowercase)) {
curr_dist <- utils::adist(s, strset_lowercase[i])
if (curr_dist < closest_dist) {
closest_w <- strset[i]
closest_dist <- curr_dist
}
}
closest_w
}
# list utilities
init_list <- function(num_elements, init_val = NULL) {
l <- vector("list", num_elements)
for (i in 1:num_elements) {
l[[i]] <- init_val
}
l
}
getlast <- function(x) UseMethod("getlast")
getlast.default <- function(x) x[length(x)]
getlast.list <- function(x) x[[length(x)]]
# operate on non-empty elements of two lists of the same length
# with a 2-argument function
operate_on_same_length_lists <- function(func, l1, l2) {
l <- init_list(length(l1), list())
for (i in seq_along(l1)) {
if (isnt_empty(l1[[i]]) && isnt_empty(l2[[i]])) {
if (!(is.null(l1[i]) || is.null(l2[i]))) {
l[[i]] <- func(l1[[i]], l2[[i]])
}
}
}
l
}
move_coord_list_by_same_amount <- function(
coord_list, original_coord_list, new_coord_list
) {
operate_on_same_length_lists(
func = add,
l1 = coord_list,
l2 = operate_on_same_length_lists(
func = subtract,
l1 = new_coord_list,
l2 = original_coord_list
)
)
}
#' Take a list of character vectors and join each element of the vectors
#' together, separating each character by sep. Currently recursive which
#' will be bad for larger inputs :P
#' @return a character vector
#' @noRd
construct_prefix_vector <- function(params, sep = "_") {
unlist(join_list_of_characters(params, sep))
}
join_list_of_characters <- function(params, sep = "_") {
if (length(params) == 2) {
l2 <- params[[2]]
} else {
l2 <- construct_prefix_vector(params[2:length(params)])
}
operate_on_same_length_lists(
func = function(x, y) paste(x, y, sep = sep),
l1 = params[[1]],
l2 = l2
)
}
# S3 method to represent vectors as strings
repr_as_string <- function(input, ...) {
UseMethod("repr_as_string")
}
repr_as_string.character <- function(input, ...) {
to_string_rep_with_insert(v = input, insert = "'")
}
repr_as_string.default <- function(input, ...) {
to_string_rep_with_insert(v = input, insert = "")
}
# represent vector as string - doesn't take names into account!
to_string_rep_with_insert <- function(v, insert) {
if (length(v) == 1) {
return(paste(insert, v, insert, sep = ""))
}
output <- ""
for (x in v) {
output <- paste(output, insert, x, insert, ",", sep = "")
}
paste("c(", substr(output, 1, nchar(output) - 1), ")", sep = "")
}
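# e.g. repr_as_string(c("a", "b")) gives "c('a','b')", repr_as_string(1:3)
# gives "c(1,2,3)", and repr_as_string("x") gives "'x'"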
subset_dataframe <- function(df, filter_string) {
df %>% dplyr::filter(eval(parse(text = filter_string)))
}
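# e.g. subset_dataframe(df, "x > 1") is equivalent to dplyr::filter(df, x > 1),
# with the filter condition supplied as a string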
# Seurat utils
subsetSeuratMetaData <- function(
seurat_obj, filter_string, error_param = "extra_filter"
) {
[email protected] <- subset_dataframe([email protected], filter_string)
if (nrow([email protected]) == 0) {
stop(call. = FALSE, paste(
"please check `", error_param, "`, ",
"no rows in the seurat metadata match the filter condition",
sep = ""
))
}
seurat_obj
}
# Returns the number of valid barcodes that are not NA's
count_tcr_barcodes <- function(seurat_obj) {
sum(!is.na([email protected][["barcode"]]))
}
count_clones <- function(seurat_obj, clonecall) {
sum(!is.na([email protected][[clonecall]]))
}
# get the percent of NA's in the metadata barcode column for the message
percent_na <- function(seurat_obj) {
num_barcodes <- length([email protected][["barcode"]])
100 * (num_barcodes - count_tcr_barcodes(seurat_obj)) / num_barcodes
}
get_rna_assay_barcodes <- function(seurat_obj) {
seurat_obj@assays[["RNA"]]@data@Dimnames[[2]]
}
# seurat cluster related functions
count_num_clusters <- function(seurat_obj) {
data.table::uniqueN(([email protected][["seurat_clusters"]]))
}
get_num_total_clusters <- function(seurat_obj) {
length(levels([email protected][["seurat_clusters"]]))
}
# seurat reduction related functions
any_reduction_exists <- function(seurat_obj) {
reduction_names <- get_curr_reduc_names(seurat_obj)
!(is.null(reduction_names) || identical(reduction_names, character(0)))
}
get_curr_reduc_names <- function(seurat_obj) {
names(seurat_obj@reductions)
}
get_2d_embedding <- function(seurat_obj, reduction) {
seurat_obj@reductions[[reduction]]@cell.embeddings[, 1:2]
}
attempt_correction <- function(seurat_obj, reduction) {
if (!any_reduction_exists(seurat_obj)) {
stop("No dimensional reductions detected")
}
reduction <- ifelse(
test = identical(strip_and_lower(reduction), "t-sne") &&
!any("t-sne" == strip_and_lower(get_curr_reduc_names(seurat_obj))),
yes = "tsne",
no = reduction
)
user_attempt_correction(
reduction,
strset = get_curr_reduc_names(seurat_obj),
stop_msg_start = "Invalid reduction"
)
}
#' @title
#' Calculate seurat cluster centroids based on a Dimensional reduction
#'
#' @description
#' `r lifecycle::badge("stable")`
#'
#' Utility function to calculate the physical xy coordinates of each seurat
#' cluster based on a dimensional reduction already present in the object.
#' The results are returned in a list with the length of the number of distinct
#' seurat clusters based on the seurat_obj `meta.data`.
#'
#' @param seurat_obj input seurat object with the dimensional reduction of
#' choice already present, and seurat clusters computed.
#' @param reduction character. The reduction that the centroid calculation
#' should be based on.
#'
#' @return A list of the length of the number of distinct clusters in the
#' seurat object metadata, where each element of the list is a numeric vector
#' of length 2, with the numbers corresponding to the x and y coordinate
#' respectively of the seurat cluster with the corresponding index.
#'
#' @export
#'
#' @examples
#' data("combined_pbmc")
#' getReductionCentroids(combined_pbmc, reduction = "umap")
#'
getReductionCentroids <- function(seurat_obj, reduction) {
get_cluster_centroids(
seurat_obj = seurat_obj,
reduction = user_get_reduc_obj(seurat_obj, reduction),
passed_in_reduc_obj = TRUE
)
}
user_get_reduc_obj <- function(seurat_obj, reduction) {
if (!is_seurat_object(seurat_obj))
stop(call. = FALSE, "`seurat_obj` not a seurat object!")
if (!is_a_character(reduction))
stop(call. = FALSE, "`reduction` must be one character")
seurat_obj@reductions[[attempt_correction(seurat_obj, reduction)]]
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/utils.R |
#' @title
#' Directly visualize clonal expansion of a combined seurat object
#'
#' @description
#' `r lifecycle::badge("stable")`
#'
#' This function combines the functionality of both `RunAPOTC` and `APOTCPlot`.
#' Given a Seurat object, it first runs the APackOfTheClones method ([RunAPOTC])
#' to compute clonal expansion information, and then generates a customizable
#' ggplot2 object of the clonal expansion plot with a circle size legend
#' ([APOTCPlot]).
#'
#' @inheritParams RunAPOTC
#' @inheritParams APOTCPlot
#'
#' @inherit APOTCPlot return
#' @export
#'
#' @examples
#' data("combined_pbmc")
#'
#' # plot with default parameters
#' vizAPOTC(combined_pbmc, verbose = FALSE)
#'
#' # use arguments from RunAPOTC and APOTCPlot
#' vizAPOTC(
#' combined_pbmc, try_place = TRUE, show_labels = TRUE, verbose = FALSE
#' )
#'
vizAPOTC <- function(
seurat_obj,
reduction_base = "umap",
clonecall = "strict",
...,
extra_filter = NULL,
clone_scale_factor = "auto",
rad_scale_factor = 0.95,
order_clones = TRUE,
try_place = FALSE,
repulse = TRUE,
repulsion_threshold = 1,
repulsion_strength = 1,
max_repulsion_iter = 20L,
res = 360L,
linetype = "blank",
use_default_theme = TRUE,
retain_axis_scales = FALSE,
#alpha = 1,
show_labels = FALSE,
label_size = 5,
add_size_legend = TRUE,
legend_sizes = "auto",
legend_position = "auto",
legend_buffer = 0.2,
legend_color = "#808080",
legend_spacing = "auto",
legend_label = "Clone sizes",
legend_text_size = 5,
add_legend_background = TRUE,
verbose = TRUE
) {
seurat_obj <- RunAPOTC(
seurat_obj,
reduction_base = reduction_base,
clonecall = clonecall,
...,
extra_filter = extra_filter,
run_id = "vizAPOTC",
clone_scale_factor = clone_scale_factor,
rad_scale_factor = rad_scale_factor,
order_clones = order_clones,
try_place = try_place,
repulse = repulse,
repulsion_threshold = repulsion_threshold,
repulsion_strength = repulsion_strength,
max_repulsion_iter = max_repulsion_iter,
override = TRUE,
verbose = verbose
)
if (verbose) message("Plotting...\n")
APOTCPlot(
seurat_obj,
run_id = "vizAPOTC",
res = res,
linetype = linetype,
use_default_theme = use_default_theme,
retain_axis_scales = retain_axis_scales,
show_labels = show_labels,
label_size = label_size,
add_size_legend = add_size_legend,
legend_sizes = legend_sizes,
legend_position = legend_position,
legend_buffer = legend_buffer,
legend_color = legend_color,
legend_spacing = legend_spacing,
legend_label = legend_label,
legend_text_size = legend_text_size,
add_legend_background = add_legend_background
)
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/vizAPOTC.R |
.onAttach <- function(libname, pkgname) {
packageStartupMessage(paste(
"\nThank you for using APackOfTheClones v",
utils::packageVersion("APackOfTheClones"), "\n\n",
'if you use this package in your paper/study, please cite with\n',
'`citation("APackOfTheClones")`\n',
sep = ""
))
# new deprecation note for the release
packageStartupMessage(paste(
'*** DEPRECATION NOTICE TO RETURNING USERS OF VERSION 0.1.x *** \n',
deprecation_docstring(), "\n",
sep = ""
))
}
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/R/zzz.R |
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
knitr::opts_chunk$set(echo = FALSE)
options(repos = c(CRAN = "http://cran.rstudio.com"))
# utility functions
head <- function(df) {
knitr::kable(utils::head(df))
}
quiet_load_all_CRAN <- function(...) {
for (pkg in list(...)) {
if (require(pkg, quietly = TRUE, character.only = TRUE)) next
invisible(install.packages(
      pkg, quiet = TRUE, verbose = FALSE
))
suppressPackageStartupMessages(invisible(
require(pkg, quietly = TRUE, character.only = TRUE)
))
}
}
# load packages
quiet_load_all_CRAN("ggplot2", "cowplot", "Seurat")
## ----setup--------------------------------------------------------------------
suppressPackageStartupMessages(library(APackOfTheClones))
## ----setup_seurat, eval = FALSE-----------------------------------------------
# library(scRepertoire)
#
# pbmc <- scRepertoire::combineExpression(
# scRepertoire::combineTCR(
# get(data("contig_list", package = "scRepertoire")),
# samples = c("P17B", "P17L", "P18B", "P18L", "P19B", "P19L", "P20B", "P20L"),
# removeNA = FALSE,
# removeMulti = FALSE,
# filterMulti = FALSE
# ),
# pbmc,
# cloneCall = "gene",
# proportion = TRUE
# )
#
# print(pbmc)
## ----actual_print_pbmc, eval = TRUE, echo = TRUE, include = FALSE-------------
# TODO use a nicer looking dataset
pbmc <- get(data("combined_pbmc"))
print(pbmc)
## ----eval = FALSE-------------------------------------------------------------
# # Here is the function ran with its default parameters
# pbmc <- RunAPOTC(pbmc)
#
# #> Initializing APOTC run...
# #> * Setting `clone_scale_factor` to 0.3
# #> * id for this run: umap;CTstrict;_;_
# #>
# #> Packing clones into clusters
# #> [==================================================] 100%
# #>
# #> repulsing all clusters | max iterations = 20
# #> [==================================================] 100%
# #>
# #> Completed successfully, time elapsed: 0.155 seconds
# #>
## ----runapotc_default, include = FALSE----------------------------------------
pbmc <- RunAPOTC(pbmc, verbose = FALSE)
## ----runapotc2----------------------------------------------------------------
pbmc <- RunAPOTC(
pbmc, run_id = "sample17", orig.ident = c("P17B", "P17L"), verbose = FALSE
)
## ----apotcplot_subset_params, eval = FALSE------------------------------------
# reduction_base = NULL,
# clonecall = NULL,
# ...,
# extra_filter = NULL,
## ----apotcplot----------------------------------------------------------------
# Here, plots for samples 17, 18, and 19 as seen in the previous vignette are made, where
# `orig.ident` is a custom column in the example data with levels corresponding to sample ids:
# ("P17B" "P17L" "P18B" "P18L" "P19B" "P19L" "P20B" "P20L").
pbmc <- RunAPOTC(
pbmc, run_id = "P17", orig.ident = c("P17B", "P17L"), verbose = FALSE
)
pbmc <- RunAPOTC(
pbmc, run_id = "P18", orig.ident = c("P18B", "P18L"), verbose = FALSE
)
pbmc <- RunAPOTC(
pbmc, run_id = "P19", orig.ident = c("P19B", "P19L"), verbose = FALSE
)
cowplot::plot_grid(
vizAPOTC(pbmc, verbose = FALSE),
APOTCPlot(pbmc, run_id = "P17"),
APOTCPlot(pbmc, run_id = "P18"),
APOTCPlot(pbmc), # run_id omitted as sample 19 was the latest run
labels = c("all", "17", "18", "19")
)
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/inst/doc/APackOfTheClones-runs.R |
---
title: "Storing and Fine-Tuning APackOfTheClones Runs"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Storing and Fine-Tuning APackOfTheClones Runs}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
date: 'Compiled: `r format(Sys.Date(), "%B %d, %Y")`'
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
knitr::opts_chunk$set(echo = FALSE)
options(repos = c(CRAN = "http://cran.rstudio.com"))
# utility functions
head <- function(df) {
knitr::kable(utils::head(df))
}
quiet_load_all_CRAN <- function(...) {
for (pkg in list(...)) {
if (require(pkg, quietly = TRUE, character.only = TRUE)) next
invisible(install.packages(
      pkg, quiet = TRUE, verbose = FALSE
))
suppressPackageStartupMessages(invisible(
require(pkg, quietly = TRUE, character.only = TRUE)
))
}
}
# load packages
quiet_load_all_CRAN("ggplot2", "cowplot", "Seurat")
```
```{r setup}
suppressPackageStartupMessages(library(APackOfTheClones))
```
## Introduction
As demonstrated in `vignette("APackOfTheClones")`, after processing the seurat & clonotype data properly with `scRepertoire`, `vizAPOTC` provides a direct way to produce the ball-packing clonal expansion visualization, though it can become clunky if certain parameters need to be readjusted constantly. In this vignette, more details about how APackOfTheClones runs can be stored and re-adjusted will be covered - mainly through `RunAPOTC`, `APOTCPlot`, and `AdjustAPOTC`. Be sure to read the aforementioned vignette before this one.
<details>
<summary>
**As a reminder, here's how to set up the seurat object and clonotype data**
</summary>
```{r, setup_seurat, eval = FALSE}
library(scRepertoire)
pbmc <- scRepertoire::combineExpression(
scRepertoire::combineTCR(
get(data("contig_list", package = "scRepertoire")),
samples = c("P17B", "P17L", "P18B", "P18L", "P19B", "P19L", "P20B", "P20L"),
removeNA = FALSE,
removeMulti = FALSE,
filterMulti = FALSE
),
pbmc,
cloneCall = "gene",
proportion = TRUE
)
print(pbmc)
```
```{r, actual_print_pbmc, eval = TRUE, echo = TRUE, include = FALSE}
# TODO use a nicer looking dataset
pbmc <- get(data("combined_pbmc"))
print(pbmc)
```
</details>
### Overview
All of `vizAPOTC`'s arguments are actually derived from `RunAPOTC` and `APOTCPlot`. The former is responsible for storing data of the S4 class `ApotcData` in the seurat object under a named list in `@misc$APackOfTheClones` under some character run ID, and the latter allows the visualization of these data objects with some customization. `AdjustAPOTC` has many arguments for adjusting the data associated with some APackOfTheClones run stored by `RunAPOTC`, including adjusting cluster positions, colors, repulsion, etc. which can be visualized again with `APOTCPlot`.
## Managing APackOfTheClones run data
`RunAPOTC` has approximately the first half of `vizAPOTC`'s arguments, up to `max_repulsion_iter`, meaning it has all the data subsetting, circle size scaling, and cluster repulsion capabilities covered in the previous vignette. The most essential difference is the presence of the argument `run_id`, which corresponds to an id for the `ApotcData` object. If left blank, one will be automatically generated in the following format:
> `reduction_base;clonecall;keyword_arguments;extra_filter`
where the keyword-argument and extra-filter segments are each a single underscore (`_`) character if there was no input for the `...` and `extra_filter` parameters, respectively.
```{r, eval = FALSE}
# Here is the function ran with its default parameters
pbmc <- RunAPOTC(pbmc)
#> Initializing APOTC run...
#> * Setting `clone_scale_factor` to 0.3
#> * id for this run: umap;CTstrict;_;_
#>
#> Packing clones into clusters
#> [==================================================] 100%
#>
#> repulsing all clusters | max iterations = 20
#> [==================================================] 100%
#>
#> Completed successfully, time elapsed: 0.155 seconds
#>
```
```{r, runapotc_default, include = FALSE}
pbmc <- RunAPOTC(pbmc, verbose = FALSE)
```
From the verbose output, one can see how the `run_id` was set. Here it is run again, but with more optional arguments and a custom `run_id`:
```{r, runapotc2}
pbmc <- RunAPOTC(
pbmc, run_id = "sample17", orig.ident = c("P17B", "P17L"), verbose = FALSE
)
```
### Utilities for Managing APackOfTheClones Runs
Note that the abstraction of run data behind a `run_id` is intentional, and users should not manually touch any of the `ApotcData` objects within the seurat object unless they are extremely familiar with the latest internal implementation. Instead, here is a collection of functions that may be useful (see the sketch after this list):
- `getApotcDataIds(pbmc)` gets all current `run_id`'s, if any.
- `getLastApotcDataId(pbmc)` gets the latest `run_id`, if any.
- `containsApotcRun(pbmc, run_id = "foo")` returns whether a `run_id` exists in the seurat object.
- `deleteApotcData(pbmc, run_id = "foo")` deletes *all data* associated with a certain `run_id`.
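For instance, here is a minimal sketch of chaining these utilities together, assuming the `"sample17"` run created above still exists in `pbmc` and that `deleteApotcData` returns the modified seurat object:
```{r, run_id_utils, eval = FALSE}
# list all stored run ids - should include "sample17" and the default id
getApotcDataIds(pbmc)
# check for, then delete, the "sample17" run and all data associated with it
if (containsApotcRun(pbmc, run_id = "sample17")) {
    pbmc <- deleteApotcData(pbmc, run_id = "sample17")
}
```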
## APOTCPlot
To visualize stored APackOfTheClones runs, `APOTCPlot` takes in a seurat object and the `run_id`. If no `run_id` is provided, it defaults to using the latest run. All other parameters are the same as in the second half of `vizAPOTC`. Notably, for users who rely on auto-generated `run_id`'s, `APOTCPlot` also has these subsetting arguments:
```{r, apotcplot_subset_params, eval = FALSE}
reduction_base = NULL,
clonecall = NULL,
...,
extra_filter = NULL,
```
And putting in identical arguments to generate the original `ApotcData` would work too, but this approach is less recommended as it's a lot more (unnecessarily) verbose. Here is `APOTCPlot` in action:
```{r, apotcplot}
# Here, plots for samples 17, 18, and 19 as seen in the previous vignette are made, where
# `orig.ident` is a custom column in the example data with levels corresponding to sample ids:
# ("P17B" "P17L" "P18B" "P18L" "P19B" "P19L" "P20B" "P20L").
pbmc <- RunAPOTC(
pbmc, run_id = "P17", orig.ident = c("P17B", "P17L"), verbose = FALSE
)
pbmc <- RunAPOTC(
pbmc, run_id = "P18", orig.ident = c("P18B", "P18L"), verbose = FALSE
)
pbmc <- RunAPOTC(
pbmc, run_id = "P19", orig.ident = c("P19B", "P19L"), verbose = FALSE
)
cowplot::plot_grid(
vizAPOTC(pbmc, verbose = FALSE),
APOTCPlot(pbmc, run_id = "P17"),
APOTCPlot(pbmc, run_id = "P18"),
APOTCPlot(pbmc), # run_id omitted as sample 19 was the latest run
labels = c("all", "17", "18", "19")
)
```
## AdjustAPOTC
This function's parameters help modify certain attributes of APackOfTheClones runs, and it has the exact same first six parameters as `APOTCPlot` for managing which run to modify. It also possesses the same four repulsion arguments as `vizAPOTC` and `RunAPOTC` if a run is to be repulsed again. See the function-level documentation for the parameters that can modify cluster locations, colors, and the adjustment of the `clone_scale_factor` and `rad_scale_factor`.
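As a rough sketch of how this might look in practice - the argument values here are illustrative, and `verbose` is assumed to behave as in `RunAPOTC`:
```{r, adjustapotc_sketch, eval = FALSE}
# re-repulse the stored "P17" run with stronger repulsion, then re-plot it
pbmc <- AdjustAPOTC(
    pbmc,
    run_id = "P17",
    repulse = TRUE,
    repulsion_strength = 2,
    verbose = FALSE
)
APOTCPlot(pbmc, run_id = "P17")
```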
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/inst/doc/APackOfTheClones-runs.Rmd |
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
knitr::opts_chunk$set(echo = FALSE)
options(repos = c(CRAN = "http://cran.rstudio.com"))
# utility functions
head <- function(df) {
knitr::kable(utils::head(df))
}
quiet_load_all_CRAN <- function(...) {
for (pkg in list(...)) {
if (require(pkg, quietly = TRUE, character.only = TRUE)) next
invisible(install.packages(
      pkg, quiet = TRUE, verbose = FALSE
))
suppressPackageStartupMessages(invisible(
require(pkg, quietly = TRUE, character.only = TRUE)
))
}
}
# load packages
quiet_load_all_CRAN("ggplot2", "cowplot", "Seurat")
## ----setup--------------------------------------------------------------------
suppressPackageStartupMessages(library(APackOfTheClones))
## ----combineTCR, eval = FALSE-------------------------------------------------
# library(scRepertoire)
#
# # load in the corresponding 6-sample TCR contigs from scRepertoire
# contig_list <- get(data("contig_list", package = "scRepertoire"))
#
# # combine the TCR contigs into clones with custom samples
# combined_contig_list <- scRepertoire::combineTCR(
# contig_list,
# samples = c("P17B", "P17L", "P18B", "P18L", "P19B", "P19L", "P20B", "P20L"),
# removeNA = FALSE,
# removeMulti = FALSE,
# filterMulti = FALSE
# )
## ----combining, eval = FALSE--------------------------------------------------
# # a seurat object corresponding to combined_contig_list named `pbmc` is loaded
# pbmc <- scRepertoire::combineExpression(
# combined_contig_list,
# pbmc,
# cloneCall = "gene",
# proportion = TRUE
# )
#
# print(pbmc)
## ----actual_print_pbmc, eval = TRUE, echo = TRUE, include = FALSE-------------
# TODO use a nicer looking dataset
pbmc <- get(data("combined_pbmc"))
print(pbmc)
## ----umap, echo = TRUE--------------------------------------------------------
pbmc_umap_plot <- UMAPPlot(pbmc)
pbmc_umap_plot
## ----initial_vizapotc, echo = TRUE--------------------------------------------
default_apotc_plot <- vizAPOTC(pbmc, verbose = FALSE)
default_apotc_plot
## ----echo = TRUE, eval = FALSE------------------------------------------------
# reduction_base = "umap",
# clonecall = "strict",
## ----echo = TRUE, eval = FALSE------------------------------------------------
# ...,
# extra_filter = NULL,
## ----subsetting, echo = TRUE--------------------------------------------------
# `orig.ident` is a custom column in the example data with levels corresponding to sample ids:
# ("P17B" "P17L" "P18B" "P18L" "P19B" "P19L" "P20B" "P20L"). Here, it is subsetted
# by the keyword argument approach
subset_sample_17_plot <- vizAPOTC(
pbmc, orig.ident = c("P17B", "P17L"), verbose = FALSE
)
# here, it is subsetted with `extra_filter` for sample 18 with dplyr syntax:
subset_sample_18_plot <- vizAPOTC(
pbmc, extra_filter = "substr(orig.ident, 1, 3) == 'P18'", verbose = FALSE
)
# here, sample 19 is subsetted with both arguments to show that they work in conjunction
subset_sample_19_plot <- vizAPOTC(
pbmc,
orig.ident = "P19B",
extra_filter = "orig.ident == 'P19L' | orig.ident == 'P19B'",
verbose = FALSE
)
cowplot::plot_grid(
default_apotc_plot,
subset_sample_17_plot,
subset_sample_18_plot,
subset_sample_19_plot,
labels = c("all", "17", "18", "19")
)
## ----echo = TRUE, eval = FALSE------------------------------------------------
# repulse = TRUE,
# repulsion_threshold = 1,
# repulsion_strength = 1,
# max_repulsion_iter = 10
## ----legend_params, echo = TRUE, eval = FALSE---------------------------------
# add_size_legend = TRUE,
# legend_sizes = "auto",
# legend_position = "auto",
# legend_buffer = 0.2,
# legend_color = "#808080",
# legend_spacing = "auto",
# legend_label = "Clone sizes",
# legend_text_size = 5,
# add_legend_background = TRUE,
## ----other_params, echo = TRUE, eval = FALSE----------------------------------
# order_clones = TRUE,
# try_place = FALSE,
# res = 360L,
# linetype = "blank",
# use_default_theme = TRUE,
# retain_axis_scales = FALSE,
# show_labels = FALSE,
# label_size = 5,
## ----void_labelled_plot, echo = TRUE------------------------------------------
vizAPOTC(pbmc, show_labels = TRUE, use_default_theme = FALSE, verbose = FALSE)
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/inst/doc/APackOfTheClones.R |
---
title: "APackOfTheClones Essentials"
description: >
A brief walkthrough of the clonal expansion visualization workflow.
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{APackOfTheClones Essentials}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
date: 'Compiled: `r format(Sys.Date(), "%B %d, %Y")`'
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
knitr::opts_chunk$set(echo = FALSE)
options(repos = c(CRAN = "http://cran.rstudio.com"))
# utility functions
head <- function(df) {
knitr::kable(utils::head(df))
}
quiet_load_all_CRAN <- function(...) {
for (pkg in list(...)) {
if (require(pkg, quietly = TRUE, character.only = TRUE)) next
invisible(install.packages(
pkg, quiet = TRUE, verbose = FALSE, character.only = TRUE
))
suppressPackageStartupMessages(invisible(
require(pkg, quietly = TRUE, character.only = TRUE)
))
}
}
# load packages
quiet_load_all_CRAN("ggplot2", "cowplot", "Seurat")
```
```{r setup}
suppressPackageStartupMessages(library(APackOfTheClones))
```
## Introduction
Single-cell RNA sequencing (scRNA-seq) and T/B cell receptor (TCR/BCR) sequencing are popular techniques for studying immune cell function and disease. The combined use of such data can provide valuable insights into the immune system, including clonal expansion. `APackOfTheClones` provides a simple, easily customizable, and publication-ready method to intuitively visualize clonal expansion between different cell clusters with `ggplot2`, and can be easily slotted into any analysis pipeline.
The method counts clonotypes, and using a dimensional reduction (e.g. UMAP) of all cells in a single cell immune profiling experiment as a base, it circle-packs all clone sizes directly as circles within circular clusters, one for each seurat cluster. The advantage of this method compared to other (very limited) visualizations of clonal expansion out there is that it's arguably much more intuitive. To see it in action, see [this paper](https://doi.org/10.1126/sciimmunol.abg6356) and [this paper](https://doi.org/10.1111/all.15399).
Basic familiarity with the `R` language, the `Seurat` package, and the `scRepertoire` package *VERSION TWO* is assumed. As version two of `scRepertoire` is relatively new and introduces some small breaking changes, it is *essential* that its [new vignettes](https://www.borch.dev/uploads/screpertoire/) are read.
In this vignette, the most essential functionalities of the package are covered, from preparing the data with scRepertoire, to producing and fine-tuning a barebones visualization of clonal expansion.
### Setting up the Seurat Object and Receptor Library with scRepertoire
The premise of the package is that it provides an additional analysis tool on top of `scRepertoire`'s many functions, which are all run after combining all TCR/BCR contigs into clones, and integrating the clonal information into a seurat object with one or more dimensional reductions. Here are the corresponding vignettes to read, in order, for combining contigs:
1. [Loading Data](https://www.borch.dev/uploads/screpertoire/articles/loading)
2. [Combining Contigs into Clones](https://www.borch.dev/uploads/screpertoire/articles/combining_contigs)
3. [Additional Processing Steps (optional)](https://www.borch.dev/uploads/screpertoire/articles/processing)
And here is a practical example on how to generate a combined seurat + VDJ object to be used in the vignette named `pbmc`, which is also identical to the built-in example dataset `combined_pbmc` which can be loaded with `data("combined_pbmc", package = "APackOfTheClones")`:
```{r, combineTCR, eval = FALSE}
library(scRepertoire)
# load in the corresponding 8-sample TCR contigs from scRepertoire
contig_list <- get(data("contig_list", package = "scRepertoire"))
# combine the TCR contigs into clones with custom samples
combined_contig_list <- scRepertoire::combineTCR(
contig_list,
samples = c("P17B", "P17L", "P18B", "P18L", "P19B", "P19L", "P20B", "P20L"),
removeNA = FALSE,
removeMulti = FALSE,
filterMulti = FALSE
)
```
For integrating the clonal information, ```scRepertoire::combineExpression``` is the function used [to do so](https://www.borch.dev/uploads/screpertoire/articles/attaching_sc), but for `APackOfTheClones` the scRNA-seq object has to be a seurat object:
```{r, combining, eval = FALSE}
# a seurat object corresponding to combined_contig_list named `pbmc` is loaded
pbmc <- scRepertoire::combineExpression(
combined_contig_list,
pbmc,
cloneCall = "gene",
proportion = TRUE
)
print(pbmc)
```
```{r, actual_print_pbmc, eval = TRUE, echo = TRUE, include = FALSE}
# TODO use a nicer looking dataset
pbmc <- get(data("combined_pbmc"))
print(pbmc)
```
And as a reference, here is the corresponding UMAP plot of the seurat object:
```{r, umap, echo = TRUE}
pbmc_umap_plot <- UMAPPlot(pbmc)
pbmc_umap_plot
```
## Simple ball packing visualization
```vizAPOTC``` (short for "visualize APackOfTheClones") is the main convenience function of the package to directly produce the ball packing clonal expansion plot. It takes in a main argument of a combined seurat object (with a long list of optional arguments which will be covered in later sections), and outputs a ggplot object:
```{r, initial_vizapotc, echo = TRUE}
default_apotc_plot <- vizAPOTC(pbmc, verbose = FALSE)
default_apotc_plot
```
### Key characteristics of the plot
* One should notice the immediate correspondence of circular clusters to UMAP clusters, and be able to derive some immediate insights about which clusters have expanded.
* Each circular cluster corresponds in position and color to its seurat cluster's centroid on the original UMAP
* The most expanded clonotypes are at the center of each circular cluster, with larger circle sizes symbolizing increased expansion
* On the plot, there is a somewhat imperfect visual legend of the relative clone sizes
* Some clusters have considerable visual overlap
* The returned plot is a fully customizable `ggplot` object
The resulting clonal expansion plot may not be visually satisfactory on the first run without customizations from optional arguments. These arguments and other `ggplot` tricks will be covered to fine-tune the visualization until publication-ready.
## Working with different reduction bases and clonal representations
The arguments to do so are:
```{r, echo = TRUE, eval = FALSE}
reduction_base = "umap",
clonecall = "strict",
```
`reduction_base` indicates what each seurat cluster's centroid locations should be based upon, e.g. UMAP, t-SNE, or PCA (provided the reduction has been run on the object). `clonecall` corresponds to which column in the seurat metadata should be used to conduct the clonotype counting. These are usually columns generated by combineTCR/BCR but can be custom columns too. Note that ```scRepertoire``` should generate the following columns by default, and the brackets indicate equivalent names one can pass into the clonecall parameter:
- CTgene (gene, genes)
- CTnt (nt, nucleotide, nucleotides)
- CTaa (aa, amino)
- CTstrict (strict, gene+nt)
Switching this parameter could be useful if the user had generated a custom definition of clones and added it to the metadata, or if BCR clonal data is present, since [the definition of a clone for B-cells isn't as clear as it is for TCRs](https://doi.org/10.3389/fimmu.2023.1123968).
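For instance, here is a brief sketch of switching the counting column to the amino acid clone definition (any of the equivalent names above should work the same way):
```{r, clonecall_example, echo = TRUE, eval = FALSE}
# count clones by CDR3 amino acid sequence instead of the default strict definition
amino_acid_plot <- vizAPOTC(pbmc, clonecall = "aa", verbose = FALSE)
```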
## Working with a data subset
A novel feature of version 1 is the ease of running APackOfTheClones for subsets of the full dataset, which may be useful in cases like the need for clonal expansion plotting for only certain samples or conditions. There are two arguments to do so:
```{r, echo = TRUE, eval = FALSE}
...,
extra_filter = NULL,
```
`...` represents an arbitrary number of additional keyword arguments indicating the rows corresponding to elements in the seurat object metadata that should be filtered by. For example, `seurat_clusters = c(1, 9, 10)` will filter the cells to only those in seurat clusters 1, 9, and 10. `extra_filter` is an additional way to subset the data and should be formatted *exactly* like a statement one would pass into `dplyr::filter` that does *additional* filtering to cells in the seurat object.
Here is an example of the data subsetting, where only cells in the example seurat object that correspond to samples 17, 18, and 19 are plotted, alongside the original plot to show the difference.
```{r, subsetting, echo = TRUE}
# `orig.ident` is a custom column in the example data with levels corresponding to sample ids:
# ("P17B" "P17L" "P18B" "P18L" "P19B" "P19L" "P20B" "P20L"). Here, it is subsetted
# by the keyword argument approach
subset_sample_17_plot <- vizAPOTC(
pbmc, orig.ident = c("P17B", "P17L"), verbose = FALSE
)
# here, it is subsetted with `extra_filter` for sample 18 with dplyr syntax:
subset_sample_18_plot <- vizAPOTC(
pbmc, extra_filter = "substr(orig.ident, 1, 3) == 'P18'", verbose = FALSE
)
# here, sample 19 is subsetted with both arguments to show that they work in conjunction
subset_sample_19_plot <- vizAPOTC(
pbmc,
orig.ident = "P19B",
extra_filter = "orig.ident == 'P19L' | orig.ident == 'P19B'",
verbose = FALSE
)
cowplot::plot_grid(
default_apotc_plot,
subset_sample_17_plot,
subset_sample_18_plot,
subset_sample_19_plot,
labels = c("all", "17", "18", "19")
)
```
## Customization of visual parameters
### Visually scaling circle sizes
For each seurat cluster at index $i$, the final radii $r_{ij}$ of each physical circle at index $j$ representing clone size $s_{ij}$ is calculated with a clone size scaling factor $C \in [0, 1)$ and a radius scaling factor $R \in [0, 1)$ with the following formula:
$$r_{ij} = C\cdot(\sqrt{s_{ij}} - (1 - R))$$
Intuitively, $C$ represents how much to enlarge/shrink each radius geometrically, whereas $R$ represents the scaled size of the smallest clone's radius, and all circles will have their radii decreased by that amount. $R$ defaults to 0.95, whereas $C$ defaults to an approximated factor based on the number of clones that the package computes. The corresponding arguments are `clone_scale_factor` and `rad_scale_factor` in `vizAPOTC`.
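As an illustrative sketch (the factor values here are arbitrary assumptions, not recommendations), both factors can be set manually:
```{r, scaling_example, echo = TRUE, eval = FALSE}
# shrink all circles and add a little intra-circle spacing
vizAPOTC(pbmc, clone_scale_factor = 0.3, rad_scale_factor = 0.9, verbose = FALSE)
```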
### Cluster repulsion
Considerable visual overlap between clusters (due to the algorithm's attempt to fit clusters to the original UMAP coordinates) may occur, and clusters may obstruct each other excessively, even though the method by default tries to alleviate this with repulsion of clusters. There are four optional arguments:
```{r, echo = TRUE, eval = FALSE}
repulse = TRUE,
repulsion_threshold = 1,
repulsion_strength = 1,
max_repulsion_iter = 10
```
* `repulsion_threshold` indicates the amount of `ggplot2` units of overlap between clusters that are acceptable. It defaults to `1`, meaning that two clusters that overlap by about 1 unit are considered by the repulsion algorithm to not be overlapping. Increasing this number will increase the amount of overlap between clusters, and decreasing this number will do the opposite, while decreasing it to negative values will incur additional spacing between clusters. However, using negative spacing may cause the plot to look very spaced out. Keep reading to see alternatives to doing so.
* `repulsion_strength` relates to how much the clusters should repel each other. The repulsion algorithm works in iterations, where during each iteration, each cluster "pushes" the others away by some amount. Increasing this value will cause extra "pushing" during each iteration. However, increasing this factor too much may result once again in a very visually unpleasant plot.
* `max_repulsion_iter` indicates the number of iterations where clusters should repel each other. Increasing this number helps ensure that clusters will almost always end up non-overlapping. A trick with this parameter to make more pleasant plots is to decrease `repulsion_strength` and increase `max_repulsion_iter` to possibly make a more pleasant arrangement of clusters.
For more details on them, read the "Arguments" section in the function documentation. Briefly, first, to make the circle clusters move away from each other, `repulse` should be set to `TRUE`, and the function should be run again. (If you feel this excessive re-running takes too long or is inefficient for your workflow in its current form due to a constant need to fine-tune, see the vignette `APackOfTheClones-runs`.)
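As a sketch of the trick mentioned above (the values are illustrative assumptions):
```{r, repulsion_example, echo = TRUE, eval = FALSE}
# gentler pushes per iteration, but more iterations
vizAPOTC(
  pbmc,
  repulse = TRUE,
  repulsion_strength = 0.5,
  max_repulsion_iter = 50,
  verbose = FALSE
)
```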
### Managing the clone size legend
There are the following parameters to adjust the legend:
```{r, legend_params, echo = TRUE, eval = FALSE}
add_size_legend = TRUE,
legend_sizes = "auto",
legend_position = "auto",
legend_buffer = 0.2,
legend_color = "#808080",
legend_spacing = "auto",
legend_label = "Clone sizes",
legend_text_size = 5,
add_legend_background = TRUE,
```
`legend_sizes` are autogenerated, with at least sizes for clone size 1 and max(clone sizes). The user can also input whichever sizes they wish as a numeric vector. The `legend_position` on the plot defaults to one of the four corners, whichever will result in the least overlap of the legend with circles present. Otherwise, the user can manually input `"top left"`, `"top right"`, `"bottom left"`, `"bottom right"`, or just a numeric indicating the x and y coordinates of the top leftmost corner of the legend. `add_legend_background` will display a rectangular background to the legend. The other parameters are less relevant, and more details about these arguments can be read in the function documentation.
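For example, a brief sketch pinning the legend to a corner with custom sizes (the particular values are illustrative):
```{r, legend_example, echo = TRUE, eval = FALSE}
vizAPOTC(
  pbmc,
  legend_sizes = c(1, 5, 25),
  legend_position = "top right",
  verbose = FALSE
)
```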
### Other modifications
A collection of other utility parameters are:
```{r, other_params, echo = TRUE, eval = FALSE}
order_clones = TRUE,
try_place = FALSE,
res = 360L,
linetype = "blank",
use_default_theme = TRUE,
retain_axis_scales = FALSE,
show_labels = FALSE,
label_size = 5,
```
The most important are 1. `show_labels`, which overlays the seurat cluster labels on the plot in the format `"Cx"` at the center of each relevant cluster (see `vignette("APackOfTheClones-runs")` for how one can modify them), and 2. `use_default_theme`, which produces a plot thematically very similar to the dimensional reduction plot, with axis labels and numeric ticks on each axis. This may be helpful for visually indicating certain plot parameters. However, for publications [such as this one](https://doi.org/10.1126/sciimmunol.abg6356), as the numeric ticks in APackOfTheClones have no actual biologically relevant meaning aside from approximately matching clusters, it may be beneficial to set this argument to `FALSE`, which will produce a plot with just the circles:
```{r, void_labelled_plot, echo = TRUE}
vizAPOTC(pbmc, show_labels = TRUE, use_default_theme = FALSE, verbose = FALSE)
```
## Conclusion
That's about it for the most basic functionalities of the clonal expansion visualization function. It's ***strongly recommended*** to save the plot first as an `.svg` file with ```ggplot2::ggsave``` for maximal resolution (especially of the circles) for publication.
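As a minimal sketch of saving the default plot (the file name and dimensions here are illustrative placeholders):
```{r, saving_example, echo = TRUE, eval = FALSE}
# saving as .svg typically relies on the svglite package being installed
ggplot2::ggsave(
  "apotc_plot.svg", plot = default_apotc_plot, width = 7, height = 7
)
```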
For users that need to fine-tune plot parameters and/or save the data within the seurat object for readjustment/replotting, please read the `vignette("APackOfTheClones-runs")`.
For inspiration on how it can look in a real paper, see the following papers where the original julia implementation of APackOfTheClones was successfully used:
* [Single-cell analysis pinpoints distinct populations of cytotoxic CD4+ T cells and an IL-10+CD109+ $T_{H}2$ cell population in nasal polyps](https://doi.org/10.1126/sciimmunol.abg6356)
* [Recombinant multimeric dog allergen prevents airway hyperresponsiveness in a model of asthma marked by vigorous $T_{H}2$ and $T_{H}17$ cell responses](https://doi.org/10.1111/all.15399)
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/inst/doc/APackOfTheClones.Rmd |
---
title: "Storing and Fine-Tuning APackOfTheClones Runs"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Storing and Fine-Tuning APackOfTheClones Runs}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
date: 'Compiled: `r format(Sys.Date(), "%B %d, %Y")`'
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
knitr::opts_chunk$set(echo = FALSE)
options(repos = c(CRAN = "http://cran.rstudio.com"))
# utility functions
head <- function(df) {
knitr::kable(utils::head(df))
}
quiet_load_all_CRAN <- function(...) {
for (pkg in list(...)) {
if (require(pkg, quietly = TRUE, character.only = TRUE)) next
invisible(install.packages(
pkg, quiet = TRUE, verbose = FALSE, character.only = TRUE
))
suppressPackageStartupMessages(invisible(
require(pkg, quietly = TRUE, character.only = TRUE)
))
}
}
# load packages
quiet_load_all_CRAN("ggplot2", "cowplot", "Seurat")
```
```{r setup}
suppressPackageStartupMessages(library(APackOfTheClones))
```
## Introduction
As demonstrated in `vignette("APackOfTheClones")`, after processing the seurat & clonotype data properly with `scRepertoire`, `vizAPOTC` provides a direct way to produce the ball-packing clonal expansion visualization, though for select users it may be somewhat clunky if certain parameters need to be readjusted constantly. In this vignette, more details about how APackOfTheClones runs can be stored and re-adjusted will be covered - mainly through `RunAPOTC`, `APOTCPlot`, and `AdjustAPOTC`. Be sure to read the aforementioned vignette before this one.
<details>
<summary>
**As a reminder, here's how to set up the seurat object and clonotype data**
</summary>
```{r, setup_seurat, eval = FALSE}
library(scRepertoire)
pbmc <- scRepertoire::combineExpression(
scRepertoire::combineTCR(
get(data("contig_list", package = "scRepertoire")),
samples = c("P17B", "P17L", "P18B", "P18L", "P19B", "P19L", "P20B", "P20L"),
removeNA = FALSE,
removeMulti = FALSE,
filterMulti = FALSE
),
pbmc,
cloneCall = "gene",
proportion = TRUE
)
print(pbmc)
```
```{r, actual_print_pbmc, eval = TRUE, echo = TRUE, include = FALSE}
# TODO use a nicer looking dataset
pbmc <- get(data("combined_pbmc"))
print(pbmc)
```
</details>
### Overview
All of `vizAPOTC`'s arguments are actually derived from `RunAPOTC` and `APOTCPlot`. The former is responsible for storing data of the S4 class `ApotcData` in the seurat object, in a named list at `@misc$APackOfTheClones` keyed by some character run ID, and the latter allows the visualization of these data objects with some customization. `AdjustAPOTC` has many arguments for adjusting the data associated with some APackOfTheClones run stored by `RunAPOTC`, including cluster positions, colors, repulsion, etc., which can be visualized again with `APOTCPlot`.
## Managing APackOfTheClones run data
`RunAPOTC` has approximately the first half of `vizAPOTC`'s arguments until `max_repulsion_iter`, meaning it has all the data subsetting, circle size scaling, and cluster repulsion capabilities covered in the previous vignette. The most essential difference is the presence of the argument `run_id`, which corresponds to an id for the `ApotcData` object. If left blank, one will be automatically generated in the following format:
> `reduction_base;clonecall;keyword_arguments;extra_filter`
where the keyword arguments and extra_filter fields are replaced by underscore (`_`) characters if there was no input for the `...` and `extra_filter` parameters.
```{r, eval = FALSE}
# Here is the function ran with its default parameters
pbmc <- RunAPOTC(pbmc)
#> Initializing APOTC run...
#> * Setting `clone_scale_factor` to 0.3
#> * id for this run: umap;CTstrict;_;_
#>
#> Packing clones into clusters
#> [==================================================] 100%
#>
#> repulsing all clusters | max iterations = 20
#> [==================================================] 100%
#>
#> Completed successfully, time elapsed: 0.155 seconds
#>
```
```{r, runapotc_default, include = FALSE}
pbmc <- RunAPOTC(pbmc, verbose = FALSE)
```
From the verbose output, one can see how the `run_id` was set. Here it is run again with more optional arguments and a custom `run_id`:
```{r, runapotc2}
pbmc <- RunAPOTC(
pbmc, run_id = "sample17", orig.ident = c("P17B", "P17L"), verbose = FALSE
)
```
### Utilities for Managing APackOfTheClones Runs
Note that the data abstraction behind a `run_id` is intentional: users should not manually touch any of the `ApotcData` objects within the seurat object unless they are extremely familiar with the latest internal implementation. Instead, here is a collection of functions that may be useful, with a short usage sketch after the list:
- `getApotcDataIds(pbmc)` gets all current `run_id`'s, if any.
- `getLastApotcDataId(pbmc)` gets the latest `run_id`, if any.
- `containsApotcRun(pbmc, run_id = "foo")` returns whether a `run_id` exists in the seurat object.
- `deleteApotcData(pbmc, run_id = "foo")` deletes *all data* associated with a certain `run_id`.
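A minimal usage sketch of these utilities, using the `"sample17"` run created above (and assuming that `deleteApotcData` returns the modified seurat object):
```{r, run_utils_example, echo = TRUE, eval = FALSE}
getApotcDataIds(pbmc)                              # all stored run ids
containsApotcRun(pbmc, run_id = "sample17")        # TRUE if the run exists
pbmc <- deleteApotcData(pbmc, run_id = "sample17") # remove the run's data
```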
## APOTCPlot
To visualize stored APackOfTheClones runs, `APOTCPlot` takes in a seurat object and the `run_id`. If no `run_id` is provided, it defaults to using the latest run. All other parameters are the same as in the second half of `vizAPOTC`. Notably, if the user has relied only on auto-generated `run_id`'s, `APOTCPlot` also accepts these subsetting arguments to identify the run:
```{r, apotcplot_subset_params, eval = FALSE}
reduction_base = NULL,
clonecall = NULL,
...,
extra_filter = NULL,
```
Putting in arguments identical to those that generated the original `ApotcData` would work too, but this approach is less recommended as it's a lot more (unnecessarily) verbose. Here is `APOTCPlot` in action:
```{r, apotcplot}
# Here, plots for samples 17, 18, and 19 as seen in the previous vignette are made, where
# `orig.ident` is a custom column in the example data with levels corresponding to sample ids:
# ("P17B" "P17L" "P18B" "P18L" "P19B" "P19L" "P20B" "P20L").
pbmc <- RunAPOTC(
pbmc, run_id = "P17", orig.ident = c("P17B", "P17L"), verbose = FALSE
)
pbmc <- RunAPOTC(
pbmc, run_id = "P18", orig.ident = c("P18B", "P18L"), verbose = FALSE
)
pbmc <- RunAPOTC(
pbmc, run_id = "P19", orig.ident = c("P19B", "P19L"), verbose = FALSE
)
cowplot::plot_grid(
vizAPOTC(pbmc, verbose = FALSE),
APOTCPlot(pbmc, run_id = "P17"),
APOTCPlot(pbmc, run_id = "P18"),
APOTCPlot(pbmc), # run_id omitted as sample 19 was the latest run
labels = c("all", "17", "18", "19")
)
```
## AdjustAPOTC
This function's parameters modify certain attributes of stored APackOfTheClones runs, and it has the exact same first six parameters as `APOTCPlot` for selecting which run to modify. It also possesses the same four repulsion arguments as `vizAPOTC` and `RunAPOTC` if a run is to be repulsed again. See the function-level documentation for the parameters that can modify cluster locations and colors, and adjust the `clone_scale_factor` and `rad_scale_factor`.
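As a brief sketch using only the arguments named above (the repulsion values are illustrative), a stored run can be repulsed again and then re-plotted:
```{r, adjust_example, echo = TRUE, eval = FALSE}
# gentler, longer repulsion applied to the stored "P17" run
pbmc <- AdjustAPOTC(
  pbmc,
  run_id = "P17",
  repulse = TRUE,
  repulsion_strength = 0.5,
  max_repulsion_iter = 50
)
APOTCPlot(pbmc, run_id = "P17")
```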
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/vignettes/APackOfTheClones-runs.Rmd |
---
title: "ARCHIVED: A walkthrough of APackOfTheClones v0.1.x"
output: rmarkdown::html_vignette
description: >
A full walkthrough of the clonal expansion visualization workflow.
vignette: >
%\VignetteIndexEntry{ARCHIVED: A walkthrough of APackOfTheClones v0.1.x}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
date: 'Compiled: `r format(Sys.Date(), "%B %d, %Y")`'
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
knitr::opts_chunk$set(echo = FALSE)
options(repos = c(CRAN = "http://cran.rstudio.com"))
# function to install and load packages quietly
quiet_load <- function(pkg, CRAN = TRUE, dev_dir = "Qile0317/") {
if (!require(pkg, quietly = TRUE, character.only = TRUE)) {
if (CRAN) {
invisible(install.packages(pkg, quiet = TRUE, verbose = FALSE, character.only = TRUE))
} else {
suppressWarnings(suppressMessages(
devtools::install_github(paste(dev_dir, pkg, sep = ""))
))
}
}
suppressPackageStartupMessages(invisible(require(pkg, character.only = TRUE)))
}
quiet_load_all_CRAN <- function(...) {
pkgs <- list(...)
for (pkg in pkgs) {quiet_load(pkg)}
}
# redefine "head" to output a nicer dataframe
view_head <- function(df) {
knitr::kable(head(df))
}
# load pkgs
quiet_load_all_CRAN("ggplot2", "cowplot", "Seurat", "devtools")
quiet_load("SCIPData", CRAN = FALSE)
# load the legacy version from github
suppressWarnings(suppressMessages(
devtools::install_github("Qile0317/APackOfTheClones@v0")
))
suppressPackageStartupMessages(invisible(require(APackOfTheClones)))
# load data
data("pbmc", package = "SCIPData")
data("tcr_dataframe", package = "SCIPData")
```
# Introduction
Single-cell RNA sequencing (scRNA-seq) and T cell receptor (TCR) sequencing are popular techniques for studying immune cell function and disease. The combined use of such data can provide insights into clonal expansion. `APackOfTheClones` provides a simple, publication-ready method to intuitively visualize clonal expansion between different cell clusters with `ggplot2`, and can be easily slotted into any analysis pipeline.
In this user vignette, we will assume basic familiarity with `Seurat` and `R`, and a `Seurat` object has already been processed through the basic pipeline until at least the UMAP reduction has been obtained. Most importantly, clonal expansion can only be analyzed if the biosample has been run through the 10X genomics [Chromium Single Cell Immune Profiling](https://www.10xgenomics.com/products/single-cell-immune-profiling) service, and the resulting data processed by [Cell Ranger](https://support.10xgenomics.com/single-cell-gene-expression/software/pipelines/latest/what-is-cell-ranger).
We will show basic usage of the main functions of the package: namely, the integration of the 10X genomics T-cell receptor library generated by Cell Ranger into a `Seurat` object, followed by the usage of a fully customizable clonal expansion plotting method.
# Setup the Seurat object and T cell receptor library
Before anything else can be done, we need a `Seurat` object of the single cell experiment with a dimensional reduction. The most modern and common reduction used in publications is UMAP (Uniform Manifold Approximation and Projection), which is what this package defaults to, and what this vignette will use. There is also a way to base it off of the t-SNE (t-distributed stochastic neighbor embedding) or PCA (Principal Component Analysis) reduction, though choosing the latter is probably not the best idea.
```{r, init_pbmc, echo = TRUE, collapse = FALSE}
library(Seurat)
suppressPackageStartupMessages(library(APackOfTheClones))
# Here, a Seurat object has already been loaded named `pbmc`
print(pbmc)
```
Here, we can also see its UMAP reduction plot. It is very important that it is present within the Seurat object so that the later clonal expansion visualization can be based on the coordinates for intuitiveness.
```{r, get_umap, echo = TRUE, collapse = FALSE}
umap_plot <- UMAPPlot(pbmc)
umap_plot
```
To analyze clonal expansion, the T-cell library (in the form of the `all_contig_annotations.csv` file) generated by Cell Ranger has to be "integrated" into the seurat object. This simply means that the cell-level receptor information is incorporated into the `@meta.data` attribute. However, before doing so, let's quickly view the file structure. An R dataframe of the read-in file has been loaded, named `tcr_dataframe`:
```{r, fake_view_tcr, echo = TRUE, eval = FALSE}
head(tcr_dataframe)
```
```{r, init_tcr}
view_head(tcr_dataframe)
```
These are the first few rows of what the file should look like directly from Cell Ranger. If you for some reason have a custom version of this dataframe that is different, it is highly important that there at least exist columns named `barcode` (column 1) and `raw_clonotype_id` (column 17); otherwise, everything else will not work as intended.
<details>
<summary>**how do I load in the raw file?**</summary>
```{r, fake_readr, echo = TRUE, eval = FALSE}
library(readr)
tcr_dataframe <- readr::read_csv("your_file_location/all_contig_annotations.csv")
```
</details>
# Integration of clonotype information
To finally incorporate the TCR(T-cell receptor library) data into the seurat object, we'll use the simple `integrate_tcr` function.
```{r, integrate, echo = TRUE, collapse = FALSE}
pbmc <- integrate_tcr(pbmc, tcr_dataframe, verbose = FALSE)
```
Note that the last argument `verbose` was intentionally set to `FALSE` due to the presence of a progress bar which corrupts the formatting of the vignette (it defaults to the recommended `TRUE`). Otherwise, the output to the R Console would have been the following:
```
#> integrating TCR library into seurat object
#> |====================================| 100%
#> Percent of unique barcodes: 31 %
```
By calling `integrate_tcr`, one can now find all the information from `all_contig_annotations.csv` within the `@meta.data` attribute. Let's take a quick look on what it looks like:
```{r, fake_view_metadata, echo = TRUE, eval = FALSE}
head([email protected])
```
```{r, view_metadata}
view_head([email protected])
```
From this snippet of the meta data, we can see that for this particular seurat object, the first 5 columns `orig.ident, nCount_RNA, nFeature_RNA, RNA_snn_res.0.6, seurat_clusters` were originally present. What we care about is all the new columns after them. A few key things to note:
* There are a lot of `NA` values, and that is due to 1) some cells aren't T cells, 2) some T cell receptors do not end up being sequenced for many possible reasons, 3) Of the sequenced T cells in the TCR library, there are usually a lot of duplicate barcodes (repeated TCR sequencing of the same cell) as each individual cell may have several TCR types and/or need several contigs to assemble the final sequence.
* unique data about TCRs of the same cell (same barcode) are collapsed together with `__` in the order of the contigs
Now, the seurat object can be analyzed downstream with the new integrated information however we wish. It also is now ready to have its clonotypes counted and the clonal expansion visualized.
<details>
<summary>**do I HAVE to integrate the TCR library into my seurat object?**</summary>
Well... no, but there is no reason not to, and it is heavily recommended. Otherwise, the upcoming clonal expansion plotting function shows how the integration can be avoided.
</details>
<details>
<summary>**How do I get the clonal expansion per cluster information myself?**</summary>
There is the `count_clone_sizes(seurat_obj)` function for doing so. Check the documentation by running `?count_clone_sizes` for more details.
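A minimal sketch, assuming the integrated `pbmc` object from above:
```{r, count_sizes_example, echo = TRUE, eval = FALSE}
# per-cluster clonotype size counts
cluster_clone_sizes <- count_clone_sizes(pbmc)
```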
</details>
# Ball packing visualization
All one has to do to produce the visualization is by simply running the following code:
```{r, main_plt, echo = TRUE}
pbmc_expansion_plot <- clonal_expansion_plot(pbmc, verbose = FALSE)
# once again, `verbose` is set to false but it defaults to TRUE
pbmc_expansion_plot
```
<details>
<summary>**How do I base the clonal expansion plot off of the t-SNE or PCA reduction?**</summary>
`clonal_expansion_plot` has an optional argument `reduction`, which defaults to `'umap'` and controls which reduction the plot is based on. The argument can be set to `'tsne'` or `'pca'` like so:
```
# t-SNE based plot
pbmc_expansion_plot <- clonal_expansion_plot(pbmc, reduction = 'tsne')
# PCA based plot
pbmc_expansion_plot <- clonal_expansion_plot(pbmc, reduction = 'pca')
```
</details>
## A few things to note about the plot
* Each circular cluster corresponds in position and color to its seurat cluster's centroid on the original UMAP
* The most expanded clonotypes are at the center of each circular cluster, with larger circle sizes symbolizing increased expansion
* There are fewer circles than the scRNA-seq cell count due to the aforementioned reasons.
* On the top left, there is a somewhat covered visual legend of the relative clone sizes
* The red and green clusters have considerable visual overlap
* The returned plot is a fully customizable `ggplot` object
As `APackOfTheClones` is in a rapid-development phase, the resulting clonal expansion plot will usually not be visually satisfactory on the first run without customizations. However, there are many optional arguments and `ggplot` tricks that we are about to explore that will help us make it publication quality.
<details>
<summary>**How do I create the plot without integrating the Seurat object and TCR library?**</summary>
`clonal_expansion_plot` will also work if the user provides the original `seurat` object and the raw TCR library dataframe, like so:
```
# let pbmc be the raw seurat object before the integration
pbmc_expansion_plot <- clonal_expansion_plot(pbmc, tcr_dataframe, verbose = FALSE)
```
</details>
## Adjusting intra-cluster spacing
The considerable overlap between clusters (due to the algorithm's attempt to fit clusters to the original UMAP coordinates) may sometimes cause them to obstruct each other excessively.
To account for this, there are four optional arguments in `clonal_expansion_plot`:
```{r, echo = TRUE, eval = FALSE}
repulse = FALSE,
repulsion_threshold = 1,
repulsion_strength = 1,
max_repulsion_iter = 10
```
For more details on them, read the "Arguments" section in the function documentation (`?clonal_expansion_plot`). But to summarize: first, to make the circle clusters move away from each other, `repulse` should be set to `TRUE`, and the function should be run again. (If you feel this excessive re-running takes too long or is inefficient for your workflow in its current form, please make an issue on the github page.)
Setting repulse to TRUE with those default parameters should probably yield something like the following:
```{r, repulse, echo = TRUE}
pbmc_expansion_plot <- clonal_expansion_plot(pbmc, repulse = TRUE, verbose = FALSE)
pbmc_expansion_plot
```
As we can see, the red and green clusters have "moved away" from each other, and there is very little overlap remaining. All other clusters which were not touching remain in their original positions.
<details>
<summary>**Do I HAVE to re-run `clonal_expansion_plot` each time I want to modify a parameter?**</summary>
Unfortunately, yes. With the current version (v1) of `APackOfTheClones`, to change the appearance of the plot, `clonal_expansion_plot` has to be re-run with modified parameters each time, which unfortunately may be slow/tedious if a lot of modifications are required. Please create a github issue or contact the author if a new version should be made to prevent this.
</details>
If you are still unhappy with the spacing of the plot, see the following ways to adjust the spacing:
* `repulsion_threshold` indicates the amount of `ggplot2` units of overlap between clusters that are acceptable. It defaults to `1`, meaning that two clusters that overlap by about 1 unit are considered by the repulsion algorithm to not be overlapping. Increasing this number will increase the amount of overlap between clusters, and decreasing this number will do the opposite, while decreasing it to negative values will incur additional spacing between clusters. However, using negative spacing may cause the plot to look very spaced out. Keep reading to see alternatives to doing so.
* `repulsion_strength` relates to how much the clusters should repel each other. The repulsion algorithm works in iterations, where during each iteration, each cluster "pushes" the others away by some amount. Increasing this value will cause extra "pushing" during each iteration. However, increasing this factor too much may result once again in a very visually unpleasant plot. Note that closer clusters will repel each other more, and larger clusters will repel smaller clusters more.
* `max_repulsion_iter` indicates the number of iterations where clusters should repel each other. Increasing this number helps ensure that clusters will almost always end up non-overlapping. A trick with this parameter to make more pleasant plots is to decrease `repulsion_strength` and increase `max_repulsion_iter` to possibly make a more pleasant arrangement of clusters; a sketch combining these follows below.
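Below is a sketch of that trick, with illustrative values:
```{r, repulsion_sketch, echo = TRUE, eval = FALSE}
# gentler pushes per iteration, but more iterations
clonal_expansion_plot(
    pbmc,
    repulse = TRUE,
    repulsion_strength = 0.5,
    max_repulsion_iter = 30,
    verbose = FALSE
)
```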
## Adjusting and customizing size legend
The plot we've generated here has a size legend on the top left, but we see that it is partially outside of the plot. Unfortunately the algorithm for placing the legend is far from perfect in version 0 so the user will have to do a little extra work.
There are the following six parameters to adjust the legend:
```{r, legend_params, echo = TRUE, eval = FALSE}
add_size_legend = TRUE,
legend_sizes = c(1, 5, 50),
legend_position = "top_left",
legend_buffer = 1.5,
legend_color = "#808080",
legend_spacing = 0.4
```
Here, we can adjust the argument `legend_buffer`, which indicates the amount of `ggplot2` units to move the legend "inwards" by. We can also slightly increase `legend_spacing`, an argument specifying the vertical distance between circles in the legend.
```{r, adj_legend, echo = TRUE}
# the previous arguments have to be maintained!
pbmc_expansion_plot <- clonal_expansion_plot(
pbmc,
legend_buffer = 3,
legend_spacing = 0.6,
repulse = TRUE,
verbose = FALSE
)
pbmc_expansion_plot
```
We can even change the circle (clone) sizes displayed in the legend by changing the vector `legend_sizes`:
```{r, adj_legend_sizes, echo = TRUE}
pbmc_expansion_plot <- clonal_expansion_plot(
pbmc,
legend_sizes = c(1, 5, 25, 50),
legend_buffer = 3,
legend_spacing = 0.6,
repulse = TRUE,
verbose = FALSE
)
pbmc_expansion_plot
```
More details about these arguments can be read in the function documentation.
## Adjusting and customizing circle size and scaling
So far, the sizes of the individual circles (clones) have seemed to look quite pleasant, fitting roughly into their UMAP cluster centroids snugly. However, this may not always be the case. A likely very common issue upon the initialization of the plot with default arguments is that the circles may be too small or too large.
In these cases, the argument `clone_scale_factor` should be changed. The argument defaults to 0.1, which is the multiplicative scale factor for the square root radius of a clone. So if a clone size is four, and the factor is 0.1, then the actual radius of the circle on the plot would be `sqrt(4) * 0.1` which equates to 0.2.
Additionally, `rad_scale_factor` can optionally be changed to adjust the intra-circular spacing. The factor defaults to 0.95 and indicates the multiplicative amount to adjust the radius of the circles after they have been placed. Decreasing this value (slightly) will make each circle smaller in place, increasing the spacing between them.
For demonstration sake, we'll change the `clone_scale_factor` to 0.07 and `rad_scale_factor` to 0.9:
```{r, scale_fac, echo = TRUE}
pbmc_expansion_plot <- clonal_expansion_plot(
pbmc,
clone_scale_factor = 0.07,
rad_scale_factor = 0.9,
legend_sizes = c(1, 5, 25, 50),
legend_buffer = 3,
legend_spacing = 0.6,
repulse = TRUE,
verbose = FALSE
)
pbmc_expansion_plot
```
## Other possible modifications
There are a few other arguments this vignette did not cover which can be read in the function documentation. However, to end off, one more important argument to consider is `use_default_theme = TRUE`. It makes the plot have the same axis labels and general theme as the original UMAP plot. If this isn't to your liking or you want to customize the plot your own way (since `clonal_expansion_plot` generates a `ggplot` object!), simply set it to `FALSE` and you'll have a very minimally themed plot:
```{r, blankplt, echo = TRUE}
clonal_expansion_plot(
pbmc,
use_default_theme = FALSE,
clone_scale_factor = 0.07,
rad_scale_factor = 0.9,
legend_sizes = c(1, 5, 25, 50),
legend_buffer = 3,
legend_spacing = 0.6,
repulse = TRUE,
verbose = FALSE
)
```
Additionally, if you really want the plot to reflect the original UMAP plot, you could try using `ggplot2`'s `xlim` and `ylim` functions.
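For instance, a brief sketch with placeholder axis limits (match these to your own UMAP's actual ranges):
```{r, axis_limits_example, echo = TRUE, eval = FALSE}
pbmc_expansion_plot + ggplot2::xlim(-12, 12) + ggplot2::ylim(-12, 12)
```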
# Final product
That's about it for the most basic functionalities of the clonal expansion visualization function. Remember to save the plot first as an `.svg` file for maximal resolution, and it should be publication ready.
For fun, here's a side-by-side comparison of the UMAP plot and the clonal expansion plot:
```{r, final, echo = TRUE}
library(ggplot2)
library(cowplot)
cowplot::plot_grid(
umap_plot + ggtitle("scRNA-seq UMAP"),
pbmc_expansion_plot + ggtitle("APackOfTheClones clonal expansion plot"),
labels = "AUTO"
)
```
<details>
<summary>**How do I reproduce the plots in this vignette?**</summary>
If you really want to reproduce the plots and code in this vignette, you will need to install the data used in this vignette which has been compiled in an R package `SCIPData` (Single Cell Immune Profiling Data). You can do so with the following code
```{r, echo = TRUE, eval = FALSE}
# install the data package
library(devtools)
devtools::install_github("Qile0317/SCIPData")
# load the package data directly into R objects in memory
data("pbmc", "tcr_dataframe")
```
And the rest of the code can be copied and used accordingly.
Alternatively, there are two smaller, minimal, artificial versions of the seurat object and clonotype data that can be loaded in `APackOfTheClones`, run `?mini_seurat_obj` and `?mini_clonotype_data` for more details.
</details>
| /scratch/gouwar.j/cran-all/cranData/APackOfTheClones/vignettes/archive/v0-main.Rmd |
#' Estimating the AP and the AUC for Binary Outcome Data.
#'
#' \code{APBinary} This function calculates the estimates of the AP and AUC
#' for binary outcomes as well as their confidence intervals
#' using the perturbation or the nonparametric bootstrap
#' resampling method.
#'
#' @param status Binary indicator, 1 indicates case / the class of prediction interest and 0 otherwise.
#' @param marker Numeric risk score. Data can be continuous or ordinal.
#' @param cut.values risk score values to use as a cut-off for calculation of positive predictive values (PPV) and true positive fractions (TPF). The default value is NULL.
#' @param method Method to obtain confidence intervals.
#' @param alpha Confidence level. The default level is 0.95.
#' @param B Number of resampling to obtain confidence interval. The default value is 1000.
#' @param weight Optional. The default weight is 1, same object length as the "status" and "marker" object.
#' @importFrom stats rexp
#' @importFrom stats quantile
#' @importFrom utils write.csv
#' @export APBinary
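#' @examples
#' \dontrun{
#' # A minimal illustrative sketch using simulated (hypothetical) data.
#' # Note: when confidence intervals or cut-off values are requested,
#' # summary .csv files are written to the working directory.
#' set.seed(1)
#' status <- rbinom(200, 1, 0.3)
#' marker <- rnorm(200) + status
#' # point estimates only
#' APBinary(status, marker)
#' # with nonparametric bootstrap confidence intervals
#' APBinary(status, marker, method = "bootstrap", B = 200)
#' }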
APBinary <- function(status,marker,cut.values=NULL,method="none",alpha=0.95,B=1000,weight=NULL)
{
############Checking the Formation############
if(length(status)!=length(marker)){
stop("The lengths of each data are not equal!\n")
}
if(is.null(weight)){
vk = rep(1,length(status))
}else{
if(length(weight)!=length(marker)){
stop("The length of weight does not match!\n")
}else{
vk = weight
}
}
data0=cbind(status,marker)
nn<-nrow(data0)
auc=ap=ap_event=array(0,dim=c(B+1))
dk=status;zk=marker
############Set Cut-Off Value############
if(!is.null(cut.values)){
if(!((max(cut.values)<=max(marker))&(min(cut.values)>=min(marker)))){
cut.values=cut.values[(min(marker)<=cut.values)&(cut.values<=max(marker))]
cat("Warning: Some cut values are out of range!\n")
}
if(length(cut.values)==0){cut.values=NULL;cat("Warning: No available cut values!\n")}
if(!is.null(cut.values)){
scl=cut.values
PPV=TPF=array(0,dim=c(length(scl),2))
PPV[,1]=TPF[,1]=scl
TPF[,2] = sum.I(scl,"<",zk,1*(dk==1)*vk)/sum(1*(dk==1)*vk) ## P(Z> cl|D=1)
PPV[,2] = sum.I(scl,"<",zk,1*(dk==1)*vk)/sum.I(scl,"<",zk,vk) ## P(D=1|Z> cl)
}
}
if(method=="none"){
auc = sum((0.5*sum.I(zk,"<",zk,1*(dk==1)*vk)+0.5*sum.I(zk,"<=",zk,1*(dk==1)*vk))*(dk==0)*vk)/(sum(vk*(dk==1))*sum(vk*(dk==0)))
ap = sum(vk*(dk==1)*sum.I(zk,"<=",zk,1*(dk==1)*vk)/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((dk==1)*vk)
event_rate=sum(status)/nn
if(!is.null(cut.values)){
colnames(PPV)=c("cut.off values","PPV")
colnames(TPF)=c("cut.off values","TPF")
write.csv(signif(PPV,3),file=paste("APBinary_PPV.csv",sep=""))
write.csv(signif(TPF,3),file=paste("APBinary_TPF.csv",sep=""))
return(list(PPV=signif(PPV,3),TPF=signif(TPF,3),ap=ap,auc=auc,event_rate=event_rate))
}
return(list(ap=ap,auc=auc,event_rate=event_rate))
}
if(method=="perturbation"){
vk1<-matrix(rexp(nn*B,1),nrow=nn,ncol=B)
auc[1] = sum((0.5*sum.I(zk,"<",zk,1*(dk==1)*vk)+0.5*sum.I(zk,"<=",zk,1*(dk==1)*vk))*(dk==0)*vk)/(sum(vk*(dk==1))*sum(vk*(dk==0)))
ap[1] = sum(vk*(dk==1)*sum.I(zk,"<=",zk,1*(dk==1)*vk)/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((dk==1)*vk)
####save true value
auc[2:(B+1)] = apply((0.5*sum.I(zk,"<",zk,1*(dk==1)*vk1)+0.5*sum.I(zk,"<=",zk,1*(dk==1)*vk1))*(dk==0)*vk1,2,sum,na.rm=T)/(apply(vk1*(dk==1),2,sum)*apply(vk1*(dk==0),2,sum))
ap[2:(B+1)] = apply(vk1*(dk==1)*sum.I(zk,"<=",zk,1*(dk==1)*vk1)/sum.I(zk,"<=",zk,vk1),2,sum,na.rm=T)/apply((dk==1)*vk1,2,sum)
}
if(method=="bootstrap"){
data_resam=array(0,dim=c(nn,ncol(data0),B+1))
data_resam[,,1]=as.matrix(data0)
for(k in 2:(B+1)){
index=sample(c(1:nn),nn,replace=TRUE)
data_resam[,,k]=as.matrix(data0[as.vector(index),])
}
for(k in 1:(B+1)){
dk=data_resam[,1,k];zk=data_resam[,2,k]
auc[k] = sum((0.5*sum.I(zk,"<",zk,1*(dk==1)*vk)+0.5*sum.I(zk,"<=",zk,1*(dk==1)*vk))*(dk==0)*vk)/(sum(vk*(dk==1))*sum(vk*(dk==0)))
ap[k] = sum(vk*(dk==1)*sum.I(zk,"<=",zk,1*(dk==1)*vk)/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((dk==1)*vk)
}
}
event_rate=sum(status)/nn
event_rate_sd=sqrt(event_rate*(1-event_rate)/nn)
auc_summary=array(0,dim=c(1,3))
ap_summary=array(0,dim=c(2,3))
#####summary of AUC#####
auc_summary[1,1]<-auc[1]
auc_summary[1,2]<-max(quantile(auc,(1-alpha)/2,na.rm=T),0)
auc_summary[1,3]<-min(quantile(auc,(1+alpha)/2,na.rm=T),1)
######summary of AP
ap_summary[1,1]<-event_rate
ap_summary[1,2]<-event_rate-1.96*event_rate_sd
ap_summary[1,3]<-event_rate+1.96*event_rate_sd
ap_summary[2,1]<-ap[1]
ap_summary[2,2]<-max(quantile(ap,(1-alpha)/2,na.rm=T),0)
ap_summary[2,3]<-min(quantile(ap,(1+alpha)/2,na.rm=T),1)
colnames(auc_summary)<-c("Point Estimate",paste("Lower Limit(a=",alpha,")",sep=""),paste("Upper Limit(a=",alpha,")",sep=""))
rownames(auc_summary) = c("AUC")
write.csv(signif(auc_summary,3),file=paste("APBinary_auc_summary(","method=",method,",B=",B,").csv",sep=""))
colnames(ap_summary)<-c("Point Estimate",paste("Lower Limit(a=",alpha,")",sep=""),paste("Upper Limit(a=",alpha,")",sep=""))
rownames(ap_summary) = c("event rate","AP")
write.csv(signif(ap_summary,3),file=paste("APBinary_ap_summary(","method=",method,",B=",B,").csv",sep=""))
if(!is.null(cut.values)){
colnames(PPV)=c("cut.off values","PPV")
colnames(TPF)=c("cut.off values","TPF")
write.csv(signif(PPV,3),file=paste("APBinary_PPV.csv",sep=""))
write.csv(signif(TPF,3),file=paste("APBinary_TPF.csv",sep=""))
return(list(PPV=signif(PPV,3),TPF=signif(TPF,3),ap_summary=signif(ap_summary,3),auc_summary=signif(auc_summary,3)))
}
return(list(ap_summary=signif(ap_summary,3),auc_summary=signif(auc_summary,3)))
}
| /scratch/gouwar.j/cran-all/cranData/APtools/R/APBinary.R |
#' Estimating the Time-dependent AP and AUC for Censored Time to Event Outcome Data.
#'
#' \code{APSurv} This function calculates the estimates of the AP and AUC
#' for censored time to event data as well as their
#' confidence intervals using the perturbation or the
#' nonparametric bootstrap resampling method. The estimation
#' method is based on Yuan, Y., Zhou, Q. M., Li, B., Cai, H., Chow, E. J., Armstrong, G. T. (2018). A threshold-free summary index of prediction accuracy for censored time to event data. Statistics in medicine, 37(10), 1671-1681.
#'
#' @param stime Censored event time.
#' @param status Binary censoring indicator: 1 indicates that the event of interest was observed, 0 indicates censoring. Any other value is treated as a competing risk event.
#' @param marker Numeric risk score. Data can be continuous or ordinal.
#' @param t0.list Prediction time intervals of interest. It could be one numerical value or a vector of numerical values, which must be in the range of stime.
#' @param cut.values Risk score values to use as cut-offs for the calculation of time-dependent positive predictive values (PPV) and true positive fractions (TPF). The default value is NULL.
#' @param method Method used to obtain confidence intervals. Options are "none" (the default, point estimates only), "perturbation" and "bootstrap".
#' @param alpha Confidence level. The default level is 0.95.
#' @param B Number of resampling iterations used to obtain the confidence intervals. The default value is 1000.
#' @param weight Optional. The default value is NULL, in which case the observations are weighted by the inverse of the probability that their respective time-dependent event status (whether the event occurs within a specified time period) is observed.
#' @param Plot Whether to plot the time-dependent AP versus the prediction time intervals. The default value is TRUE, in which case the AP is evaluated at the time points which partition the range of the event times of the data into 100 intervals.
#' @importFrom survival survfit
#' @importFrom survival coxph
#' @importFrom stats rexp
#' @importFrom stats quantile
#' @importFrom utils write.csv
#' @importFrom graphics plot
#' @importFrom graphics lines
#' @importFrom graphics legend
#' @importFrom cmprsk cuminc
#' @importFrom cmprsk timepoints
#' @export APSurv
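#' @examples
#' \dontrun{
#' # A minimal illustrative sketch using simulated (hypothetical) data;
#' # the t0.list values are assumed to fall within the observed event times.
#' # Note: summary .csv files are written to the working directory.
#' set.seed(1)
#' stime <- rexp(200, 0.1)
#' status <- rbinom(200, 1, 0.7)
#' marker <- -stime + rnorm(200)
#' APSurv(stime, status, marker, t0.list = c(5, 10), Plot = FALSE)
#' }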
APSurv <- function(stime,status,marker,t0.list,cut.values=NULL,method="none",alpha=0.95,B=1000,weight=NULL,Plot=TRUE)
{
############Checking the Formation############
if((length(stime)!=length(status))|(length(status)!=length(marker))){
stop("The length of each data is not equal!\n")
}
cumi = cuminc(stime, status)
er = timepoints(cumi, times = t0.list)
pi.list <- er$est[1, ]
Di = 1 * (status != 0)
fit1 = coxph(Surv(stime, Di) ~ 1)
dfit1 = survfit(fit1)
tt = dfit1$time
if (max(t0.list) >= max(tt)) {
stop("The prediction time intervals of interest are out of range!\n")
}
N_j = length(t0.list)
data0 = cbind(stime, status, marker)
nn = nrow(data0)
auc = ap = ap_event = array(0, dim = c(B + 1, N_j))
############Set Cut-Off Value############
xk=Ti=stime;zk=marker;Di = 1*(data0[,2]!=0);dk=status
vk = rep(1,nn)
if(!is.null(cut.values)){
if(!((max(cut.values)<=max(marker))&(min(cut.values)>=min(marker)))){
cut.values=cut.values[(min(marker)<=cut.values)&(cut.values<=max(marker))]
cat("Warning: Some cut values are out of range!\n")
}
if(length(cut.values)==0){cut.values=NULL;cat("Warning: No available cut values!\n")}
if(!is.null(cut.values)){
scl=cut.values
PPV=TPF=array(0,dim=c(length(scl),N_j+1))
PPV[,1]=TPF[,1]=scl
}
}
###########plot##################
if(Plot==TRUE){
t0_l=seq(from=min(stime),to=max(stime),length.out=102)[c(-1,-102)]
ap_plot=rep(0,length(t0_l))
xk=stime;zk=marker;dk=status;Di = 1*(data0[,2]!=0);
for (j in 1:length(t0_l)){
t0<-t0_l[j]
if(is.null(weight)){
############Calculate the Weight############
tt = c(t0,Ti[Ti<=t0])
Wi = rep(0,length(Ti)); Vi=rep(1,length(Ti))
tmpind = rank(tt)
Ghat.tt = summary(survfit(Surv(Ti,1-Di)~1, se.fit=F, type='fl', weights=Vi), sort(tt))$surv[tmpind]
Wi[Ti <= t0] = 1*(Di[Ti<=t0]!=0)/Ghat.tt[-1]; Wi[Ti > t0] = 1/Ghat.tt[1]
wk = Wi
}else{
wk = weight
}
ap_plot[j]= sum(wk*vk*(xk<=t0)*(dk==1)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk*wk*(dk==1))/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((xk<=t0)*vk*wk*(dk==1))
}
#use survival to find the corresponding event rate r based on t0
cumi=cuminc(stime, status)
er=timepoints(cumi, times=t0_l)
pi_l <- er$est[1,]
###########plot##################
plot(t0_l,pi_l,type="l",xlim=c(0,max(t0_l)),ylim=c(0,max(ap_plot)),col="purple",lwd=2,xlab="Time",ylab="AP",main="AP vs t0",cex.main=1.5,cex.lab=1.2)
lines(t0_l,ap_plot,col="red",lwd=2)
legend("topleft",c("random marker",colnames(data0)[3]),col=c("purple","red"),lwd=2,cex=1.2)
}
if(method=="none"){
auc=ap=ap_event=rep(0,N_j)
for (j in 1:N_j){
t0=t0.list[j]
if(is.null(weight)){
############Calculate the Weight############
tt = c(t0,Ti[Ti<=t0])
Wi = rep(0,length(Ti)); Vi=rep(1,length(Ti))
tmpind = rank(tt)
Ghat.tt = summary(survfit(Surv(Ti,1-Di)~1, se.fit=F, type='fl', weights=Vi), sort(tt))$surv[tmpind]
Wi[Ti <= t0] = 1*(Di[Ti<=t0]!=0)/Ghat.tt[-1]; Wi[Ti > t0] = 1/Ghat.tt[1]
wk = Wi
}else{
wk = weight
}
############Point estimation############
if(!is.null(cut.values)){
TPF[,j+1] = sum.I(scl,"<",zk,1*(dk==1)*(xk<=t0)*wk*vk)/sum(1*(dk==1)*(xk<=t0)*wk*vk) ## P(Y> cl|T<=t0)
PPV[,j+1] = sum.I(scl,"<",zk,1*(dk==1)*(xk<=t0)*wk*vk)/sum.I(scl,"<",zk,vk) ## P(T<=t0|Y> cl)
}
auc[j] = sum((0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk*vk*(dk==1))+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk*vk*(dk==1)))*(xk>t0)*wk*vk)/(sum(vk*wk*(xk<=t0)*(dk==1))*sum(vk*wk*(xk>t0)))
ap[j] = sum(wk*vk*(xk<=t0)*(dk==1)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk*wk*(dk==1))/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((xk<=t0)*vk*wk*(dk==1))
ap_event[j] = ap[j]/pi.list[j]
}
auc_summary=array(0,dim=c(N_j,3))
ap_summary=array(0,dim=c(N_j,4))
auc_summary[,1]=ap_summary[,1]=t0.list
auc_summary[,2]=ap_summary[,2]=signif(pi.list,3)
#####summary of AUC#####
auc_summary[,3]=signif(auc,3)
######summary of AP
ap_summary[,3]=signif(ap,3)
ap_summary[,4]=signif(ap_event,3)
colnames(auc_summary)=c("t0=","event rate","AUC(t)")
#rownames(auc_summary) = c(t0.list)
write.csv(auc_summary,file=paste("APSurv_auc_summary(","method=",method,").csv",sep=""))
colnames(ap_summary)=c("t0=","event rate","AP(t)","AP/(event rate)")
#rownames(ap_summary) = c(t0.list)
write.csv(ap_summary,file=paste("APSurv_ap_summary(","method=",method,").csv",sep=""))
if(!is.null(cut.values)){
colnames(PPV)=c("cut.off values",paste("t0=",c(t0.list)))
colnames(TPF)=c("cut.off values",paste("t0=",c(t0.list)))
write.csv(signif(PPV,3),file=paste("APSurv_PPV.csv",sep=""))
write.csv(signif(TPF,3),file=paste("APSurv_TPF.csv",sep=""))
return(list(PPV=signif(PPV,3),TPF=signif(TPF,3),ap_summary=ap_summary,auc_summary=auc_summary))
}
return(list(ap_summary=ap_summary,auc_summary=auc_summary))
}
if(method=="perturbation"){
for (j in 1:N_j){
t0=t0.list[j]
vk1=matrix(rexp(nn*B,1),nrow=nn,ncol=B)
if(is.null(weight)){
############Calculate the Weight############
tt = c(t0,Ti[Ti<=t0])
Wi = rep(0,length(Ti)); Vi=rep(1,length(Ti))
tmpind = rank(tt)
Ghat.tt = summary(survfit(Surv(Ti,1-Di)~1, se.fit=F, type='fl', weights=Vi), sort(tt))$surv[tmpind]
Wi[Ti <= t0] = 1*(Di[Ti<=t0]!=0)/Ghat.tt[-1]; Wi[Ti > t0] = 1/Ghat.tt[1]
wk = Wi
wk1 = array(wk,dim=c(length(wk),B))
}else{
wk = weight
wk1 = array(wk,dim=c(length(wk),B))
}
############Point estimation############
if(!is.null(cut.values)){
TPF[,j+1] = sum.I(scl,"<",zk,1*(dk==1)*(xk<=t0)*wk*vk)/sum(1*(dk==1)*(xk<=t0)*wk*vk) ## P(Y> cl|T<=t0)
PPV[,j+1] = sum.I(scl,"<",zk,1*(dk==1)*(xk<=t0)*wk*vk)/sum.I(scl,"<",zk,vk) ## P(T<=t0|Y> cl)
}
auc[1,j] =sum((0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk*vk*(dk==1))+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk*vk*(dk==1)))*(xk>t0)*wk*vk)/(sum(vk*wk*(xk<=t0)*(dk==1))*sum(vk*wk*(xk>t0)))
ap[1,j] = sum(wk*vk*(xk<=t0)*(dk==1)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk*wk*(dk==1))/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((xk<=t0)*vk*wk*(dk==1))
ap_event[1, j] = ap[1, j] / pi.list[j]
############Perturbation Process############
cat("t0=",t0,"\n",sep="")
auc[2:(B+1),j]= apply(0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk1*vk1*(dk==1))*(xk>t0)*wk1*vk1*(dk==1)+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk1*vk1*(dk==1))*(xk>t0)*wk1*vk1*(dk==1),2,sum,na.rm=T)/(apply(vk1*wk1*(xk<=t0)*(dk==1),2,sum)*apply(vk1*wk1*(xk>t0)*(dk==1),2,sum))
ap[2:(B+1),j] = apply(wk1*vk1*(xk<=t0)*(dk==1)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk1*wk1*(dk==1))/sum.I(zk,"<=",zk,vk1),2,sum,na.rm=T)/apply((xk<=t0)*vk1*wk1*(dk==1),2,sum)
ap_event[2:(B + 1), j] = ap[2:(B + 1), j] / pi.list[j]
}
}
if(method=="bootstrap"){
data_resam=array(0,dim=c(nn,ncol(data0),B+1))
data_resam[,,1]=as.matrix(data0)
index=matrix(0,nrow=nn,ncol=B+1)
index[,1]=seq(from=1,to=nn,length=nn)
for(k in 2:(B+1)){
index[,k]=sample(c(1:nn),nn,replace=TRUE)
data_resam[,,k]=as.matrix(data0[as.vector(index[,k]),])
}
pi.list_bs=matrix(0,nrow=B+1,ncol=N_j)
for (j in 1:N_j){
t0<-t0.list[j]
if(is.null(weight)){
tt = c(t0,Ti[Ti<=t0])
Vi = rep(1,length(Ti));Wi = rep(0,length(Ti));tmpind = rank(tt)
Ghat.tt = summary(survfit(Surv(Ti,1-Di)~1, se.fit=F, type='fl', weights=Vi), sort(tt))$surv[tmpind]
Wi[Ti <= t0] = 1*(Di[Ti<=t0]!=0)/Ghat.tt[-1]; Wi[Ti > t0] = 1/Ghat.tt[1]
wkc = Wi
}else{
wkc=weight
}
if(!is.null(cut.values)){
wk=wkc
xk <- data_resam[,1,1]; zk <- data_resam[,3,1]; dk <- data_resam[,2,1]
TPF[,j+1] = sum.I(scl,"<",zk,1*(dk==1)*(xk<=t0)*wk*vk)/sum(1*(dk==1)*(xk<=t0)*wk*vk) ## P(Y> cl|T<=t0)
PPV[,j+1] = sum.I(scl,"<",zk,1*(dk==1)*(xk<=t0)*wk*vk)/sum.I(scl,"<",zk,vk) ## P(T<=t0|Y> cl)
}
cat("t0=",t0,"\n",sep="")
for(k in 1:(B+1)){
wk=wkc[index[,k]]
xk <- data_resam[,1,k]; zk <- data_resam[,3,k]; dk <- data_resam[,2,k]
auc[k,j]= sum((0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk*vk*(dk==1))+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk*vk*(dk==1)))*(xk>t0)*wk*vk)/(sum(vk*wk*(xk<=t0)*(dk==1))*sum(vk*wk*(xk>t0)))
ap[k,j] = sum(wk*vk*(xk<=t0)*(dk==1)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk*wk*(dk==1))/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((xk<=t0)*vk*wk*(dk==1))
ap_event[k, j] = ap[k, j]/pi.list[j]
}
}
}
auc_summary=array(0,dim=c(N_j,5))
ap_summary=array(0,dim=c(N_j,8))
auc_summary[,1]=ap_summary[,1]=t0.list
auc_summary[,2]=ap_summary[,2]=signif(pi.list,3)
#####summary of AUC#####
for (j in 1:N_j){
auc_summary[j,3]=signif(auc[1,j],3)
auc_summary[j,4]=signif(max(quantile(auc[,j],(1-alpha)/2,na.rm=T),0),3)
auc_summary[j,5]=signif(min(quantile(auc[,j],(1+alpha)/2,na.rm=T),1),3)
}
######summary of AP
for (j in 1:N_j){
ap_summary[j,3]=signif(ap[1,j],3)
ap_summary[j,4]=signif(max(quantile(ap[,j],(1-alpha)/2,na.rm=T),0),3)
ap_summary[j,5]=signif(min(quantile(ap[,j],(1+alpha)/2,na.rm=T),1),3)
ap_summary[j,6]=signif(ap_event[1,j],3)
ap_summary[j,7]=signif(quantile(ap_event[,j],(1-alpha)/2,na.rm=T),3)
ap_summary[j,8]=signif(quantile(ap_event[,j],(1+alpha)/2,na.rm=T),3)
}
colnames(auc_summary)=c("t0=","event rate","AUC(t)",paste("Lower Limit(a=",alpha,")",sep=""),paste("Upper Limit(a=",alpha,")",sep=""))
#rownames(auc_summary) = c(t0.list)
write.csv(auc_summary,file=paste("APSurv_auc_summary(","method=",method,",B=",B,").csv",sep=""))
colnames(ap_summary)=c("t0=","event rate","AP(t)",paste("Lower Limit(a=",alpha,")",sep=""),paste("Upper Limit(a=",alpha,")",sep=""),"AP/(event rate)",paste("Lower Limit(a=",alpha,")",sep=""),paste("Upper Limit(a=",alpha,")",sep=""))
#rownames(ap_summary) = c(t0.list)
write.csv(ap_summary,file=paste("APSurv_ap_summary(","method=",method,",B=",B,").csv",sep=""))
if(!is.null(cut.values)){
colnames(PPV)=c("cut.off values",paste("t0=",c(t0.list)))
colnames(TPF)=c("cut.off values",paste("t0=",c(t0.list)))
write.csv(signif(PPV,3),file=paste("APSurv_PPV.csv",sep=""))
write.csv(signif(TPF,3),file=paste("APSurv_TPF.csv",sep=""))
return(list(PPV=signif(PPV,3),TPF=signif(TPF,3),ap_summary=ap_summary,auc_summary=auc_summary))
}
return(list(ap_summary=ap_summary,auc_summary=auc_summary))
}
| /scratch/gouwar.j/cran-all/cranData/APtools/R/APSurv.R |
#' Comparison of two risk scores based on the differences and ratio of their APs.
#'
#' \code{CompareAP} This function estimates the difference between and the
#' ratio of two APs in order to compare two markers for
#' censored time to event data or binary data. The
#' corresponding confidence intervals are provided.
#'
#' @param status Binary indicator. For binary data, 1 indicates case and 0 otherwise. For survival data, 1 indicates event and 0 otherwise.
#' @param marker1 Risk score 1 (to be compared to risk score 2). Its length is required to be the same as the length of status.
#' @param marker2 Risk score 2 (to be compared to risk score 1). Its length is required to be the same as the length of status.
#' @param stime Censored event time. If dealing with binary outcome, skip this argument which is set to be NULL.
#' @param t0.list Prediction time intervals of interest. It could be one numerical value or a vector of numerical values, which must be in the range of stime.
#' @param method Method used to obtain confidence intervals. Options are "none" (the default, point estimates only), "perturbation" and "bootstrap".
#' @param alpha Confidence level. The default level is 0.95.
#' @param B Number of resampling iterations used to obtain the confidence intervals. The default value is 1000.
#' @param weight Optional. The default value is NULL, in which case the observations are weighted by the inverse of the probability that their respective time-dependent event status (whether the event occurs within a specified time period) is observed.
#' @param Plot Whether to plot the time-dependent AUC, AP and their ratio against the prediction time. Only used for event time data (i.e. when stime is not NULL); the default value is TRUE.
#' @importFrom survival survfit
#' @importFrom survival coxph
#' @importFrom stats rexp
#' @importFrom stats quantile
#' @importFrom utils write.csv
#' @importFrom graphics par
#' @importFrom graphics plot
#' @importFrom graphics lines
#' @importFrom graphics legend
#' @importFrom cmprsk cuminc
#' @importFrom cmprsk timepoints
#' @export CompareAP
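#' @examples
#' \dontrun{
#' # A minimal illustrative sketch using simulated (hypothetical) data,
#' # comparing two risk scores for a binary outcome.
#' # Note: a summary .csv file is written to the working directory.
#' set.seed(1)
#' status <- rbinom(200, 1, 0.3)
#' marker1 <- rnorm(200) + status
#' marker2 <- rnorm(200) + 0.5 * status
#' CompareAP(status, marker1, marker2)
#' }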
CompareAP <- function(status,marker1,marker2,stime=NULL,t0.list=NULL,method="none",alpha=0.95,B=1000,weight=NULL,Plot=TRUE)
{
############Checking the Formation############
if(is.null(stime)&(!is.null(t0.list))){
stop("When stime is NULL, t0.list should be NULL!\n")
}
if(is.null(stime)){
if((length(status)!=length(marker1))|(length(marker1)!=length(marker2))){
stop("The length of each data is not equal!\n")
}
data0=cbind(status,marker1,marker2)
nn<-nrow(data0)
vk = rep(1,nn)
auc=ap=array(0,dim=c(B+1,2))
if(method=="none"){
ap=array(0,dim=c(2))
for(i in 1:2){
dk=data0[,1];zk=data0[,i+1]
ap[i] = sum(vk*(dk==1)*sum.I(zk,"<=",zk,1*(dk==1)*vk)/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((dk==1)*vk)
}
event_rate=sum(status)/nn
dap_summary=array(0,dim=c(5,1))
######summary of proportion of cases
dap_summary[1,1]<-event_rate
######summary of AP
for (i in 1:2){
dap_summary[i+1,1]<-ap[i]
}
#####summary of AP1-AP2#####
if((ap[1]-ap[2])>=0){
dap1=ap[1]-ap[2]
flag1="AP1-AP2"
}
else{
dap1=ap[2]-ap[1]
flag1="AP2-AP1"
}
dap_summary[4,1]<-dap1
#####summary of AP1/AP2#####
if((ap[1]/ap[2])>=1){
dap2=ap[1]/ap[2]
flag2="AP1/AP2"
}
else{
dap2=ap[2]/ap[1]
flag2="AP2/AP1"
}
dap_summary[5,1]<-dap2
colnames(dap_summary)<-c("point estimation")
rownames(dap_summary)<-c("propertion of cases","AP1","AP2",flag1,flag2)
write.csv(signif(dap_summary,3),file=paste("CompareAP_Binary_dap_summary(","method=",method,").csv",sep=""))
return(list(dap_summary=signif(dap_summary,3)))
}
if(method=="perturbation"){
vk1<-matrix(rexp(nn*B,1),nrow=nn,ncol=B)
for(i in 1:2){
dk=data0[,1];zk=data0[,i+1]
#auc[1,i] = sum((0.5*sum.I(zk,"<",zk,1*(dk==1)*vk)+0.5*sum.I(zk,"<=",zk,1*(dk==1)*vk))*(dk==0)*vk)/(sum(vk*(dk==1))*sum(vk*(dk==0)))
ap[1,i] = sum(vk*(dk==1)*sum.I(zk,"<=",zk,1*(dk==1)*vk)/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((dk==1)*vk)
####save true value
#auc[2:(B+1),i] = apply((0.5*sum.I(zk,"<",zk,1*(dk==1)*vk1)+0.5*sum.I(zk,"<=",zk,1*(dk==1)*vk1))*(dk==0)*vk1,2,sum,na.rm=T)/(apply(vk1*(dk==1),2,sum)*apply(vk1*(dk==0),2,sum))
ap[2:(B+1),i] = apply(vk1*(dk==1)*sum.I(zk,"<=",zk,1*(dk==1)*vk1)/sum.I(zk,"<=",zk,vk1),2,sum,na.rm=T)/apply((dk==1)*vk1,2,sum)
}
}
if(method=="bootstrap"){
data_resam=array(0,dim=c(nn,ncol(data0),B+1))
data_resam[,,1]=as.matrix(data0)
for(k in 2:(B+1)){
index=sample(c(1:nn),nn,replace=TRUE)
data_resam[,,k]=as.matrix(data0[as.vector(index),])
}
for(i in 1:2){
for(k in 1:(B+1)){
dk=data_resam[,1,k];zk=data_resam[,i+1,k]
#auc[k,i] = sum((0.5*sum.I(zk,"<",zk,1*(dk==1)*vk)+0.5*sum.I(zk,"<=",zk,1*(dk==1)*vk))*(dk==0)*vk)/(sum(vk*(dk==1))*sum(vk*(dk==0)))
ap[k,i] = sum(vk*(dk==1)*sum.I(zk,"<=",zk,1*(dk==1)*vk)/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((dk==1)*vk)
}
}
}
event_rate=sum(status)/nn
event_rate_sd=sqrt(event_rate*(1-event_rate)/nn)
dap_summary=array(0,dim=c(5,3))
######summary of proportion of cases
dap_summary[1,1]<-event_rate
dap_summary[1,2]<-event_rate-1.96*event_rate_sd
dap_summary[1,3]<-event_rate+1.96*event_rate_sd
######summary of AP
for (i in 1:2){
dap_summary[i+1,1]<-ap[1,i]
dap_summary[i+1,2]<-max(quantile(ap[,i],(1-alpha)/2,na.rm=T),0)
dap_summary[i+1,3]<-min(quantile(ap[,i],(1+alpha)/2,na.rm=T),1)
}
#####summary of AP1-AP2#####
if((mean(ap[,1])-mean(ap[,2]))>=0){
dap1=ap[,1]-ap[,2]
flag1="AP1-AP2"
}
else{
dap1=ap[,2]-ap[,1]
flag1="AP2-AP1"
}
dap_summary[4,1]<-dap1[1]
dap_summary[4,2]<-quantile(dap1,(1-alpha)/2,na.rm=T)
dap_summary[4,3]<-quantile(dap1,(1+alpha)/2,na.rm=T)
#####summary of AP1/AP2#####
if((mean(ap[,1])/mean(ap[,2]))>=1){
dap2=ap[,1]/ap[,2]
flag2="AP1/AP2"
}
else{
dap2=ap[,2]/ap[,1]
flag2="AP2/AP1"
}
dap_summary[5,1]<-dap2[1]
dap_summary[5,2]<-quantile(dap2,(1-alpha)/2,na.rm=T)
dap_summary[5,3]<-quantile(dap2,(1+alpha)/2,na.rm=T)
colnames(dap_summary)<-c("point estimation",paste("Lower Limit(a=",alpha,")",sep=""),paste("Upper Limit(a=",alpha,")",sep=""))
rownames(dap_summary)<-c("propertion of cases","AP1","AP2",flag1,flag2)
write.csv(signif(dap_summary,3),file=paste("CompareAP_Binary_dap_summary(","method=",method,",B=",B,").csv",sep=""))
return(list(dap_summary=signif(dap_summary,3)))
}
if(!is.null(stime)){
if((length(stime)!=length(status))|(length(status)!=length(marker1))|(length(marker1)!=length(marker2))){
stop("The length of each data is not equal!\n")
}
if(is.null(t0.list)){
stop("Please entry t0.list: prediction time intervals of interest for event time outcome!\n")
}
fit1=coxph(Surv(stime,status)~1)
dfit1=survfit(fit1)
tt=dfit1$time
if(max(t0.list)>=max(tt)){
stop("The prediction time intervals of interest are out of range!\n")
}
data0=cbind(stime,status,marker1,marker2)
N_j=length(t0.list)
nn<-nrow(data0)
auc=ap=array(0,dim=c(B+1,N_j,2))
Ti = data0[,1]; Di = 1*(data0[,2]!=0)
vk = rep(1,nn)
###########plot##################
if(Plot==TRUE){
t0_l=seq(from=min(stime),to=max(stime),length.out=102)[c(-1,-102)]
ap_plot=auc_plot=matrix(0,nrow=length(t0_l),ncol=2)
for (j in 1:length(t0_l)){
t0<-t0_l[j]
if(is.null(weight)){
############Calculate the Weight############
tt = c(t0,Ti[Ti<=t0])
Wi = rep(0,length(Ti)); Vi=rep(1,length(Ti))
tmpind = rank(tt)
Ghat.tt = summary(survfit(Surv(Ti,1-Di)~1, se.fit=F, type='fl', weights=Vi), sort(tt))$surv[tmpind]
Wi[Ti <= t0] = 1*(Di[Ti<=t0]!=0)/Ghat.tt[-1]; Wi[Ti > t0] = 1/Ghat.tt[1]
wk = Wi
}else{
wk = weight
}
xk=stime;zk=marker1;dk=status;
ap_plot[j,1] = sum(wk*vk*(xk<=t0)*(dk==1)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk*wk*(dk==1))/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((xk<=t0)*vk*wk*(dk==1))
auc_plot[j,1] = sum((0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk*vk*(dk==1))+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk*vk*(dk==1)))*(xk>t0)*wk*vk)/(sum(vk*wk*(xk<=t0)*(dk==1))*sum(vk*wk*(xk>t0)))
zk=marker2;
ap_plot[j,2] = sum(wk*vk*(xk<=t0)*(dk==1)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk*wk*(dk==1))/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((xk<=t0)*vk*wk*(dk==1))
auc_plot[j,2] = sum((0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk*vk*(dk==1))+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk*vk*(dk==1)))*(xk>t0)*wk*vk)/(sum(vk*wk*(xk<=t0)*(dk==1))*sum(vk*wk*(xk>t0)))
}
if((mean(ap_plot[,1])/mean(ap_plot[,2]))>=1){
dap_plot=ap_plot[,1]/ap_plot[,2]
flag2="AP1/AP2"
}else{
dap_plot=ap_plot[,2]/ap_plot[,1]
flag2="AP2/AP1"
}
#use survival to find the corresponding event rate r based on t0
cumi=cuminc(stime, status)
er=timepoints(cumi, times=t0_l)
pi_l <- er$est[1,]
###########plot##################
par(mfrow=c(1,3))
plot(t0_l,rep(0.5,length(t0_l)),type="l",xlim=c(0,max(t0_l)),ylim=c(0.5,max(auc_plot)),col="purple",lwd=2,xlab="Time",ylab="AUC",main="AUC vs t0",cex.main=1.5,cex.lab=1.2)
lines(t0_l,auc_plot[,1],col="black",lwd=2)
lines(t0_l,auc_plot[,2],col="red",lwd=2)
legend("left",c("random marker","marker1","marker2"),bty="n",col=c("purple","black","red"),lwd=2,cex=1.2)
plot(t0_l,pi_l,type="l",xlim=c(0,max(t0_l)),ylim=c(0,max(ap_plot)),col="purple",lwd=2,xlab="Time",ylab="AP",main="AP vs t0",cex.main=1.5,cex.lab=1.2)
lines(t0_l,ap_plot[,1],col="black",lwd=2)
lines(t0_l,ap_plot[,2],col="red",lwd=2)
legend("topleft",c("random marker","marker1","marker2"),bty="n",col=c("purple","black","red"),lwd=2,cex=1.2)
plot(t0_l,dap_plot,type="l",xlim=c(0,max(t0_l)),ylim=c(0,max(dap_plot)),lty=1,col="black",lwd=2,xlab="Time",ylab=flag2,main=paste(flag2,"vs t0"),cex.main=1.5,cex.lab=1.2)
}
if(method=="none"){
auc=ap=array(0,dim=c(N_j,2))
for (j in 1:N_j){
t0<-t0.list[j]
if(is.null(weight)){
############Calculate the Weight############
tt = c(t0,Ti[Ti<=t0])
Wi = rep(0,length(Ti)); Vi=rep(1,length(Ti))
tmpind = rank(tt)
Ghat.tt = summary(survfit(Surv(Ti,1-Di)~1, se.fit=F, type='fl', weights=Vi), sort(tt))$surv[tmpind]
Wi[Ti <= t0] = 1*(Di[Ti<=t0]!=0)/Ghat.tt[-1]; Wi[Ti > t0] = 1/Ghat.tt[1]
wk = Wi
}else{
wk=weight
}
for(i in 1:2){
xk <- data0[,1]; dk <- data0[,2];zk <- data0[,i+2];
ap[j,i] = sum(wk*vk*(xk<=t0)*(dk==1)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk*wk*(dk==1))/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((xk<=t0)*vk*wk*(dk==1))
}
}
#use survival to find the corresponding event rate r based on t0
cumi=cuminc(stime, status)
er=timepoints(cumi, times=t0.list)
pi.list <- er$est[1,]
dap_summary=matrix(0,nrow=N_j,ncol=6)
dap_summary[,1]=t0.list
dap_summary[,2]=pi.list
######summary of AP
dap_summary[,3]<-ap[,1]
dap_summary[,4]<-ap[,2]
#####summary of AP1-AP2#####
if((mean(ap[,1])-mean(ap[,2]))>=0){
dap1=ap[,1]-ap[,2]
flag1="AP1(t)-AP2(t)"
}else{
dap1=ap[,2]-ap[,1]
flag1="AP2(t)-AP1(t)"
}
dap_summary[,5]<-dap1
#####summary of AP1/AP2#####
if((mean(ap[,1])/mean(ap[,2]))>=1){
dap2=ap[,1]/ap[,2]
flag2="AP1(t)/AP2(t)"
}else{
dap2=ap[,2]/ap[,1]
flag2="AP2(t)/AP1(t)"
}
dap_summary[,6]<-dap2
colnames(dap_summary)<-c("t0=","event rate","AP1(t)","AP2(t)",flag1,flag2)
write.csv(signif(dap_summary,3),file=paste("CompareAP_Survival_dap_summary(","method=",method,").csv",sep=""))
return(list(dap_summary=signif(dap_summary,3)))
}
if(method=="perturbation"){
for (j in 1:N_j){
t0<-t0.list[j]
cat("t0=",t0,"\n",sep="")
vk1<-matrix(rexp(nn*B,1),nrow=nn,ncol=B)
if(is.null(weight)){
############Calculate the Weight############
tt = c(t0,Ti[Ti<=t0])
Wi = rep(0,length(Ti)); Vi=rep(1,length(Ti))
tmpind = rank(tt)
Ghat.tt = summary(survfit(Surv(Ti,1-Di)~1, se.fit=F, type='fl', weights=Vi), sort(tt))$surv[tmpind]
Wi[Ti <= t0] = 1*(Di[Ti<=t0]!=0)/Ghat.tt[-1]; Wi[Ti > t0] = 1/Ghat.tt[1]
wk = Wi
wk1=array(wk,dim=c(length(wk),B))
}else{
wk=weight
wk1=array(wk,dim=c(length(wk),B))
}
for(i in 1:2){
xk <- data0[,1]; dk <- data0[,2]; zk <- data0[,i+2];
#auc1[1,j,i] = sum((0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk*vk*(dk==1))+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk*vk*(dk==1)))*(xk>t0)*wk*vk)/(sum(vk*wk*(xk<=t0)*(dk==1))*sum(vk*wk*(xk>t0)))
#auc2[1,j,i] = sum((0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk*vk*(dk==2))+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk*vk*(dk==2)))*(xk>t0)*wk*vk)/(sum(vk*wk*(xk<=t0)*(dk==2))*sum(vk*wk*(xk>t0)))
ap[1,j,i] = sum(wk*vk*(xk<=t0)*(dk==1)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk*wk*(dk==1))/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((xk<=t0)*vk*wk*(dk==1))
#ap2[1,j,i] = sum(wk*vk*(xk<=t0)*(dk==2)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk*wk*(dk==2))/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((xk<=t0)*vk*wk*(dk==2))
#auc[2:(B+1),j,i]= apply(0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk1*vk1)*(xk>t0)*wk1*vk1+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk1*vk1)*(xk>t0)*wk1*vk1,2,sum,na.rm=T)/(apply(vk1*wk1*(xk<=t0),2,sum)*apply(vk1*wk1*(xk>t0),2,sum))
#auc1[2:(B+1),j,i]= apply(0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk1*vk1*(dk==1))*(xk>t0)*wk1*vk1*(dk==1)+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk1*vk1*(dk==1))*(xk>t0)*wk1*vk1*(dk==1),2,sum,na.rm=T)/(apply(vk1*wk1*(xk<=t0)*(dk==1),2,sum)*apply(vk1*wk1*(xk>t0)*(dk==1),2,sum))
#auc2[2:(B+1),j,i]= apply(0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk1*vk1*(dk==2))*(xk>t0)*wk1*vk1*(dk==2)+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk1*vk1*(dk==2))*(xk>t0)*wk1*vk1*(dk==2),2,sum,na.rm=T)/(apply(vk1*wk1*(xk<=t0)*(dk==2),2,sum)*apply(vk1*wk1*(xk>t0)*(dk==2),2,sum))
ap[2:(B+1),j,i] = apply(wk1*vk1*(xk<=t0)*(dk==1)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk1*wk1*(dk==1))/sum.I(zk,"<=",zk,vk1),2,sum,na.rm=T)/apply((xk<=t0)*vk1*wk1*(dk==1),2,sum)
#ap2[2:(B+1),j,i] = apply(wk1*vk1*(xk<=t0)*(dk==2)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk1*wk1*(dk==2))/sum.I(zk,"<=",zk,vk1),2,sum,na.rm=T)/apply((xk<=t0)*vk1*wk1*(dk==2),2,sum)
}
}
}
if(method=="bootstrap"){
data_resam=array(0,dim=c(nn,ncol(data0),B+1))
data_resam[,,1]=as.matrix(data0)
index=matrix(0,nrow=nn,ncol=B+1)
index[,1]=seq(from=1,to=nn,length=nn)
for(k in 2:(B+1)){
index[,k]=sample(c(1:nn),nn,replace=TRUE)
data_resam[,,k]=as.matrix(data0[as.vector(index[,k]),])
}
for (j in 1:N_j){
t0<-t0.list[j]
cat("t0=",t0,"\n",sep="")
if(is.null(weight)){
tt = c(t0,Ti[Ti<=t0])
Wi = rep(0,length(Ti)); Vi=rep(1,length(Ti))
tmpind = rank(tt)
Ghat.tt = summary(survfit(Surv(Ti,1-Di)~1, se.fit=F, type='fl', weights=Vi), sort(tt))$surv[tmpind]
Wi[Ti <= t0] = 1*(Di[Ti<=t0]!=0)/Ghat.tt[-1]; Wi[Ti > t0] = 1/Ghat.tt[1]
wkc = Wi
}else{
wkc=weight
}
for(i in 1:2){
for(k in 1:(B+1)){
wk=wkc[index[,k]]
xk <- data_resam[,1,k]; dk <- data_resam[,2,k]; zk <- data_resam[,i+2,k];
#auc1[k,j,i]= sum((0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk*vk*(dk==1))+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk*vk*(dk==1)))*(xk>t0)*wk*vk)/(sum(vk*wk*(xk<=t0)*(dk==1))*sum(vk*wk*(xk>t0)))
#auc2[k,j,i]= sum((0.5*sum.I(zk,"<=",zk,1*(xk<=t0)*wk*vk*(dk==2))+0.5*sum.I(zk,"<",zk,1*(xk<=t0)*wk*vk*(dk==2)))*(xk>t0)*wk*vk)/(sum(vk*wk*(xk<=t0)*(dk==2))*sum(vk*wk*(xk>t0)))
ap[k,j,i] = sum(wk*vk*(xk<=t0)*(dk==1)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk*wk*(dk==1))/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((xk<=t0)*vk*wk*(dk==1))
#ap2[k,j,i] = sum(wk*vk*(xk<=t0)*(dk==2)*sum.I(zk,"<=",zk,1*(xk<=t0)*vk*wk*(dk==2))/sum.I(zk,"<=",zk,vk),na.rm=T)/sum((xk<=t0)*vk*wk*(dk==2))
}
}
}
}
#use survival to find the corresponding event rate r based on t0
cumi=cuminc(stime, status)
er=timepoints(cumi, times=t0.list)
pi.list <- er$est[1,]
#p2.list <- er$est[2,]
dap_summary=matrix(0,nrow=N_j,ncol=14)
dap_summary[,1]=t0.list
dap_summary[,2]=pi.list
######summary of AP
for (j in 1:N_j){
dap_summary[j,3]<-ap[1,j,1]
dap_summary[j,4]<-max(quantile(ap[,j,1],(1-alpha)/2,na.rm=T),0)
dap_summary[j,5]<-min(quantile(ap[,j,1],(1+alpha)/2,na.rm=T),1)
dap_summary[j,6]<-ap[1,j,2]
dap_summary[j,7]<-max(quantile(ap[,j,2],(1-alpha)/2,na.rm=T),0)
dap_summary[j,8]<-min(quantile(ap[,j,2],(1+alpha)/2,na.rm=T),1)
}
#####summary of AP1-AP2#####
if((mean(ap[,,1])-mean(ap[,,2]))>=0){
dap1=ap[,,1]-ap[,,2]
flag1="AP1(t)-AP2(t)"
}else{
dap1=ap[,,2]-ap[,,1]
flag1="AP2(t)-AP1(t)"
}
for (j in 1:N_j){
dap_summary[j,9]<-dap1[1,j]
dap_summary[j,10]<-quantile(dap1[,j],(1-alpha)/2,na.rm=T)
dap_summary[j,11]<-quantile(dap1[,j],(1+alpha)/2,na.rm=T)
}
#####summary of AP1/AP2#####
if((mean(ap[,,1])/mean(ap[,,2]))>=1){
dap2=ap[,,1]/ap[,,2]
flag2="AP1(t)/AP2(t)"
}else{
dap2=ap[,,2]/ap[,,1]
flag2="AP2(t)/AP1(t)"
}
for (j in 1:N_j){
dap_summary[j,12]<-dap2[1,j]
dap_summary[j,13]<-quantile(dap2[,j],(1-alpha)/2,na.rm=T)
dap_summary[j,14]<-quantile(dap2[,j],(1+alpha)/2,na.rm=T)
}
colnames(dap_summary)<-c("t0=","event rate","AP1(t)","(L,","U)","AP2(t)","(L,","U)",flag1,"(L,","U)",flag2,"(L,","U)")
#rownames(dap_summary) = c(t0.list)
write.csv(signif(dap_summary,3),file=paste("CompareAP_Survival_dap_summary(","method=",method,",B=",B,").csv",sep=""))
return(list(dap_summary=signif(dap_summary,3)))
}
}
| /scratch/gouwar.j/cran-all/cranData/APtools/R/CompareAP.R |
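# sum.I: internal helper used throughout the APtools functions above.
# For each element y of yy it evaluates the relation "y FUN Yi" (FUN is one
# of "=", "<", "<=", ">", ">=") across all Yi and returns the count of Yi
# satisfying it; when Vi is supplied it instead returns the column-wise sums
# of Vi over those Yi, e.g. sum.I(y, "<", Yi, Vi) gives sum(Vi * (Yi > y)).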
sum.I <- function(yy,FUN,Yi,Vi=NULL){
if (FUN=="=") {
out = rank(c(Yi,yy),ties.method="f")[-c(1:length(Yi))]-rank(c(yy,Yi),ties.method="f")[1:length(yy)]
return(out)
} else {
if (FUN=="<"|FUN==">=") { yy <- -yy; Yi <- -Yi}
# for each distinct ordered failure time t[j], number of Xi < t[j]
pos <- rank(c(yy,Yi),ties.method='f')[1:length(yy)]-rank(yy,ties.method='f')
if (substring(FUN,2,2)=="=") pos <- length(Yi)-pos # number of Xi>= t[j]
if (!is.null(Vi)) {
## if FUN contains '=', tmpind is the order of decending
if(substring(FUN,2,2)=="=") tmpind <- order(-Yi) else tmpind <- order(Yi)
##Vi <- cumsum2(as.matrix(Vi)[tmpind,])
Vi <- apply(as.matrix(Vi)[tmpind,,drop=F],2,cumsum)
return(rbind(0,Vi)[pos+1,])
} else return(pos)
}
}
| /scratch/gouwar.j/cran-all/cranData/APtools/R/sum.I.R |
##################################################
#' Air Quality Evaluation
##################################################
#'
#' R AQEval: R code for the analysis of discrete
#' change in Air Quality time-series.
#'
#' @section AQEval:
#' \code{AQEval} was developed for use by those tasked with
#' the routine detection, characterisation and quantification
#' of discrete changes in air quality time-series.
#'
#' The main functions, \code{\link{quantBreakPoints}}
#' and \code{\link{quantBreakSegments}}, use
#' break-point/segment (BP/S) methods
#' based on the consecutive use of methods in the
#' \code{strucchange} and \code{segmented} \code{R} packages
#' to first detect (as break-points) and then characterise
#' and quantify (as segments) discrete changes in
#' air-quality time-series.
#'
#' \code{AQEval} functions adopt an \code{openair}-friendly
#' approach using function and data structures that many
#' in the air quality research community are already familiar
#' with.
#' Most notably, most functions expect supplied data
#' to be time-series, to be supplied as a single
#' \code{data.frame} (or similar R object), and for
#' time-series to be identified by column names.
#' The main functions are typically structured to expect
#' first the \code{data.frame}, then the name of the
#' pollutant to be used, then other arguments:
#'
#' \code{function(data, "pollutant.name", ...)}
#'
#' \code{output <- function(data, "pollutant.name", ...)}
#'
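#' For example, using the packaged example data (a minimal
#' sketch; arguments beyond the data set and pollutant name
#' are left at their defaults):
#'
#' \code{quantBreakPoints(aq.data, "no2")}
#'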
#' @seealso
#'
#' For more about data structure and an example data set,
#' see \code{\link{AQEval.data}}
#'
#' For more about the main functions, see
#' \code{\link{quantBreakPoints}}
#' and \code{\link{quantBreakSegments}}
#' @author Karl Ropkins
#' @references
#' Ropkins et al (In Prep).
#'
#' @docType package
#' @name AQEval
NULL
| /scratch/gouwar.j/cran-all/cranData/AQEval/R/AQEval-package.R |
############################################
#' @title AQEval Example data
############################################
#'
#' @name AQEval.data
#' @aliases aq.data
#' @description Data packaged with AQEval for
#' use with example code.
#' @usage
#' aq.data
#' @format (26280x6) 'tbl_df' objects
#' \describe{
#' \item{date}{Time-series of POSIX class date and time records.}
#' \item{no2}{Time-series of
#' nitrogen dioxide measurements from local site.}
#' \item{bg.no2}{Time-series of
#' nitrogen dioxide measurements from nearby
#' background site.}
#' \item{ws}{Time-series of
#' local wind speed measurements.}
#' \item{wd}{Time-series of
#' local wind direction measurements.}
#' \item{air_temp}{Time-series of
#' local air temperature measurements.}
#' }
#' @details Most of functions in \code{AQEval} adopt the
#' \code{openair} convention of assuming supplied data is
#' a single \code{data.frame} or similar.
#' The data frame was initially adopted for two reasons:
#' \itemize{
#' \item Firstly, air quality data are collected and archived
#' in numerous formats, and keeping the import requirements
#' simple minimises the frustrations associated with data
#' importation.
#' \item Secondly, restricting the user to work with a single
#' data format greatly simplifies data management for
#' those less familiar with programming environments.
#' }
#' As part of this work several \code{openair} coding
#' conventions were adopted, most importantly that data
#' sets should include a column named \code{date} of
#' \code{POSIX} class date-and-time-stamps
#' (\code{\link{DateTimeClasses}}).
#' This and other conventions, such as the use of
#' \code{ws} and \code{wd} for numeric wind speed and
#' direction data-series, and \code{site} and \code{code}
#' for character or factor monitoring site name and
#' identifier code, are now commonplace for many working
#' with R in the air quality research community, and many
#' air quality archives provide data in (or support import
#' functions that convert their own data structures to)
#' this \code{openair}-friendly structure.
#' @source Air quality and meteorological data packaged
#' for use with AQEval Examples.
#'
#' Time-series sources:
#' \itemize{
#' \item \strong{date} Date-and-time-stamp of POSIX class
#' (\code{\link{DateTimeClasses}}).
#' \item \strong{no2} Nitrogen dioxide downloaded from King's
#' College London Archive using \code{importKCL}
#' function in \code{openair}.
#' \item \strong{bg.no2} Nitrogen dioxide downloaded from
#' the Automatic Urban and Rural Network Archive using
#' \code{importAURN} function in \code{openair}.
#' \item \strong{ws}, \strong{wd}, \strong{air_temp} Wind
#' speed, wind direction and air temperature downloaded from
#' NOAA's Integrated Surface Database using \code{importNOAA}
#' function in \code{worldmet}.
#' }
#' @seealso
#' \code{\link{DateTimeClasses}}
#'
#' \code{\link{openair}}: functions \code{\link{importAURN}} and
#' \code{\link{importKCL}}
#'
#' \code{worldmet}: function \code{importNOAA} (See References)
#'
#'@references
#' Regarding \code{openair} and \code{openair}-friendly
#' data structuring, see:
#'
#' Carslaw, D. C. and K. Ropkins (2012), openair --- an
#' R package for air quality data analysis.
#' Environmental Modelling & Software. Volume 27-28,
#' 52-61,
#' DOI \doi{10.1016/j.envsoft.2011.09.008}
#'
#' Ropkins, K. and D.C. Carslaw (2012), openair-Data
#' Analysis Tools for the Air Quality Community. R Journal,
#' 4(1).
#' URL \url{https://journal.r-project.org/archive/2012/RJ-2012-003/RJ-2012-003.pdf}
#'
#' Regarding \code{worldmet}, see:
#'
#' David Carslaw (2021), worldmet: Import Surface
#' Meteorological Data from NOAA Integrated Surface
#' Database (ISD). R package version 0.9.5.
#' URL \url{https://CRAN.R-project.org/package=worldmet}
#'
#' @examples
#' #data set used in AQEval Examples
#' dim(aq.data)
#' head(aq.data)
#' with(aq.data, plot(date, no2, type="l"))
#'
"aq.data"
| /scratch/gouwar.j/cran-all/cranData/AQEval/R/AQEval.data.R |
#############################################
#misc set up
#should tidy this...
#############################################
#defined globals
#' @importFrom utils capture.output combn flush.console
#' @importFrom grDevices grey
#' @importFrom graphics abline lines par points segments
#' @importFrom stats coef dnorm fitted formula is.empty.model
#' lm.fit lm.wfit model.matrix model.offset model.response
#' model.weights pnorm pt qnorm qt quantile residuals runif
#' sd splinefun summary.glm summary.lm update update.formula
#' vcov weights
#############################################
#undefined globals
utils::globalVariables(c("end.date", "err", "freq", "my.y",
"pred", "spec", "start.date",
"splineDesign", "spline.des", "segmented",
"seg.control", "seg.lm.fit"))
##############################################
#misc sub functions
#not exported
#may change if methods change
##############################################
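# aqe_buildBreaks: wrapper around findBreakPoints; unless test = FALSE is
# passed, the candidate break-points are screened with testBreakPoints and
# only the suggested subset is returned.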
aqe_buildBreaks <- function(data, name.pol, ...){
breaks <- findBreakPoints(data, name.pol, ...)
x.args <- list(...)
if("test" %in% names(x.args) && !x.args$test){
message("Using all ", nrow(breaks), " suggested breaks",
sep="")
breaks
} else {
test <- testBreakPoints(data, name.pol, breaks)
temp <- which(test$suggest=="(<-)")
if(length(temp)<1) return(breaks)
if(all(test$breaks[temp]=="NA")) return(NULL)
temp <- as.numeric(strsplit(test$breaks[temp],
"[+]")[[1]])
message("Using ", length(temp), " of ", nrow(breaks),
" suggested breaks: ", paste(temp, collapse = ",",
sep=","), sep="")
breaks[temp,]
}
}
#this needs tidying
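# aqe_plotQuantBreakPoints: ggplot2 output for quantBreakPoints; plots the
# raw series, the fitted trend with a 95% confidence ribbon, the detected
# break-points (solid, with dotted confidence-range lines) and an optional
# user-supplied event marker.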
aqe_plotQuantBreakPoints <- function(data, name.pol, breaks,
ylab = NULL, xlab = NULL,
pt.col = c("lightgrey", "darkgrey"),
line.col = "red", break.col ="blue",
scalelabs = c("data", "trend", "break"),
event = NULL, auto.text = TRUE, ...){
#think about default declarations...
# have to match these and those in main function at moment...
if(is.null(ylab)) ylab <- name.pol
if(is.null(xlab)) xlab <- "date"
if(length(pt.col)<2) pt.col <- rep(pt.col, 2)
#plot
temp <- data
names(temp)[names(temp) == name.pol] <- "my.y"
plt <- ggplot2::ggplot(data = temp,
ggplot2::aes(x = date, y = my.y,
ymin = pred -(1.96 * err),
ymax = pred + (1.96 * err))) +
ggplot2::geom_point(col = pt.col[2],
fill = pt.col[1],
ggplot2::aes(pch = "data"),
na.rm = TRUE)
if(is.data.frame(breaks) && nrow(breaks)>0){
plt <- plt +
ggplot2::geom_vline(data=breaks,
ggplot2::aes(xintercept = temp$date[breaks[,1]],
#col="confidence",
linetype = "confidence"),
col=break.col) +
ggplot2::geom_vline(data=breaks,
ggplot2::aes(xintercept = temp$date[breaks[,2]],
#col="break",
linetype="break"),
col=break.col) +
ggplot2::geom_vline(data=breaks,
ggplot2::aes(xintercept = temp$date[breaks[,3]],
#col="confidence",
linetype = "confidence"),
col=break.col)
}
plt <- plt +
ggplot2::geom_ribbon(ggplot2::aes(fill = " confidence"),
alpha = 0.25) +
ggplot2::geom_path(ggplot2::aes(y = pred, col = " trend")) +
ggplot2::ylab(aqe_quickText(ylab, auto.text)) +
ggplot2::xlab(aqe_quickText(xlab, auto.text)) +
ggplot2::scale_shape_manual(name="",
values=c(21),
labels=c(scalelabs[1]))+
ggplot2::scale_color_manual(name="",
values=c(line.col),
labels=c(paste(" ", scalelabs[2], sep=""))) +
ggplot2::scale_fill_manual(name="",
values=c(line.col),
labels=c(" confidence")) +
ggplot2::scale_linetype_manual(name="",
values=c("solid", "dotted"),
labels=c(scalelabs[3], "confidence"))+
#ggplot2::scale_color_manual(name="breaks",
# values=c(break.col, break.col),
# labels=c("break", "confidence")) +
ggplot2::guides(
shape = ggplot2::guide_legend(order = 1),
color = ggplot2::guide_legend(order = 2),
fill = ggplot2::guide_legend(order = 3)
) +
ggplot2::theme_bw() +
ggplot2::theme(legend.position="top",
legend.spacing.x = ggplot2::unit(0, 'cm'),
axis.title.y = ggtext::element_markdown(),
axis.title.x = ggtext::element_markdown())
if(!is.null(event)){
#requested intervention marker
#(not a fan of this)
if(!is.list(event)){
warning("expecting event to be list; ignoring; see help")
} else {
event <- loa::listUpdate(list(label=NA, y=NA, x=NA, col="black",
hjust=1, line.size=0.5,
font.size=5),
event)
if(is.na(event$y)) {
ref <- ggplot2::layer_scales(plt)$y$range$range
event$y <- ((ref[2] - ref[1])*0.9) + ref[1]
}
if(is.character(event$x)){
event$x <- as.POSIXct(event$x)
}
if(!is.na(event$x)){
plt <- plt + ggplot2::geom_vline(xintercept=event$x, col=event$col,
size=event$line.size)
if(!is.na(event$label)){
#this will not work if the line is very thick...
if(event$hjust==0) {
event$label <- paste(" ", event$label, sep="")
event$label <- gsub("\n", "\n ", event$label)
}
if(event$hjust==1) {
event$label <- paste(event$label, " ", sep="")
event$label <- gsub("\n", " \n", event$label)
}
plt <- plt + ggplot2::geom_text(label=event$label, x=event$x,
y=event$y, col=event$col,
hjust = event$hjust, size=event$font.size)
}
}
}
}
#output
plt
}
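# aqe_fitBreakPointsModel: fits a piecewise-linear model by splitting the
# date axis at the supplied break-points, building one date regressor per
# segment (x1, x2, ...) and fitting these with lm().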
aqe_fitBreakPointsModel <- function(data, name.pol, breaks){
bpts <- breaks$bpt
#############################
#this needs thinking about
#assumes data regular???
#############################
counter <- 1:nrow(data)
counter <- ifelse(counter %in% bpts, 1, 0)
counter <- factor(paste("SGM", cumsum(counter) + 1, sep = ""))
data$counter <- counter
counter2 <- 1:length(levels(counter))
ref <- paste("x", counter2, sep = "")
for (i in 1:length(counter2)) {
ii <- levels(counter)[i]
p <- rep(0, nrow(data))
p[data$counter == ii] <- data$date[data$counter == ii]
data[, ref[i]] <- p
}
ff <- paste(name.pol, "~", sep = "")
ff <- as.formula(paste(ff, paste(ref, collapse = "+"), sep = ""))
#output
lm(ff, data = data)
}
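# aqe_makeBreakPointsReport: builds a one-row-per-break-point summary table
# (break date and confidence range, concentrations immediately before and
# after, absolute and percentage change, plus upper/lower 95% confidence
# variants of each).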
aqe_makeBreakPointsReport <- function(data, breaks){
bpts <- breaks$bpt
if (length(bpts) > 0) {
ls <- lapply(1:length(bpts), function(i) {
temp <- data[(bpts[i] - 1):(bpts[i]), c("pred", "err")]
temp <- as.data.frame(temp)
out <- data.frame(date = data$date[bpts[i]],
date.low = data$date[breaks[i,1]],
date.high = data$date[breaks[i,3]],
c0 = signif(temp[1,1], 4),
c1 = signif(temp[2, 1], 4),
c.delta = signif(temp[2,1] - temp[1, 1], 4),
per.delta = signif(((temp[2,1] - temp[1, 1])/temp[1, 1]) * 100, 2))
temp[1, 1] <- temp[1, 1] + (1.96 * temp[1, 2])
temp[2, 1] <- temp[2, 1] - (1.96 * temp[2, 2])
out <- cbind(out, data.frame(upper.c0 = signif(temp[1, 1], 4),
upper.c1 = signif(temp[2, 1], 4),
upper.c.delta = signif(temp[2, 1] - temp[1, 1], 4),
upper.per.delta = signif(((temp[2, 1] - temp[1, 1])/temp[1, 1]) * 100, 2)))
temp <- data[(bpts[i] - 1):(bpts[i]), c("pred", "err")]
temp <- as.data.frame(temp)
temp[1, 1] <- temp[1, 1] - (1.96 * temp[1, 2])
temp[2, 1] <- temp[2, 1] + (1.96 * temp[2, 2])
cbind(out, data.frame(lower.c0 = signif(temp[1, 1], 4),
lower.c1 = signif(temp[2, 1], 4),
lower.c.delta = signif(temp[2, 1] - temp[1, 1], 4),
lower.per.delta = signif(((temp[2, 1] - temp[1, 1])/temp[1, 1]) * 100, 2)))
})
do.call(rbind, ls)
}
else {
NULL
}
}
aqe_summariseBreakPointsReport <- function(report){
if (is.null(report)) {
message("no break points declared...")
}
else {
for (i in 1:nrow(report)) {
message("\n", as.character(report[i, 1]), " (",
as.character(report[i, 2]), " to ",
as.character(report[i, 3]), ")",
sep = "")
message(report[i, 4], "->", report[i, 5], ";",
report[i, 6], " (", report[i, 7], "%)", sep = "")
#########################
#to do
#########################
#include option for old update
}
}
}
#this is older version of above
#replace to make outputs more consistent
#but could re-introduce but would probably
#want to have similar option for quantBreakSegments
aqe_summariseBreakPointsReport.old <- function(report){
if (is.null(report)) {
message("no breakpoints declared...")
}
else {
for (i in 1:nrow(report)) {
message("\n", as.character(report[i, 1]), "(",
as.character(report[i, 2]), "->",
as.character(report[i, 3]), ")",
sep = "")
message(report[i, 4], "->", report[i, 5], ";",
report[i, 6], " (", report[i, 7], "%)\n", sep = "")
message("[Upper] ", report[i, 8], "->", report[i, 9],
";", report[i, 10], " (", report[i, 11],
"%)", sep = "")
message("[Lower]", report[i, 12], "->",
report[i, 13], ";", report[i, 14], " (",
report[i, 15],
"%)", sep = "")
}
}
}
##############################
#new / not fully tested
#this is local alternative to my openair::quickText
#needed this because that does not play nicely with
#multi-line axis labels and ggplot2
#(folks seem to want both...)
#
#notes
#this needs ggtext::element_markdown() in ggplot theme
#or similar..
#seems to be a character spacing issue in rstudio
#console outputs but not r console or studio
#markdown...
#raised with ggtext admin
#[link]
#needs graphics drivers sorted... IT for admin systems...
##############################
aqe_quickText <- function (text, auto.text = TRUE)
{
#openair::quicktext alternative for aqe
#needed because (1) openair quicktext cannot cope
#with super and subscripts and multiple lines of text
#in ggplots...
#and we are regularly going to three lines for
#some report figures
#this uses ggtext
#need to add ggtext to imports...
#chasing something like...
#plt + xlab("") +
# ylab("top line<br>NO<sub>2</sub><br>[μg.m<sup>-3</sup>]") +
# theme(axis.title.y = element_markdown())
if (!auto.text)
return(ans <- text)
#currently based on openair quicktext
#check can we make gsub case non-sensitive
#without big speed penalty???
ans <- text
ans <- gsub("NO2", "NO<sub>2</sub>", ans)
ans <- gsub("no2", "NO<sub>2</sub>", ans)
ans <- gsub("NOX", "NO<sub>x</sub>", ans)
ans <- gsub("nox", "NO<sub>x</sub>", ans)
ans <- gsub("NOx", "NO<sub>x</sub>", ans)
ans <- gsub("NH3", "NH<sub>3</sub>", ans)
ans <- gsub("nh3", "NH<sub>3</sub>", ans)
ans <- gsub("co ", "CO ", ans)
ans <- gsub("co,", "CO,", ans)
ans <- gsub("nmhc", "NHHC", ans)
ans <- if (nchar(as.character(text)) == 2 && length(grep("ws",
text)) > 0) {
gsub("ws", "wind spd.", ans)
}
else {
ans
}
ans <- gsub("wd", "wind dir.", ans)
ans <- gsub("rh ", "relative humidity ",
ans)
ans <- gsub("PM10", "PM<sub>10</sub>", ans)
ans <- gsub("pm10", "PM<sub>10</sub>", ans)
ans <- gsub("pm1", "PM<sub>1</sub>", ans)
ans <- gsub("PM1", "PM<sub>1</sub>", ans)
ans <- gsub("PM4", "PM<sub>4</sub>", ans)
ans <- gsub("pm4", "PM<sub>4</sub>", ans)
ans <- gsub("PMtot", "PM<sub>total</sub>", ans)
ans <- gsub("pmtot", "PM<sub>total</sub>", ans)
ans <- gsub("pmc", "PM<sub>coarse</sub>", ans)
ans <- gsub("pmcoarse", "PM<sub>coarse</sub>",
ans)
ans <- gsub("PMc", "PM<sub>coarse</sub>", ans)
ans <- gsub("PMcoarse", "PM<sub>coarse</sub>",
ans)
ans <- gsub("pmf", "PM<sub>fine</sub>", ans)
ans <- gsub("pmfine", "PM<sub>fine</sub>", ans)
ans <- gsub("PMf", "PM<sub>fine</sub>", ans)
ans <- gsub("PMfine", "PM<sub>fine</sub>", ans)
ans <- gsub("PM2.5", "PM<sub>2.5</sub>", ans)
ans <- gsub("pm2.5", "PM<sub>2.5</sub>", ans)
ans <- gsub("pm25", "PM<sub>2.5</sub>", ans)
ans <- gsub("PM2.5", "PM<sub>2.5</sub>", ans)
ans <- gsub("PM25", "PM<sub>2.5</sub>", ans)
ans <- gsub("pm25", "PM<sub>2.5</sub>", ans)
ans <- gsub("O3", "O<sub>3</sub>", ans)
ans <- gsub("o3", "O<sub>3</sub>", ans)
ans <- gsub("ozone", "O<sub>3</sub>", ans)
ans <- gsub("CO2", "CO<sub>2</sub>", ans)
ans <- gsub("co2", "CO<sub>2</sub>", ans)
ans <- gsub("SO2", "SO<sub>2</sub>", ans)
ans <- gsub("so2", "SO<sub>2</sub>", ans)
ans <- gsub("H2S", "H<sub>2</sub>S", ans)
ans <- gsub("h2s", "H<sub>2</sub>S", ans)
ans <- gsub("CH4", "CH<sub>4</sub>", ans)
ans <- gsub("ch4", "CH<sub>4</sub>", ans)
ans <- gsub("dgrC", "<sup>o</sup>C", ans)
ans <- gsub("degreeC", "<sup>o</sup>C",
ans)
ans <- gsub("deg. C", "<sup>o</sup>C", ans)
ans <- gsub("degreesC", "<sup>o</sup>C",
ans)
# ans <- gsub("degrees", "' * degree *'", ans)
# ans <- gsub("Delta", "' * Delta *'", ans)
# ans <- gsub("delta", "' * Delta *'", ans)
ans <- gsub("ug/m3", "μg.m<sup>-3</sup>",
ans)
ans <- gsub("ug.m-3", "μg.m<sup>-3</sup>",
ans)
ans <- gsub("ug m-3", "μg.m<sup>-3</sup>",
ans)
ans <- gsub("ugm-3", "μg.m<sup>-3</sup>",
ans)
ans <- gsub("mg/m3", "mg.m<sup>-3</sup>",
ans)
ans <- gsub("mg.m-3", "mg.m<sup>-3</sup>",
ans)
ans <- gsub("mg m-3", "mg.m<sup>-3</sup>",
ans)
ans <- gsub("mgm-3", "mg.m<sup>-3</sup>",
ans)
ans <- gsub("ng/m3", "ng.m<sup>-3</sup>",
ans)
ans <- gsub("ng.m-3", "ng.m<sup>-3</sup>",
ans)
ans <- gsub("ng m-3", "ng.m<sup>-3</sup>",
ans)
ans <- gsub("ngm-3", "ng.m<sup>-3</sup>",
ans)
ans <- gsub("m/s2", "m.s<sup>-2</sup>", ans)
ans <- gsub("m/s", "m.s<sup>-1</sup>", ans)
ans <- gsub("m.s-1", "m.s<sup>-1</sup>", ans)
ans <- gsub("m s-1", "m.s<sup>-1</sup>", ans)
ans <- gsub("g/km", "g.km<sup>-1</sup>", ans)
ans <- gsub("g/s", "g.s<sup>-1</sup>", ans)
ans <- gsub("kW/t", "kW.t<sup>-1</sup>", ans)
ans <- gsub("g/hour", "g.hour<sup>-1</sup>", ans)
ans <- gsub("g/hr", "g.hour<sup>-1</sup>", ans)
ans <- gsub("g/m3", "g.m<sup>-3</sup>", ans)
ans <- gsub("g/kg", "g.kg<sup>-1</sup>", ans)
ans <- gsub("km/hr/s", "km.hour<sup>-1</sup>s<sup>-1</sup>",
ans)
ans <- gsub("km/hour/s", "km.hour<sup>-1</sup>s<sup>-1</sup>",
ans)
ans <- gsub("km/h/s", "km.hour<sup>-1</sup>s<sup>-1</sup>",
ans)
ans <- gsub("km/hr", "km.hour<sup>-1", ans)
ans <- gsub("km/h", "km.hour<sup>-1", ans)
ans <- gsub("km/hour", "km.hour<sup>-1", ans)
ans <- gsub("r2", "R<sup>2", ans)
ans <- gsub("R2", "R<sup>2", ans)
#ans <- gsub("tau ", "' * tau * '", ans)
#ans <- gsub("umol/m2/s", "' * mu * 'mol m' ^-2 * ' s' ^-1 *'",
# ans)
#ans <- gsub("umol/m2", "' * mu * 'mol m' ^-2 *'",
# ans)
ans <- gsub("\n", "<br>", ans)
ans
}
##############################
#break-segments
##############################
aqe_makeBreakSegmentsReport <- function(data, segments){
if(!is.null(segments) && nrow(segments)>0){
#this needs tidying
#err ranges need calculating
data.frame(s1.date1 = data$date[segments[, 2]],
s1.date2 = data$date[segments[, 5]],
s1.date.delta = data$date[segments[, 5]] -
data$date[segments[, 2]],
s1.c0 = data$pred[segments[, 2]],
s1.c1 = data$pred[segments[, 5]],
s1.c.delta = data$pred[segments[, 5]] -
data$pred[segments[, 2]],
s1.per.delta = (data$pred[segments[, 5]] -
data$pred[segments[, 2]]) /
data$pred[segments[, 2]] * 100,
#################################
#to do
#################################
#diffs to calculate/report
#confidences to report
stringsAsFactors = FALSE)
} else { NULL }
}
aqe_plotQuantBreakSegments01 <- function(data, name.pol, segments,
ylab = NULL, xlab = NULL,
pt.col = c("lightgrey", "darkgrey"),
line.col = "red", break.col ="blue",
scalelabs = c("data", "trend",
"change"),
event = NULL,
auto.text=TRUE, ...){
#using plotQuantBreakPoints
if(!is.null(segments) && nrow(segments)>0){
temp <- segments[,4:6]
names(temp) <- names(segments[,1:3])
segments <- rbind(segments[,1:3], temp)
names(segments) <- c("lower", "bpt", "upper")
if(any(segments<2)){
segments[segments<2] <-2
warning("quantBreakSegments: break(s) too near '", name.pol,
"' start...\n\tTried to fix but strongly recommend re-running ",
"\n\tfindBreakPoints with different range\n",
call.=FALSE)
}
}
aqe_plotQuantBreakPoints(data, name.pol, segments,
ylab = ylab, xlab = xlab,
pt.col = pt.col, line.col = line.col,
break.col = break.col,
scalelabs = scalelabs,
event=event,
auto.text=auto.text, ...)
}
aqe_plotQuantBreakSegments02 <- function(data, name.pol, segments,
ylab = NULL, xlab = NULL,
pt.col = c("lightgrey", "darkgrey"),
line.col = "red", break.col ="blue",
scalelabs = c("data", "trend",
"change"),
event=NULL,
auto.text=TRUE, ...){
#using plotQuantBreakPoints
if(!is.null(segments) && nrow(segments)>0){
segments <- segments[2:nrow(segments),1:3]
names(segments) <- c("lower", "bpt", "upper")
if(any(is.na(segments))) {
if(any(is.na(segments[,1]))){
segments[,1] <- ifelse(is.na(segments[,1]), 1, segments[,1])
}
if(any(is.na(segments[, ncol(segments)]))){
segments[,ncol(segments)] <- ifelse(is.na(segments[,ncol(segments)]),
nrow(data),
segments[,ncol(segments)])
}
if(any(is.na(segments)))
warning("quantBreakSegments: found suspect segment(s)...'",
"\n\tTried to fix but failed...\n",
call.=FALSE)
}
if(any(segments<2)){
segments[segments<2] <-2
warning("quantBreakSegments: break(s) too near '", name.pol,
"' start...\n\tTried to fix but strongly recommend re-running ",
"\n\tfindBreakPoints with different range\n",
call.=FALSE)
}
}
aqe_plotQuantBreakPoints(data, name.pol, segments,
ylab = ylab, xlab = xlab,
pt.col = pt.col, line.col = line.col,
break.col = break.col,
scalelabs = scalelabs,
event=event,
auto.text=auto.text)
}
aqe_summariseBreakSegmentsReport <- function(report){
if (is.null(report)) {
message("no change ranges declared...")
}
else {
message("building ", nrow(report), " segments")
for (i in 1:nrow(report)) {
message("\n", as.character(report[i, 1]), " to ",
as.character(report[i, 2]), " (",
as.character(report[i, 3]), ")",
sep = "")
message(signif(report[i, 4], 4), "->",
signif(report[i, 5], 4), ";",
signif(report[i, 6], 4), " (",
signif(report[i, 7], 4), "%)", sep = "")
#########################
#to do
#########################
#report confidences and diffs?
#need to agree method...
}
}
}
aqe_fitBreakSegmentsModel01 <- function(data, name.pol, breaks){
#function in quantBreakPoints
mod <- aqe_fitBreakPointsModel(data, name.pol, breaks)
data$pred <- rep(NA, nrow(data))
data$err <- data$pred
ans <- predict(mod, se.fit = TRUE)
data$pred[as.numeric(names(ans$fit))] <- ans$fit
data$err[as.numeric(names(ans$fit))] <- ans$se.fit
if(!is.null(breaks) && nrow(breaks)>0){
for(i in 1:nrow(breaks)){
temp <- data$pred[c(breaks[i,1]:breaks[i,3])]
data$pred[c(breaks[i,1]:breaks[i,3])] <-
seq(temp[1], temp[length(temp)], length.out = length(temp))
}
}
data$count <- as.numeric(data$date)
data$count <- (data$count - min(data$count, na.rm=TRUE)) + 1
ff <- as.formula(paste(name.pol, "~pred", sep=""))
lm(ff, data=data)
}
aqe_makeSegmentsFromBreaks01 <- function(breaks){
#make segments
segments <- NULL
if(!is.null(breaks) && nrow(breaks)>0){
segments <- data.frame(seg.1.low=breaks$lower -
(breaks$bpt-breaks$lower),
seg.1=breaks$lower,
seg.1.high=breaks$bpt,
seg.2.low=breaks$bpt,
seg.2=breaks$upper,
seg.2.high=breaks$upper +
(breaks$upper-breaks$bpt),
stringsAsFactors = FALSE)
}
segments
}
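#illustrative sketch (not run): one break at rows 100/120/140
#(lower/bpt/upper) gives two segments, each padded outwards by the
#break's confidence range
# aqe_makeSegmentsFromBreaks01(data.frame(lower=100, bpt=120, upper=140))
# # seg.1.low seg.1 seg.1.high seg.2.low seg.2 seg.2.high
# #        80   100        120       120   140        160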
#fitBreakSegmentsModel02
aqe_fitBreakSegmentsModel02 <- function(data, name.pol, breaks,
seg.seed = 12345){
#reinstate iterative segmented as method 02
#(needs extra stops for out of range cases)
#(might need change of outputs for method 01)
temp <- as.data.frame(data)[c("date", name.pol)]
#not happy with this naming strategy
temp$..ref <- 1:nrow(temp)
temp$..d.prop <- as.numeric(temp$date)
temp$..d.prop <- temp$..d.prop - min(temp$..d.prop,
na.rm=TRUE)
temp$..d.prop <- temp$..d.prop/max(temp$..d.prop,
na.rm=TRUE)
ff <- as.formula(paste(name.pol, "..d.prop", sep="~"))
#print("fit")
#############################
mod <- lm(ff, temp)
#print("post-fit")
#####################
#send back if not breaks
#####################
if(is.null(breaks) || nrow(breaks)<1){
return(list(mod=mod, segments=NULL))
}
#####################
#segs0 <- temp$..d.prop[sort(as.vector(unlist(breaks[rep(c(2),2)])))]
#segsd <- temp$..d.prop[sort(as.vector(unlist(breaks[c(1,3)])))]
segs0 <- temp$..d.prop[as.vector(unlist(breaks[rep(c(2),2)]))]
segsd <- temp$..d.prop[as.vector(unlist(breaks[c(1,3)]))]
segs0 <- segs0[order(segsd)]
segsd <- segsd[order(segsd)]
segsd <- segs0-segsd
test <- do.call(expand.grid, rep(list(c(0.25,1,1.75)),
length(segs0)))
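  #starting-value grid: each break is tried at 0.25x, 1x and 1.75x of
  #its confidence-interval offset; the candidate fit with the best
  #adjusted r-squared is kept below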
ref <- -10
smod <- NULL
for(i in 1:nrow(test)){
#print("start")
#######################
ttest <- as.vector(unlist(test[i,]))
segs <- segs0 + (segsd * ttest)
segs <- segs[order(segs)]
#segs <- sort(segs)
if(any(segs<0.01)){
segs[segs<0.01] <- 0.01
}
if(any(segs>0.99)){
segs[segs>0.99] <- 0.99
}
#print("this fit")
#######################
log <- capture.output({
#tmod <- try(suppressWarnings(
# local_segmented(mod, seg.Z=~..d.prop, psi=segs,
# control=local_seg.control(it.max=1, n.boot=0))
# ), silent=FALSE)
tmod <- try(suppressWarnings(
local_segmented(mod, seg.Z=~..d.prop, psi=segs,
control=local_seg.control(it.max=1, n.boot=0,
seed=seg.seed))
), silent=TRUE)
})
#print(class(tmod))
###########################
if(class(tmod)[1]!="try-error"){
ans <- try(suppressWarnings(local_summary.segmented(tmod)$adj.r.squared),
silent=TRUE)
#print(ans)
#print(ref)
############################
if(class(ans)[1]!="try-error" && ans>ref){
ref <- ans
smod <- tmod
}
}
#print("end")
#######################
}
#if no segmented model built...
#fault 1: sot, thomas lewin, bpt 4 at end...
if(is.null(smod)) {
stop(paste("quantBreakSegments(): segmented model trips",
"\n\t(close to end break?)", sep=""),
call. = TRUE)
#return(list(mod=mod, segments=NULL))
}
segs <- local_confint.segmented(smod)
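  #map the psi estimates and confidence limits back from the 0-1
  #date scale to row indices by linear interpolation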
segs.low <- round(approx(temp$..d.prop, temp$..ref, segs[,2])$y)
segs.hi <- round(approx(temp$..d.prop, temp$..ref, segs[,3])$y)
segs <- round(approx(temp$..d.prop, temp$..ref, segs[,1])$y)
segments <- data.frame(
seg.str.low = c(1, segs.low),
seg.str = c(1, segs),
seg.str.hi = c(1, segs.hi),
seg.end.low = c(segs.low, nrow(data)),
seg.end = c(segs, nrow(data)),
seg.end.hi = c(segs.hi, nrow(data))
)
#this returns mod and segments
#print("here")
#######################
list(mod=smod, segments=segments)
}
local_summary.segmented <-
function (object, short = FALSE, var.diff = FALSE, p.df = "p",
.vcov = NULL, ...)
{
#print("lss start")
#######################
if (is.null(object$psi))
object <- object[[length(object)]]
if (!is.null(.vcov))
var.diff <- FALSE
if (var.diff && length(object$nameUV$Z) > 1) {
var.diff <- FALSE
warning(" 'var.diff' set to FALSE with multiple segmented variables",
call. = FALSE)
}
nomiU <- object$nameUV$U
nomiV <- object$nameUV$V
idU <- match(nomiU, names(coef(object)[!is.na(coef(object))]))
idV <- match(nomiV, names(coef(object)[!is.na(coef(object))]))
beta.c <- coef(object)[nomiU]
#print("lss model")
#################
if ("segmented.default" == as.character(object$call)[1]) {
summ <- c(summary(object, ...), object["psi"])
summ[c("it", "epsilon")] <- object[c("it", "epsilon")]
return(summ)
}
if ("lm" %in% class(object) && !"glm" %in% class(object)) {
summ <- c(summary.lm(object, ...), object["psi"])
summ$Ttable <- summ$coefficients
if (var.diff) {
Qr <- object$qr
p <- object$rank
p1 <- 1L:p
inv.XtX <- chol2inv(Qr$qr[p1, p1, drop = FALSE])
X <- qr.X(Qr, FALSE)
attr(X, "assign") <- NULL
K <- length(unique(object$id.group))
dev.new <- tapply(object$residuals, object$id.group,
function(.x) {
sum(.x^2)
})
summ$df.new <- tapply(object$residuals, object$id.group,
function(.x) {
(length(.x) - eval(parse(text = p.df)))
})
if (any(summ$df.new <= 0))
stop("nonpositive df when computig the group-specific variances.. reduce 'p.df'?",
call. = FALSE)
summ$sigma.new <- sqrt(dev.new/summ$df.new)
sigma.i <- rowSums(model.matrix(~0 + factor(object$id.group)) %*%
diag(summ$sigma.new))
var.b <- inv.XtX %*% crossprod(X * sigma.i) %*%
inv.XtX
dimnames(var.b) <- dimnames(summ$cov.unscaled)
summ$cov.var.diff <- var.b
summ$Ttable[, 2] <- sqrt(diag(var.b))
summ$Ttable[, 3] <- summ$Ttable[, 1]/summ$Ttable[,
2]
summ$Ttable[, 4] <- 2 * pt(abs(summ$Ttable[, 3]),
df = object$df.residual, lower.tail = FALSE)
dimnames(summ$Ttable) <- list(names(object$coefficients)[Qr$pivot[p1]],
c("Estimate", "Std. Error", "t value", "Pr(>|t|)"))
}
if (!is.null(.vcov)) {
summ$Ttable[, 2] <- sqrt(diag(.vcov))
summ$Ttable[, 3] <- summ$Ttable[, 1]/summ$Ttable[,
2]
summ$Ttable[, 4] <- 2 * pt(abs(summ$Ttable[, 3]),
df = object$df.residual, lower.tail = FALSE)
}
summ$Ttable[idU, 4] <- NA
summ$Ttable <- summ$Ttable[-idV, ]
summ[c("it", "epsilon", "conv.warn")] <- object[c("it",
"epsilon", "id.warn")]
summ$var.diff <- var.diff
summ$short <- short
class(summ) <- c("summary.segmented", "summary.lm")
#print("lss stop")
#######################
return(summ)
}
if (inherits(object, "glm")) {
summ <- c(summary.glm(object, ...), object["psi"])
summ$Ttable <- summ$coefficients[-idV, ]
summ$Ttable[idU, 4] <- NA
summ[c("it", "epsilon", "conv.warn")] <- object[c("it",
"epsilon", "id.warn")]
summ$short <- short
class(summ) <- c("summary.segmented", "summary.glm")
return(summ)
}
if ("Arima" %in% class(object)) {
coeff <- object$coef
v <- sqrt(diag(object$var.coef))
Ttable <- cbind(coeff[-idV], v[-idV], coeff[-idV]/v[-idV])
colnames(Ttable) <- c("Estimate", "Std. Error", "t value")
object$Ttable <- Ttable
object$short <- short
summ <- object
summ[c("it", "epsilon", "conv.warn")] <- object[c("it",
"epsilon", "id.warn")]
class(summ) <- c("summary.segmented", "summary.Arima")
return(summ)
}
}
#######################################
#local versions of segmented functions
#######################################
#added while looking at changes
#segmented 1.3 -> 1.4
######################################
#note:
#segmented sets the random seed, which
#R Core does not like...
#######################################
#source:
#https://CRAN.R-project.org/package=segmented
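#here 'seed' is exposed as a local_seg.control argument so callers
#(e.g. aqe_fitBreakSegmentsModel02) can set it explicitly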
local_segmented <-
function (obj, seg.Z, psi, npsi, fixed.psi = NULL, control = local_seg.control(),
model = TRUE, keep.class = FALSE, ...)
{
#print("ls start")
#######################
build.all.psi <- function(psi, fixed.psi) {
all.names.psi <- union(names(psi), names(fixed.psi))
all.psi <- vector("list", length = length(all.names.psi))
names(all.psi) <- all.names.psi
for (i in names(all.psi)) {
if (!is.null(psi[[i]])) {
psi[[i]] <- sort(psi[[i]])
names(psi[[i]]) <- paste("U", 1:length(psi[[i]]),
".", i, sep = "")
}
if (!is.null(fixed.psi[[i]])) {
fixed.psi[[i]] <- sort(fixed.psi[[i]])
names(fixed.psi[[i]]) <- paste("U", 1:length(fixed.psi[[i]]),
".fixed.", i, sep = "")
}
            all.psi[[i]] <- sort(c(psi[[i]], fixed.psi[[i]]))
}
return(all.psi)
}
if (missing(seg.Z)) {
if (length(all.vars(formula(obj))) == 2)
seg.Z <- as.formula(paste("~", all.vars(formula(obj))[2]))
else stop("please specify 'seg.Z'")
}
n.Seg <- length(all.vars(seg.Z))
id.npsi <- FALSE
if ("V" %in% sub("V[1-9]*[0-9]", "V", c(all.vars(seg.Z),
all.vars(formula(obj))[-1])))
stop("variable names 'V', 'V1', .. are not allowed")
if ("U" %in% sub("U[1-9]*[0-9]", "U", c(all.vars(seg.Z),
all.vars(formula(obj))[-1])))
stop("variable names 'U', 'U1', .. are not allowed")
if (any(c("$", "[") %in% all.names(seg.Z)))
stop(" '$' or '[' not allowed in 'seg.Z' ")
if (missing(psi)) {
if (n.Seg == 1) {
if (missing(npsi))
npsi <- 1
npsi <- lapply(npsi, function(.x) .x)
if (length(npsi) != length(all.vars(seg.Z)))
stop("seg.Z and npsi do not match")
names(npsi) <- all.vars(seg.Z)
}
else {
if (missing(npsi)) {
npsi <- rep(1, n.Seg)
names(npsi) <- all.vars(seg.Z)
}
if (length(npsi) != n.Seg)
stop(" 'npsi' and seg.Z should have the same length")
if (!all(names(npsi) %in% all.vars(seg.Z)))
stop(" names in 'npsi' and 'seg.Z' do not match")
}
psi <- lapply(npsi, function(.x) rep(NA, .x))
id.npsi <- TRUE
}
else {
if (n.Seg == 1) {
if (!is.list(psi)) {
psi <- list(psi)
names(psi) <- all.vars(seg.Z)
}
}
else {
if (!is.list(psi))
stop("with multiple terms in `seg.Z', `psi' should be a named list")
if (n.Seg != length(psi))
stop("A wrong number of terms in `seg.Z' or `psi'")
if (!all(names(psi) %in% all.vars(seg.Z)))
stop("Names in `seg.Z' and `psi' do not match")
}
}
fc <- min(max(abs(control$fc), 0.8), 1)
min.step <- control$min.step
alpha <- control$alpha
it.max <- old.it.max <- control$it.max
digits <- control$digits
toll <- control$toll
if (toll < 0)
stop("Negative tolerance ('tol' in seg.control()) is meaningless",
call. = FALSE)
visual <- control$visual
stop.if.error <- control$stop.if.error
    fix.npsi <- control$fix.npsi
if (!is.null(stop.if.error)) {
warning(" Argument 'stop.if.error' is working, but will be removed in the next releases. Please use 'fix.npsi' for the future..")
}
else {
stop.if.error <- fix.npsi
}
break.boot = control$break.boot
n.boot <- control$n.boot
size.boot <- control$size.boot
gap <- control$gap
random <- control$random
pow <- control$pow
conv.psi <- control$conv.psi
visualBoot <- FALSE
if (n.boot > 0) {
if (!is.null(control$seed)) {
set.seed(control$seed)
employed.Random.seed <- control$seed
}
else {
employed.Random.seed <- eval(parse(text = paste(sample(0:9,
size = 6), collapse = "")))
set.seed(employed.Random.seed)
}
if (visual) {
visual <- FALSE
visualBoot <- TRUE
}
}
last <- control$last
K <- control$K
h <- control$h
orig.call <- Call <- mf <- obj$call
orig.call$formula <- mf$formula <- formula(obj)
m <- match(c("formula", "data", "subset", "weights", "na.action",
"offset"), names(mf), 0L)
mf <- mf[c(1, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- as.name("model.frame")
if (class(mf$formula)[1] == "name" && !"~" %in% paste(mf$formula))
mf$formula <- eval(mf$formula)
mfExt <- mf
mf$formula <- update.formula(mf$formula, paste(seg.Z, collapse = ".+"))
if (!is.null(obj$call$offset) || !is.null(obj$call$weights) ||
!is.null(obj$call$subset)) {
mfExt$formula <- update.formula(mf$formula, paste(".~.+",
paste(c(all.vars(obj$call$offset), all.vars(obj$call$weights),
all.vars(obj$call$subset)), collapse = "+")))
}
mf <- eval(mf, parent.frame())
n <- nrow(mf)
nomiOff <- setdiff(all.vars(formula(obj)), names(mf))
if (length(nomiOff) >= 1)
mfExt$formula <- update.formula(mfExt$formula, paste(".~.+",
paste(nomiOff, collapse = "+"), sep = ""))
nomiTUTTI <- all.vars(mfExt$formula)
nomiNO <- NULL
for (i in nomiTUTTI) {
r <- try(eval(parse(text = i), parent.frame()), silent = TRUE)
if (class(r)[1] != "try-error" && length(r) == 1 && !is.function(r) &&
!i %in% names(mf))
nomiNO[[length(nomiNO) + 1]] <- i
}
if (!is.null(nomiNO))
mfExt$formula <- update.formula(mfExt$formula, paste(".~.-",
paste(nomiNO, collapse = "-"), sep = ""))
mfExt <- eval(mfExt, parent.frame())
weights <- as.vector(model.weights(mf))
offs <- as.vector(model.offset(mf))
mt <- attr(mf, "terms")
interc <- attr(mt, "intercept")
y <- model.response(mf, "any")
XREG <- if (!is.empty.model(mt))
model.matrix(mt, mf, obj$contrasts)
namesXREG0 <- colnames(XREG)
nameLeftSlopeZero <- setdiff(all.vars(seg.Z), names(coef(obj)))
namesXREG0 <- setdiff(namesXREG0, nameLeftSlopeZero)
id.duplic <- match(all.vars(formula(obj)), all.vars(seg.Z),
nomatch = 0) > 0
if (any(id.duplic)) {
new.mf <- mf[, all.vars(formula(obj))[id.duplic], drop = FALSE]
new.XREGseg <- data.matrix(new.mf)
XREG <- cbind(XREG, new.XREGseg)
}
n.psi <- length(unlist(psi))
id.n.Seg <- (ncol(XREG) - n.Seg + 1):ncol(XREG)
XREGseg <- XREG[, id.n.Seg, drop = FALSE]
XREG <- XREG[, match(c("(Intercept)", namesXREG0), colnames(XREG),
nomatch = 0), drop = FALSE]
XREG <- XREG[, unique(colnames(XREG)), drop = FALSE]
n <- nrow(XREG)
Z <- lapply(apply(XREGseg, 2, list), unlist)
name.Z <- names(Z) <- colnames(XREGseg)
if (length(Z) == 1 && is.vector(psi) && (is.numeric(psi) ||
is.na(psi))) {
psi <- list(as.numeric(psi))
names(psi) <- name.Z
}
if (!is.list(Z) || !is.list(psi) || is.null(names(Z)) ||
is.null(names(psi)))
stop("Z and psi have to be *named* list")
id.nomiZpsi <- match(names(Z), names(psi))
if ((length(Z) != length(psi)) || any(is.na(id.nomiZpsi)))
stop("Length or names of Z and psi do not match")
nome <- names(psi)[id.nomiZpsi]
psi <- psi[nome]
if (id.npsi) {
for (i in 1:length(psi)) {
K <- length(psi[[i]])
if (any(is.na(psi[[i]])))
psi[[i]] <- if (control$quant) {
quantile(Z[[i]], prob = seq(0, 1, l = K +
2)[-c(1, K + 2)], names = FALSE)
}
else {
(min(Z[[i]]) + diff(range(Z[[i]])) * (1:K)/(K +
1))
}
}
}
else {
for (i in 1:length(psi)) {
if (any(is.na(psi[[i]])))
psi[[i]] <- if (control$quant) {
quantile(Z[[i]], prob = seq(0, 1, l = K +
2)[-c(1, K + 2)], names = FALSE)
}
else {
(min(Z[[i]]) + diff(range(Z[[i]])) * (1:K)/(K +
1))
}
}
}
id.psi.fixed <- FALSE
if (!is.null(fixed.psi)) {
id.psi.fixed <- TRUE
if (is.numeric(fixed.psi) && n.Seg == 1) {
fixed.psi <- list(fixed.psi)
names(fixed.psi) <- all.vars(seg.Z)
}
if (is.list(fixed.psi)) {
if (!(names(fixed.psi) %in% all.vars(seg.Z)))
stop("names(fixed.psi) is not a subset of variables in 'seg.Z' ")
}
else {
stop(" 'fixed.psi' has to be a named list ")
}
fixed.psi <- lapply(fixed.psi, sort)
Zfixed <- matrix(unlist(mapply(function(x, y) rep(x,
y), Z[names(fixed.psi)], sapply(fixed.psi, length),
SIMPLIFY = TRUE)), nrow = n)
n.fixed.psi <- sapply(fixed.psi, length)
rip.nomi <- rep(names(fixed.psi), n.fixed.psi)
rip.numeri <- unlist(lapply(n.fixed.psi, function(.x) 1:.x))
colnames(Zfixed) <- paste("U", rip.numeri, ".fixed.",
rip.nomi, sep = "")
PSI <- matrix(unlist(fixed.psi), ncol = ncol(Zfixed),
nrow = n, byrow = TRUE)
fixedU <- (Zfixed - PSI) * (Zfixed > PSI)
XREG <- cbind(XREG, fixedU)
}
initial.psi <- psi
a <- sapply(psi, length)
id.psi.group <- rep(1:length(a), times = a)
Z <- matrix(unlist(mapply(function(x, y) rep(x, y), Z, a,
SIMPLIFY = TRUE)), nrow = n)
psi <- unlist(psi)
psi <- unlist(tapply(psi, id.psi.group, sort))
k <- ncol(Z)
PSI <- matrix(rep(psi, rep(n, k)), ncol = k)
c1 <- apply((Z <= PSI), 2, all)
c2 <- apply((Z >= PSI), 2, all)
if (sum(c1 + c2) != 0 || is.na(sum(c1 + c2)))
stop("starting psi out of the admissible range")
colnames(Z) <- nomiZ <- rep(nome, times = a)
ripetizioni <- as.numeric(unlist(sapply(table(nomiZ)[order(unique(nomiZ))],
function(.x) {
1:.x
})))
nomiU <- paste("U", ripetizioni, sep = "")
nomiU <- paste(nomiU, nomiZ, sep = ".")
nomiV <- paste("V", ripetizioni, sep = "")
nomiV <- paste(nomiV, nomiZ, sep = ".")
if (it.max == 0) {
U <- (Z > PSI) * (Z - PSI)
colnames(U) <- paste(ripetizioni, nomiZ, sep = ".")
nomiU <- paste("U", colnames(U), sep = "")
for (i in 1:ncol(U)) mfExt[nomiU[i]] <- mf[nomiU[i]] <- U[,
i]
Fo <- update.formula(formula(obj), as.formula(paste(".~.+",
paste(nomiU, collapse = "+"))))
obj <- update(obj, formula = Fo, evaluate = FALSE, data = mfExt)
if (!is.null(obj[["subset"]]))
obj[["subset"]] <- NULL
obj <- eval(obj, envir = mfExt)
if (model)
obj$model <- mf
psi <- cbind(psi, psi, 0)
rownames(psi) <- paste(paste("psi", ripetizioni, sep = ""),
nomiZ, sep = ".")
colnames(psi) <- c("Initial", "Est.", "St.Err")
obj$psi <- psi
return(obj)
}
if (is.null(weights))
weights <- rep(1, n)
if (is.null(offs))
offs <- rep(0, n)
initial <- psi
obj0 <- obj
dev0 <- sum(obj$residuals^2)
list.obj <- list(obj)
nomiOK <- nomiU
invXtX <- if (!is.null(obj$qr))
chol2inv(qr.R(obj$qr))
else NULL
Xty <- crossprod(XREG, y)
opz <- list(toll = toll, h = h, stop.if.error = stop.if.error,
dev0 = dev0, visual = visual, it.max = it.max, nomiOK = nomiOK,
id.psi.group = id.psi.group, gap = gap, visualBoot = visualBoot,
pow = pow, digits = digits, invXtX = invXtX, Xty = Xty,
conv.psi = conv.psi, alpha = alpha, fix.npsi = fix.npsi,
min.step = min.step, fc = fc)
if (n.boot <= 0) {
#print("here?")
obj <- local_seg.lm.fit(y, XREG, Z, PSI, weights, offs, opz)
}
else {
obj <- local_seg.lm.fit.boot(y, XREG, Z, PSI, weights, offs,
opz, n.boot = n.boot, size.boot = size.boot, random = random,
break.boot = break.boot)
}
if (!is.list(obj)) {
warning("No breakpoint estimated", call. = FALSE)
return(obj0)
}
if (obj$obj$df.residual == 0)
warning("no residual degrees of freedom (other warnings expected)",
call. = FALSE)
id.psi.group <- obj$id.psi.group
nomiOK <- obj$nomiOK
nomiFINALI <- unique(sub("U[1-9]*[0-9].", "", nomiOK))
nomiSenzaPSI <- setdiff(name.Z, nomiFINALI)
if (length(nomiSenzaPSI) >= 1)
warning("no breakpoints found for: ", paste(nomiSenzaPSI,
" "), call. = FALSE)
it <- obj$it
psi <- obj$psi
psi.values <- if (n.boot <= 0)
obj$psi.values
else obj$boot.restart
U <- obj$U
V <- obj$V
id.warn <- obj$id.warn
rangeZ <- obj$rangeZ
obj <- obj$obj
k <- length(psi)
beta.c <- coef(obj)[paste("U", 1:ncol(U), sep = "")]
Vxb <- V %*% diag(beta.c, ncol = length(beta.c))
length.psi <- tapply(as.numeric(as.character(names(psi))),
as.numeric(as.character(names(psi))), length)
forma.nomiU <- function(xx, yy) paste("U", 1:xx, ".", yy,
sep = "")
forma.nomiVxb <- function(xx, yy) paste("psi", 1:xx, ".",
yy, sep = "")
nomiU <- unlist(mapply(forma.nomiU, length.psi, nomiFINALI))
nomiVxb <- unlist(mapply(forma.nomiVxb, length.psi, nomiFINALI))
psi.list <- vector("list", length = length(unique(nomiZ)))
names(psi.list) <- unique(nomiZ)
names(psi) <- rep(nomiFINALI, length.psi)
for (i in names(psi.list)) {
psi.list[[i]] <- psi[names(psi) == i]
}
for (i in 1:ncol(U)) {
mfExt[nomiU[i]] <- mf[nomiU[i]] <- U[, i]
mfExt[nomiVxb[i]] <- mf[nomiVxb[i]] <- Vxb[, i]
}
nnomi <- c(nomiU, nomiVxb)
Fo <- update.formula(formula(obj0), as.formula(paste(".~.+",
paste(nnomi, collapse = "+"))))
if (id.psi.fixed) {
for (i in 1:ncol(fixedU)) mfExt[colnames(fixedU)[i]] <- mf[colnames(fixedU)[i]] <- fixedU[,
i]
Fo <- update.formula(Fo, paste(c("~.", colnames(fixedU)),
collapse = "+"))
}
objF <- update(obj0, formula = Fo, evaluate = FALSE, data = mfExt)
if (!is.null(objF[["subset"]]))
objF[["subset"]] <- NULL
objF <- eval(objF, envir = mfExt)
objF$offset <- obj0$offset
isNAcoef <- any(is.na(objF$coefficients))
if (isNAcoef) {
if (stop.if.error) {
message("breakpoint estimate(s):", as.vector(psi))
stop("at least one coef is NA: breakpoint(s) at the boundary? (possibly with many x-values replicated)",
call. = FALSE)
}
else {
warning("some estimate is NA: too many breakpoints? 'var(hat.psi)' cannot be computed \n ..returning a 'lm' model",
call. = FALSE)
Fo <- update.formula(formula(obj0), as.formula(paste(".~.+",
paste(nomiU, collapse = "+"))))
objF <- update(obj0, formula = Fo, evaluate = TRUE,
data = mfExt)
names(psi) <- nomiVxb
objF$psi <- psi
return(objF)
}
}
if (!gap) {
names.coef <- names(objF$coefficients)
names(obj$coefficients)[match(c(paste("U", 1:k, sep = ""),
paste("V", 1:k, sep = "")), names(coef(obj)))] <- nnomi
objF$coefficients[names.coef] <- obj$coefficients[names.coef]
objF$fitted.values <- obj$fitted.values
objF$residuals <- obj$residuals
}
Cov <- vcov(objF)
id <- match(nomiVxb, names(coef(objF)))
vv <- if (length(id) == 1)
Cov[id, id]
else diag(Cov[id, id])
a <- tapply(id.psi.group, id.psi.group, length)
ris.psi <- matrix(NA, length(psi), 3)
colnames(ris.psi) <- c("Initial", "Est.", "St.Err")
rownames(ris.psi) <- nomiVxb
ris.psi[, 2] <- psi
ris.psi[, 3] <- sqrt(vv)
a.ok <- NULL
for (j in name.Z) {
if (j %in% nomiFINALI) {
a.ok[length(a.ok) + 1] <- a[1]
a <- a[-1]
}
else {
a.ok[length(a.ok) + 1] <- 0
}
}
initial <- unlist(mapply(function(x, y) {
if (is.na(x)[1])
rep(x, y)
else x
}, initial.psi[nomiFINALI], a.ok[a.ok != 0], SIMPLIFY = TRUE))
if (stop.if.error)
ris.psi[, 1] <- initial
objF$rangeZ <- rangeZ
objF$psi.history <- psi.values
objF$psi <- ris.psi
objF$it <- it
objF$epsilon <- obj$epsilon
objF$call <- match.call()
objF$nameUV <- list(U = drop(nomiU), V = rownames(ris.psi),
Z = nomiFINALI)
objF$id.group <- if (length(name.Z) <= 1)
-rowSums(as.matrix(V))
objF$id.psi.group <- id.psi.group
objF$id.warn <- id.warn
objF$orig.call <- orig.call
objF$indexU <- build.all.psi(psi.list, fixed.psi)
if (model)
objF$model <- mf
if (n.boot > 0)
objF$seed <- employed.Random.seed
class(objF) <- c("segmented", class(obj0))
list.obj[[length(list.obj) + 1]] <- objF
class(list.obj) <- "segmented"
#print("end")
#######################
if (last)
list.obj <- list.obj[[length(list.obj)]]
return(list.obj)
}
local_seg.lm.fit <-
function (y, XREG, Z, PSI, w, offs, opz, return.all.sol = FALSE)
{
#print("start")
#######################
useExp.k = TRUE
est.k <- function(x1, y1, L0) {
ax <- log(x1)
.x <- cbind(1, ax, ax^2)
b <- drop(solve(crossprod(.x), crossprod(.x, y1)))
const <- b[1] - L0
DD <- sqrt(b[2]^2 - 4 * const * b[3])
kk <- exp((-b[2] + DD)/(2 * b[3]))
return(round(kk))
}
dpmax <- function(x, y, pow = 1) {
if (pow == 1)
-(x > y)
else -pow * ((x - y) * (x > y))^(pow - 1)
}
mylm <- function(x, y, w, offs = rep(0, length(y))) {
x1 <- x * sqrt(w)
y <- y - offs
y1 <- y * sqrt(w)
b <- drop(solve(crossprod(x1), crossprod(x1, y1)))
fit <- drop(tcrossprod(x, t(b)))
r <- y - fit
o <- list(coefficients = b, fitted.values = fit, residuals = r,
df.residual = length(y) - length(b))
o
}
mylmADD <- function(invXtX, X, v, Xty, y) {
vtv <- sum(v^2)
Xtv <- crossprod(X, v)
m <- invXtX %*% Xtv
d <- drop(1/(vtv - t(Xtv) %*% m))
r <- -d * m
invF <- invXtX + d * tcrossprod(m)
newINV <- rbind(cbind(invF, r), c(t(r), d))
b <- crossprod(newINV, c(Xty, sum(v * y)))
fit <- tcrossprod(cbind(X, v), t(b))
r <- y - fit
o <- list(coefficients = b, fitted.values = fit, residuals = r)
o
}
in.psi <- function(LIM, PSI, ret.id = TRUE) {
a <- PSI[1, ] <= LIM[1, ]
b <- PSI[1, ] >= LIM[2, ]
is.ok <- !a & !b
if (ret.id)
return(is.ok)
isOK <- all(is.ok) && all(!is.na(is.ok))
isOK
}
far.psi <- function(Z, PSI, id.psi.group, ret.id = TRUE,
fc = 0.93) {
nSeg <- length(unique(id.psi.group))
npsij <- tapply(id.psi.group, id.psi.group, length)
nj <- sapply(unique(id.psi.group), function(.x) {
tabulate(rowSums((Z > PSI)[, id.psi.group == .x,
drop = FALSE]) + 1)
}, simplify = FALSE)
ff <- id.far.ok <- vector("list", length = nSeg)
for (i in 1:nSeg) {
if (length(nj[[i]]) != npsij[i] + 1)
nj[[i]] <- tabulate(rowSums((Z >= PSI)[, id.psi.group ==
i, drop = FALSE]) + 1)
id.ok <- (nj[[i]] >= 2)
id.far.ok[[i]] <- id.ok[-length(id.ok)] & id.ok[-1]
ff[[i]] <- ifelse(diff(nj[[i]]) > 0, 1/fc, fc)
}
id.far.ok <- unlist(id.far.ok)
ff <- unlist(ff)
if (!ret.id) {
return(all(id.far.ok))
}
else {
attr(id.far.ok, "factor") <- ff
return(id.far.ok)
}
}
adj.psi <- function(psii, LIM) {
pmin(pmax(LIM[1, ], psii), LIM[2, ])
}
n <- length(y)
min.step <- opz$min.step
rangeZ <- apply(Z, 2, range)
alpha <- opz$alpha
limZ <- apply(Z, 2, quantile, names = FALSE, probs = c(alpha,
1 - alpha))
psi <- PSI[1, ]
id.psi.group <- opz$id.psi.group
conv.psi <- opz$conv.psi
h <- opz$h
digits <- opz$digits
pow <- opz$pow
nomiOK <- opz$nomiOK
toll <- opz$toll
h <- opz$h
gap <- opz$gap
fix.npsi <- opz$stop.if.error
dev.new <- opz$dev0
visual <- opz$visual
it.max <- old.it.max <- opz$it.max
fc <- opz$fc
names(psi) <- id.psi.group
it <- 0
epsilon <- 10
k.values <- dev.values <- NULL
psi.values <- list()
psi.values[[length(psi.values) + 1]] <- NA
sel.col.XREG <- unique(sapply(colnames(XREG), function(x) match(x,
colnames(XREG))))
if (is.numeric(sel.col.XREG))
XREG <- XREG[, sel.col.XREG, drop = FALSE]
invXtX <- opz$invXtX
Xty <- opz$Xty
if (!in.psi(limZ, PSI, FALSE))
stop("starting psi out of the range.. see 'alpha' in seg.control.",
call. = FALSE)
    if (!far.psi(Z, PSI, id.psi.group, FALSE))
        stop("psi values too close to each other. Please change (decrease the number of) starting values",
            call. = FALSE)
n.psi1 <- ncol(Z)
U <- ((Z - PSI) * (Z > PSI))
if (pow[1] != 1)
U <- U^pow[1]
obj0 <- try(mylm(cbind(XREG, U), y, w, offs), silent = TRUE)
if (class(obj0)[1] == "try-error")
obj0 <- lm.wfit(cbind(XREG, U), y, w, offs)
L0 <- sum(obj0$residuals^2 * w)
n.intDev0 <- nchar(strsplit(as.character(L0), "\\.")[[1]][1])
dev.values[length(dev.values) + 1] <- opz$dev0
dev.values[length(dev.values) + 1] <- L0
psi.values[[length(psi.values) + 1]] <- psi
if (visual) {
message(paste("iter = ", sprintf("%2.0f", 0), " dev = ",
sprintf(paste("%", n.intDev0 + 6, ".5f", sep = ""),
L0), " k = ", sprintf("%2.0f", NA), " n.psi = ",
formatC(length(unlist(psi)), digits = 0, format = "f"),
" ini.psi = ", paste(formatC(unlist(psi), digits = 3,
format = "f"), collapse = " "), sep = ""),
"\n")
}
id.warn <- FALSE
id.psi.changed <- rep(FALSE, it.max)
while (abs(epsilon) > toll) {
it <- it + 1
n.psi0 <- n.psi1
n.psi1 <- ncol(Z)
if (n.psi1 != n.psi0) {
U <- ((Z - PSI) * (Z > PSI))
if (pow[1] != 1)
U <- U^pow[1]
obj0 <- try(mylm(cbind(XREG, U), y, w, offs), silent = TRUE)
if (class(obj0)[1] == "try-error")
obj0 <- lm.wfit(cbind(XREG, U), y, w, offs)
L0 <- sum(obj0$residuals^2 * w)
}
V <- dpmax(Z, PSI, pow = pow[2])
X <- cbind(XREG, U, V)
rownames(X) <- NULL
colnames(X)[(ncol(XREG) + 1):ncol(X)] <- c(paste("U",
1:ncol(U), sep = ""), paste("V", 1:ncol(V), sep = ""))
obj <- lm.wfit(x = X, y = y, w = w, offset = offs)
beta.c <- coef(obj)[paste("U", 1:ncol(U), sep = "")]
gamma.c <- coef(obj)[paste("V", 1:ncol(V), sep = "")]
if (any(is.na(c(beta.c, gamma.c)))) {
if (fix.npsi) {
if (return.all.sol)
return(list(dev.values, psi.values))
else stop("breakpoint estimate too close or at the boundary causing NA estimates.. too many breakpoints being estimated?",
call. = FALSE)
}
else {
id.coef.ok <- !is.na(gamma.c)
psi <- psi[id.coef.ok]
if (length(psi) <= 0) {
warning(paste("All breakpoints have been removed after",
it, "iterations.. returning 0"), call. = FALSE)
return(0)
}
gamma.c <- gamma.c[id.coef.ok]
beta.c <- beta.c[id.coef.ok]
Z <- Z[, id.coef.ok, drop = FALSE]
rangeZ <- rangeZ[, id.coef.ok, drop = FALSE]
limZ <- limZ[, id.coef.ok, drop = FALSE]
nomiOK <- nomiOK[id.coef.ok]
id.psi.group <- id.psi.group[id.coef.ok]
names(psi) <- id.psi.group
}
}
psi.old <- psi
psi <- psi.old + gamma.c/beta.c
if (!is.null(digits))
psi <- round(psi, digits)
PSI <- matrix(rep(psi, rep(n, length(psi))), ncol = length(psi))
U1 <- (Z - PSI) * (Z > PSI)
if (pow[1] != 1)
U1 <- U1^pow[1]
obj1 <- try(mylm(cbind(XREG, U1), y, w, offs), silent = TRUE)
if (class(obj1)[1] == "try-error")
obj1 <- try(lm.wfit(cbind(XREG, U1), y, w, offs),
silent = TRUE)
L1 <- if (class(obj1)[1] == "try-error")
L0 + 10
else sum(obj1$residuals^2 * w)
use.k <- k <- 1
L1.k <- NULL
L1.k[length(L1.k) + 1] <- L1
while (L1 > L0) {
k <- k + 1
use.k <- if (useExp.k)
2^(k - 1)
else k
psi <- psi.old + (gamma.c/beta.c)/(use.k * h)
if (!is.null(digits))
psi <- round(psi, digits)
PSI <- matrix(rep(psi, rep(n, length(psi))), ncol = length(psi))
U1 <- (Z - PSI) * (Z > PSI)
if (pow[1] != 1)
U1 <- U1^pow[1]
obj1 <- try(mylm(cbind(XREG, U1), y, w, offs), silent = TRUE)
if (class(obj1)[1] == "try-error")
obj1 <- lm.wfit(cbind(XREG, U1), y, w, offs)
L1 <- if (class(obj1)[1] == "try-error")
L0 + 10
else sum(obj1$residuals^2 * w)
L1.k[length(L1.k) + 1] <- L1
if (1/(use.k * h) < min.step) {
break
}
}
if (visual) {
flush.console()
message(paste("iter = ", sprintf("%2.0f", it), " dev = ",
sprintf(paste("%", n.intDev0 + 6, ".5f", sep = ""),
L1), " k = ", sprintf("%2.0f", k), " n.psi = ",
formatC(length(unlist(psi)), digits = 0, format = "f"),
" est.psi = ", paste(formatC(unlist(psi), digits = 3,
format = "f"), collapse = " "), sep = ""),
"\n")
}
epsilon <- if (conv.psi)
max(abs((psi - psi.old)/psi.old))
else (L0 - L1)/(abs(L0) + 0.1)
L0 <- L1
U <- U1
k.values[length(k.values) + 1] <- use.k
psi.values[[length(psi.values) + 1]] <- psi
dev.values[length(dev.values) + 1] <- L0
id.psi.far <- far.psi(Z, PSI, id.psi.group, TRUE, fc = opz$fc)
id.psi.in <- in.psi(limZ, PSI, TRUE)
id.psi.ok <- id.psi.in & id.psi.far
if (!all(id.psi.ok)) {
if (fix.npsi) {
psi <- psi * ifelse(id.psi.far, 1, attr(id.psi.far,
"factor"))
PSI <- matrix(rep(psi, rep(nrow(Z), length(psi))),
ncol = length(psi))
id.psi.changed[it] <- TRUE
}
else {
Z <- Z[, id.psi.ok, drop = FALSE]
PSI <- PSI[, id.psi.ok, drop = FALSE]
rangeZ <- rangeZ[, id.psi.ok, drop = FALSE]
limZ <- limZ[, id.psi.ok, drop = FALSE]
nomiOK <- nomiOK[id.psi.ok]
id.psi.group <- id.psi.group[id.psi.ok]
psi.old <- psi.old[id.psi.ok]
psi <- psi[id.psi.ok]
names(psi) <- id.psi.group
if (ncol(PSI) <= 0) {
warning(paste("All breakpoints have been removed after",
it, "iterations.. returning 0"), call. = FALSE)
return(0)
}
}
}
if (it >= it.max) {
id.warn <- TRUE
break
}
}
if (id.psi.changed[length(id.psi.changed)])
warning(paste("Some psi (", (1:length(psi))[!id.psi.far],
") changed after the last iter.", sep = ""), call. = FALSE)
if (id.warn)
warning(paste("max number of iterations (", it, ") attained",
sep = ""), call. = FALSE)
attr(psi.values, "dev") <- dev.values
attr(psi.values, "k") <- k.values
psi <- unlist(tapply(psi, id.psi.group, sort))
names(psi) <- id.psi.group
names.coef <- names(obj$coefficients)
PSI.old <- PSI
PSI <- matrix(rep(psi, rep(nrow(Z), length(psi))), ncol = length(psi))
if (sd(PSI - PSI.old) > 0 || id.psi.changed[length(id.psi.changed)]) {
U <- (Z - PSI) * (Z > PSI)
colnames(U) <- paste("U", 1:ncol(U), sep = "")
V <- -(Z > PSI)
colnames(V) <- paste("V", 1:ncol(V), sep = "")
obj <- lm.wfit(x = cbind(XREG, U), y = y, w = w, offset = offs)
L1 <- sum(obj$residuals^2 * w)
}
else {
obj <- obj1
}
obj$coefficients <- c(obj$coefficients, rep(0, ncol(V)))
names(obj$coefficients) <- names.coef
obj$epsilon <- epsilon
obj$it <- it
obj <- list(obj = obj, it = it, psi = psi, psi.values = psi.values,
U = U, V = V, rangeZ = rangeZ, epsilon = epsilon, nomiOK = nomiOK,
SumSquares.no.gap = L1, id.psi.group = id.psi.group,
id.warn = id.warn)
return(obj)
}
local_seg.lm.fit.boot <-
function (y, XREG, Z, PSI, w, offs, opz, n.boot = 10, size.boot = NULL,
jt = FALSE, nonParam = TRUE, random = FALSE, break.boot = n.boot)
{
extract.psi <- function(lista) {
dev.values <- lista[[1]][-1]
psi.values <- lista[[2]][-1]
dev.ok <- min(dev.values)
id.dev.ok <- which.min(dev.values)
if (is.list(psi.values))
psi.values <- matrix(unlist(psi.values), nrow = length(dev.values),
byrow = TRUE)
if (!is.matrix(psi.values))
psi.values <- matrix(psi.values)
psi.ok <- psi.values[id.dev.ok, ]
r <- list(SumSquares.no.gap = dev.ok, psi = psi.ok)
r
}
visualBoot <- opz$visualBoot
opz.boot <- opz
opz.boot$pow = c(1, 1)
opz1 <- opz
opz1$it.max <- 1
n <- length(y)
    o0 <- try(suppressWarnings(local_seg.lm.fit(y, XREG, Z, PSI, w,
        offs, opz, return.all.sol = FALSE)), silent = TRUE)
rangeZ <- apply(Z, 2, range)
if (!is.list(o0)) {
        o0 <- suppressWarnings(local_seg.lm.fit(y, XREG, Z, PSI, w,
            offs, opz, return.all.sol = TRUE))
o0 <- extract.psi(o0)
ss00 <- opz$dev0
if (!nonParam) {
warning("using nonparametric boot")
nonParam <- TRUE
}
}
if (is.list(o0)) {
est.psi00 <- est.psi0 <- o0$psi
ss00 <- o0$SumSquares.no.gap
if (!nonParam)
fitted.ok <- fitted(o0)
}
else {
if (!nonParam)
stop("the first fit failed and I cannot extract fitted values for the semipar boot")
if (random) {
est.psi00 <- est.psi0 <- apply(rangeZ, 2, function(r) runif(1,
r[1], r[2]))
PSI1 <- matrix(rep(est.psi0, rep(nrow(Z), length(est.psi0))),
ncol = length(est.psi0))
            o0 <- try(suppressWarnings(local_seg.lm.fit(y, XREG, Z,
                PSI1, w, offs, opz1)), silent = TRUE)
ss00 <- o0$SumSquares.no.gap
}
else {
est.psi00 <- est.psi0 <- apply(PSI, 2, mean)
ss00 <- opz$dev0
}
}
n.intDev0 <- nchar(strsplit(as.character(ss00), "\\.")[[1]][1])
all.est.psi.boot <- all.selected.psi <- all.est.psi <- matrix(NA,
nrow = n.boot, ncol = length(est.psi0))
all.ss <- all.selected.ss <- rep(NA, n.boot)
if (is.null(size.boot))
size.boot <- n
Z.orig <- Z
count.random <- 0
id.uguali <- 0
k.psi.change <- 1
alpha <- 0.1
for (k in seq(n.boot)) {
n.boot.rev <- 3
diff.selected.ss <- rev(diff(na.omit(all.selected.ss)))
if (length(diff.selected.ss) >= (n.boot.rev - 1) &&
all(round(diff.selected.ss[1:(n.boot.rev - 1)],
6) == 0)) {
qpsi <- sapply(1:ncol(Z), function(i) mean(est.psi0[i] >=
Z[, i]))
qpsi <- ifelse(abs(qpsi - 0.5) < 0.1, alpha, qpsi)
alpha <- 1 - alpha
est.psi0 <- sapply(1:ncol(Z), function(i) quantile(Z[,
i], probs = 1 - qpsi[i], names = FALSE))
}
PSI <- matrix(rep(est.psi0, rep(nrow(Z), length(est.psi0))),
ncol = length(est.psi0))
if (jt)
Z <- apply(Z.orig, 2, jitter)
if (nonParam) {
id <- sample(n, size = size.boot, replace = TRUE)
            o.boot <- try(suppressWarnings(local_seg.lm.fit(y[id],
                XREG[id, , drop = FALSE], Z[id, , drop = FALSE],
                PSI[id, , drop = FALSE], w[id], offs[id], opz.boot)),
                silent = TRUE)
}
else {
yy <- fitted.ok + sample(residuals(o0), size = n,
replace = TRUE)
            o.boot <- try(suppressWarnings(local_seg.lm.fit(yy, XREG,
                Z.orig, PSI, w, offs, opz.boot)), silent = TRUE)
}
if (is.list(o.boot)) {
all.est.psi.boot[k, ] <- est.psi.boot <- o.boot$psi
}
else {
est.psi.boot <- apply(rangeZ, 2, function(r) runif(1,
r[1], r[2]))
}
PSI <- matrix(rep(est.psi.boot, rep(nrow(Z), length(est.psi.boot))),
ncol = length(est.psi.boot))
opz$h <- max(opz$h * 0.9, 0.2)
opz$it.max <- opz$it.max + 1
        o <- try(suppressWarnings(local_seg.lm.fit(y, XREG, Z.orig,
            PSI, w, offs, opz, return.all.sol = TRUE)), silent = TRUE)
if (!is.list(o) && random) {
est.psi0 <- apply(rangeZ, 2, function(r) runif(1,
r[1], r[2]))
PSI1 <- matrix(rep(est.psi0, rep(nrow(Z), length(est.psi0))),
ncol = length(est.psi0))
            o <- try(suppressWarnings(local_seg.lm.fit(y, XREG, Z,
                PSI1, w, offs, opz1)), silent = TRUE)
count.random <- count.random + 1
}
if (is.list(o)) {
if (!"coefficients" %in% names(o$obj))
o <- extract.psi(o)
all.est.psi[k, ] <- o$psi
all.ss[k] <- o$SumSquares.no.gap
if (o$SumSquares.no.gap <= ifelse(is.list(o0), o0$SumSquares.no.gap,
10^12)) {
o0 <- o
k.psi.change <- k
}
est.psi0 <- o0$psi
all.selected.psi[k, ] <- est.psi0
all.selected.ss[k] <- o0$SumSquares.no.gap
}
if (visualBoot) {
flush.console()
message(paste("boot sample = ", sprintf("%2.0f", k),
" opt.dev = ", sprintf(paste("%", n.intDev0 +
6, ".5f", sep = ""), o0$SumSquares.no.gap),
" n.psi = ", formatC(length(unlist(est.psi0)),
digits = 0, format = "f"), " est.psi = ",
paste(formatC(unlist(est.psi0), digits = 3,
format = "f"), collapse = " "), sep = ""),
"\n")
}
asss <- na.omit(all.selected.ss)
if (length(asss) > break.boot) {
if (all(rev(round(diff(asss), 6))[1:(break.boot -
1)] == 0))
break
}
}
all.selected.psi <- rbind(est.psi00, all.selected.psi)
all.selected.ss <- c(ss00, all.selected.ss)
ris <- list(all.selected.psi = drop(all.selected.psi), all.selected.ss = all.selected.ss,
all.psi = all.est.psi, all.ss = all.ss)
if (is.null(o0$obj)) {
PSI1 <- matrix(rep(est.psi0, rep(nrow(Z), length(est.psi0))),
ncol = length(est.psi0))
        o0 <- try(local_seg.lm.fit(y, XREG, Z, PSI1, w, offs, opz1),
            silent = TRUE)
}
if (!is.list(o0))
return(0)
o0$boot.restart <- ris
#rm(.Random.seed, envir = globalenv())
return(o0)
}
local_seg.control <-
function (n.boot = 10, display = FALSE, tol = 1e-05, it.max = 30,
fix.npsi = TRUE, K = 10, quant = TRUE, maxit.glm = 25, h = 1,
break.boot = 5, size.boot = NULL, jt = FALSE, nonParam = TRUE,
random = TRUE, seed = 12345, fn.obj = NULL, digits = NULL,
conv.psi = FALSE, alpha = 0.02, min.step = 1e-04,
powers = c(1,1), last = TRUE, stop.if.error = NULL, gap = FALSE,
fc = 0.95)
{
print("control")
####################
list(toll = tol, it.max = it.max, visual = display, stop.if.error = stop.if.error,
K = K, last = last, maxit.glm = maxit.glm, h = h, n.boot = n.boot,
size.boot = size.boot, gap = gap, jt = jt, break.boot = break.boot,
nonParam = nonParam, random = random, pow = powers,
seed = seed, quant = quant, fn.obj = fn.obj, digits = digits,
conv.psi = conv.psi, alpha = alpha, fix.npsi = fix.npsi,
min.step = min.step, fc = fc)
}
local_confint.segmented <-
function (object, parm, level = 0.95, method = c("delta", "score",
"gradient"), rev.sgn = FALSE, var.diff = FALSE, is = FALSE,
digits = max(4, getOption("digits") - 1), .coef = NULL,
.vcov = NULL, ...)
{
method <- match.arg(method)
cls <- class(object)
if (length(cls) == 1)
cls <- c(cls, cls)
if (method %in% c("score", "gradient") && !all(cls[1:2] ==
c("segmented", "lm")))
stop("Score- or Gradient-based CI only work with segmented lm models")
estcoef <- if (is.null(.coef))
coef(object)
else .coef
COV <- if (is.null(.vcov))
vcov(object, var.diff = var.diff, is = is, ...)
else .vcov
confintSegDelta <- function(object, parm, level = 0.95,
rev.sgn = FALSE, var.diff = FALSE, is = FALSE, ...) {
f.U <- function(nomiU, term = NULL) {
k <- length(nomiU)
nomiUsenzaU <- strsplit(nomiU, "\\.")
nomiU.ok <- vector(length = k)
for (i in 1:k) {
nomi.i <- nomiUsenzaU[[i]][-1]
if (length(nomi.i) > 1)
nomi.i <- paste(nomi.i, collapse = ".")
nomiU.ok[i] <- nomi.i
}
if (!is.null(term))
nomiU.ok <- (1:k)[nomiU.ok %in% term]
return(nomiU.ok)
}
if (var.diff && length(object$nameUV$Z) > 1) {
var.diff <- FALSE
warning(" 'var.diff' set to FALSE with multiple segmented variables",
call. = FALSE)
}
if (missing(parm)) {
nomeZ <- object$nameUV$Z
if (length(rev.sgn) == 1)
rev.sgn <- rep(rev.sgn, length(nomeZ))
}
else {
if (!all(parm %in% object$nameUV$Z)) {
stop("invalid 'parm' name", call. = FALSE)
}
else {
nomeZ <- parm
}
}
if (length(nomeZ) > 1) {
warning("There are multiple segmented terms. The first is taken",
call. = FALSE, immediate. = TRUE)
nomeZ <- nomeZ[1]
}
if (length(rev.sgn) != length(nomeZ))
rev.sgn <- rep(rev.sgn, length.out = length(nomeZ))
rr <- list()
z <- if ("lm" %in% class(object))
abs(qt((1 - level)/2, df = object$df.residual))
else abs(qnorm((1 - level)/2))
for (i in 1:length(nomeZ)) {
nomi.U <- object$nameUV$U[f.U(object$nameUV$U, nomeZ[i])]
nomi.V <- object$nameUV$V[f.U(object$nameUV$V, nomeZ[i])]
m <- matrix(, length(nomi.U), 3)
colnames(m) <- c("Est.", paste("CI", "(", level *
100, "%", ")", c(".low", ".up"), sep = ""))
for (j in 1:length(nomi.U)) {
sel <- c(nomi.V[j], nomi.U[j])
V <- COV[sel, sel]
b <- estcoef[sel[2]]
th <- c(b, 1)
orig.coef <- drop(diag(th) %*% estcoef[sel])
gammma <- orig.coef[1]
est.psi <- object$psi[sel[1], 2]
V <- diag(th) %*% V %*% diag(th)
se.psi <- sqrt((V[1, 1] + V[2, 2] * (gammma/b)^2 -
2 * V[1, 2] * (gammma/b))/b^2)
r <- c(est.psi, est.psi - z * se.psi, est.psi +
z * se.psi)
if (rev.sgn[i])
r <- c(-r[1], rev(-r[2:3]))
m[j, ] <- r
}
m <- m[order(m[, 1]), , drop = FALSE]
rownames(m) <- nomi.V
if (rev.sgn[i]) {
rownames(m) <- rev(rownames(m))
}
rr[[length(rr) + 1]] <- m
}
names(rr) <- nomeZ
return(rr[[1]])
}
confintSegIS <- function(obj, parm, d.h = 1.5, h = 2.5,
conf.level = level, ...) {
ci.IS <- function(obj.seg, nomeZ, nomeUj, stat = c("score",
"gradient"), transf = FALSE, h = -1, sigma, conf.level = 0.95,
use.z = FALSE, is = TRUE, fit.is = TRUE, var.is = TRUE,
bw = NULL, smooth = 0, msgWarn = FALSE, n.values = 50,
altro = FALSE, cadj = FALSE, plot = FALSE, add = FALSE,
agg = FALSE, raw = FALSE, useSeg = FALSE) {
u.psiX <- function(psi, sigma, x, y, XREG = NULL,
scale = FALSE, est.psi = NULL, interc = FALSE,
pow = c(1, 1), lag = 0, robust = FALSE, GS = FALSE,
is = FALSE, se.psi, var.is = TRUE, which.return = 3,
fit.is = FALSE, altro = FALSE, cadj = FALSE,
transf = FALSE) {
varUpsi.fn <- function(X, sigma = 1, r = NULL) {
INF <- crossprod(X)/(sigma^2)
if (is.null(r)) {
vv <- INF[1, 1] - (INF[1, -1] %*% solve(INF[-1,
-1], INF[-1, 1]))
}
else {
u <- X * r/(sigma^2)
V <- crossprod(u)
I22 <- solve(INF[-1, -1])
vv <- V[1, 1] - INF[1, -1] %*% I22 %*% V[1,
-1] - V[1, -1] %*% I22 %*% INF[-1, 1] +
INF[1, -1] %*% I22 %*% V[-1, -1] %*% I22 %*%
INF[-1, 1]
}
return(vv)
}
dpmax <- function(x, y, pow = 1) {
if (pow == 1)
-(x > y)
else -pow * (x > y) * (x - y)^(pow - 1)
}
if (cadj && which.return != 3)
stop("cadj=TRUE can return only the studentized score")
if (is && missing(se.psi))
stop("is=TRUE needs se.psi")
if (interc)
XREG <- cbind(rep(1, length(y)), XREG)
if (fit.is) {
XX <- if (altro)
cbind((x - psi) * pnorm((x - psi)/se.psi) +
se.psi * dnorm((x - psi)/se.psi), XREG)
else cbind((x - psi) * pnorm((x - psi)/se.psi),
XREG)
o <- lm.fit(x = XX, y = y)
}
else {
.U <- (x > psi) * (x - psi)
if (pow[1] != 1)
.U <- .U^pow[1]
XX <- cbind(.U, XREG)
o <- lm.fit(x = XX, y = y)
}
b <- o$coef[1]
mu <- o$fitted.values
n <- length(mu)
V <- NULL
if (GS) {
if (is.null(est.psi))
stop("'GS=TRUE' needs 'est.psi'")
gs <- b * (sum((y - mu) * V)/(sigma^2)) *
(est.psi - psi)
gs <- sqrt(pmax(gs, 0)) * sign(est.psi - psi)
return(gs)
}
if (is) {
r <- -b * sum(((y - mu) * pnorm((x - psi)/se.psi)))/sigma^2
XX <- if (var.is)
cbind(-b * pnorm((x - psi)/se.psi), XX)
else cbind(-b * I(x > psi), XX)
}
else {
r <- -b * sum((y - mu) * I(x > psi))/sigma^2
XX <- cbind(-b * I(x > psi), XX)
}
if (scale) {
if (!is.null(est.psi)) {
mu <- attr(est.psi, "fitted")
est.b <- attr(est.psi, "b")
est.psi <- as.numeric(est.psi)
if (is) {
XX <- if (var.is)
cbind(-est.b * pnorm((x - est.psi)/se.psi),
XX[, -1])
else cbind(-est.b * I(x > est.psi), XX[,
-1])
}
else {
XX <- cbind(-est.b * I(x > est.psi), XX[,
-1])
}
}
rr <- if (robust)
(y - mu)
else NULL
v.Upsi <- try(varUpsi.fn(XX, sigma, r = rr),
silent = TRUE)
if (!is.numeric(v.Upsi))
return(NA)
if (v.Upsi <= 0)
return(NA)
}
names(r) <- NULL
r <- c(r, v.Upsi, r/sqrt(max(v.Upsi, 0)))
r <- r[which.return]
if (cadj)
r <- sign(r) * sqrt((r^2) * (1 - (3 - (r^2))/(2 *
n)))
r
}
u.psiXV <- Vectorize(u.psiX, vectorize.args = "psi",
USE.NAMES = FALSE)
gs.fn <- function(x, y, estpsi, sigma2, psivalue,
pow = c(1, 1), adj = 1, is = FALSE, sepsi, XREG = NULL,
fit.is = FALSE, altro = FALSE, transf = FALSE) {
logitDeriv <- function(kappa) exp(kappa) * diff(intv)/((1 +
exp(kappa))^2)
logit <- function(psi) log((psi - min(intv))/(max(intv) -
psi))
logitInv <- function(kappa) (min(intv) + max(intv) *
exp(kappa))/(1 + exp(kappa))
intv <- quantile(x, probs = c(0.02, 0.98), names = FALSE)
if (is && missing(sepsi))
stop("SE(psi) is requested when is=TRUE")
k <- length(psivalue)
r <- vector(length = k)
for (i in 1:k) {
psii <- psivalue[i]
if (fit.is) {
X <- if (altro)
cbind(1, x, (x - psii) * pnorm((x - psii)/sepsi) +
sepsi * dnorm((x - psii)/sepsi), XREG)
else cbind(1, x, (x - psii) * pnorm((x -
psii)/sepsi), XREG)
}
else {
.U <- (x - psii) * (x > psii)
if (pow[1] != 1)
.U <- .U^pow[1]
X <- cbind(1, x, .U, XREG)
}
o <- lm.fit(y = y, x = X)
b <- o$coef[3]
if (is) {
v <- pnorm((x - psii)/sepsi)
}
else {
v <- if (pow[2] == 1)
I(x > psii)
else pow[2] * pmax(x - psii, 0)^(pow[2] -
1)
}
if (transf)
v <- v * logitDeriv(logit(psii))
r[i] <- -(b/sigma2) * sum((y - o$fitted) *
v)
r[i] <- if (!transf)
r[i] * (estpsi - psii)
else r[i] * (logit(estpsi) - logit(psii))
if (altro && fit.is)
r[i] <- r[i] + (estpsi - psii) * ((b * sepsi *
sum(dnorm((x - psii)/sepsi))) * (b/sigma2))
}
if (adj > 0) {
r <- if (adj == 1)
pmax(r, 0)
else abs(r)
}
if (transf)
psivalue <- logit(psivalue)
segni <- if (transf)
sign(logit(estpsi) - psivalue)
else sign(estpsi - psivalue)
r <- cbind(psi = psivalue, gs.Chi = r, gs.Norm = sqrt(r) *
segni)
r
}
monotSmooth <- function(xx, yy, hat.psi, k = 20,
w = 0) {
bspline <- function(x, ndx, xlr = NULL, knots,
deg = 3, deriv = 0) {
if (missing(knots)) {
if (is.null(xlr)) {
xl <- min(x) - 0.01 * diff(range(x))
xr <- max(x) + 0.01 * diff(range(x))
}
else {
if (length(xlr) != 2)
stop("quando fornito, xlr deve avere due componenti")
xl <- xlr[1]
xr <- xlr[2]
}
dx <- (xr - xl)/ndx
knots <- seq(xl - deg * dx, xr + deg * dx,
by = dx)
}
B <- splineDesign(knots, x, ord = deg + 1,
derivs = rep(deriv, length(x)))
r <- list(B = B, degree = deg, knots = knots)
r
}
if (length(k) == 1)
r <- bspline(xx, ndx = k)
else r <- bspline(xx, knots = k)
B <- r$B
knots <- r$knots
degree <- r$degree
D1 <- diff(diag(ncol(B)), diff = 1)
d <- drop(solve(crossprod(B), crossprod(B, yy)))
B0 <- spline.des(knots, c(min(xx), hat.psi,
max(xx)), degree + 1)$design
P <- tcrossprod(B0[2, ]) * 10^12
e <- rep(1, length(d))
ww <- (1/(abs(xx - hat.psi) + diff(range(xx))/100))^w
it <- 0
while (!isTRUE(all.equal(e, rep(0, length(e))))) {
v <- 1 * I(diff(d) > 0)
E <- (10^12) * crossprod(D1 * sqrt(v))
d.old <- d
M <- crossprod(B * sqrt(ww)) + E + P
d <- drop(solve(M + 0.001 * diag(ncol(M)),
crossprod(B, ww * yy)))
e <- d - d.old
it <- it + 1
if (it >= 20)
break
}
fit <- drop(B %*% d)
return(fit)
}
miop <- function(x, y, xs = x, ys = y, h = FALSE,
v = FALSE, only.lines = FALSE, top = TRUE, right = TRUE,
col.h = grey(0.6), col.v = col.h, ...) {
if (only.lines)
h <- v <- TRUE
if (!only.lines)
plot(x, y, type = "l", ...)
if (v) {
y0 <- if (top)
par()$usr[4]
else par()$usr[3]
segments(xs, y0, xs, ys, col = col.v, lty = 3)
}
if (h) {
x0 <- if (right)
par()$usr[2]
else par()$usr[1]
segments(xs, ys, x0, ys, col = col.h, lty = 3,
lwd = 1.2)
}
invisible(NULL)
}
f.Left <- function(x, y) {
yy <- rev(y)
xx <- rev(x)
idList <- NULL
while (any(diff(yy) < 0)) {
id <- which(diff(yy) < 0)[1]
idList[length(idList) + 1] <- id + 1
yy <- yy[-(id + 1)]
xx <- xx[-(id + 1)]
}
r <- cbind(xx, yy)
r
}
f.Right <- function(x, y) {
xx <- x
yy <- y
idList <- NULL
while (any(diff(yy) > 0)) {
id <- which(diff(yy) > 0)[1]
idList[length(idList) + 1] <- id + 1
yy <- yy[-(id + 1)]
xx <- xx[-(id + 1)]
}
r <- cbind(xx, yy)
r
}
stat <- match.arg(stat)
if (missing(sigma))
sigma <- summary.lm(obj.seg)$sigma
if (cadj)
use.z = TRUE
zalpha <- if (use.z)
-qnorm((1 - conf.level)/2)
else -qt((1 - conf.level)/2, df = obj.seg$df.residual)
if (!is.numeric(h))
stop(" 'h' should be numeric")
if (sign(h) >= 0)
h <- abs(h[1])
Y <- obj.seg$model[, 1]
X <- obj.seg$model[, nomeZ]
formula.lin <- update.formula(formula(obj.seg),
paste(".~.", paste("-", paste(obj.seg$nameUV$V,
collapse = "-"))))
formula.lin <- update.formula(formula.lin, paste(".~.-",
nomeUj))
XREG <- model.matrix(formula.lin, data = obj.seg$model)
if (ncol(XREG) == 0)
XREG <- NULL
nomePsij <- sub("U", "psi", nomeUj)
est.psi <- obj.seg$psi[nomePsij, "Est."]
se.psi <- obj.seg$psi[nomePsij, "St.Err"]
if (any(h < 0)) {
all.range <- TRUE
valori <- seq(quantile(X, probs = 0.05, names = FALSE),
quantile(X, probs = 0.95, names = FALSE),
l = n.values)
}
else {
all.range <- FALSE
valori <- seq(max(quantile(X, probs = 0.05,
names = FALSE), est.psi - h * se.psi), min(quantile(X,
probs = 0.95, names = FALSE), est.psi + h *
se.psi), l = n.values)
}
n <- length(Y)
min.X <- min(X)
max.X <- max(X)
if (!is.null(bw))
se.psi <- eval(parse(text = bw))
if (stat == "score") {
U.valori <- u.psiXV(psi = valori, sigma = sigma,
x = X, y = Y, XREG = XREG, is = is, se.psi = se.psi,
scale = TRUE, pow = c(1, 1), fit.is = fit.is,
altro = altro, cadj = cadj, var.is = var.is,
transf = transf)
statlab <- "Score statistic"
if (plot && raw)
U.raw <- u.psiXV(valori, sigma, X, Y, XREG,
is = FALSE, scale = TRUE, pow = c(1, 1),
fit.is = FALSE, altro = altro, cadj = cadj,
var.is = FALSE, transf = transf)
}
else {
U.valori <- gs.fn(X, Y, est.psi, sigma^2, valori,
is = is, sepsi = se.psi, XREG = XREG, fit.is = fit.is,
altro = altro, transf = transf, pow = c(1,
1))[, 3]
statlab <- "Gradient statistic"
if (plot && raw)
U.raw <- gs.fn(X, Y, est.psi, sigma^2, valori,
is = FALSE, XREG = XREG, fit.is = FALSE,
altro = altro, transf = transf)[, 3]
}
if (any(is.na(U.valori))) {
warning("removing NA in the statistic values")
valori <- valori[!is.na(U.valori)]
U.valori <- U.valori[!is.na(U.valori)]
}
logit <- function(psi) log((psi - min(intv))/(max(intv) -
psi))
logitInv <- function(kappa) (min(intv) + max(intv) *
exp(kappa))/(1 + exp(kappa))
intv <- quantile(X, probs = c(0.02, 0.98), names = FALSE)
if (stat == "gradient" && transf) {
est.psi <- logit(est.psi)
valori <- logit(valori)
x.lab <- "kappa"
}
if (plot && !add) {
x.lab <- "psi"
if (raw) {
plot(valori, U.raw, xlab = x.lab, ylab = statlab,
type = "l")
points(valori, U.valori, xlab = x.lab, ylab = statlab,
type = "l", col = 2)
}
else {
plot(valori, U.valori, xlab = x.lab, ylab = statlab,
type = "l", col = 2)
}
abline(h = 0, lty = 3)
segments(est.psi, 0, est.psi, -20, lty = 2)
}
if (prod(range(U.valori)) >= 0)
stop("the signs of stat at extremes are not discordant, increase 'h' o set 'h=-1' ")
if (smooth == 0) {
valoriLeft <- valori[valori <= est.psi]
UvaloriLeft <- U.valori[valori <= est.psi]
vLeft <- f.Left(valoriLeft, UvaloriLeft)
valori.ok <- vLeft[, 1]
Uvalori.ok <- vLeft[, 2]
f.interpL <- splinefun(Uvalori.ok, valori.ok,
method = "mono", ties = min)
valoriRight <- valori[valori >= est.psi]
UvaloriRight <- U.valori[valori >= est.psi]
vRight <- f.Right(valoriRight, UvaloriRight)
valori.ok <- vRight[, 1]
Uvalori.ok <- vRight[, 2]
f.interpR <- splinefun(Uvalori.ok, valori.ok,
method = "mono", ties = min)
}
else {
if (useSeg) {
oseg <- try(suppressWarnings(segmented(lm(U.valori ~
valori), ~valori, psi = quantile(valori,
c(0.25, 0.75), names = FALSE), control = seg.control(n.boot = 0,
                            stop.if.error = FALSE))), silent = TRUE)
if (class(oseg)[1] == "try-error") {
oseg <- try(suppressWarnings(segmented(lm(U.valori ~
valori), ~valori, psi = quantile(valori,
0.5, names = FALSE), control = seg.control(n.boot = 0))),
silent = TRUE)
}
if (class(oseg)[1] == "segmented") {
if (plot)
lines(valori, oseg$fitted, lty = 3, lwd = 1.5)
soglie <- oseg$psi[, 2]
iid <- cut(valori, c(min(valori) - 1000,
soglie, max(valori) + 1000), labels = FALSE)
slopes <- cumsum(oseg$coef[2:(length(oseg$coef) -
length(soglie))])
slopes <- rep(slopes, table(iid))
valori <- valori[slopes <= 0]
U.valori <- U.valori[slopes <= 0]
}
}
fr <- monotSmooth(valori, U.valori, est.psi,
k = 7)
fr <- fr - (0.2/diff(range(valori))) * (valori -
mean(valori))
vLeft <- cbind(valori[valori <= est.psi], fr[valori <=
est.psi])
vRight <- cbind(valori[valori >= est.psi], fr[valori >=
est.psi])
if (!all.range) {
if ((min(valori) > intv[1]) && (fr[1] < max(zalpha)))
return("errLeft")
if ((max(valori) < intv[2]) && (fr[length(fr)] >
min(-zalpha)))
return("errRight")
}
f.interpL <- f.interpR <- splinefun(fr, valori,
"m", ties = min)
}
L <- f.interpL(zalpha)
U <- f.interpR(-zalpha)
delta <- est.psi - f.interpL(0)
if (plot) {
if (!agg)
delta <- 0
lines(vLeft, col = 3)
lines(vRight, col = 3)
vv <- seq(0, zalpha * 1.2, l = 50)
lines(f.interpL(vv) + delta, vv, col = grey(0.8,
alpha = 0.6), lwd = 4)
vv <- seq(0, -zalpha * 1.2, l = 50)
lines(f.interpR(vv) + delta, vv, col = grey(0.8,
alpha = 0.6), lwd = 4)
points(est.psi, 0, pch = 19)
miop(c(L, U) + delta, c(zalpha, -zalpha), only.lines = TRUE,
top = FALSE, right = FALSE)
}
if (stat == "gradient" && transf) {
L <- logitInv(L)
U <- logitInv(U)
}
L <- pmax(L, quantile(X, probs = 0.02))
U <- pmin(U, quantile(X, probs = 0.98))
r <- c(est.psi, L, U)
return(r)
}
if (!all(class(obj) == c("segmented", "lm")))
stop("A segmented lm object is requested")
if (missing(parm)) {
nomeZ <- parm <- obj$nameUV$Z
}
else {
if (!all(parm %in% obj$nameUV$Z))
stop("invalid 'parm' ")
nomeZ <- parm
}
if (length(parm) > 1) {
warning("There are multiple segmented terms. The first is taken",
call. = FALSE, immediate. = TRUE)
nomeZ <- parm[1]
}
nomiU.term <- grep(nomeZ, obj$nameUV$U, value = TRUE)
ra <- matrix(NA, length(nomiU.term), 3)
rownames(ra) <- nomiU.term
for (U.j in nomiU.term) {
if (any(c(d.h, h) < 0)) {
ra[U.j, ] <- ci.IS(obj, nomeZ, U.j, h = -1,
conf.level = level, ...)
}
d.h <- min(max(d.h, 1.5), 10)
a <- "start"
it <- 0
while (is.character(a)) {
a <- try(ci.IS(obj, nomeZ, U.j, h = h, conf.level = level,
...), silent = TRUE)
h <- h * d.h
it <- it + 1
if (it >= 20)
break
}
ra[U.j, ] <- a
}
colnames(ra) <- c("Est.", paste("CI", "(", level * 100,
"%", ")", c(".low", ".up"), sep = ""))
rownames(ra) <- sub("U", "psi", nomiU.term)
ra
}
if (method == "delta") {
r <- confintSegDelta(object, parm, level, rev.sgn, var.diff,
is, ...)
}
else {
r <- confintSegIS(object, parm, stat = method, conf.level = level,
...)
}
r <- signif(r, digits)
return(r)
}
#######################################
#end of source file: AQEval/R/aqe.misc.R
#######################################
#################################################
#' @title Some functions to calculate statistics
#################################################
#' @name calculate.stats
#' @aliases calcDateRangeStat calcRollingDateRangeStat
#' @description Calculate data set statistics for
#' selected time intervals.
#' @param data (data.frame, tibble, etc) Data set containing the
#' data-series the statistic is to be calculated for, and a
#' \code{date} column of date/time records.
#' @param pollutant (character) The name(s) of data-series to
#' analyse in \code{data}, by default all columns in
#' supplied data except \code{date}.
#' @param ... extra arguments.
#' @param method (numeric) Method to use when calculating
#' statistic.
#' @param from (various) Start date(s) to subsample from when
#' calculating statistic, by default start of supplied
#' \code{data} date range.
#' @param to (various) End date(s) to subsample to when
#' calculating statistic, by default end of supplied
#' \code{data} date range.
#' @param stat (function) Statistic to be applied to selected
#' data, by default \code{mean(pollutant, na.rm=TRUE)}.
#' @param range (character) For \code{calcRollingDateRangeStat},
#' the range of the rolling date windows, by default
#' \code{'year'} for annual statistic calculations.
#' @param res (character) For \code{calcRollingDateRangeStat},
#' the resolution to calculate the rolling statistic at, by
#' default \code{'day'} to calculate this once per day.
#' @note These functions are in development and likely to change
#' significantly in future versions, please handle with
#' care.
#' @returns These functions return \code{data.frame}s of function
#' outputs.
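#' @examples
#' \dontrun{
#' # illustrative sketch: 'mydata' is an assumed data set with a
#' # POSIXct 'date' column and a numeric 'no2' column
#' # mean no2 across one calendar year
#' calcDateRangeStat(mydata, from = 2018, to = 2018, pollutant = "no2")
#' # rolling annual mean no2, recalculated daily
#' calcRollingDateRangeStat(mydata, range = "year", res = "day",
#'     pollutant = "no2")
#' }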
#kr version 0.4 (first packaged) 2020/11/02
# version 0.5 2020/12/16
#to think about/do
##################################
#error handle - for bad inputs
#faster options - using method to test these...
#statistic as arg, option?
#time window and output resolution?
#better option/control than only.last...
#doc sources
#also see
#https://stackoverflow.com/questions/10803010/how-to-calculate-rolling-average-of-past-1-month-in-r
#regarding rolling windows...
#(not a fan of importing lubridate in full...)
#(maybe try dplyr approach?)
#' @import lubridate
#' @rdname calculate.stats
#' @export
calcDateRangeStat <-
function(data, from = NULL, to = NULL,
stat = NULL, pollutant = NULL,
..., method=2){
#calculates stat for all data in date range
    #'from' up to (but not including) 'to'
    # currently no sub-grouping
from <- aqe_prepFromDate(from, data)
to <- aqe_prepToDate(to, data)
dates <- data.frame(start.date = from, end.date = to,
stringsAsFactors = FALSE)
if(is.null(stat)){
stat <- function(x){
if(is.numeric(x)) mean(x, na.rm=TRUE) else NA
}
}
test <- if(is.null(pollutant)){
names(data)[names(data)!="date"]
} else {
pollutant
}
#method 1 faster than 2/but neither great
if(method==1){
ans <- lapply(test,
function(x){
data$.rstat <- data[[x]]
#removed pipe from next section of code
#(not sure about trade-offs)
df.stat <- dplyr::group_by(dates, start.date,
end.date)
df.stat <- dplyr::summarize(df.stat,
rstat = stat(data$.rstat[data$date >= start.date &
data$date < end.date]))
names(df.stat)[names(df.stat)=="rstat"] <- x
df.stat
})
df.stat <- purrr::reduce(ans, dplyr::full_join)
}
if(method==2){
ans <- lapply(1:nrow(dates), function(x){
temp <- data[data$date >= dates$start.date[x] & data$date < dates$end.date[x], test]
x.df <- sapply(temp,FUN=function(x) stat(x))
data.frame(start.date=dates$start.date[x],
end.date=dates$end.date[x],
t(x.df))
})
df.stat <- dplyr::bind_rows(ans)
#think about this
#or making same class as input?
##df.stat <- dplyr::as_tibble(df.stat)
##eval(parse(text = paste0("as.", class, "(df.stat, ...)")))
if(class(data)[1] %in% c("tbl", "tbl_df")){
df.stat <- dplyr::as_tibble(df.stat)
}
}
#looking for faster ways of doing this...
df.stat
}
#' @rdname calculate.stats
#' @export
calcRollingDateRangeStat <-
function(data,
range = "year", res = "day",
stat = NULL, pollutant = NULL,
from = NULL, to = NULL,
..., method=2){
#calculates stat for rolling date range
#think about to and from?
#ignores all sub-grouping (see calcDateRangeStat)
#works at res(olution) at moment because it is tidier
#might want option to turn this off?
temp <- strsplit(res, " ")[[1]]
temp <- temp[length(temp)]
start <- round.POSIXt(min(data$date, na.rm=TRUE), units=temp)
end <- round.POSIXt(max(data$date, na.rm=TRUE), units=temp)
#calculating at end of range
end <- seq(start, end, by=res)
#using lubridate because of leap years, etc...
if(length(grep(" ", range))==1){
temp <- strsplit(range, " ")[[1]]
step <- period(as.numeric(temp[1]), temp[[2]])
} else {
step <- period(1, range)
}
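    #e.g. range = "2 week" -> period(2, "week");
    # range = "year" -> period(1, "year")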
start <- end %m-% step
from <- aqe_prepFromDate(from, data)
to <- aqe_prepToDate(to, data)
dates <- data.frame(start = start, end = end)
dates <- dplyr::filter(dates,
start >= from,
end <= to)
calcDateRangeStat(data, dates$start, dates$end, stat,
pollutant,
method=method, ...)
}
############################################
#unexported code
#could be using a few of these elsewhere?
#would standardise handling of same name args...
aqe_prepFromDate <- function(from=NULL, data){
if(is.null(from)){
from <- min(data$date, na.rm=TRUE)
}
if(is.numeric(from)){
#allowed numeric year
from <- paste(from, "-01-01", sep="")
}
#assuming character from here
test <- attributes(data$date)$tzone
  #(default as.POSIXct formats handle date and date-time strings)
  from <- if(is.null(test)){
    as.POSIXct(from)
  } else {
    as.POSIXct(from, tz=test)
  }
from
}
aqe_prepToDate <- function(to=NULL, data){
if(is.null(to)){
to <- max(data$date, na.rm=TRUE)
}
if(is.numeric(to)){
#allowed numeric year
to <- paste(to, "-12-31 23:59:59", sep="")
}
#assuming character from here
test <- attributes(data$date)$tzone
  #(default as.POSIXct formats handle date and date-time strings)
  to <- if(is.null(test)){
    as.POSIXct(to)
  } else {
    as.POSIXct(to, tz=test)
  }
to
}
aqe_prepXArgs <- function(x.args, data, mod){
if(is.null(x.args)){
x.args <- if(is.null(mod)){
c("date", "wd", "ws", "air_temp")
} else {
temp <- as.formula(paste("~", mod, sep=""))
temp <- all.vars(temp[[2]])
temp <- temp[!temp %in% c("year.day", "week.day",
"hour.day", "count")]
temp <- unique(c("date", temp))
temp
}
}
x.args
}
aqe_prepPadData <- function(pad.data, data){
if(is.null(pad.data)){
stop("missing pad.data")
}
if(is.numeric(pad.data)){
#allowed numeric year taken from data
pad.data <- data[format(data$date, "%Y") %in%
pad.data,]
}
pad.data
}
aqe_padData <- function(from, to, data, pad.data, x.args,
diagnostic=FALSE){
#x.args
local.args <- x.args[x.args %in% names(pad.data)]
#could check length of x.args?
pad.data <- pad.data[local.args[local.args %in%
names(pad.data)]]
pad.data$padded <- TRUE
data$padded <- FALSE
#to do
####################################
#pad from
#pad to
#note
#this does not work if ref data is one year but not 2...
  #sort out or prevent?
if(to > max(data$date, na.rm=TRUE)){
if(diagnostic){
print("padding end")
}
.year <- as.numeric(format(max(data$date,
na.rm=TRUE),
"%Y"))
pad <- TRUE
while(as.numeric(pad)==1){
ref <- pad.data
t2 <- as.numeric(pad.data$date)
t2 <- t2 - min(t2, na.rm=TRUE)
t1 <- as.POSIXct(paste(.year, "-",
format(pad.data$date[1],
"%m-%d %H:%M:%S"),
sep=""),
"%Y-%m-%d %H:%M:%S",
tz= attr(pad.data$date[1], "tzone"))
ref$date <- t1 + t2
      #namespaced to dplyr (bare filter would resolve to stats::filter)
      ref0 <- dplyr::filter(ref, date > max(data$date,
                                            na.rm=TRUE))
      #print(dim(ref))
      if(nrow(ref0)>0){
        data <- dplyr::bind_rows(data, ref0)
      }
      if(to < max(data$date, na.rm=TRUE)){
        data <- dplyr::filter(data, date <= to)
      }
.year <- .year + 1
test <- as.numeric(format(max(data$date,
na.rm=TRUE),
"%Y"))
if(test > .year) .year <- test
if(to < max(ref$date, na.rm=TRUE)){
pad <- FALSE
}
}
}
#out
data
}
| /scratch/gouwar.j/cran-all/cranData/AQEval/R/calculate.stats.R |
############################################
#' @title find and test break-points
############################################
#' @name find.breaks
#' @aliases findBreakPoints textBreakSegments
#' @description Finding and testing break-points in
#' conventionally formatted air quality data sets.
#' @param data Data source, typically a \code{data.frame}
#' or similar, containing data-series to apply function to
#' and a paired time-stamped data-series, called \code{date}.
#' @param pollutant Name of the time-series, assumed to be
#' a column in \code{data}.
#' @param h (\code{findBreakPoints} only) The data window
#' size to use when looking for breaks in a supplied time-series,
#' expressed as a proportion of the time-series length (0-1),
#' by default 0.15.
#' @param ... other parameters
#' @param breaks (\code{testBreakPoints} only) A \code{data.frame}
#' of break-points and confidence intervals, typically a
#' \code{findBreakPoints} output.
#' @details \code{findBreakPoints} uses methods from the
#' \code{strucchange} package (see references) and
#' modifications as suggested by the main author of
#' \code{strucchange} to handle missing cases, to find
#' potential break-points in a supplied time-series.
#'
#' \code{testBreakPoints} tests and identifies most likely
#' break-points using methods proposed for use with
#' \code{quantBreakPoints} and \code{quantBreakSegments}
#' and conventionally formatted air quality data sets.
#' @returns \code{findBreakPoints} returns a \code{data.frame}
#' of found break-points.
#'
#' \code{testBreakPoints} returns a likely break-point/segment
#' report.
#' @references
#' Regarding \code{strucchange} methods see
#' \code{\link{breakpoints}}, and:
#'
#' Achim Zeileis, Friedrich Leisch, Kurt Hornik and Christian Kleiber
#' (2002). strucchange: An R Package for Testing for Structural Change
#' in Linear Regression Models. Journal of Statistical Software, 7(2),
#' 1-38. URL \url{https://www.jstatsoft.org/v07/i02/}.
#'
#' Achim Zeileis, Christian Kleiber, Walter Kraemer and Kurt Hornik
#' (2003). Testing and Dating of Structural Changes in Practice.
#' Computational Statistics & Data Analysis, 44, 109-123.
#'
#' Regarding missing data handling, see:
#'
#' URL:
#' \url{https://stackoverflow.com/questions/43243548/strucchange-not-reporting-breakdates}.
#'
#' Regarding \code{testBreakPoints}, see:
#'
#' Ropkins et al (In Prep).
#' @seealso
#' \code{\link{find.breaks}}.
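#' @examples
#' #a minimal sketch (not run): find, then test,
#' #break-points in 1-day averaged data
#' \dontrun{
#' temp <- openair::timeAverage(aq.data, "1 day")
#' brks <- findBreakPoints(temp, "no2", h=0.3)
#' testBreakPoints(temp, "no2", breaks=brks)
#' }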
#############################
#findBreakPoints
#############################
#to do
#check NA handling and non-regular data handling
# added an initial 'fix' but needs work
#diagnostic option for this function???
#need to check nothing in strucchange output
# currently dropped is worth keeping...
#error catchers for?
# expected time-series, date
# bad calls, missing time-series
# aqeval_... checkPrep
#finish documenting
#think about rename/manage/set data columns local function
# aqeval_renameDataColumns?
#splatted function
#' @import strucchange
#' @importFrom stats approx as.formula confint filter lm na.omit pchisq predict qchisq spec.pgram
#' @rdname find.breaks
#' @export
findBreakPoints <-
function (data, pollutant, h = 0.15, ...)
{
#require(strucchange)
name.pol <- pollutant
#NA handling based on
#https://stackoverflow.com/questions/43243548/strucchange-not-reporting-breakdates
#package author recommendation
#but should look into better option...
d <- na.omit(data.frame(ans = data[, name.pol],
date = data$date,
..ref = 1:nrow(data)))
names(d) <- c("ans", "date", "..ref")
bp <- breakpoints(ans ~ 1, data = d, h = h, ...)
if(is.logical(bp$breakpoints)) return(NULL)
temp <- suppressWarnings(confint(bp)$confint)
if(any(is.na(temp))){
#print(dimnames(temp))
fix <- c(t(temp))
#https://stackoverflow.com/questions/7735647/replacing-nas-with-latest-non-na-value
fix <- fix[which(!is.na(fix))[c(1,1:sum(!is.na(fix)))][cumsum(!is.na(fix))+1]]
temp <- matrix(fix, ncol=ncol(temp),
dimnames = dimnames(temp))
#print(temp)
warning("findBreakPoints: some suspect confidences",
"\n\tTried to fix but strongly recommend",
"\n\tre-running with different range or 'h'\n",
call.=FALSE)
}
if(any(temp<2)){
#not sure about this...
#but may kill later code if ignored...
temp[temp<2] <- 2
warning("findBreakPoints: break(s) too near '", pollutant,
"' start...\n\tTried to fix but strongly recommend",
"\n\tre-running with different range or 'h'\n", call.=FALSE)
}
if(any(temp>(nrow(d)-1))){
#not sure about this...
#but may kill later code if ignored...
temp[temp>(nrow(d)-1)] <- (nrow(d)-1)
warning("findBreakPoints: break(s) too near '", pollutant,
"' end...\n\tTried to fix but strongly recommend",
"\n\tre-running with different range or 'h'\n",
call.=FALSE)
}
#my.bp <- d$date[temp]
#ans <- as.data.frame(t(matrix(which(data$date %in% my.bp),
# ncol(temp))))
#print(temp)
my.bp <- d$..ref[temp]
ans <- as.data.frame(t(matrix(my.bp, ncol(temp), byrow = TRUE)))
names(ans) <- c("lower", "bpt", "upper")
ans
}
## wrapper so code using earlier name still works
## currently not exporting
## @export
#findBreakPoints01 <- function(...) findBreakPoints(...)
#testBreakPoints
#kr v.0.0.1
#############################
#to do
#' @export
#' @rdname find.breaks
#' @importFrom dplyr bind_rows
testBreakPoints <-
function (data, pollutant, breaks, ...)
{
#test identified break points
#test zero breaks model...
mod <- aqe_fitBreakPointsModel(data, pollutant,
breaks=NULL)
ans <- aqe_makeBreakPointTestReport(mod, 0, NA)
#if breaks check each model
#print("here")
if(!is.null(breaks) && nrow(breaks)>0){
for(i in 1:nrow(breaks)){
temp <- combn(1:nrow(breaks), i,
simplify = FALSE)
for(j in 1:length(temp)){
mod <- aqe_fitBreakPointsModel(data,
pollutant, breaks=breaks[temp[[j]],])
ans <- bind_rows(aqe_makeBreakPointTestReport(
mod, i, temp[[j]]), ans)
}
}
}
ans$suggest <- if(all(is.na(ans$adj.r.sq))){
""
} else {
ifelse(!is.na(ans$adj.r.sq) &
ans$adj.r.sq == max(ans$adj.r.sq, na.rm=TRUE),
"(<-)", "")
}
ans
}
#local function
aqe_makeBreakPointTestReport <-
function(mod, elements, breaks){
signif <- all(summary(mod)$coefficients[,4]<0.05)
breaks <- paste(breaks, sep="", collapse = "+")
if(!signif){
data.frame(elements = elements, breaks = breaks,
signif = signif, adj.r.sq = NA,
stringsAsFactors = FALSE)
} else {
adj.r.sq <- summary(mod)$adj.r.squared
data.frame(elements = elements, breaks = breaks,
signif = signif, adj.r.sq = adj.r.sq,
stringsAsFactors = FALSE)
}
}
| /scratch/gouwar.j/cran-all/cranData/AQEval/R/find.breaks.R |
############################################
#' @title find nearby sites
############################################
#' @name find.near
#' @aliases findNearSites findNearLatLon
#' @description Functions to find the nearest locations in a
#' reference data set by latitude and longitude.
#' @param lat,lon (numeric) The supplied latitude and
#' longitude.
#' @param nmax (numeric) The maximum number of nearest sites
#' to report, by default 10.
#' @param ... Other parameters, currently ignored.
#' @param ref (\code{data.frame} or similar) The look-up table to
#' use when identifying nearby locations, expected to
#' contain latitude, longitude and any required location
#' identifier data-series. By default, \code{findNearSites}
#' uses openair \code{importMeta} output if this is not
#' supplied, but it is a required input for
#' \code{findNearLatLon}.
#' @param units (character) The units to use when reporting
#' distances to near locations; current options 'm' (metres,
#' the default) and 'km'.
#' @param pollutant (character) For \code{findNearSites}
#' only, the pollutant of interest, by default NO2.
#' @param site.type (character) For \code{findNearSites}
#' only, the monitoring site type, by default Rural
#' Background.
#' @details
#' If investigating air quality in a particular location,
#' for example a UK Clean Air Zone
#' (\url{https://www.gov.uk/guidance/driving-in-a-clean-air-zone}),
#' you may wish to locate an appropriate rural background air quality
#' monitoring station. \code{findNearSites} locates air quality monitoring
#' sites with openly available data such as that available from the UK AURN
#' network (\url{https://uk-air.defra.gov.uk/networks/network-info?view=aurn}).
#' @note This function uses the haversine formula to account
#' for the Earth's surface curvature, and uses 6371 km as
#' the radius of the Earth.
#' @returns Both functions return a \code{data.frame} of near
#' site meta data.
#' @examples
#' #find rural background NO2 monitoring sites
#' #near latitude = 50, longitude = -1
#'
#' #not run: requires internet
#' \dontrun{
#' findNearSites(lat = 50, lon = -1)
#' }
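#'
#' #findNearLatLon requires a user-supplied look-up table,
#' #e.g. (a sketch) openair monitoring site meta data:
#' \dontrun{
#' ref <- openair::importMeta(source="aurn")
#' findNearLatLon(lat = 50, lon = -1, ref = ref)
#' }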
#findNear...
#############################
#main function findNearLatLon
#############################
#error catchers for?
# expected time-series
# bad calls, missing time-series
# aqeval_... checkPrep?
#finish documenting
# references
#think about rename/manage/set data columns local function
# aqeval_renameDataColumns?
## #' @references need to reference formula
## #' @need to add dont run examples
#' @rdname find.near
#' @export
findNearLatLon <-
function(lat, lon = NULL,
nmax = 10, ...,
ref = NULL, units = "m")
{
#find near locations in a reference dataset
#replaces findNearSite
x.args <- list(...)
#if no ref stop
if(is.null(ref)){
stop("findNearLatLon halted, ref not supplied.\n\t(See ?findNearLatLon)", call.=FALSE)
}
#if data.frame supplied as lat
#look in this for latitude and longitude
if(is.data.frame(lat) & is.null(lon)){
#this assumes lat and long are names
lon <- aqe_getXFromData(c("longitude", "long", "lon"),
lat)
lat <- aqe_getXFromData(c("latitude", "lat"), lat)
}
#stop if lat and lon not supplied
#stop if NULLs supplied
#handle NAs and multiple lat/lon
if(any(missing(lat), missing(lon))){
stop("need both lat and lon")
}
lat <- unique(lat[!is.na(lat)])
lon <- unique(lon[!is.na(lon)])
if(any(length(lat)<1, length(lon)<1)){
stop("need both lat and lon")
}
if(any(length(lat)>1, length(lon)>1)){
warning("multiple lat/lon supplied; only using first")
lat <- lat[1]
lon <- lon[1]
}
#this expects ref to be there
#setup for haversine calcs
deg2rad <- function(x) x * (pi/180)
rad2deg <- function(x) x * (180/pi)
lat1 <- ref$latitude
lon1 <- ref$longitude
lat0 <- rep(lat, length(lat1))
lon0 <- rep(lon, length(lon1))
#using The haversine formula
dLat <- deg2rad(lat1-lat0)
dLon <- deg2rad(lon1-lon0)
lat0 <- deg2rad(lat0)
lat1 <- deg2rad(lat1)
#the square of half the chord length between the points
a <- sin(dLat/2) * sin(dLat/2) +
sin(dLon/2) * sin(dLon/2) * cos(lat0) * cos(lat1)
#the angular distance in radians
c <- 2 * atan2(sqrt(a), sqrt(1-a))
#radius of earth, km
R <- 6371
#scaling for output
sc <- NULL
if(units == "km") sc <- 1
if(units == "m") sc <- 1000
if(is.null(sc)){
stop("supplied units not recognised")
}
#############
#needs handling for unrecognised units
#############
#see previous version findNearLatLon
#or smoothLatLonPath handling
# in old grey.area
#output in requested scale
ref$distance <- R * c * sc
ref <- ref[order(ref$distance),]
if(nrow(ref)>nmax)
ref <- ref[1:nmax,]
#ref <- ref[,unique(c("code", "site", "distance",
# names(ref)))]
names(ref)[names(ref)=="distance"] <-
paste("distance", units, sep=".")
ref
}
## findNearSites is earlier version of findNear...
## probably going
## splatted function (first time around)
#' @rdname find.near
#' @import openair
#' @export
findNearSites <-
function(lat, lon, pollutant = "no2",
site.type = "rural background",
nmax = 10, ...,
ref = NULL, units = "m")
{
#find near AQ monitoring sites
#uses openair importMeta
#setup
x.args <- list(...)
#stop if lat and lon not supplied
#stop if NULLs supplied
#handle NAs and multiple lat/lon
if(any(missing(lat), missing(lon))){
stop("need both lat and lon")
}
lat <- unique(lat[!is.na(lat)])
lon <- unique(lon[!is.na(lon)])
if(any(length(lat)<1, length(lon)<1)){
stop("need both lat and lon")
}
if(any(length(lat)>1, length(lon)>1)){
warning("multiple lat/lon supplied; only using first")
lat <- lat[1]
lon <- lon[1]
}
#get reference if missing
#this currently only sources data from AURN
if(is.null(ref)){
ref <- importMeta(source="aurn", all=TRUE)
}
#limit search to sites monitoring pollutant of interest
ref <- ref[tolower(ref$variable) %in%
tolower(pollutant),]
#limit search to sites of type
#extra option all rather than have to write lot in full
#NB: only handle one species at moment
if(tolower(site.type)!="all"){
ref <- ref[tolower(ref$site_type) %in%
tolower(site.type),]
}
#limit search to date ranges
#trickier than you think...
#needs very careful documentation
##################################
if("date.range" %in% names(x.args)){
############################
#next bit will error if not
#formatted correctly
############################
#start: max of mins...
temp <- as.Date(ref$start_date)
t1 <- ifelse(temp > min(as.Date(x.args$date.range),
na.rm=TRUE),
temp,
min(as.Date(x.args$date.range), na.rm=TRUE))
#end: min of maxs
temp <- as.Date(gsub("ongoing", Sys.Date(), ref$end_date))
t2 <- ifelse(temp < max(as.Date(x.args$date.range),
na.rm=TRUE),
temp,
max(as.Date(x.args$date.range), na.rm=TRUE))
if("date.overlap" %in% names(x.args)){
temp <- strsplit(as.character(x.args$date.overlap), " ")[[1]]
if(length(temp)<2)
stop("need units if applying date.overlap")
############################
#next bit will error if not
#formatted correctly
t2 <- t2 - as.difftime(as.numeric(temp[1]),
units=temp[2])
}
ref <- ref[t2>t1,]
}
#setup for haversine calcs
deg2rad <- function(x) x * (pi/180)
rad2deg <- function(x) x * (180/pi)
lat1 <- ref$latitude
lon1 <- ref$longitude
lat0 <- rep(lat, length(lat1))
lon0 <- rep(lon, length(lon1))
#using The haversine formula
dLat <- deg2rad(lat1-lat0)
dLon <- deg2rad(lon1-lon0)
lat0 <- deg2rad(lat0)
lat1 <- deg2rad(lat1)
#the square of half the chord length between the points
a <- sin(dLat/2) * sin(dLat/2) +
sin(dLon/2) * sin(dLon/2) * cos(lat0) * cos(lat1)
#the angular distance in radians
c <- 2 * atan2(sqrt(a), sqrt(1-a))
#radius of earth, km
R <- 6371
#scaling for output
    sc <- NULL
    if(units == "km") sc <- 1
    if(units == "m") sc <- 1000
    #stop for unrecognised units (mirrors findNearLatLon)
    if(is.null(sc)){
      stop("supplied units not recognised")
    }
#could make option to replace na's with 0
#see previous version below
#or smoothLatLonPath handling
#output in requested scale
ref$distance <- R * c * sc
ref <- ref[order(ref$distance),]
if(nrow(ref)>nmax)
ref <- ref[1:nmax,]
ref <- ref[,unique(c("code", "site", "distance",
names(ref)))]
names(ref)[names(ref)=="distance"] <- paste("distance",
units,
sep=".")
ref
}
####################################
#unexported functions
####################################
#used by findNearLatLon
aqe_getXFromData <- function(x, data, ...){
#consider tolower
temp <- x[x %in% names(data)]
if(length(temp)<1){
stop("'x' not found")
}
data[[temp[1]]]
}
#think about
#from
#https://stackoverflow.com/questions/53639265/how-to-use-dynamic-arguments-in-a-dplyr-filter-within-function
#my_func = function(dat, ...){
# args <- rlang::enexprs(...)
# dat %>%
# filter(!!! args)
#}
#my_func = function(dat, ...){
# args <- enquos(...)
# ex_args <- unname(imap(args, function(expr, name) quo(!!sym(name)==!!expr)))
#
# dat %>% filter(!!!ex_args)
#}
#my_func(example_data, seg1 = 'b', seg2 = 'd')
| /scratch/gouwar.j/cran-all/cranData/AQEval/R/find.near.R |
############################################
#' @title isolateContribution
############################################
#' @name isolate.signal
#' @aliases isolateContribution
#' @description Environmental time-series signal processing:
#' Contribution isolation based on background subtraction,
#' deseasonalisation and/or deweathering.
#' @param data Data source, typically \code{data.frame}
#' (or similar), containing all time-series to be used when
#' applying signal processing.
#' @param pollutant The column name of the \code{data}
#' time-series to be signal processed.
#' @param background (optional) if supplied, the background
#' time-series to use as a background correction.
#' See below.
#' @param deseason logical or character vector, if
#' \code{TRUE} (default), the \code{pollutant} is
#' deseasonalised using \code{day.hour} and \code{year.day}
#' frequency terms, all calculated from the \code{data}
#' time stamp, assumed to be \code{date} in \code{data}.
#' Other options: \code{FALSE} to turn off
#' deseasonalisation; or a character vector of frequency
#' terms if user-defining. See below.
#' @param deweather logical or character vector, if
#' \code{TRUE} (default), the data is deweathered using
#' wind speed and direction, assumed to be \code{ws}
#' and \code{wd} in \code{data}. Other options: \code{FALSE}
#' to turn off deweathering; or a character vector of
#' \code{data} column names if user-defining. See below.
#' @param method numeric, contribution isolation method
#' (default 2). See Note.
#' @param add.term extra terms to add to the contribution
#' isolation model; ignore for now (in development).
#' @param formula (optional) Signal isolate model formula;
#' this allows user to set the signal isolation model formula
#' directly, but means other formula terms (\code{background},
#' \code{deseason} and \code{deweather}) will be ignored.
#' @param output output options; ignore for now (in development)
#' @param ... other arguments; ignore for now (in development)
#' @author Karl Ropkins
#' @returns \code{isolateContribution} returns a vector of
#' predictions of the \code{pollutant} time-series after
#' the requested signal isolation.
#' @details \code{isolateContribution} estimates and
#' subtracts \code{pollutant} variance associated with
#' factors that may hinder break-point/segment analysis:
#' \itemize{
#' \item{\strong{Background Correction}}{ If applied, this fits
#' the supplied \code{background} time-series as a
#' spline term: \code{s(background)}.}
#' \item{\strong{Seasonality}}{ If applied, this fits regular
#' frequency terms, e.g. \code{day.hour}, \code{year.day},
#' as spline terms, default TRUE is equivalent to
#' \code{s(day.hour)} and \code{s(year.day)}. All terms are
#' calculated from \code{date} column in \code{data}.}
#' \item{\strong{Weather}}{ If applied, this fits time-series of
#' identified meteorological measurements, e.g. wind speed
#' and direction (\code{ws} and \code{wd} in \code{data}).
#' If both \code{ws} and \code{wd} are present these are
#' fitted as a tensor term \code{te(ws, wd)}. Other
#' \code{deweather}ing terms, if included, are fitted
#' as spline term \code{s(term)}. The default \code{TRUE}
#' is equivalent to \code{te(ws, wd)}.}
#' }
#' Using the supplied arguments, it builds a signal
#' (\code{\link{mgcv}}) GAM model, calculates,
#' and returns the mean-centred residuals as an
#' estimate of the isolated local contribution.
#' @note \code{method} was included as part of method
#' development and testing work, and retained for now;
#' please ignore it.
#' @seealso Regarding seasonal terms and frequency
#' analysis, see \code{\link{stl}} and
#' \code{\link{spectralFrequency}}
#' @references
#' Regarding \code{\link{mgcv}} GAM fitting methods, see
#' Wood (2017) for general introduction and package
#' documentation regarding coding (\code{\link{mgcv}}):
#'
#' Wood, S.N. (2017) Generalized Additive Models:
#' an introduction with R (2nd edition), CRC, DOI:
#' \doi{10.1201/9781420010404}.
#'
#' Regarding \code{isolateContribution}, see:
#'
#' Ropkins et al (In Prep).
#' @seealso
#' \code{\link{mgcv}}, \code{\link{gam}}.
#' @examples
#' #fitting a simple deseasonalisation, deweathering
#' #and background correction (dswb) model to no2:
#'
#' aq.data$dswb.no2 <- isolateContribution(aq.data,
#' "no2", background="bg.no2")
#'
#' #compare at 7 day resolution:
#' temp <- openair::timeAverage(aq.data, "7 day")
#'
#' #without dswb
#' quantBreakPoints(temp, "no2", test=FALSE, h=0.1)
#'
#' #with dswb
#' quantBreakPoints(temp, "dswb.no2", test=FALSE, h=0.1)
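#'
#' #user-defined model formula (a sketch): background
#' #correction and deweathering only, no deseasonalisation
#' \dontrun{
#' isolateContribution(aq.data, "no2",
#'     formula = no2 ~ s(bg.no2) + te(wd,ws))
#' }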
#isolateContribution function
##################################
#to do
##################################
#test
#tidy code - quick package for work Anthony is doing
# move any common elements to sub functions
#check alternative to mgcv - might be stuck with it but
#possible error catchers?
# bad call - missing time-series
# missing expected time-series, date, ws, wd
#document methods
#diagnostic plot
# marginal effect partial plot of model?
# nb: this needs to be sub function (in case plotting package changes...)
##################################
#notes
##################################
#this currently works on openair-like data 1hr resolution data.frame
# might need to think about other time-scales...
#this currently applies minimal dw and ds model
# might need to think about expanding modelling term...
#think about diagnostic options following T-IRP feedback...
#think about import
#think about making some of this internal functions
# for example the dS and dW fomula build...
#splatted function
#' @import mgcv
#' @rdname isolate.signal
#' @export
isolateContribution <-
function(data, pollutant, background = NULL,
deseason = TRUE, deweather = TRUE,
method = 2, add.term = NULL,
formula = NULL,
output = "mean",
...){
#####################
#setup
#####################
if(is.null(formula)){
#don't need any of this if formula is set!
deweather <- if(is.logical(deweather)){
if(deweather) c("ws", "wd") else character()
} else {
deweather
}
deseason <- if(is.logical(deseason)){
if(deseason) c("day.hour", "year.day") else character()
} else {
deseason
}
#check that all expected columns are there
if(is.null(background) & length(deseason)<1 & length(deweather)<1){
stop("Need (at least) one of 'background', 'deseason' or 'deweather'")
}
ref <- c("date", pollutant, background)
    if(length(deweather)>0){  #include all deweather terms in the check
ref <- unique(c(ref, deweather))
}
if(!is.null(add.term)){
ref <- unique(c(ref, add.term))
}
} else {
ref <- unique(all.vars(formula))
if(length(formula)==2){
############################
#this will error if formula has no y term
#and pollutant missing
##########################
ref <- unique(c(pollutant, ref))
}
}
temp <- ref[!ref %in% names(data)]
####################
#untidy error message
###################
if(length(temp)>0){
stop(paste("Missing data-series: ", paste(temp, collapse=", "),
"\n", sep=""))
}
d1 <- data[ref] #currently not used...
d1$..counter <- 1:nrow(d1) #...
d1 <- as.data.frame(na.omit(d1)) #...
####################
#build model formula
#or use formula
####################
if(!is.null(formula)){
if(length(formula)==3){
ff <- formula
      pollutant <- all.vars(ff[[2]])  #response (lhs) of the formula
} else {
ff <- paste(pollutant, as.character(formula),
sep=" ~ ")
ff <- as.formula(ff)
}
} else {
ff <- ""
######################
#method 3
#log(y)~ (and e^y)
#does not seem to be better
#test again/makes no sense...
######################
if(!is.null(background)){
ff <- paste(ff, "+s(", background, ")")
}
if(length(deweather)>0){
if("wd" %in% deweather & "ws" %in% deweather){
ff <- paste(ff, "+te(wd,ws)", sep="")
deweather <- deweather[!deweather %in% c("wd", "ws")]
}
if(length(deweather)>0){
fff <- paste("s(", deweather, ")", sep="", collapse = "+")
ff <- paste(ff, "+", fff, sep="")
}
}
temp <- TRUE
if(length(deseason)>0){
############################
#like to look at this again
############################
#method 2
#fit a spline better than factor...
############################
#add more seasonal terms
# week.day
# month.day??
# week.hour??
############################
#do we need to set d1 and data?
# unless we want to return data
# it seems unlikely
############################
      #also we should be able to simplify
      #a lot of this...
############################
if(method==2){
if("year.day" %in% deseason){
d1$year.day <- as.numeric(format(d1$date, "%j"))
data$year.day <- as.numeric(format(data$date, "%j"))
ff <- paste(ff, "+s(year.day)", sep="")
}
####################################
#this will need better thinking
####################################
if("week.day" %in% deseason){
d1$week.day <- as.numeric(format(d1$date, "%w"))
data$week.day <- as.numeric(format(data$date, "%w"))
ff <- paste(ff, "s(week.day, k=5)", sep="")
}
if("day.hour" %in% deseason){
d1$day.hour <- as.numeric(format(d1$date, "%H"))
data$day.hour <- as.numeric(format(data$date, "%H"))
ff <- paste(ff, "+s(day.hour)", sep="")
}
} else{
#hold for testing...
if("year.day" %in% deseason){
d1$year.day <- as.numeric(format(d1$date, "%j"))
data$year.day <- as.numeric(format(data$date, "%j"))
ff <- paste(ff, "+year.day", sep="")
}
if("week.day" %in% deseason){
d1$week.day <- as.numeric(format(d1$date, "%w"))
data$week.day <- as.numeric(format(data$date, "%w"))
ff <- paste(ff, "+week.day", sep="")
}
if("day.hour" %in% deseason){
d1$day.hour <- as.numeric(format(d1$date, "%H"))
data$day.hour <- as.numeric(format(data$date, "%H"))
ff <- paste(ff, "+day.hour", sep="")
}
}
if(!is.null(add.term)){
#this currently only allows you to add one term
ff <- paste(ff, paste("+s(", add.term, ")",
sep="", collapse = ""))
}
}
ff <- gsub('^[+]','', ff)
ff <- paste(pollutant, " ~ ", ff, sep="")
ff <- as.formula(ff)
}
#think about how we do/document this...
message(paste(as.character(ff)[c(2,1,3)],
sep="", collapse = " "))
############################
#model
############################
mod <- gam(ff, data=data)
if("model" %in% output)
return(mod)
temp <- predict.gam(mod)
ans <- rep(NA, length=nrow(data))
ans[as.numeric(names(temp))] <- temp
################################
#scaling
################################
if("mean" %in% output){
ans <- data[[pollutant]] - ans
scale <- mean(data[[pollutant]], na.rm=TRUE)
} else {
#residual, might want to add warning?
ans <- data[[pollutant]] - ans
scale <- 0
}
ans <- ans + scale
########################
  #this returns vector for data.frames and tbl_df...
########################
return(ans)
}
| /scratch/gouwar.j/cran-all/cranData/AQEval/R/isolate.signal.R |
############################################
#' @title Other Air Quality Models
############################################
#' @name other.aq.models
#' @rdname fitNearSiteModel
#' @description Other packaged Air Quality Models.
#' @param data \code{data.frame} (or similar) containing data-series
#' to be modelled; this is expected to contain 'date', 'code'
#' and pollutant of interest data-series.
#' @param pollutant The name of the \code{pollutant} (in
#' \code{data}) to model, by default 'NO2'.
#' @param y The name of the monitor site to be modelled,
#' assumed to be one of several names in the \code{code} column of
#' \code{data}.
#' @param x The other sites to use when building the model, the
#' default 'rest' uses all supplied sites except 'y'.
#' @param elements The number of inputs to use in the
#' site models, can be any number up to length of x or
#' combination thereof; by default this is set as
#' \code{length(x):1}
#' @param ... extra arguments.
#' @details \code{fitNearSiteModel} builds an air quality
#' model for one location using air quality data from nearby
#' sites.
#' @returns A \code{data.frame} containing the date, the
#' reference (modelled) time-series, the model predictions,
#' and the inputs used for each prediction.
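#' @examples
#' #a minimal sketch (not run): model 'no2' at site "AAA"
#' #using all other sites in a long-format data set with
#' #'date', 'code' and 'no2' columns ('my.data' and the
#' #site code here are illustrative)
#' \dontrun{
#' fitNearSiteModel(my.data, pollutant="no2", y="AAA")
#' }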
## #' @references TO DO once paper published
#fitNearSiteModel
##############################################
#v0.0.2 22/02/2020
#fit model based on near site data
#to think about
#################################
#name of function
#merge this and findNearSites function
# documentation
#
#this NEEDS tidyr at moment
#this NEEDS mgcv at moment
#' @import mgcv
#' @export
fitNearSiteModel <-
function(data, pollutant = "no2", y, x = "rest",
elements = NULL, ...){
#setup
x.args <- list(...)
data <- aqe_checkData(data,
unique(c("date", "code",
pollutant)),
"fitNearSiteModel(data,...)")
data <- tidyr::spread(data, "code", pollutant)
#think I have to do this to make gams work...
#tibbles names confusing things
data <- as.data.frame(data)
names(data) <- make.names(names(data))
y <- make.names(y)
x <- make.names(x)
#
names <- names(data)[names(data)!="date"]
################
if(!y %in% names){
stop("fitNearSiteModel(data,...)\n\t",
"Expected data for site '", y, "'",
call. = FALSE)
}
if(is.character(x) && length(x)==1 &&
tolower(x)=="rest"){
x <- names[names!=y]
}
temp <- x[!x %in% names]
if(length(temp)>0){
stop("fitNearSiteModel(data,...)\n\t",
"Missing data for expected site(s):\n\t",
paste(x, collapse = ", "), call. = FALSE)
}
#elements
if(is.null(elements)){
#might need more here
elements <- length(x):1
}
#this is not tidy
#also want pollutant name here...
ans <- data.frame(date=data$date, ref=data[,y],
pred.temp=NA, pred=NA, model=NA)
#need option to add elements of own?
elements <- lapply(elements, function(element){
combn(x, element, simplify=FALSE)
})
for(i in 1:length(elements)){
#do for each level of elements
#if not already filled...
if(any(is.na(ans$pred))){
#build all models
mods <- elements[[i]]
mod.ls <- lapply(1:length(mods), function(j){
mod <- mods[[j]]
form <- paste("s(", mod, ")", sep="")
form <- paste(form, collapse = "+")
form <- paste(y, "~", form, sep="")
form <- as.formula(form)
mod <- paste(mod, collapse="+")
mod.ans <- gam(form, data = data)
list(mod=mod, gam=mod.ans, r=summary(mod.ans)$r.sq)
})
#reorder mod.ls
mod.ls <- mod.ls[rev(order(sapply(mod.ls,
function(x) x$r)))]
#add prediction to data to ans
#if not already filled
for(j in 1:length(mod.ls)){
if(any(is.na(ans$pred))){
#pred and add to ans as pred
#track mod in ans as model
mod.y <- predict(mod.ls[[j]]$gam,
newdata = data)
ans$pred.temp <- NA
ans$pred.temp[as.numeric(names(mod.y))] <-
mod.y
#extra linear model step here in original method
#test if needed...
ans$model[is.na(ans$pred) &
!is.na(ans$pred.temp)] <-
mod.ls[[j]]$mod
ans$pred[is.na(ans$pred) &
!is.na(ans$pred.temp)] <-
ans$pred.temp[is.na(ans$pred) &
!is.na(ans$pred.temp)]
#######################
#TO HERE
#######################
}
}
}
}
#remove pred.temp
ans <- ans[names(ans)!="pred.temp"]
ans
}
aqe_checkData <-
function(data, names, fun.name = "AQEval", tidy = TRUE){
temp <- names[!names %in% names(data)]
if(length(temp)>0){
stop(fun.name, "\n\tRequires missing data column(s):\n\t",
paste(temp, collapse = ", "), call.=FALSE)
}
if(tidy) data <- data[names]
data
}
| /scratch/gouwar.j/cran-all/cranData/AQEval/R/other.aq.models.R |
############################################
#' @title quantify break-point/segments
############################################
#' @name quantify.breaks
#' @aliases quantBreakPoints quantBreakSegments
#' @description Quantify either break-points or
#' break-segments in pollutant time-series.
#' @param data Data source, typically a data.frame or similar,
#' containing data-series to model and a paired time-stamp
#' data-series, named date.
#' @param pollutant The name of the data-series to
#' break-point or break-segment model.
#' @param breaks (Optional) The break-points and
#' confidence intervals to use when building either
#' break-point or break-segment models. If not supplied
#' these are build using \code{\link{findBreakPoints}}
#' and supplied arguments.
#' @param ylab Y-label term, by default pollutant.
#' @param xlab X-label term, by default date.
#' @param pt.col Point fill and line colours for plot,
#' defaults lightgrey and darkgrey.
#' @param line.col Line colour for plot, default red.
#' @param break.col Break-point/segment colour for plot, default
#' blue.
#' @param event An optional list of plot terms for an event
#' marker, applied to a vertical line and text label. List
#' items include: \code{x} the event date (YYYY-MM-DD format)
#' required for both line and label; \code{y} by default 0.9 x
#' y-plot range; \code{label} the label text, required for
#' label; \code{line.size} the line width, by default 0.5;
#' \code{font.size} the text size, by default 5; and,
#' \code{hjust} the label left/right justification, 0 left,
#' 0.5 centre, 1 right (default). See also examples below.
#' @param show What to show before returning the break-point
#' quantification model, by default both plot and report.
#' @param ... other parameters
#' @param seg.method (\code{quantBreakSegments} only) the
#' break-segment fitting method to use.
#' @param seg.seed (\code{quantBreakSegments} only) the
#' seed setting to use when fitting break-segments, default
#' \code{12345}.
#' @details \code{quantBreakPoints} and
#' \code{quantBreakSegments} both use
#' \code{strucchange} methods to identify potential
#' break-points in time-series, and then quantify
#' these as conventional break-points or break-segments,
#' respectively:
#' \itemize{
#' \item \strong{Finding Break-points} Using the
#' \code{strucchange} methods of Zeileis and colleagues
#' and independent change detection model, the functions
#' apply a rolling-window approach, assuming the first
#' window (or data subset) is without change, building a
#' statistical model of that, advancing the window,
#' building a second model and comparing these, and so on,
#' to identify the most likely points of change in a
#' larger data-series. See also \code{\link{findBreakPoints}}
#' \item \strong{Quantifying Break-points} Using the
#' supplied break-points to build a break-point model.
#' \item \strong{Quantifying Break-segments} Using the
#' confidence regions for the supplied break-points as the
#' starting points to build a break-segment model.
#' }
#' @author Karl Ropkins
#' @references
#' Regarding \code{strucchange} methods see in-package
#' documentation, e.g. \code{\link[strucchange]{breakpoints}},
#' and:
#'
#' Achim Zeileis, Friedrich Leisch, Kurt Hornik and Christian Kleiber
#' (2002). strucchange: An R Package for Testing for Structural Change
#' in Linear Regression Models. Journal of Statistical Software, 7(2),
#' 1-38. URL \url{https://www.jstatsoft.org/v07/i02/}.
#'
#' Achim Zeileis, Christian Kleiber, Walter Kraemer and Kurt Hornik
#' (2003). Testing and Dating of Structural Changes in Practice.
#' Computational Statistics & Data Analysis, 44, 109-123.
#' DOI \doi{10.1016/S0167-9473(03)00030-6}.
#'
#' Regarding \code{segmented} methods see in-package
#' documentation, e.g.
#' \code{\link[segmented]{segmented}}, and:
#'
#' Vito M. R. Muggeo (2003). Estimating regression models
#' with unknown break-points. Statistics in Medicine, 22,
#' 3055-3071. DOI 10.1002/sim.1545.
#'
#' Vito M. R. Muggeo (2008). segmented: an R Package to
#' Fit Regression Models with Broken-Line Relationships.
#' R News, 8/1, 20-25.
#' URL \url{https://cran.r-project.org/doc/Rnews/}.
#'
#' Vito M. R. Muggeo (2016). Testing with a nuisance
#' parameter present only under the alternative: a
#' score-based approach with application to segmented
#' modelling. J of Statistical Computation and Simulation,
#' 86, 3059-3067.
#' DOI 10.1080/00949655.2016.1149855.
#'
#' Vito M. R. Muggeo (2017). Interval estimation for the
#' breakpoint in segmented regression: a smoothed
#' score-based approach. Australian & New Zealand Journal
#' of Statistics, 59, 311-322.
#' DOI 10.1111/anzs.12200.
#'
#' Regarding break-points/segment methods, see:
#'
#' Ropkins et al (In Prep).
#' @seealso
#' \code{\link{timeAverage}} in \code{openair},
#' \code{\link{breakpoints}} in \code{strucchange}, and
#' \code{\link{segmented}} in \code{segmented}.
#'
#' @returns Both functions use the \code{show} argument
#' to control which elements of the functions' outputs
#' are shown but also invisibly return a \code{list}
#' of all outputs which can be caught using, e.g.:
#'
#' \code{brk.mod <- quantBreakPoints(data, pollutant)}
#' @note \code{AQEval} function \code{quantBreakSegments}
#' is currently running \code{segmented v.1.3-4} while we
#' evaluate latest version, \code{v.1.4-0}.
#' @examples
#' #using openair timeAverage to covert 1-hour data to 1-day averages
#'
#' temp <- openair::timeAverage(aq.data, "1 day")
#'
#' #break-points
#'
#' quantBreakPoints(temp, "no2", h=0.3)
#'
#' #break-segments
#'
#' quantBreakSegments(temp, "no2", h=0.3)
#'
#' #addition examples (not run)
#' \dontrun{
#' #in-call plot modification
#' #removing x axis label
#' #recolouring break line and
#' #adding an event marker
#' quantBreakPoints(temp, "no2", h=0.3,
#' xlab="", break.col = "red",
#' event=list(label="Event expected here",
#' x="2002-08-01", col="grey"))
#'}
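#'
#' #catching outputs for re-use (a sketch):
#' \dontrun{
#' brks <- quantBreakPoints(temp, "no2", h=0.3,
#'            show="")
#' brks$report
#' brks$plot
#' }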
#quantBreakPoints
################################
#need to tidy
# most local functions using data2 rather than data
#recent rebuild to isolate model prediction
#splatted function
#' @rdname quantify.breaks
#' @export
quantBreakPoints <-
function (data, pollutant, breaks, ylab = NULL,
xlab = NULL, pt.col = c("lightgrey", "darkgrey"),
line.col = "red", break.col ="blue",
event = NULL, show = c("plot", "report"), ...)
{
##########################
#need a checker for date and pollutant
#does breaks default want to be NA
#some code in next section could move into subfunctions
##########################
name.pol <- pollutant
pollutant <- data[, name.pol]
if(missing(breaks)){
#if breaks missing build assuming
#what sent plus defaults
breaks <- aqe_buildBreaks(data, name.pol, ...)
}
x.args <- list(...)
#print("fit")
################
data2 <- data[,c("date", name.pol)]
mod <- aqe_fitBreakPointsModel(data2, name.pol, breaks)
################
data2$pred <- rep(NA, nrow(data2))
data2$err <- data2$pred
ans <- predict(mod, se.fit = TRUE)
data2$pred[as.numeric(names(ans$fit))] <- ans$fit
data2$err[as.numeric(names(ans$fit))] <- ans$se.fit
#print("report")
################
report <- aqe_makeBreakPointsReport(data2, breaks)
#################
#################
if ("report" %in% show) {
aqe_summariseBreakPointsReport(report)
}
#print("plot")
######
####################
#not sure this is best way to handle
#auto.text=FALSE...
#####################
auto.text <- if("auto.text" %in% names(x.args)){
x.args$auto.text
} else { TRUE }
plt <- aqe_plotQuantBreakPoints(data2, name.pol, breaks,
xlab=xlab, ylab=ylab, pt.col=pt.col,
line.col=line.col, break.col=break.col,
event=event, auto.text=auto.text, ...)
if ("plot" %in% show) {
plot(plt)
}
return(invisible(list(data = data, breaks = breaks, data2 = data2,
plot = plt, report = report, model = mod)))
}
#splatted function
#' @rdname quantify.breaks
#' @export
quantBreakSegments <-
function (data, pollutant, breaks, ylab = NULL,
xlab = NULL, pt.col = c("lightgrey", "darkgrey"),
line.col = "red", break.col ="blue",
event = NULL, seg.method = 2, seg.seed = 12345,
show = c("plot", "report"), ...)
{
##########################
#need a checker for date and pollutant like quantBreakPoints
#does breaks default want to be NA
#need to rationalise seg.method = 1/2 once method finalised
#see CRAN feedback regard set.seed
##########################
#breaks setup
#buildBreaks in quantBreakPoints
if(missing(breaks)){
breaks <- aqe_buildBreaks(data, pollutant,...)
}
x.args <- list(...)
#model
if(!seg.method %in% 1:2){
stop("Unknown seg.method requested", call.=FALSE)
}
#######################################
#should be able to simplify this or
#drop it once seg.models confirmed
#see optimisation notes
#######################################
if(seg.method==1){
mod <- aqe_fitBreakSegmentsModel01(data, pollutant, breaks)
data2 <- data[,c("date", pollutant)]
data2$pred <- rep(NA, nrow(data2))
data2$err <- data2$pred
ans <- predict(mod, se.fit = TRUE)
data2$pred[as.numeric(names(ans$fit))] <- ans$fit
data2$err[as.numeric(names(ans$fit))] <- ans$se.fit
#segments
#this needs to be tidied once methods finalised
segments <- aqe_makeSegmentsFromBreaks01(breaks)
#report
#this needs to be tidied once methods finalised
report <- aqe_makeBreakSegmentsReport(data2, segments)
if ("report" %in% show) {
aqe_summariseBreakSegmentsReport(report)
}
#plot
#this needs to be tidied once methods finalised
auto.text <- if("auto.text" %in% names(x.args)){
x.args$auto.text
} else { TRUE }
plt <- aqe_plotQuantBreakSegments01(data2, pollutant,
segments,
pt.col=pt.col,
break.col = break.col, line.col = line.col,
ylab=ylab, xlab=xlab,
event=event,
auto.text = auto.text, ...)
if ("plot" %in% show) {
plot(plt)
}
}
if(seg.method==2){
data2 <- data[,c("date", pollutant)]
#print("fit")
#################
ls.mod <- aqe_fitBreakSegmentsModel02(data2, pollutant, breaks,
seg.seed = seg.seed)
#print("after.fit")
mod <- ls.mod$mod
data2$pred <- rep(NA, nrow(data2))
data2$err <- data2$pred
##########################
#not sure if I need all this
##########################
ans <- if("segmented" %in% class(mod)){
segmented::predict.segmented(mod, se.fit = TRUE)
} else {
predict(mod, se.fit=TRUE)
}
##############################
data2$pred[as.numeric(names(ans$fit))] <- ans$fit
data2$err[as.numeric(names(ans$fit))] <- ans$se.fit
#segments
#this needs to be tidied once methods finalised
segments <- ls.mod$segments
#report
#this needs to be tidied once methods finalised
#print("report")
###################
report <- aqe_makeBreakSegmentsReport(data2, segments)
if ("report" %in% show) {
aqe_summariseBreakSegmentsReport(report)
}
#plot
#this needs to be tidied once methods finalised
##plt <- NULL
auto.text <- if("auto.text" %in% names(x.args)){
x.args$auto.text
} else { TRUE }
#print("plot")
#######################
plt <- aqe_plotQuantBreakSegments02(data2, pollutant,
segments,
pt.col=pt.col,
break.col = break.col, line.col = line.col,
ylab=ylab, xlab=xlab,
event=event,
auto.text = auto.text, ...)
if ("plot" %in% show) {
plot(plt)
}
}
#output
return(invisible(list(data = data, segments = segments, data2 = data2,
plot = plt, report = report, model = mod)))
}
#################################
#removed
#################################
## @export
##quantBreaks02 <- function(...) quantBreakPoints(...)
## from days of quantBreaks01 and 02
## nobody should be using either these days
| /scratch/gouwar.j/cran-all/cranData/AQEval/R/quantify.breaks.R |
############################################
#' @title Spectral Analysis
############################################
#' @name spectral.analysis
#' @aliases spectralFrequency
#' @description Time-series spectral frequency analysis.
#' @param data \code{data.frame} holding data to be analysed,
#' expected to contain a timestamp data-series called
#' \code{date} and a measurement time-series to be analysed
#' identified using the \code{pollutant} argument.
#' @param pollutant The name of the time-series,
#' typically pollutant measurements, to be analysed.
#' @param ... extra arguments.
#' @returns \code{spectralFrequency} uses the \code{show}
#' argument to control which elements of the function's outputs
#' are shown but also invisibly returns a \code{list}
#' of all outputs which can be caught using, e.g.:
#'
#' \code{sfa.mod <- spectralFrequency(data, pollutant)}
#' @details \code{spectralFrequency} produces a
#' time-frequency analysis of the requested
#' \code{pollutant}.
#' @examples
#' spectralFrequency(aq.data, "no2")
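#'
#' #multiple time-series and stacked panels (a sketch):
#' \dontrun{
#' spectralFrequency(aq.data, c("no2", "bg.no2"),
#'     stack=TRUE)
#' }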
#splatted function
#spectralFrequency
#kr v.0.0.1 2019/11/02
#kr v.0.0.2 2019/11/15 (speed up regularization)
#kr v.0.0.3 2020/03/26 (moved plot to ggplot)
#(in development)
#args need tidying
#know issues/work to do
#look at regularization/hole filling - lomb?
#group handling
##think about colour scheme for stacking (yellow in middle)
#think about normalise
#think about ci
#think about comparison + add reference to paper
#labelling for key and facets via quickText
# needs replacing with local version
#' @rdname spectral.analysis
#' @export
spectralFrequency <-
function(data, pollutant, ...){
################
#default settings plus user updates via '...'
x.args <- loa::listUpdate(list(show = "plot",
auto.text=TRUE,
stack=FALSE, xlab="",
log.x = TRUE,
ylab=paste("spec(",
paste(pollutant,
collapse = ","),
")", sep="")
),
list(...))
#######################
#current bit
#######################
if(!"col" %in% names(x.args)){
temp <- length(pollutant)
x.args$col <- loa::colHandler(z=1:(if(temp==1) 2 else temp),
col.regions = loa::colHandler(1:4, col.regions = "Spectral"))
if(temp==1){
x.args$col <- x.args$col[2]
} else {
if("stack" %in% names(x.args) && x.args$stack){
x.args$col <- rep(x.args$col[length(x.args$col)],
temp)
}
}
}
######################
################
#cut data back to just needed data
data <- aqe_prepData(data, pollutant)
#tidy data (regularise and holefill)
data <- aqe_tidySpectralData(data)
#not tracking by at moment
mypol <- unlist(data[, pollutant])
#could use align to do this - might be faster but ws/wd bad?
#nicer way of getting mypol?
######################
#spectral analysis
#(don't want to spec.pgram plot)
#(don't want to pass spec.pgram any lattice user settings)
temp <- loa::listUpdate(list(plot=FALSE), x.args,
use.b=names(formals(spec.pgram)),
ignore.b=c("x", "plot"))
spec.df <- lapply(pollutant, function(y){
temp$x <- unlist(data[, y])
spec.raw <- do.call(spec.pgram, temp)
ans <- data.frame(freq = spec.raw$freq, spec = spec.raw$spec,
pollutant = y, stringsAsFactors = FALSE)
#normalise
ans$spec <- ans$spec/sum(ans$spec)
ans
})
spec.df <- do.call(rbind, spec.df)
spec.df$pollutant <- factor(spec.df$pollutant, levels=pollutant,
ordered=TRUE)
#stripping arguments sent to spec.pgram
temp <- names(temp)[names(temp)!="plot"]
x.args <- x.args[!names(x.args) %in% temp]
#currently not allowing grouping/subsetting...
#############################
#plot prep
#############################
#x-axis
#periods and labels
#units are in hours (assuming openair-like data)
#(this could be better - currently assumes data-like timescale)
period <- rev(c(1/(24*365*10), 1/(24*365*5), 1/(24*365),
1/(24*30*6), 1/(24*30), 1/(24*7), 1/24,
1/12, 1/6, 1/3, 1))
labels <- rev(c("10 years", "5 years", " 1 year", "6 months",
"1 month", " 1 week", " 1 day", "12 hours",
" 6 hours", " 3 hours", " 1 hour"))
spec.df$period <- 1/spec.df$freq
if("auto.text" %in% names(x.args) && x.args$auto.text){
x.args$xlab <- openair::quickText(x.args$xlab)
x.args$ylab <- openair::quickText(x.args$ylab)
}
plt <- ggplot2::ggplot(data = subset(spec.df)) +
ggplot2::geom_line(ggplot2::aes(x = freq, y = spec,
col = pollutant)) +
ggplot2::xlab(x.args$xlab) +
ggplot2::ylab(x.args$ylab)
if("stack" %in% names(x.args) && x.args$stack){
#condition by pollutant
plt <- plt +
ggplot2::facet_grid(ggplot2::vars(pollutant))
}
if("log.x" %in% names(x.args) && x.args$log.x){
plt <- plt +
ggplot2::scale_x_log10(breaks = period,
labels = labels)
} else {
plt <- plt +
ggplot2::scale_x_continuous()
}
if("log.y" %in% names(x.args) && x.args$log.y){
plt <- plt +
ggplot2::scale_y_log10()
} else {
plt <- plt +
ggplot2::scale_y_continuous()
}
plt <- plt +
ggplot2::scale_colour_manual(
values = x.args$col) +
ggplot2::theme_bw() +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45,
hjust=1.1, vjust=1.1))
if(length(pollutant)==1 || x.args$stack){
plt <- plt +
ggplot2::theme(legend.position = "none")
}
#####################
#output
if ("show" %in% names(x.args) &&
"plot" %in% x.args$show) plot(plt)
output <- list(plot = plt, data = spec.df,
call = match.call())
#class(output) <- "openair"
#need to decide what to do about class
invisible(output)
}
#######################
#internal functions
#######################
#aqe_prepData
#kr v.0.0.1 2019/11/15
#checks all expected data there
#stops if not
#send back only what is needed
aqe_prepData<- function(data, pollutant, ...){
temp <- unique(c("date", pollutant))
if(!all(temp %in% names(data))){
stop(paste("Expected data missing (",
paste(temp[!temp %in% names(data)], collapse=", "),
")", sep=""),
call.=FALSE)
}
data[temp]
}
#aqe_tidySpectralData
#kr v.0.0.1 2019/11/02
#might be better option via align
#but need to think about ws,wd handling
aqe_tidySpectralData<- function(data, by = "hour", ...){
######################
#to think about
#this currently holefills/regularises all columns
##(alternative? regularise in align?)
##################
#version 1
###################
#(very slow because of timeAverage)
#ind <- openair:::find.time.interval(data$date)
#temp <- openair::timeAverage(data, ind)
#ref <- names(temp)[names(temp)!="date"]
#for(i in ref){
# test <- unlist(temp[,i])
# if(any(is.na(test)))
# temp[,i] <- approx(temp$date, test, temp$date, rule=2)$y
#}
ts <- seq(data$date[1], data$date[nrow(data)], by = by)
ref <- names(data)[names(data)!="date"]
temp <- lapply(ref, function(x){
approx(data$date, unlist(data[,x]), ts, rule=2)$y
})
temp <- as.data.frame(temp, stringsAsFactors = FALSE,
col.names = ref)
temp$date <- ts
temp[unique(c("date", ref))]
}
######################
#work of others
#from https://stats.stackexchange.com/questions/26244/what-is-the-confidence-interval-calculated-in-a-spectral-density-periodogram-in
#not sure what this is...
spec.ci <- function(spec.obj, coverage = 0.95) {
if (coverage < 0 || coverage >= 1)
stop("coverage probability out of range [0,1)")
tail <- (1 - coverage)
df <- spec.obj$df
upper.quantile <- 1 - tail * pchisq(df, df, lower.tail = FALSE)
lower.quantile <- tail * pchisq(df, df)
1/(qchisq(c(upper.quantile, lower.quantile), df)/df)
}
#also method in plot.spec.coherence
#that might be worth looking at...
#draws bands...
| /scratch/gouwar.j/cran-all/cranData/AQEval/R/spectral.analysis.R |
---
title: "Test"
author: "Vignette Author"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{test}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
Vignettes are long form documentation commonly included in packages. Because they are part of the distribution of the package, they need to be as compact as possible. The `html_vignette` output type provides a custom style sheet (and tweaks some options) to ensure that the resulting html is as small as possible. The `html_vignette` format:
- Never uses retina figures
- Has a smaller default figure size
- Uses a custom CSS stylesheet instead of the default Twitter Bootstrap style
## Vignette Info
Note the various macros within the `vignette` section of the metadata block above. These are required in order to instruct R how to build the vignette. Note that you should change the `title` field and the `\VignetteIndexEntry` to match the title of your vignette.
## Styles
The `html_vignette` template includes a basic CSS theme. To override this theme you can specify your own CSS in the document metadata as follows:
    output:
      rmarkdown::html_vignette:
        css: mystyles.css
## Figures
The figure sizes have been customised so that you can easily put two images side-by-side.
```{r, fig.show='hold'}
plot(1:10)
plot(10:1)
```
You can enable figure captions by `fig_caption: yes` in YAML:
    output:
      rmarkdown::html_vignette:
        fig_caption: yes
Then you can use the chunk option `fig.cap = "Your figure caption."` in **knitr**.
## More Examples
You can write math expressions, e.g. $Y = X\beta + \epsilon$, footnotes^[A footnote here.], and tables, e.g. using `knitr::kable()`.
```{r, echo=FALSE, results='asis'}
knitr::kable(head(mtcars, 10))
```
Also a quote using `>`:
> "He who gives up [code] safety for [code] speed deserves neither."
([via](https://twitter.com/hadleywickham/status/504368538874703872))
| /scratch/gouwar.j/cran-all/cranData/AQEval/man/articles/test.Rmd |
AADouble <- function(type="Normal") {
if(type == "Normal"){
iplan <- 1
} else if (type == "Tightened"){
iplan <- 2
} else if (type == "Reduced"){
iplan <- 3
} else {
iplan<-4
}
if(iplan==4) {stop("type must be equal to 'Normal', 'Reduced' or 'Tightened'")} else
{plan<-AAZ14Double(PLAN=iplan,INSL=1,LOTS=1,AQL=1)
return(plan)}
}
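#usage sketch (interactive): returns the tightened double
#sampling plan after menu prompts for inspection level,
#lot size and AQL, e.g.
#plan <- AADouble("Tightened")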
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/R/AADouble.R |
AAMultiple <- function(type="Normal") {
if(type == "Normal"){
iplan <- 1
} else if (type == "Tightened"){
iplan <- 2
} else if (type == "Reduced"){
iplan <- 3
} else {
iplan<-4
}
if(iplan==4) {stop("type must be equal to 'Normal', 'Reduced' or 'Tightened'")} else
{plan<-AAZ14Multiple(PLAN=iplan,INSL=1,LOTS=1,AQL=1)
return(plan)}
}
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/R/AAMultiple.R |
AASingle <- function(type="Normal") {
if(type == "Normal"){
iplan <- 1
} else if (type == "Tightened"){
iplan <- 2
} else if (type == "Reduced"){
iplan <- 3
} else {
iplan<-4
}
if(iplan==4) {stop("type must be equal to 'Normal', 'Reduced' or 'Tightened'")} else
{plan<-AAZ14Single(PLAN=iplan,INSL=1,LOTS=1,AQL=1)
return(plan)}
}
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/R/AASingle.R |
AAZ14Double<-function(PLAN,INSL,LOTS,AQL){
message("MIL-STD-105E ANSI/ASQ Z1.4")
# Get the inspection level
dINSL <- menu(c("S-1", "S-2", "S-3", "S-4",
"I", "II", "III"), title = "\nWhat is the Inspection Level?")
INSL
# Get the lot size
dLOTS <- menu(c("2-8", "9-15", "16-25", "26-50",
"51-90", "91-150", "151-280", "281-500",
"501-1200", "1201-3200", "3201-10,000",
"10,001-35,000", "35,001-150,000", "150,001-500,000",
"500,001 and over"), title = "\nWhat is the Lot Size?")
LOTS
# Get the AQL
dAQL <- menu(c("0.010","0.015","0.025","0.040","0.065","0.10","0.15","0.25",
"0.40","0.65","1.0","1.5","2.5","4.0","6.5","10",
"15","25","40","65","100","150","250","400","650","1000"),
title = "\nWhat is the AQL in percent nonconforming per 100 items?")
AQL
#Create matrix of Code Letters
codes<-c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R")
InspLev<-c("S-1","S-2","S-3","S-4","I","II","III")
LotSize<-c("2-8","9-15","16-25","26-50","51-90","91-150","151-280","281-500","501-1200","1201-3200","3201-10,000","10,001-35,000","35,001-150,000","150,001-500,000","over 500,001")
AQL<-c("0.010","0.015","0.025","0.040","0.065","0.10","0.15","0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5","10","15","25","40","65","100","150","250","400","650","1000")
letters<-c("A","A","A","A","A","A","B",
"A","A","A","A","A","B","C",
"A","A","B","B","B","C","D",
"A","B","B","C","C","D","E",
"B","B","C","C","C","E","F",
"B","B","C","D","D","F","G",
"B","C","D","E","E","G","H",
"B","C","D","E","F","H","J",
"C","C","E","F","G","J","K",
"C","D","E","G","H","K","L",
"C","D","F","G","J","L","M",
"C","D","F","H","K","M","N",
"D","E","G","J","L","N","P",
"D","E","G","J","M","P","Q",
"D","E","H","K","N","Q","R")
SSCodeLetters<-matrix(letters,nrow=15, byrow=TRUE)
rownames(SSCodeLetters)<-LotSize
colnames(SSCodeLetters)<-InspLev
#Create Matrix of Double Sample Sizes for Normal Sampling
temp<-array(c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,2,2,2,2,2,2,2,2,2,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,3,3,3,3,3,3,3,3,3,3,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,8,5,5,5,5,5,5,5,5,5,5,3,2,
0,0,0,0,0,0,0,0,0,0,0,0,13,8,8,8,8,8,8,8,8,8,8,5,3,2,
0,0,0,0,0,0,0,0,0,0,0,20,13,13,13,13,13,13,13,13,8,8,8,5,3,2,
0,0,0,0,0,0,0,0,0,0,32,20,20,20,20,20,20,20,20,13,8,8,8,5,3,2,
0,0,0,0,0,0,0,0,0,50,32,32,32,32,32,32,32,32,20,13,8,8,8,5,3,2,
0,0,0,0,0,0,0,0,80,50,50,50,50,50,50,50,50,32,20,13,8,8,8,5,3,2,
0,0,0,0,0,0,0,125,80,80,80,80,80,80,80,80,50,32,20,13,8,8,8,5,3,2,
0,0,0,0,0,0,200,125,125,125,125,125,125,125,125,80,50,32,20,13,8,8,8,5,3,2,
0,0,0,0,0,315,200,200,200,200,200,200,200,200,125,80,50,32,20,13,8,8,8,5,3,2,
0,0,0,0,500,315,315,315,315,315,315,315,315,200,125,80,50,32,20,13,8,8,8,5,3,2,
0,0,0,800,500,500,500,500,500,500,500,500,315,200,125,80,50,32,20,13,8,8,8,5,3,2,
0,0,1250,800,800,800,800,800,800,800,800,500,315,200,125,80,50,32,20,13,8,8,8,5,3,2,
0,0,1250,1250,1250,1250,1250,1250,1250,1250,800,500,315,200,125,80,50,32,20,13,8,8,8,5,3,2),
dim=c(26,16))
ANSIASQDoubleNormalss <- t(temp)
rownames(ANSIASQDoubleNormalss)<-codes
colnames(ANSIASQDoubleNormalss)<-AQL
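# Example lookup: ANSIASQDoubleNormalss["J", "1.0"] is 50, the common size of
# the first and second samples; a 0 entry marks a combination with no double
# sampling plan (caught by the warning at the end of this function)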
#Create Matrix of Double Sample Sizes for Tightened Sampling
temp<-array(c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,3,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,3,2,2,2,2,2,2,2,2,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,5,3,3,3,3,3,3,3,3,3,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,13,8,5,5,5,5,5,5,5,5,5,3,2,
0,0,0,0,0,0,0,0,0,0,0,0,20,13,8,8,8,8,8,8,8,8,8,5,3,2,
0,0,0,0,0,0,0,0,0,0,0,32,20,13,13,13,13,13,13,13,8,8,8,5,3,2,
0,0,0,0,0,0,0,0,0,0,50,32,20,20,20,20,20,20,20,13,8,8,8,5,3,2,
0,0,0,0,0,0,0,0,0,80,50,32,32,32,32,32,32,32,20,13,8,8,8,5,3,2,
0,0,0,0,0,0,0,0,125,80,50,50,50,50,50,50,50,32,20,13,8,8,8,5,3,2,
0,0,0,0,0,0,0,200,125,80,80,80,80,80,80,80,50,32,20,13,8,8,8,5,3,2,
0,0,0,0,0,0,315,200,125,125,125,125,125,125,125,80,50,32,20,13,8,8,8,5,3,2,
0,0,0,0,0,500,315,200,200,200,200,200,200,200,125,80,50,32,20,13,8,8,8,5,3,2,
0,0,0,0,800,500,315,315,315,315,315,315,315,200,125,80,50,32,20,13,8,8,8,5,3,2,
0,0,0,1250,800,500,500,500,500,500,500,500,315,200,125,80,50,32,20,13,8,8,8,5,3,2,
0,0,2000,1250,800,800,800,800,800,800,800,500,315,200,125,80,50,32,20,13,8,8,8,5,3,2,
0,0,2000,1250,1250,1250,1250,1250,1250,1250,800,500,315,200,125,80,50,32,20,13,8,8,8,5,3,2),
dim=c(26,16))
ANSIASQDoubleTightenedss<-t(temp)
rownames(ANSIASQDoubleTightenedss)<-codes
colnames(ANSIASQDoubleTightenedss)<-AQL
#Create Matrix of Double Sample Sizes for Reduced Sampling
temp<-array(c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,2,2,2,2,2,2,2,2,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,5,3,3,3,3,3,3,3,3,3,3,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,8,5,5,5,5,5,5,5,5,3,3,3,2,0,0,
0,0,0,0,0,0,0,0,0,0,13,8,8,8,8,8,8,8,8,5,3,3,3,2,0,0,
0,0,0,0,0,0,0,0,0,20,13,13,13,13,13,13,13,13,8,5,3,3,3,2,0,0,
0,0,0,0,0,0,0,0,32,20,20,20,20,20,20,20,20,13,8,5,3,3,3,2,0,0,
0,0,0,0,0,0,0,50,32,32,32,32,32,32,32,32,20,13,8,5,3,3,3,2,0,0,
0,0,0,0,0,0,80,50,50,50,50,50,50,50,50,32,20,13,8,5,3,3,3,2,0,0,
0,0,0,0,0,125,80,80,80,80,80,80,80,80,50,32,20,13,8,5,3,3,3,2,0,0,
0,0,0,0,200,125,125,125,125,125,125,125,125,80,50,32,20,13,8,5,3,3,3,2,0,0,
0,0,0,315,200,200,200,200,200,200,200,200,125,80,50,32,20,13,8,5,3,3,3,2,0,0,
0,0,500,315,315,315,315,315,315,315,315,200,125,80,50,32,20,13,8,5,3,3,3,2,0,0,
0,0,500,500,500,500,500,500,500,500,315,200,125,80,50,32,20,13,8,5,3,3,3,2,0,0),
dim=c(26,16))
ANSIASQDoubleReducedss<-t(temp)
rownames(ANSIASQDoubleReducedss)<-codes
colnames(ANSIASQDoubleReducedss)<-AQL
#Create array of Acceptance Numbers for Normal Double Sampling
t<-c( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 3, 1, 4,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 7,18,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 7,18,11,26,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 7,18,11,26,11,26,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 7,18,11,26,11,26,11,26,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 7,18,11,26,11,26,11,26,11,26,
0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 7,18,11,26,11,26,11,26,11,26,11,26,
0, 0, 0, 0, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 7,18,11,26,11,26,11,26,11,26,11,26,11,26,
0, 1, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 7,18,11,26,11,26,11,26,11,26,11,26,11,26,11,26,
0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 7,18,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,
0, 0, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 7,18,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,
0, 0, 1, 4, 2, 6, 3, 8, 5,12, 7,18,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,
0, 0, 2, 6, 3, 8, 5,12, 7,18,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,
0, 0, 3, 8, 5,12, 7,18,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,
0, 0, 5,12, 7,18,11,26,17,37,17,37,17,37,17,37,17,37,17,37,17,37,17,37,17,37,17,37,17,37,17,37,
0, 0, 7,18,11,26,17,37,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,
0, 0,11,26,17,37,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,
0, 0,17,37,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,
0, 0,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56,25,56)
AADoubleNormalac<-array(t, dim=c(2,16,26), dimnames=list(c("first","second"),
c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R"),
c("0.010","0.015","0.025","0.040","0.065","0.10","0.15",
"0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5",
"10","15","25","40","65","100","150","250","400","650","1000")))
# Create array of Rejection Numbers for Normal Double Sampling
t<-c( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7, 9,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7, 9, 9,13,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7, 9, 9,13,11,19,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7, 9, 9,13,11,19,16,27,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7, 9, 9,13,11,19,16,27,16,27,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7, 9, 9,13,11,19,16,27,16,27,16,27,
0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7, 9, 9,13,11,19,16,27,16,27,16,27,16,27,
0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7, 9, 9,13,11,19,16,27,16,27,16,27,16,27,16,27,
0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7, 9, 9,13,11,19,16,27,16,27,16,27,16,27,16,27,16,27,
2, 2, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7, 9, 9,13,11,19,16,27,16,27,16,27,16,27,16,27,16,27,16,27,
2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7, 9, 9,13,11,19,16,27,16,27,16,27,16,27,16,27,16,27,16,27,16,27,
0, 0, 3, 4, 4, 5, 5, 7, 7, 9, 9,13,11,19,16,27,16,27,16,27,16,27,16,27,16,27,16,27,16,27,16,27,
0, 0, 4, 5, 5, 7, 7, 9, 9,13,11,19,16,27,16,27,16,27,16,27,16,27,16,27,16,27,16,27,16,27,16,27,
0, 0, 5, 7, 7, 9, 9,13,11,19,16,27,16,27,16,27,16,27,16,27,16,27,16,27,16,27,16,27,16,27,16,27,
0, 0, 7, 9, 9,13,11,19,16,27,16,27,16,27,16,27,16,27,16,27,16,26,16,26,16,26,16,26,16,27,16,27,
0, 0, 9,13,11,19,16,27,22,38,22,38,22,38,22,38,22,38,22,38,22,38,22,38,22,38,22,38,22,38,22,38,
0, 0,11,19,16,27,22,38,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,
0, 0,16,27,22,38,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,
0, 0,22,38,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,
0, 0,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57,31,57)
AADoubleNormalre<-array(t, dim=c(2,16,26), dimnames=list(c("first","second"),
c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R"),
c("0.010","0.015","0.025","0.040","0.065","0.10","0.15",
"0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5",
"10","15","25","40","65","100","150","250","400","650","1000")))
# Create array of acceptance numbers for Double Tightened Sampling
t<-c( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 3, 1, 4,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3,11,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3,11, 6,15,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3,11, 6,15, 9,23,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3,11, 6,15, 9,23, 9,23,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3,11, 6,15, 9,23, 9,23, 9,23,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3,11, 6,15, 9,23, 9,23, 9,23, 9,23,
0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3,11, 6,15, 9,23, 9,23, 9,23, 9,23, 9,23,
0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3,11, 6,15, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23,
0, 1, 0, 1, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3,11, 6,15, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23,
0, 1, 0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3,11, 6,15, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23,
0, 1, 0, 1, 0, 3, 1, 4, 2, 6, 3,11, 6,15, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23,
0, 0, 0, 3, 1, 4, 2, 6, 3,11, 6,15, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23,
0, 0, 1, 4, 2, 6, 3,11, 6,15, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23,
0, 0, 2, 6, 3,11, 6,15, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23, 9,23,
0, 0, 3,11, 6,15, 9,23,15,34,15,34,15,34,15,34,15,34,15,34,15,34,15,34,15,34,15,34,15,34,15,34,
0, 0, 6,15, 9,23,15,34,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,
0, 0, 9,23,15,34,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,
0, 0,15,34,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,
0, 0,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52,23,52)
AADoubleTightenedac<-array(t, dim=c(2,16,26), dimnames=list(c("first","second"),
c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R"),
c("0.010","0.015","0.025","0.040","0.065","0.10","0.15",
"0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5",
"10","15","25","40","65","100","150","250","400","650","1000")))
# Create array of rejection numbers for Double Tightened Sampling
t<-c( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 4,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 4, 4, 5,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7,12,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7,12,10,16,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7,12,10,16,14,24,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7,12,10,16,14,24,14,24,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7,12,10,16,14,24,14,24,14,24,
0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7,12,10,16,14,24,14,24,14,24,14,24,
0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7,12,10,16,14,24,14,24,14,24,14,24,14,24,
0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7,12,10,16,14,24,14,24,14,24,14,24,14,24,14,24,
2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7,12,10,16,14,24,14,24,14,24,14,24,14,24,14,24,14,24,
2, 2, 2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7,12,10,16,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,
2, 2, 2, 2, 3, 4, 4, 5, 5, 7, 7,12,10,16,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,
0, 0, 3, 4, 4, 5, 5, 7, 7,12,10,16,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,
0, 0, 4, 5, 5, 7, 7,12,10,16,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,
0, 0, 5, 7, 7,12,10,16,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,14,24,
0, 0, 7,12,10,16,14,24,20,35,20,35,20,35,20,35,20,35,20,35,20,35,20,35,20,35,20,35,20,35,20,35,
0, 0,10,16,14,24,20,35,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,
0, 0,14,24,20,35,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,
0, 0,20,35,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,
0, 0,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53,29,53)
AADoubleTightenedre<-array(t, dim=c(2,16,26), dimnames=list(c("first","second"),
c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R"),
c("0.010","0.015","0.025","0.040","0.065","0.10","0.15",
"0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5",
"10","15","25","40","65","100","150","250","400","650","1000")))
# Create array of acceptance numbers for Double Reduced Sampling
t<-c( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 4,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 4, 2, 6,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 5,12,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 5,12, 5,12,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 5,12, 5,12, 5,12,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 5,12, 5,12, 5,12, 5,12,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12,
0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12,
0, 0, 0, 0, 0, 0, 0, 3, 1, 4, 2, 6, 3, 8, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12,
0, 0, 0, 0, 0, 0, 1, 4, 2, 6, 3, 8, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12,
0, 0, 0, 0, 0, 0, 2, 6, 3, 8, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12,
0, 0, 0, 0, 0, 0, 3, 8, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12, 5,12,
0, 0, 0, 0, 0, 0, 5,12, 7,18, 7,18, 7,18, 7,18, 7,18, 7,18, 7,18, 7,18, 7,18, 7,18, 7,18, 7,18,
0, 0, 0, 0, 0, 0, 7,18,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,
0, 0, 0, 0, 0, 0,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,11,26,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
AADoubleReducedac<-array(t, dim=c(2,16,26), dimnames=list(c("first","second"),
c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R"),
c("0.010","0.015","0.025","0.040","0.065","0.10","0.15",
"0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5",
"10","15","25","40","65","100","150","250","400","650","1000")))
# Create array of rejection numbers for Double Reduced Sampling
t<-c( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 4, 6,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 4, 6, 5, 7,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 4, 6, 5, 7, 7, 9,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 4, 6, 5, 7, 7, 9, 8,12,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 4, 6, 5, 7, 7, 9, 8,12,10,16,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 4, 6, 5, 7, 7, 9, 8,12,10,16,10,16,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 4, 6, 5, 7, 7, 9, 8,12,10,16,10,16,10,16,
0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 4, 6, 5, 7, 7, 9, 8,12,10,16,10,16,10,16,10,16,
0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 4, 6, 5, 7, 7, 9, 8,12,10,16,10,16,10,16,10,16,10,16,
0, 0, 0, 0, 2, 2, 2, 2, 3, 4, 4, 5, 4, 6, 5, 7, 7, 9, 8,12,10,16,10,16,10,16,10,16,10,16,10,16,
0, 0, 0, 0, 0, 0, 3, 4, 4, 5, 4, 6, 5, 7, 7, 9, 8,12,10,16,10,16,10,16,10,16,10,16,10,16,10,16,
0, 0, 0, 0, 0, 0, 4, 5, 4, 6, 5, 7, 7, 9, 8,12,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,
0, 0, 0, 0, 0, 0, 4, 6, 5, 7, 7, 9, 8,12,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,
0, 0, 0, 0, 0, 0, 5, 7, 7, 9, 8,12,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,
0, 0, 0, 0, 0, 0, 7, 9, 8,12,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,
0, 0, 0, 0, 0, 0, 8,12,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,10,16,
0, 0, 0, 0, 0, 0,10,16,12,22,12,22,12,22,12,22,12,22,12,22,12,22,12,22,12,22,12,22,12,22,12,22,
0, 0, 0, 0, 0, 0,12,22,17,30,17,30,17,30,17,30,17,30,17,30,17,30,17,30,17,30,17,30,17,30,17,30,
0, 0, 0, 0, 0, 0,17,30,17,30,17,30,17,30,17,30,17,30,17,30,17,30,17,30,17,30,17,30,17,30,17,30,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
AADoubleReducedre<-array(t, dim=c(2,16,26), dimnames=list(c("first","second"),
c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R"),
c("0.010","0.015","0.025","0.040","0.065","0.10","0.15",
"0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5",
"10","15","25","40","65","100","150","250","400","650","1000")))
# Get the code letter from SSCodeLetters
codelet <- SSCodeLetters[dLOTS, dINSL]
if (PLAN == 1) {
  ac <- AADoubleNormalac[ , codelet, dAQL]
  re <- AADoubleNormalre[ , codelet, dAQL]
  S <- ANSIASQDoubleNormalss[codelet, dAQL]
  ss <- c(S, S)
  names(ss) <- c("first", "second")
} else if (PLAN == 2) {
  ac <- AADoubleTightenedac[ , codelet, dAQL]
  re <- AADoubleTightenedre[ , codelet, dAQL]
  S <- ANSIASQDoubleTightenedss[codelet, dAQL]
  ss <- c(S, S)
  names(ss) <- c("first", "second")
} else if (PLAN == 3) {
  ac <- AADoubleReducedac[ , codelet, dAQL]
  re <- AADoubleReducedre[ , codelet, dAQL]
  S <- ANSIASQDoubleReducedss[codelet, dAQL]
  ss <- c(S, S)
  names(ss) <- c("first", "second")
}
if (ss[1] == 0) {
  warning("No double sampling plan exists. Use the corresponding single sampling plan")
} else {
  plan <- data.frame(n = ss, c = ac, r = re)
  return(plan)
}
}
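
# Usage sketch: PLAN selects the tables (1 = Normal, 2 = Tightened,
# 3 = Reduced), while INSL, LOTS, and AQL are placeholders, since the
# function re-asks for them interactively through menu():
#   plan <- AAZ14Double(PLAN = 1, INSL = 1, LOTS = 1, AQL = 1)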
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/R/AAZ14Double.R |
AAZ14Multiple<-function(PLAN,INSL,LOTS,AQL){
message("MIL-STD-105E ANSI/ASQ Z1.4")
# Get the inspection level. As in AAZ14Double(), the INSL, LOTS, and AQL
# arguments are only referenced (never used); the interactive menu() choices
# dINSL, dLOTS, and dAQL drive all of the table lookups.
dINSL <- menu(c("S-1", "S-2", "S-3", "S-4",
"I", "II", "III"), title = "\nWhat is the Inspection Level?")
INSL
# Get the lot size
dLOTS <- menu(c("2-8", "9-15", "16-25", "26-50",
"51-90", "91-150", "151-280", "281-500",
"501-1200", "1201-3200", "3201-10,000",
"10,001-35,000", "35,001-150,000", "150,001-500,000",
"500,001 and over"), title = "\nWhat is the Lot Size?")
LOTS
# Get the AQL
dAQL <- menu(c("0.010","0.015","0.025","0.040","0.065","0.10","0.15","0.25",
"0.40","0.65","1.0","1.5","2.5","4.0","6.5","10",
"15","25","40","65","100","150","250","400","650","1000"),
title = "\nWhat is the AQL in percent nonconforming per 100 items?")
AQL
# Create the matrix of sample-size code letters (rows are lot-size ranges,
# columns are inspection levels); the AQL argument is again overwritten by
# the vector of AQL labels used as table column names.
codes<-c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R")
InspLev<-c("S-1","S-2","S-3","S-4","I","II","III")
LotSize<-c("2-8","9-15","16-25","26-50","51-90","91-150","151-280","281-500","501-1200","1201-3200","3201-10,000","10,001-35,000","35,001-150,000","150,001-500,000","over 500,001")
AQL<-c("0.010","0.015","0.025","0.040","0.065","0.10","0.15","0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5","10","15","25","40","65","100","150","250","400","650","1000")
letters<-c("A","A","A","A","A","A","B",
"A","A","A","A","A","B","C",
"A","A","B","B","B","C","D",
"A","B","B","C","C","D","E",
"B","B","C","C","C","E","F",
"B","B","C","D","D","F","G",
"B","C","D","E","E","G","H",
"B","C","D","E","F","H","J",
"C","C","E","F","G","J","K",
"C","D","E","G","H","K","L",
"C","D","F","G","J","L","M",
"C","D","F","H","K","M","N",
"D","E","G","J","L","N","P",
"D","E","G","J","M","P","Q",
"D","E","H","K","N","Q","R")
SSCodeLetters<-matrix(letters,nrow=15, byrow=TRUE)
rownames(SSCodeLetters)<-LotSize
colnames(SSCodeLetters)<-InspLev
#Create Matrix of Multiple Sample Sizes for Normal Sampling
temp<-array(c(-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,3,2,2,2,2,2,2,2,2,2,2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,5,3,3,3,3,3,3,3,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,8,5,5,5,5,5,5,5,5,5,5,3,2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,13,8,8,8,8,8,8,8,8,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,-1,20,13,13,13,13,13,13,13,13,8,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,32,20,20,20,20,20,20,20,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,50,32,32,32,32,32,32,32,32,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,-1,80,50,50,50,50,50,50,50,50,32,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,125,80,80,80,80,80,80,80,80,50,32,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,200,125,125,125,125,125,125,125,125,80,50,32,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,-1,315,200,200,200,200,200,200,200,200,125,80,50,32,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,500,315,315,315,315,315,315,315,315,200,125,80,50,32,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,500,500,500,500,500,500,500,500,315,200,125,80,50,32,20,13,8,5,3,3,3,2,-2,-2),
dim=c(26,16))
AAMultipleNormalss <- t(temp)
rownames(AAMultipleNormalss)<-codes
colnames(AAMultipleNormalss)<-AQL
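# Assumption: by analogy with the 0 entries in the double-sampling tables
# (which trigger the "use the corresponding single sampling plan" warning),
# the -1/-2 sentinels here appear to mark combinations with no multiple
# sampling plan, deferring to a single or double plan instead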
#Create Matrix of Multiple Sample Sizes for Tightened Sampling
temp<-array(c(-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,3,2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,5,3,2,2,2,2,2,2,2,2,2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,8,5,3,3,3,3,3,3,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,13,8,5,5,5,5,5,5,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,20,13,8,8,8,8,8,8,8,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,-1,32,20,13,13,13,13,13,13,13,8,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,-1,50,32,20,20,20,20,20,20,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,-1,-1,80,50,32,32,32,32,32,32,32,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,-1,125,80,50,50,50,50,50,50,50,32,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,-1,200,125,80,80,80,80,80,80,80,50,32,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,-1,-1,315,200,125,125,125,125,125,125,125,80,50,32,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,-1,500,315,200,200,200,200,200,200,200,125,80,50,32,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,800,500,315,315,315,315,315,315,315,200,125,80,50,32,20,13,8,5,3,3,3,2,-2,-2,
-1,-1,800,500,500,500,500,500,500,500,315,200,125,80,50,32,20,13,8,5,3,3,3,2,-2,-2),
dim=c(26,16))
AAMultipleTightenedss<-t(temp)
rownames(AAMultipleTightenedss)<-codes
colnames(AAMultipleTightenedss)<-AQL
#Create Matrix of Multiple Sample Sizes for Reduced Sampling
temp<-array(c(-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,3,2,2,2,2,2,2,2,2,-2,-2,-2,-2,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,5,3,3,3,3,3,3,3,3,2,-2,-2,-2,-2,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,8,5,5,5,5,5,5,5,5,3,2,-2,-2,-2,-2,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,13,8,8,8,8,8,8,8,8,5,3,2,-2,-2,-2,-2,-1,-1,
-1,-1,-1,-1,-1,-1,-1,20,13,13,13,13,13,13,13,13,8,5,3,2,-2,-2,-2,-2,-1,-1,
-1,-1,-1,-1,-1,-1,32,20,20,20,20,20,20,20,20,13,8,5,3,2,-2,-2,-2,-2,-1,-1,
-1,-1,-1,-1,-1,50,32,32,32,32,32,32,32,32,20,13,8,5,3,2,-2,-2,-2,-2,-1,-1,
-1,-1,-1,-1,80,50,50,50,50,50,50,50,50,32,20,13,8,5,3,2,-2,-2,-2,-2,-1,-1,
-1,-1,-1,125,80,80,80,80,80,80,80,80,50,32,20,13,8,5,3,2,-2,-2,-2,-2,-1,-1,
-1,-1,200,125,125,125,125,125,125,125,125,80,50,32,20,13,8,5,3,2,-2,-2,-2,-2,-1,-1,
-1,-1,200,200,200,200,200,200,200,200,125,80,50,32,20,13,8,5,3,2,-2,-2,-2,-2,-1,-1),
dim=c(26,16))
AAMultipleReducedss<-t(temp)
rownames(AAMultipleReducedss)<-codes
colnames(AAMultipleReducedss)<-AQL
#Create array of Acceptance Numbers for Normal Multiple Sampling
t<-c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,0,0,1,2,4,6,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,3,4,7,11,17,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,8,13,19,29,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,5,8,12,19,27,40,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,5,7,11,17,25,36,53,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,7,10,14,21,31,45,65,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,9,13,18,25,37,53,77,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,0,0,1,2,4,6,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,0,0,1,1,3,4,7,11,17,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,8,13,19,29,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,12,19,27,40,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,7,11,17,25,36,53,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,3,4,7,10,14,21,31,45,65,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,4,6,9,13,18,25,37,53,77,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,0,0,1,2,4,6,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,0,0,1,1,3,4,7,11,17,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,8,13,19,29,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,12,19,27,40,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,2,3,5,7,11,17,25,36,53,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,3,4,7,10,14,21,31,45,65,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,4,6,9,13,18,25,37,53,77,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,0,0,1,2,4,6,6,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,0,0,1,1,3,4,7,11,17,17,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,8,13,19,29,29,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,12,19,27,40,40,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,1,2,3,5,7,11,17,25,36,53,53,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,1,3,4,7,10,14,21,31,45,65,65,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,2,2,4,6,9,13,18,25,37,53,77,77,0,0,
0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,0,0,1,2,2,4,6,6,0,0,
0,0,0,0,0,0,0,0,0,0,0,-1,-1,0,0,1,1,3,4,7,7,11,17,17,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,8,13,13,19,29,29,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,12,19,19,27,40,40,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,1,2,3,5,7,11,17,25,25,36,53,53,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,1,3,4,7,10,14,21,31,31,45,65,65,0,0,
0,0,0,0,0,0,0,0,0,0,0,2,2,4,6,9,13,18,25,37,37,53,77,77,0,0,
0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,0,0,1,2,2,2,4,6,6,0,0,
0,0,0,0,0,0,0,0,0,0,-1,-1,0,0,1,1,3,4,7,7,7,11,17,17,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,8,13,13,13,19,29,29,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,12,19,19,19,27,40,40,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,2,3,5,7,11,17,25,25,25,36,53,53,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,3,4,7,10,14,21,31,31,31,45,65,65,0,0,
0,0,0,0,0,0,0,0,0,0,2,2,4,6,9,13,18,25,37,37,37,53,77,77,0,0,
0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,0,0,1,2,2,2,2,4,6,6,0,0,
0,0,0,0,0,0,0,0,0,-1,-1,0,0,1,1,3,4,7,7,7,7,11,17,17,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,8,13,13,13,13,19,29,29,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,12,19,19,19,19,27,40,40,0,0,
0,0,0,0,0,0,0,0,0,1,1,2,3,5,7,11,17,25,25,25,25,36,53,53,0,0,
0,0,0,0,0,0,0,0,0,1,1,3,4,7,10,14,21,31,31,31,31,45,65,65,0,0,
0,0,0,0,0,0,0,0,0,2,2,4,6,9,13,18,25,37,37,37,37,53,77,77,0,0,
0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,0,0,1,2,2,2,2,2,4,6,6,0,0,
0,0,0,0,0,0,0,0,-1,-1,0,0,1,1,3,4,7,7,7,7,7,11,17,17,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,8,13,13,13,13,13,19,29,29,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,12,19,19,19,19,19,27,40,40,0,0,
0,0,0,0,0,0,0,0,1,1,2,3,5,7,11,17,25,25,25,25,25,36,53,53,0,0,
0,0,0,0,0,0,0,0,1,1,3,4,7,10,14,21,31,31,31,31,31,45,65,65,0,0,
0,0,0,0,0,0,0,0,2,2,4,6,9,13,18,25,37,37,37,37,37,53,77,77,0,0,
0,0,0,0,0,0,0,-1,-1,-1,-1,-1,0,0,1,2,2,2,2,2,2,4,6,6,0,0,
0,0,0,0,0,0,0,-1,-1,0,0,1,1,3,4,7,7,7,7,7,7,11,17,17,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,3,6,8,13,13,13,13,13,13,19,29,29,0,0,
0,0,0,0,0,0,0,0,0,1,2,3,5,8,12,19,19,19,19,19,19,27,40,40,0,0,
0,0,0,0,0,0,0,1,1,2,3,5,7,11,17,25,25,25,25,25,25,36,53,53,0,0,
0,0,0,0,0,0,0,1,1,3,4,7,10,14,21,31,31,31,31,31,31,45,65,65,0,0,
0,0,0,0,0,0,0,2,2,4,6,9,13,18,25,37,37,37,37,37,37,53,77,77,0,0,
0,0,0,0,0,0,-1,-1,-1,-1,-1,0,0,1,2,2,2,2,2,2,2,4,6,6,0,0,
0,0,0,0,0,0,-1,-1,0,0,1,1,3,4,7,7,7,7,7,7,7,11,17,17,0,0,
0,0,0,0,0,0,0,0,0,1,2,3,6,8,13,13,13,13,13,13,13,19,29,29,0,0,
0,0,0,0,0,0,0,0,1,2,3,5,8,12,19,19,19,19,19,19,19,27,40,40,0,0,
0,0,0,0,0,0,1,1,2,3,5,7,11,17,25,25,25,25,25,25,25,36,53,53,0,0,
0,0,0,0,0,0,1,1,3,4,7,10,14,21,31,31,31,31,31,31,31,45,65,65,0,0,
0,0,0,0,0,0,2,2,4,6,9,13,18,25,37,37,37,37,37,37,37,53,77,77,0,0,
0,0,0,0,0,-1,-1,-1,-1,-1,0,0,1,2,2,2,2,2,2,2,2,4,6,6,0,0,
0,0,0,0,0,-1,-1,0,0,1,1,3,4,7,7,7,7,7,7,7,7,11,17,17,0,0,
0,0,0,0,0,0,0,0,1,2,3,6,8,13,13,13,13,13,13,13,13,19,29,29,0,0,
0,0,0,0,0,0,0,1,2,3,5,8,12,19,19,19,19,19,19,19,19,27,40,40,0,0,
0,0,0,0,0,1,1,2,3,5,7,11,17,25,25,25,25,25,25,25,25,36,53,53,0,0,
0,0,0,0,0,1,1,3,4,7,10,14,21,31,31,31,31,31,31,31,31,45,65,65,0,0,
0,0,0,0,0,2,2,4,6,9,13,18,25,37,37,37,37,37,37,37,37,53,77,77,0,0,
0,0,0,0,-1,-1,-1,-1,-1,0,0,1,2,2,2,2,2,2,2,2,2,4,6,6,0,0,
0,0,0,0,-1,-1,0,0,1,1,3,4,7,7,7,7,7,7,7,7,7,11,17,17,0,0,
0,0,0,0,0,0,0,1,2,3,6,8,13,13,13,13,13,13,13,13,13,19,29,29,0,0,
0,0,0,0,0,0,1,2,3,5,8,12,19,19,19,19,19,19,19,19,19,27,40,40,0,0,
0,0,0,0,1,1,2,3,5,7,11,17,25,25,25,25,25,25,25,25,25,36,53,53,0,0,
0,0,0,0,1,1,3,4,7,10,14,21,31,31,31,31,31,31,31,31,31,45,65,65,0,0,
0,0,0,0,2,2,4,6,9,13,18,25,37,37,37,37,37,37,37,37,37,53,77,77,0,0,
0,0,0,-1,-1,-1,-1,-1,0,0,1,2,2,2,2,2,2,2,2,2,2,4,6,6,0,0,
0,0,0,-1,-1,0,0,1,1,3,4,7,7,7,7,7,7,7,7,7,7,11,17,17,0,0,
0,0,0,0,0,0,1,2,3,6,8,13,13,13,13,13,13,13,13,13,13,19,29,29,0,0,
0,0,0,0,0,1,2,3,5,8,12,19,19,19,19,19,19,19,19,19,19,27,40,40,0,0,
0,0,0,1,1,2,3,5,7,11,17,25,25,25,25,25,25,25,25,25,25,36,53,53,0,0,
0,0,0,1,1,3,4,7,10,14,21,31,31,31,31,31,31,31,31,31,31,45,65,65,0,0,
0,0,0,2,2,4,6,9,13,18,25,37,37,37,37,37,37,37,37,37,37,53,77,77,0,0,
0,0,-1,-1,-1,-1,-1,0,0,1,2,2,2,2,2,2,2,2,2,2,2,4,6,6,0,0,
0,0,-1,-1,0,0,1,1,3,4,7,7,7,7,7,7,7,7,7,7,7,11,17,17,0,0,
0,0,0,0,0,1,2,3,6,8,13,13,13,13,13,13,13,13,13,13,13,19,29,29,0,0,
0,0,0,0,1,2,3,5,8,12,19,19,19,19,19,19,19,19,19,19,19,27,40,40,0,0,
0,0,1,1,2,3,5,7,11,17,25,25,25,25,25,25,25,25,25,25,25,36,53,53,0,0,
0,0,1,1,3,4,7,10,14,21,31,31,31,31,31,31,31,31,31,31,31,45,65,65,0,0,
0,0,2,2,4,6,9,13,18,25,37,37,37,37,37,37,37,37,37,37,37,53,77,77,0,0,
0,0,-1,-1,-1,-1,0,0,1,2,2,2,2,2,2,2,2,2,2,2,2,4,6,6,0,0,
0,0,-1,0,0,1,1,3,4,7,7,7,7,7,7,7,7,7,7,7,7,11,17,17,0,0,
0,0,0,0,1,2,3,6,8,13,13,13,13,13,13,13,13,13,13,13,13,19,29,29,0,0,
0,0,0,1,2,3,5,8,12,19,19,19,19,19,19,19,19,19,19,19,19,27,40,40,0,0,
0,0,1,2,3,5,7,11,17,25,25,25,25,25,25,25,25,25,25,25,25,36,53,53,0,0,
0,0,1,3,4,7,10,14,21,31,31,31,31,31,31,31,31,31,31,31,31,45,65,65,0,0,
0,0,2,4,6,9,13,18,25,37,37,37,37,37,37,37,37,37,37,37,37,53,77,77,0,0)
AAMultipleNormalac<-array(t, dim=c(26,7,16), dimnames=list(c("0.010","0.015","0.025","0.040","0.065","0.10","0.15","0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5",
"10","15","25","40","65","100","150","250","400","650","1000"),
c("first","second","third","fourth","fifth","sixth","seventh"),
c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R")
))
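# Note: the dimension order differs from the double-sampling arrays; these
# multiple-sampling arrays are indexed as [AQL, stage, code letter], with
# seven stages ("first" through "seventh") per plan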
# Create array of Rejection Numbers for Normal Multiple Sampling
t<-c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,4,4,5,7,9,12,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,5,6,8,10,14,19,27,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,6,8,10,13,19,27,39,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,5,7,10,13,17,25,34,49,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,6,8,11,15,20,29,40,58,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,6,9,12,17,23,33,47,68,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,7,10,14,19,26,38,54,78,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,3,4,4,5,7,9,12,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,3,5,6,8,10,14,19,27,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,4,6,8,10,13,19,27,39,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,5,7,10,13,17,25,34,49,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,6,8,11,15,20,29,40,58,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,5,6,9,12,17,23,33,47,68,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,5,7,10,14,19,26,38,54,78,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,3,4,4,5,7,9,12,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,3,3,5,6,8,10,14,19,27,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,3,4,6,8,10,13,19,27,39,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,4,5,7,10,13,17,25,34,49,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,4,6,8,11,15,20,29,40,58,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,5,6,9,12,17,23,33,47,68,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,5,7,10,14,19,26,38,54,78,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,3,4,4,5,7,9,12,16,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,2,2,3,3,5,6,8,10,14,19,27,27,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,2,2,3,4,6,8,10,13,19,27,39,39,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,3,3,4,5,7,10,13,17,25,34,49,49,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,3,3,4,6,8,11,15,20,29,40,58,58,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,3,3,5,6,9,12,17,23,33,47,68,68,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,3,3,5,7,10,14,19,26,38,54,78,78,0,0,
0,0,0,0,0,0,0,0,0,0,0,2,2,2,3,4,4,5,7,9,9,12,16,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,2,2,3,3,5,6,8,10,14,14,19,27,27,0,0,
0,0,0,0,0,0,0,0,0,0,0,2,2,3,4,6,8,10,13,19,19,27,39,39,0,0,
0,0,0,0,0,0,0,0,0,0,0,3,3,4,5,7,10,13,17,25,25,34,49,49,0,0,
0,0,0,0,0,0,0,0,0,0,0,3,3,4,6,8,11,15,20,29,29,40,58,58,0,0,
0,0,0,0,0,0,0,0,0,0,0,3,3,5,6,9,12,17,23,33,33,47,68,68,0,0,
0,0,0,0,0,0,0,0,0,0,0,3,3,5,7,10,14,19,26,38,38,54,78,78,0,0,
0,0,0,0,0,0,0,0,0,0,2,2,2,3,4,4,5,7,9,9,9,12,16,16,0,0,
0,0,0,0,0,0,0,0,0,0,2,2,3,3,5,6,8,10,14,14,14,19,27,27,0,0,
0,0,0,0,0,0,0,0,0,0,2,2,3,4,6,8,10,13,19,19,19,27,39,39,0,0,
0,0,0,0,0,0,0,0,0,0,3,3,4,5,7,10,13,17,25,25,25,34,49,49,0,0,
0,0,0,0,0,0,0,0,0,0,3,3,4,6,8,11,15,20,29,29,29,40,58,58,0,0,
0,0,0,0,0,0,0,0,0,0,3,3,5,6,9,12,17,23,33,33,33,47,68,68,0,0,
0,0,0,0,0,0,0,0,0,0,3,3,5,7,10,14,19,26,38,38,38,54,78,78,0,0,
0,0,0,0,0,0,0,0,0,2,2,2,3,4,4,5,7,9,9,9,9,12,16,16,0,0,
0,0,0,0,0,0,0,0,0,2,2,3,3,5,6,8,10,14,14,14,14,19,27,27,0,0,
0,0,0,0,0,0,0,0,0,2,2,3,4,6,8,10,13,19,19,19,19,27,39,39,0,0,
0,0,0,0,0,0,0,0,0,3,3,4,5,7,10,13,17,25,25,25,25,34,49,49,0,0,
0,0,0,0,0,0,0,0,0,3,3,4,6,8,11,15,20,29,29,29,29,40,58,58,0,0,
0,0,0,0,0,0,0,0,0,3,3,5,6,9,12,17,23,33,33,33,33,47,68,68,0,0,
0,0,0,0,0,0,0,0,0,3,3,5,7,10,14,19,26,38,38,38,38,54,78,78,0,0,
0,0,0,0,0,0,0,0,2,2,2,3,4,4,5,7,9,9,9,9,9,12,16,16,0,0,
0,0,0,0,0,0,0,0,2,2,3,3,5,6,8,10,14,14,14,14,14,19,27,27,0,0,
0,0,0,0,0,0,0,0,2,2,3,4,6,8,10,13,19,19,19,19,19,27,39,39,0,0,
0,0,0,0,0,0,0,0,3,3,4,5,7,10,13,17,25,25,25,25,25,34,49,49,0,0,
0,0,0,0,0,0,0,0,3,3,4,6,8,11,15,20,29,29,29,29,29,40,58,58,0,0,
0,0,0,0,0,0,0,0,3,3,5,6,9,12,17,23,33,33,33,33,33,47,68,68,0,0,
0,0,0,0,0,0,0,0,3,3,5,7,10,14,19,26,38,38,38,38,38,54,78,78,0,0,
0,0,0,0,0,0,0,2,2,2,3,4,4,5,7,9,9,9,9,9,9,12,16,16,0,0,
0,0,0,0,0,0,0,2,2,3,3,5,6,8,10,14,14,14,14,14,14,19,27,27,0,0,
0,0,0,0,0,0,0,2,2,3,4,6,8,10,13,19,19,19,19,19,19,27,39,39,0,0,
0,0,0,0,0,0,0,3,3,4,5,7,10,13,17,25,25,25,25,25,25,34,49,49,0,0,
0,0,0,0,0,0,0,3,3,4,6,8,11,15,20,29,29,29,29,29,29,40,58,58,0,0,
0,0,0,0,0,0,0,3,3,5,6,9,12,17,23,33,33,33,33,33,33,47,68,68,0,0,
0,0,0,0,0,0,0,3,3,5,7,10,14,19,26,38,38,38,38,38,38,54,78,78,0,0,
0,0,0,0,0,0,2,2,2,3,4,4,5,7,9,9,9,9,9,9,9,12,16,16,0,0,
0,0,0,0,0,0,2,2,3,3,5,6,8,10,14,14,14,14,14,14,14,19,27,27,0,0,
0,0,0,0,0,0,2,2,3,4,6,8,10,13,19,19,19,19,19,19,19,27,39,39,0,0,
0,0,0,0,0,0,3,3,4,5,7,10,13,17,25,25,25,25,25,25,25,34,49,49,0,0,
0,0,0,0,0,0,3,3,4,6,8,11,15,20,29,29,29,29,29,29,29,40,58,58,0,0,
0,0,0,0,0,0,3,3,5,6,9,12,17,23,33,33,33,33,33,33,33,47,68,68,0,0,
0,0,0,0,0,0,3,3,5,7,10,14,19,26,38,38,38,38,38,38,38,54,78,78,0,0,
0,0,0,0,0,2,2,2,3,4,4,5,7,9,9,9,9,9,9,9,9,12,16,16,0,0,
0,0,0,0,0,2,2,3,3,5,6,8,10,14,14,14,14,14,14,14,14,19,27,27,0,0,
0,0,0,0,0,2,2,3,4,6,8,10,13,19,19,19,19,19,19,19,19,27,39,39,0,0,
0,0,0,0,0,3,3,4,5,7,10,13,17,25,25,25,25,25,25,25,25,34,49,49,0,0,
0,0,0,0,0,3,3,4,6,8,11,15,20,29,29,29,29,29,29,29,29,40,58,58,0,0,
0,0,0,0,0,3,3,5,6,9,12,17,23,33,33,33,33,33,33,33,33,47,68,68,0,0,
0,0,0,0,0,3,3,5,7,10,14,19,26,38,38,38,38,38,38,38,38,54,78,78,0,0,
0,0,0,0,2,2,2,3,4,4,5,7,9,9,9,9,9,9,9,9,9,12,16,16,0,0,
0,0,0,0,2,2,3,3,5,6,8,10,14,14,14,14,14,14,14,14,14,19,27,27,0,0,
0,0,0,0,2,2,3,4,6,8,10,13,19,19,19,19,19,19,19,19,19,27,39,39,0,0,
0,0,0,0,3,3,4,5,7,10,13,17,25,25,25,25,25,25,25,25,25,34,49,49,0,0,
0,0,0,0,3,3,4,6,8,11,15,20,29,29,29,29,29,29,29,29,29,40,58,58,0,0,
0,0,0,0,3,3,5,6,9,12,17,23,33,33,33,33,33,33,33,33,33,47,68,68,0,0,
0,0,0,0,3,3,5,7,10,14,19,26,38,38,38,38,38,38,38,38,38,54,78,78,0,0,
0,0,0,2,2,2,3,4,4,5,7,9,9,9,9,9,9,9,9,9,9,12,16,16,0,0,
0,0,0,2,2,3,3,5,6,8,10,14,14,14,14,14,14,14,14,14,14,19,27,27,0,0,
0,0,0,2,2,3,4,6,8,10,13,19,19,19,19,19,19,19,19,19,19,27,39,39,0,0,
0,0,0,3,3,4,5,7,10,13,17,25,25,25,25,25,25,25,25,25,25,34,49,49,0,0,
0,0,0,3,3,4,6,8,11,15,20,29,29,29,29,29,29,29,29,29,29,40,58,58,0,0,
0,0,0,3,3,5,6,9,12,17,23,33,33,33,33,33,33,33,33,33,33,47,68,68,0,0,
0,0,0,3,3,5,7,10,14,19,26,38,38,38,38,38,38,38,38,38,38,54,78,78,0,0,
0,0,2,2,2,3,4,4,5,7,9,9,9,9,9,9,9,9,9,9,9,12,16,16,0,0,
0,0,2,2,3,3,5,6,8,10,14,14,14,14,14,14,14,14,14,14,14,19,27,27,0,0,
0,0,2,2,3,4,6,8,10,13,19,19,19,19,19,19,19,19,19,19,19,27,39,39,0,0,
0,0,3,3,4,5,7,10,13,17,25,25,25,25,25,25,25,25,25,25,25,34,49,49,0,0,
0,0,3,3,4,6,8,11,15,20,29,29,29,29,29,29,29,29,29,29,29,40,58,58,0,0,
0,0,3,3,5,6,9,12,17,23,33,33,33,33,33,33,33,33,33,33,33,47,68,68,0,0,
0,0,3,3,5,7,10,14,19,26,38,38,38,38,38,38,38,38,38,38,38,54,78,78,0,0,
0,0,2,2,3,4,4,5,7,9,9,9,9,9,9,9,9,9,9,9,9,12,16,16,0,0,
0,0,2,3,3,5,6,8,10,14,14,14,14,14,14,14,14,14,14,14,14,19,27,27,0,0,
0,0,2,3,4,6,8,10,13,19,19,19,19,19,19,19,19,19,19,19,19,27,39,39,0,0,
0,0,3,4,5,7,10,13,17,25,25,25,25,25,25,25,25,25,25,25,25,34,49,49,0,0,
0,0,3,4,6,8,11,15,20,29,29,29,29,29,29,29,29,29,29,29,29,40,58,58,0,0,
0,0,3,5,6,9,12,17,23,33,33,33,33,33,33,33,33,33,33,33,33,47,68,68,0,0,
0,0,3,5,7,10,14,19,26,38,38,38,38,38,38,38,38,38,38,38,38,54,78,78,0,0)
AAMultipleNormalre<-array(t, dim=c(26,7,16), dimnames=list(c("0.010","0.015","0.025","0.040","0.065","0.10","0.15","0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5",
"10","15","25","40","65","100","150","250","400","650","1000"),
c("first","second","third","fourth","fifth","sixth","seventh"),
c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R")
))
# Create array of acceptance numbers for Multiple Tightened Sampling
t<-c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,3,4,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,4,6,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,0,0,1,3,6,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,0,0,1,2,3,6,10,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,4,7,11,17,26,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,10,16,24,37,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,9,14,22,32,49,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,3,4,7,12,18,27,40,61,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,4,6,9,14,21,32,48,72,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,0,0,1,3,6,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,0,0,1,2,3,6,10,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,4,7,11,17,26,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,10,16,24,37,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,2,3,5,9,14,22,32,49,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,3,4,7,12,18,27,40,61,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,4,6,9,14,21,32,48,72,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,0,0,1,3,6,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,0,0,1,2,3,6,10,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,4,7,11,17,26,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,10,16,24,37,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,2,3,5,9,14,22,32,49,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,3,4,7,12,18,27,40,61,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,4,6,9,14,21,32,48,72,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,0,0,1,3,6,6,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,0,0,1,2,3,6,10,16,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,4,7,11,17,26,26,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,10,16,24,37,37,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,2,3,5,9,14,22,32,49,49,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,3,4,7,12,18,27,40,61,61,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,4,6,9,14,21,32,48,72,72,0,0,
0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,0,0,1,1,3,6,6,0,0,
0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,0,0,1,2,3,6,6,10,16,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,4,7,11,11,17,26,26,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,10,16,16,24,37,37,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,1,1,2,3,5,9,14,22,22,32,49,49,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,1,1,3,4,7,12,18,27,27,40,61,61,0,0,
0,0,0,0,0,0,0,0,0,0,0,2,2,2,4,6,9,14,21,32,32,48,72,72,0,0,
0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,0,0,1,1,1,3,6,6,0,0,
0,0,0,0,0,0,0,0,0,0,-1,-1,-1,0,0,1,2,3,6,6,6,10,16,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,4,7,11,11,11,17,26,26,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,10,16,16,16,24,37,37,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,2,3,5,9,14,22,22,22,32,49,49,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,3,4,7,12,18,27,27,27,40,61,61,0,0,
0,0,0,0,0,0,0,0,0,0,2,2,2,4,6,9,14,21,32,32,32,48,72,72,0,0,
0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,0,0,1,1,1,1,3,6,6,0,0,
0,0,0,0,0,0,0,0,0,-1,-1,-1,0,0,1,2,3,6,6,6,6,10,16,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,4,7,11,11,11,11,17,26,26,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,10,16,16,16,16,24,37,37,0,0,
0,0,0,0,0,0,0,0,0,1,1,1,2,3,5,9,14,22,22,22,22,32,49,49,0,0,
0,0,0,0,0,0,0,0,0,1,1,1,3,4,7,12,18,27,27,27,27,40,61,61,0,0,
0,0,0,0,0,0,0,0,0,2,2,2,4,6,9,14,21,32,32,32,32,48,72,72,0,0,
0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,0,0,1,1,1,1,1,3,6,6,0,0,
0,0,0,0,0,0,0,0,-1,-1,-1,0,0,1,2,3,6,6,6,6,6,10,16,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,2,4,7,11,11,11,11,11,17,26,26,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,10,16,16,16,16,16,24,37,37,0,0,
0,0,0,0,0,0,0,0,1,1,1,2,3,5,9,14,22,22,22,22,22,32,49,49,0,0,
0,0,0,0,0,0,0,0,1,1,1,3,4,7,12,18,27,27,27,27,27,40,61,61,0,0,
0,0,0,0,0,0,0,0,2,2,2,4,6,9,14,21,32,32,32,32,32,48,72,72,0,0,
0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,0,0,1,1,1,1,1,1,3,6,6,0,0,
0,0,0,0,0,0,0,-1,-1,-1,0,0,1,2,3,6,6,6,6,6,6,10,16,16,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,2,4,7,11,11,11,11,11,11,17,26,26,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,3,6,10,16,16,16,16,16,16,24,37,37,0,0,
0,0,0,0,0,0,0,1,1,1,2,3,5,9,14,22,22,22,22,22,22,32,49,49,0,0,
0,0,0,0,0,0,0,1,1,1,3,4,7,12,18,27,27,27,27,27,27,40,61,61,0,0,
0,0,0,0,0,0,0,2,2,2,4,6,9,14,21,32,32,32,32,32,32,48,72,72,0,0,
0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,0,0,1,1,1,1,1,1,1,3,6,6,0,0,
0,0,0,0,0,0,-1,-1,-1,0,0,1,2,3,6,6,6,6,6,6,6,10,16,16,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,4,7,11,11,11,11,11,11,11,17,26,26,0,0,
0,0,0,0,0,0,0,0,0,1,2,3,6,10,16,16,16,16,16,16,16,24,37,37,0,0,
0,0,0,0,0,0,1,1,1,2,3,5,9,14,22,22,22,22,22,22,22,32,49,49,0,0,
0,0,0,0,0,0,1,1,1,3,4,7,12,18,27,27,27,27,27,27,27,40,61,61,0,0,
0,0,0,0,0,0,2,2,2,4,6,9,14,21,32,32,32,32,32,32,32,48,72,72,0,0,
0,0,0,0,0,-1,-1,-1,-1,-1,-1,0,0,1,1,1,1,1,1,1,1,3,6,6,0,0,
0,0,0,0,0,-1,-1,-1,0,0,1,2,3,6,6,6,6,6,6,6,6,10,16,16,0,0,
0,0,0,0,0,0,0,0,0,1,2,4,7,11,11,11,11,11,11,11,11,17,26,26,0,0,
0,0,0,0,0,0,0,0,1,2,3,6,10,16,16,16,16,16,16,16,16,24,37,37,0,0,
0,0,0,0,0,1,1,1,2,3,5,9,14,22,22,22,22,22,22,22,22,32,49,49,0,0,
0,0,0,0,0,1,1,1,3,4,7,12,18,27,27,27,27,27,27,27,27,40,61,61,0,0,
0,0,0,0,0,2,2,2,4,6,9,14,21,32,32,32,32,32,32,32,32,48,72,72,0,0,
0,0,0,0,-1,-1,-1,-1,-1,-1,0,0,1,1,1,1,1,1,1,1,1,3,6,6,0,0,
0,0,0,0,-1,-1,-1,0,0,1,2,3,6,6,6,6,6,6,6,6,6,10,16,16,0,0,
0,0,0,0,0,0,0,0,1,2,4,7,11,11,11,11,11,11,11,11,11,17,26,26,0,0,
0,0,0,0,0,0,0,1,2,3,6,10,16,16,16,16,16,16,16,16,16,24,37,37,0,0,
0,0,0,0,1,1,1,2,3,5,9,14,22,22,22,22,22,22,22,22,22,32,49,49,0,0,
0,0,0,0,1,1,1,3,4,7,12,18,27,27,27,27,27,27,27,27,27,40,61,61,0,0,
0,0,0,0,2,2,2,4,6,9,14,21,32,32,32,32,32,32,32,32,32,48,72,72,0,0,
0,0,0,-1,-1,-1,-1,-1,-1,0,0,1,1,1,1,1,1,1,1,1,1,3,6,6,0,0,
0,0,0,-1,-1,-1,0,0,1,2,3,6,6,6,6,6,6,6,6,6,6,10,16,16,0,0,
0,0,0,0,0,0,0,1,2,4,7,11,11,11,11,11,11,11,11,11,11,17,26,26,0,0,
0,0,0,0,0,0,1,2,3,6,10,16,16,16,16,16,16,16,16,16,16,24,37,37,0,0,
0,0,0,1,1,1,2,3,5,9,14,22,22,22,22,22,22,22,22,22,22,32,49,49,0,0,
0,0,0,1,1,1,3,4,7,12,18,27,27,27,27,27,27,27,27,27,27,40,61,61,0,0,
0,0,0,2,2,2,4,6,9,14,21,32,32,32,32,32,32,32,32,32,32,48,72,72,0,0,
0,0,-1,-1,-1,-1,-1,-1,0,0,1,1,1,1,1,1,1,1,1,1,1,3,6,6,0,0,
0,0,-1,-1,-1,0,0,1,2,3,6,6,6,6,6,6,6,6,6,6,6,10,16,16,0,0,
0,0,0,0,0,0,1,2,4,7,11,11,11,11,11,11,11,11,11,11,11,17,26,26,0,0,
0,0,0,0,0,1,2,3,6,10,16,16,16,16,16,16,16,16,16,16,16,24,37,37,0,0,
0,0,1,1,1,2,3,5,9,14,22,22,22,22,22,22,22,22,22,22,22,32,49,49,0,0,
0,0,1,1,1,3,4,7,12,18,27,27,27,27,27,27,27,27,27,27,27,40,61,61,0,0,
0,0,2,2,2,4,6,9,14,21,32,32,32,32,32,32,32,32,32,32,32,48,72,72,0,0,
0,0,-1,-1,-1,-1,-1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,3,6,6,0,0,
0,0,-1,-1,0,0,1,2,3,6,6,6,6,6,6,6,6,6,6,6,6,10,16,16,0,0,
0,0,0,0,0,1,2,4,7,11,11,11,11,11,11,11,11,11,11,11,11,17,26,26,0,0,
0,0,0,0,1,2,3,6,10,16,16,16,16,16,16,16,16,16,16,16,16,24,37,37,0,0,
0,0,1,1,2,3,5,9,14,22,22,22,22,22,22,22,22,22,22,22,22,32,49,49,0,0,
0,0,1,1,3,4,7,12,18,27,27,27,27,27,27,27,27,27,27,27,27,40,61,61,0,0,
0,0,2,2,4,6,9,14,21,32,32,32,32,32,32,32,32,32,32,32,32,48,72,72,0,0)
AAMultipleTightenedac<-array(t, dim=c(26,7,16), dimnames=list(c("0.010","0.015","0.025","0.040","0.065","0.10","0.15","0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5",
"10","15","25","40","65","100","150","250","400","650","1000"),
c("first","second","third","fourth","fifth","sixth","seventh"),
c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R")
))
# Create array of rejection numbers for Multiple Tightened Sampling
t<-c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,4,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,5,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,6,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,5,6,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,5,7,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,4,4,6,8,10,15,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,3,5,7,9,12,17,25,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,4,6,9,12,17,24,36,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,5,7,11,15,22,31,46,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,6,8,12,17,25,37,55,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,5,6,9,14,20,29,43,64,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,5,7,10,15,22,33,49,73,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,2,4,4,6,8,10,15,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,3,3,5,7,9,12,17,25,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,3,4,6,9,12,17,24,36,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,4,5,7,11,15,22,31,46,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,4,6,8,12,17,25,37,55,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,5,6,9,14,20,29,43,64,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,5,7,10,15,22,33,49,73,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,2,2,4,4,6,8,10,15,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,3,3,5,7,9,12,17,25,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,3,4,6,9,12,17,24,36,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,3,4,5,7,11,15,22,31,46,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,3,4,6,8,12,17,25,37,55,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,3,5,6,9,14,20,29,43,64,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,3,5,7,10,15,22,33,49,73,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,2,2,4,4,6,8,10,15,15,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,3,3,5,7,9,12,17,25,25,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,3,4,6,9,12,17,24,36,36,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,3,3,3,4,5,7,11,15,22,31,46,46,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,3,3,3,4,6,8,12,17,25,37,55,55,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,3,3,3,5,6,9,14,20,29,43,64,64,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,3,3,3,5,7,10,15,22,33,49,73,73,0,0,
0,0,0,0,0,0,0,0,0,0,0,2,2,2,2,2,4,4,6,8,8,10,15,15,0,0,
0,0,0,0,0,0,0,0,0,0,0,2,2,2,3,3,5,7,9,12,12,17,25,25,0,0,
0,0,0,0,0,0,0,0,0,0,0,2,2,2,3,4,6,9,12,17,17,24,36,36,0,0,
0,0,0,0,0,0,0,0,0,0,0,3,3,3,4,5,7,11,15,22,22,31,46,46,0,0,
0,0,0,0,0,0,0,0,0,0,0,3,3,3,4,6,8,12,17,25,25,37,55,55,0,0,
0,0,0,0,0,0,0,0,0,0,0,3,3,3,5,6,9,14,20,29,29,43,64,64,0,0,
0,0,0,0,0,0,0,0,0,0,0,3,3,3,5,7,10,15,22,33,33,49,73,73,0,0,
0,0,0,0,0,0,0,0,0,0,2,2,2,2,2,4,4,6,8,8,8,10,15,15,0,0,
0,0,0,0,0,0,0,0,0,0,2,2,2,3,3,5,7,9,12,12,12,17,25,25,0,0,
0,0,0,0,0,0,0,0,0,0,2,2,2,3,4,6,9,12,17,17,17,24,36,36,0,0,
0,0,0,0,0,0,0,0,0,0,3,3,3,4,5,7,11,15,22,22,22,31,46,46,0,0,
0,0,0,0,0,0,0,0,0,0,3,3,3,4,6,8,12,17,25,25,25,37,55,55,0,0,
0,0,0,0,0,0,0,0,0,0,3,3,3,5,6,9,14,20,29,29,29,43,64,64,0,0,
0,0,0,0,0,0,0,0,0,0,3,3,3,5,7,10,15,22,33,33,33,49,73,73,0,0,
0,0,0,0,0,0,0,0,0,2,2,2,2,2,4,4,6,8,8,8,8,10,15,15,0,0,
0,0,0,0,0,0,0,0,0,2,2,2,3,3,5,7,9,12,12,12,12,17,25,25,0,0,
0,0,0,0,0,0,0,0,0,2,2,2,3,4,6,9,12,17,17,17,17,24,36,36,0,0,
0,0,0,0,0,0,0,0,0,3,3,3,4,5,7,11,15,22,22,22,22,31,46,46,0,0,
0,0,0,0,0,0,0,0,0,3,3,3,4,6,8,12,17,25,25,25,25,37,55,55,0,0,
0,0,0,0,0,0,0,0,0,3,3,3,5,6,9,14,20,29,29,29,29,43,64,64,0,0,
0,0,0,0,0,0,0,0,0,3,3,3,5,7,10,15,22,33,33,33,33,49,73,73,0,0,
0,0,0,0,0,0,0,0,2,2,2,2,2,4,4,6,8,8,8,8,8,10,15,15,0,0,
0,0,0,0,0,0,0,0,2,2,2,3,3,5,7,9,12,12,12,12,12,17,25,25,0,0,
0,0,0,0,0,0,0,0,2,2,2,3,4,6,9,12,17,17,17,17,17,24,36,36,0,0,
0,0,0,0,0,0,0,0,3,3,3,4,5,7,11,15,22,22,22,22,22,31,46,46,0,0,
0,0,0,0,0,0,0,0,3,3,3,4,6,8,12,17,25,25,25,25,25,37,55,55,0,0,
0,0,0,0,0,0,0,0,3,3,3,5,6,9,14,20,29,29,29,29,29,43,64,64,0,0,
0,0,0,0,0,0,0,0,3,3,3,5,7,10,15,22,33,33,33,33,33,49,73,73,0,0,
0,0,0,0,0,0,0,2,2,2,2,2,4,4,6,8,8,8,8,8,8,10,15,15,0,0,
0,0,0,0,0,0,0,2,2,2,3,3,5,7,9,12,12,12,12,12,12,17,25,25,0,0,
0,0,0,0,0,0,0,2,2,2,3,4,6,9,12,17,17,17,17,17,17,24,36,36,0,0,
0,0,0,0,0,0,0,3,3,3,4,5,7,11,15,22,22,22,22,22,22,31,46,46,0,0,
0,0,0,0,0,0,0,3,3,3,4,6,8,12,17,25,25,25,25,25,25,37,55,55,0,0,
0,0,0,0,0,0,0,3,3,3,5,6,9,14,20,29,29,29,29,29,29,43,64,64,0,0,
0,0,0,0,0,0,0,3,3,3,5,7,10,15,22,33,33,33,33,33,33,49,73,73,0,0,
0,0,0,0,0,0,2,2,2,2,2,4,4,6,8,8,8,8,8,8,8,10,15,15,0,0,
0,0,0,0,0,0,2,2,2,3,3,5,7,9,12,12,12,12,12,12,12,17,25,25,0,0,
0,0,0,0,0,0,2,2,2,3,4,6,9,12,17,17,17,17,17,17,17,24,36,36,0,0,
0,0,0,0,0,0,3,3,3,4,5,7,11,15,22,22,22,22,22,22,22,31,46,46,0,0,
0,0,0,0,0,0,3,3,3,4,6,8,12,17,25,25,25,25,25,25,25,37,55,55,0,0,
0,0,0,0,0,0,3,3,3,5,6,9,14,20,29,29,29,29,29,29,29,43,64,64,0,0,
0,0,0,0,0,0,3,3,3,5,7,10,15,22,33,33,33,33,33,33,33,49,73,73,0,0,
0,0,0,0,0,2,2,2,2,2,4,4,6,8,8,8,8,8,8,8,8,10,15,15,0,0,
0,0,0,0,0,2,2,2,3,3,5,7,9,12,12,12,12,12,12,12,12,17,25,25,0,0,
0,0,0,0,0,2,2,2,3,4,6,9,12,17,17,17,17,17,17,17,17,24,36,36,0,0,
0,0,0,0,0,3,3,3,4,5,7,11,15,22,22,22,22,22,22,22,22,31,46,46,0,0,
0,0,0,0,0,3,3,3,4,6,8,12,17,25,25,25,25,25,25,25,25,37,55,55,0,0,
0,0,0,0,0,3,3,3,5,6,9,14,20,29,29,29,29,29,29,29,29,43,64,64,0,0,
0,0,0,0,0,3,3,3,5,7,10,15,22,33,33,33,33,33,33,33,33,49,73,73,0,0,
0,0,0,0,2,2,2,2,2,4,4,6,8,8,8,8,8,8,8,8,8,10,15,15,0,0,
0,0,0,0,2,2,2,3,3,5,7,9,12,12,12,12,12,12,12,12,12,17,25,25,0,0,
0,0,0,0,2,2,2,3,4,6,9,12,17,17,17,17,17,17,17,17,17,24,36,36,0,0,
0,0,0,0,3,3,3,4,5,7,11,15,22,22,22,22,22,22,22,22,22,31,46,46,0,0,
0,0,0,0,3,3,3,4,6,8,12,17,25,25,25,25,25,25,25,25,25,37,55,55,0,0,
0,0,0,0,3,3,3,5,6,9,14,20,29,29,29,29,29,29,29,29,29,43,64,64,0,0,
0,0,0,0,3,3,3,5,7,10,15,22,33,33,33,33,33,33,33,33,33,49,73,73,0,0,
0,0,0,2,2,2,2,2,4,4,6,8,8,8,8,8,8,8,8,8,8,10,15,15,0,0,
0,0,0,2,2,2,3,3,5,7,9,12,12,12,12,12,12,12,12,12,12,17,25,25,0,0,
0,0,0,2,2,2,3,4,6,9,12,17,17,17,17,17,17,17,17,17,17,24,36,36,0,0,
0,0,0,3,3,3,4,5,7,11,15,22,22,22,22,22,22,22,22,22,22,31,46,46,0,0,
0,0,0,3,3,3,4,6,8,12,17,25,25,25,25,25,25,25,25,25,25,37,55,55,0,0,
0,0,0,3,3,3,5,6,9,14,20,29,29,29,29,29,29,29,29,29,29,43,64,64,0,0,
0,0,0,3,3,3,5,7,10,15,22,33,33,33,33,33,33,33,33,33,33,49,73,73,0,0,
0,0,2,2,2,2,2,4,4,6,8,8,8,8,8,8,8,8,8,8,8,10,15,15,0,0,
0,0,2,2,2,3,3,5,7,9,12,12,12,12,12,12,12,12,12,12,12,17,25,25,0,0,
0,0,2,2,2,3,4,6,9,12,17,17,17,17,17,17,17,17,17,17,17,24,36,36,0,0,
0,0,3,3,3,4,5,7,11,15,22,22,22,22,22,22,22,22,22,22,22,31,46,46,0,0,
0,0,3,3,3,4,6,8,12,17,25,25,25,25,25,25,25,25,25,25,25,37,55,55,0,0,
0,0,3,3,3,5,6,9,14,20,29,29,29,29,29,29,29,29,29,29,29,43,64,64,0,0,
0,0,3,3,3,5,7,10,15,22,33,33,33,33,33,33,33,33,33,33,33,49,73,73,0,0,
0,0,2,2,2,2,4,4,6,8,8,8,8,8,8,8,8,8,8,8,8,10,15,15,0,0,
0,0,2,2,3,3,5,7,9,12,12,12,12,12,12,12,12,12,12,12,12,17,25,25,0,0,
0,0,2,2,3,4,6,9,12,17,17,17,17,17,17,17,17,17,17,17,17,24,36,36,0,0,
0,0,3,3,4,5,7,11,15,22,22,22,22,22,22,22,22,22,22,22,22,31,46,46,0,0,
0,0,3,3,4,6,8,12,17,25,25,25,25,25,25,25,25,25,25,25,25,37,55,55,0,0,
0,0,3,3,5,6,9,14,20,29,29,29,29,29,29,29,29,29,29,29,29,43,64,64,0,0,
0,0,3,3,5,7,10,15,22,33,33,33,33,33,33,33,33,33,33,33,33,49,73,73,0,0)
AAMultipleTightenedre<-array(t, dim=c(26,7,16), dimnames=list(c("0.010","0.015","0.025","0.040","0.065","0.10","0.15","0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5",
"10","15","25","40","65","100","150","250","400","650","1000"),
c("first","second","third","fourth","fifth","sixth","seventh"),
c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R")
))
# Create array of acceptance numbers for Multiple Reduced Sampling
t<-c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,-1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,-1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,0,0,1,1,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,7,11,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,3,4,7,10,14,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,1,2,4,6,9,13,18,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,0,0,1,1,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,7,11,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,3,4,7,10,14,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,1,1,2,4,6,9,13,18,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,0,0,1,1,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,8,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,7,11,11,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,1,3,4,7,10,14,14,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,2,4,6,9,13,18,18,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,-1,-1,-1,-1,0,0,1,1,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,8,8,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,7,11,11,11,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,1,3,4,7,10,14,14,14,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,1,1,2,4,6,9,13,18,18,18,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,-1,-1,-1,-1,0,0,1,1,3,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,8,8,8,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,7,11,11,11,11,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,3,4,7,10,14,14,14,14,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,1,1,2,4,6,9,13,18,18,18,18,0,0,0,0,0,0,
0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,-1,-1,-1,-1,0,0,1,1,3,3,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,8,8,8,8,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,3,5,7,11,11,11,11,11,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,1,3,4,7,10,14,14,14,14,14,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,1,1,2,4,6,9,13,18,18,18,18,18,0,0,0,0,0,0,
0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,-1,-1,-1,-1,0,0,1,1,3,3,3,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,2,3,6,6,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,3,5,8,8,8,8,8,8,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,2,3,5,7,11,11,11,11,11,11,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,1,3,4,7,10,14,14,14,14,14,14,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,1,2,4,6,9,13,18,18,18,18,18,18,0,0,0,0,0,0,
0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,-1,-1,-1,-1,0,0,1,1,3,3,3,3,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,3,6,6,6,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,2,3,5,8,8,8,8,8,8,8,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,2,3,5,7,11,11,11,11,11,11,11,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,1,3,4,7,10,14,14,14,14,14,14,14,0,0,0,0,0,0,
0,0,0,0,0,1,1,1,2,4,6,9,13,18,18,18,18,18,18,18,0,0,0,0,0,0,
0,0,0,0,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,-1,-1,-1,-1,0,0,1,1,3,3,3,3,3,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,2,3,6,6,6,6,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,2,3,5,8,8,8,8,8,8,8,8,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,2,3,5,7,11,11,11,11,11,11,11,11,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,3,4,7,10,14,14,14,14,14,14,14,14,0,0,0,0,0,0,
0,0,0,0,1,1,1,2,4,6,9,13,18,18,18,18,18,18,18,18,0,0,0,0,0,0,
0,0,0,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,-1,-1,-1,-1,0,0,1,1,3,3,3,3,3,3,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,2,3,6,6,6,6,6,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,2,3,5,8,8,8,8,8,8,8,8,8,0,0,0,0,0,0,
0,0,0,0,0,0,1,2,3,5,7,11,11,11,11,11,11,11,11,11,0,0,0,0,0,0,
0,0,0,0,0,1,1,3,4,7,10,14,14,14,14,14,14,14,14,14,0,0,0,0,0,0,
0,0,0,1,1,1,2,4,6,9,13,18,18,18,18,18,18,18,18,18,0,0,0,0,0,0,
0,0,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,-1,-1,-1,-1,0,0,1,1,3,3,3,3,3,3,3,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,2,3,6,6,6,6,6,6,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,1,2,3,5,8,8,8,8,8,8,8,8,8,8,0,0,0,0,0,0,
0,0,0,0,0,1,2,3,5,7,11,11,11,11,11,11,11,11,11,11,0,0,0,0,0,0,
0,0,0,0,1,1,3,4,7,10,14,14,14,14,14,14,14,14,14,14,0,0,0,0,0,0,
0,0,1,1,1,2,4,6,9,13,18,18,18,18,18,18,18,18,18,18,0,0,0,0,0,0,
0,0,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,-1,-1,-1,0,0,1,1,3,3,3,3,3,3,3,3,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,1,2,3,6,6,6,6,6,6,6,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,1,2,3,5,8,8,8,8,8,8,8,8,8,8,8,0,0,0,0,0,0,
0,0,0,0,1,2,3,5,7,11,11,11,11,11,11,11,11,11,11,11,0,0,0,0,0,0,
0,0,0,1,1,3,4,7,10,14,14,14,14,14,14,14,14,14,14,14,0,0,0,0,0,0,
0,0,1,1,2,4,6,9,13,18,18,18,18,18,18,18,18,18,18,18,0,0,0,0,0,0)
AAMultipleReducedac<-array(t, dim=c(26,7,16), dimnames=list(c("0.010","0.015","0.025","0.040","0.065","0.10","0.15","0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5",
"10","15","25","40","65","100","150","250","400","650","1000"),
c("first","second","third","fourth","fifth","sixth","seventh"),
c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R")
))
# Create array of rejection numbers for Multiple Reduced Sampling
t<-c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,2,2,3,3,4,4,5,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,2,2,3,4,5,6,7,9,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,2,2,4,5,6,8,9,12,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,3,3,5,6,7,10,12,15,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,3,3,6,7,8,11,13,17,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,3,3,6,7,9,12,15,20,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,3,3,7,8,10,14,17,22,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,2,2,2,3,3,4,4,5,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,2,2,3,3,4,5,6,7,9,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,2,2,3,4,5,6,8,9,12,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,3,3,4,5,6,7,10,12,15,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,3,3,4,6,7,8,11,13,17,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,3,3,5,6,7,9,12,15,20,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,3,3,5,7,8,10,14,17,22,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,2,2,3,3,4,4,5,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,2,3,3,4,5,6,7,9,9,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,2,3,4,5,6,8,9,12,12,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,3,3,4,5,6,7,10,12,15,15,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,3,3,4,6,7,8,11,13,17,17,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,3,3,5,6,7,9,12,15,20,20,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,3,3,5,7,8,10,14,17,22,22,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,2,2,2,3,3,4,4,5,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,2,2,3,3,4,5,6,7,9,9,9,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,2,2,3,4,5,6,8,9,12,12,12,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,3,4,5,6,7,10,12,15,15,15,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,3,4,6,7,8,11,13,17,17,17,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,3,5,6,7,9,12,15,20,20,20,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,3,5,7,8,10,14,17,22,22,22,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,2,2,3,3,4,4,5,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,2,3,3,4,5,6,7,9,9,9,9,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,2,3,4,5,6,8,9,12,12,12,12,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,3,3,4,5,6,7,10,12,15,15,15,15,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,3,3,4,6,7,8,11,13,17,17,17,17,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,3,3,5,6,7,9,12,15,20,20,20,20,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,3,3,5,7,8,10,14,17,22,22,22,22,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,2,3,3,4,4,5,6,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,3,3,4,5,6,7,9,9,9,9,9,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,3,4,5,6,8,9,12,12,12,12,12,0,0,0,0,0,0,
0,0,0,0,0,0,0,3,3,4,5,6,7,10,12,15,15,15,15,15,0,0,0,0,0,0,
0,0,0,0,0,0,0,3,3,4,6,7,8,11,13,17,17,17,17,17,0,0,0,0,0,0,
0,0,0,0,0,0,0,3,3,5,6,7,9,12,15,20,20,20,20,20,0,0,0,0,0,0,
0,0,0,0,0,0,0,3,3,5,7,8,10,14,17,22,22,22,22,22,0,0,0,0,0,0,
0,0,0,0,0,0,2,2,2,3,3,4,4,5,6,6,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,0,2,2,3,3,4,5,6,7,9,9,9,9,9,9,0,0,0,0,0,0,
0,0,0,0,0,0,2,2,3,4,5,6,8,9,12,12,12,12,12,12,0,0,0,0,0,0,
0,0,0,0,0,0,3,3,4,5,6,7,10,12,15,15,15,15,15,15,0,0,0,0,0,0,
0,0,0,0,0,0,3,3,4,6,7,8,11,13,17,17,17,17,17,17,0,0,0,0,0,0,
0,0,0,0,0,0,3,3,5,6,7,9,12,15,20,20,20,20,20,20,0,0,0,0,0,0,
0,0,0,0,0,0,3,3,5,7,8,10,14,17,22,22,22,22,22,22,0,0,0,0,0,0,
0,0,0,0,0,2,2,2,3,3,4,4,5,6,6,6,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,0,2,2,3,3,4,5,6,7,9,9,9,9,9,9,9,0,0,0,0,0,0,
0,0,0,0,0,2,2,3,4,5,6,8,9,12,12,12,12,12,12,12,0,0,0,0,0,0,
0,0,0,0,0,3,3,4,5,6,7,10,12,15,15,15,15,15,15,15,0,0,0,0,0,0,
0,0,0,0,0,3,3,4,6,7,8,11,13,17,17,17,17,17,17,17,0,0,0,0,0,0,
0,0,0,0,0,3,3,5,6,7,9,12,15,20,20,20,20,20,20,20,0,0,0,0,0,0,
0,0,0,0,0,3,3,5,7,8,10,14,17,22,22,22,22,22,22,22,0,0,0,0,0,0,
0,0,0,0,2,2,2,3,3,4,4,5,6,6,6,6,6,6,6,6,0,0,0,0,0,0,
0,0,0,0,2,2,3,3,4,5,6,7,9,9,9,9,9,9,9,9,0,0,0,0,0,0,
0,0,0,0,2,2,3,4,5,6,8,9,12,12,12,12,12,12,12,12,0,0,0,0,0,0,
0,0,0,0,3,3,4,5,6,7,10,12,15,15,15,15,15,15,15,15,0,0,0,0,0,0,
0,0,0,0,3,3,4,6,7,8,11,13,17,17,17,17,17,17,17,17,0,0,0,0,0,0,
0,0,0,0,3,3,5,6,7,9,12,15,20,20,20,20,20,20,20,20,0,0,0,0,0,0,
0,0,0,0,3,3,5,7,8,10,14,17,22,22,22,22,22,22,22,22,0,0,0,0,0,0,
0,0,0,2,2,2,3,3,4,4,5,6,6,6,6,6,6,6,6,6,0,0,0,0,0,0,
0,0,0,2,2,3,3,4,5,6,7,9,9,9,9,9,9,9,9,9,0,0,0,0,0,0,
0,0,0,2,2,3,4,5,6,8,9,12,12,12,12,12,12,12,12,12,0,0,0,0,0,0,
0,0,0,3,3,4,5,6,7,10,12,15,15,15,15,15,15,15,15,15,0,0,0,0,0,0,
0,0,0,3,3,4,6,7,8,11,13,17,17,17,17,17,17,17,17,17,0,0,0,0,0,0,
0,0,0,3,3,5,6,7,9,12,15,20,20,20,20,20,20,20,20,20,0,0,0,0,0,0,
0,0,0,3,3,5,7,8,10,14,17,22,22,22,22,22,22,22,22,22,0,0,0,0,0,0,
0,0,2,2,2,3,3,4,4,5,6,6,6,6,6,6,6,6,6,6,0,0,0,0,0,0,
0,0,2,2,3,3,4,5,6,7,9,9,9,9,9,9,9,9,9,9,0,0,0,0,0,0,
0,0,2,2,3,4,5,6,8,9,12,12,12,12,12,12,12,12,12,12,0,0,0,0,0,0,
0,0,3,3,4,5,6,7,10,12,15,15,15,15,15,15,15,15,15,15,0,0,0,0,0,0,
0,0,3,3,4,6,7,8,11,13,17,17,17,17,17,17,17,17,17,17,0,0,0,0,0,0,
0,0,3,3,5,6,7,9,12,15,20,20,20,20,20,20,20,20,20,20,0,0,0,0,0,0,
0,0,3,3,5,7,8,10,14,17,22,22,22,22,22,22,22,22,22,22,0,0,0,0,0,0,
0,0,2,2,3,3,4,4,5,6,6,6,6,6,6,6,6,6,6,6,0,0,0,0,0,0,
0,0,2,3,3,4,5,6,7,9,9,9,9,9,9,9,9,9,9,9,0,0,0,0,0,0,
0,0,2,3,4,5,6,8,9,12,12,12,12,12,12,12,12,12,12,12,0,0,0,0,0,0,
0,0,3,4,5,6,7,10,12,15,15,15,15,15,15,15,15,15,15,15,0,0,0,0,0,0,
0,0,3,4,6,7,8,11,13,17,17,17,17,17,17,17,17,17,17,17,0,0,0,0,0,0,
0,0,3,5,6,7,9,12,15,20,20,20,20,20,20,20,20,20,20,20,0,0,0,0,0,0,
0,0,3,5,7,8,10,14,17,22,22,22,22,22,22,22,22,22,22,22,0,0,0,0,0,0)
AAMultipleReducedre<-array(t, dim=c(26,7,16), dimnames=list(c("0.010","0.015","0.025","0.040","0.065","0.10","0.15","0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5",
"10","15","25","40","65","100","150","250","400","650","1000"),
c("first","second","third","fourth","fifth","sixth","seventh"),
c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R")
))
# Get Code letter from SSCodeLetters
codelet<-SSCodeLetters[dLOTS,dINSL]
if(PLAN == 1) {
ac<-AAMultipleNormalac[dAQL, ,codelet]
re<-AAMultipleNormalre[dAQL, ,codelet]
S<-AAMultipleNormalss[codelet, dAQL]
ss<-c(S,S,S,S,S,S,S)
names(ss)<-c("first","second","third","fourth","fifth","sixth","seventh")
} else if(PLAN == 2) {
ac<-AAMultipleTightenedac[dAQL, ,codelet]
re<-AAMultipleTightenedre[dAQL, ,codelet]
S<-AAMultipleTightenedss[codelet, dAQL]
ss<-c(S,S,S,S,S,S,S)
names(ss)<-c("first","second","third","fourth","fifth","sixth","seventh")
} else if(PLAN == 3) {
ac<-AAMultipleReducedac[dAQL, ,codelet]
re<-AAMultipleReducedre[dAQL, ,codelet]
S<-AAMultipleReducedss[codelet, dAQL ]
ss<-c(S,S,S,S,S,S,S)
names(ss)<-c("first","second","third","fourth","fifth","sixth","seventh")
}
if(S==-1) {warning("No multiple sampling exists. Use the corresponding single sampling plan")
} else if(S==-2) {warning("No multiple sampling exists. Use the corresponding double sampling plan")
} else if(S>0) {plan<-data.frame(n=ss,c=ac,r=re)
return(plan)
}
}
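# Minimal usage sketch (illustrative only -- assumes the same (PLAN, INSL, LOTS, AQL)
# arguments as AAZ14Single, with the lookup driven by the interactive menus;
# PLAN 1 = Normal, 2 = Tightened, 3 = Reduced):
# planm <- AAZ14Multiple(1, "II", "3201-10,000", "1.5")
# planm   # a 7-row data frame of stage sample sizes n with acceptance (c) and rejection (r) numbers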
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/R/AAZ14Multiple.R |
AAZ14Single<-function(PLAN,INSL,LOTS,AQL){
message("MIL-STD-105E ANSI/ASQ Z1.4")
message("If the sample size exceeds the lot size, carry out 100% inspection")
# Get Plan
# dPLAN <- menu(c("Normal", "Tightened", "Reduced"), title = "\nWhat type Inspection Plan?")
# PLAN
# Get the inspection level
dINSL <- menu(c("S-1", "S-2", "S-3", "S-4",
"I", "II", "III"), title = "\nWhat is the Inspection Level?")
INSL
# Get the lot size
dLOTS <- menu(c("2-8", "9-15", "16-25", "26-50",
"51-90", "91-150", "151-280", "281-500",
"501-1200", "1201-3200", "3201-10,000",
"10,001-35,000", "35,001-150,000", "150,001-500,000",
"500,001 and over"), title = "\nWhat is the Lot Size?")
LOTS
# Get the AQL
dAQL <- menu(c("0.010","0.015","0.025","0.040","0.065","0.10","0.15","0.25",
"0.40","0.65","1.0","1.5","2.5","4.0","6.5","10",
"15","25","40","65","100","150","250","400","650","1000"),
title = "\nWhat is the AQL in percent nonconforming per 100 items?")
AQL
#Create matrix of Code Letters
codes<-c("A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R")
InspLev<-c("S-1","S-2","S-3","S-4","I","II","III")
LotSize<-c("2-8","9-15","16-25","26-50","51-90","91-150","151-280","281-500","501-1200","1201-3200","3201-10,000","10,001-35,000","35,001-150,000","150,001-500,000","over 500,001")
AQL<-c("0.010","0.015","0.025","0.040","0.065","0.10","0.15","0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5","10","15","25","40","65","100","150","250","400","650","1000")
letters<-c("A","A","A","A","A","A","B",
"A","A","A","A","A","B","C",
"A","A","B","B","B","C","D",
"A","B","B","C","C","D","E",
"B","B","C","C","C","E","F",
"B","B","C","D","D","F","G",
"B","C","D","E","E","G","H",
"B","C","D","E","F","H","J",
"C","C","E","F","G","J","K",
"C","D","E","G","H","K","L",
"C","D","F","G","J","L","M",
"C","D","F","H","K","M","N",
"D","E","G","J","L","N","P",
"D","E","G","J","M","P","Q",
"D","E","H","K","N","Q","R")
SSCodeLetters<-matrix(letters,nrow=15, byrow=TRUE)
rownames(SSCodeLetters)<-LotSize
colnames(SSCodeLetters)<-InspLev
#Create Matrix of Single Sampling sample Sizes for Normal Sampling
temp<-array(c(1250,800,500,315,200,125,80,50,32,20,13,8,5,3,2,5,3,2,2,2,2,2,2,2,2,2,
1250,800,500,315,200,125,80,50,32,20,13,8,5,3,2,5,3,3,3,3,3,3,3,3,3,3,
1250,800,500,315,200,125,80,50,32,20,13,8,5,3,8,5,5,5,5,5,5,5,5,5,5,3,
1250,800,500,315,200,125,80,50,32,20,13,8,5,13,8,8,8,8,8,8,8,8,8,8,5,3,
1250,800,500,315,200,125,80,50,32,20,13,8,20,13,13,13,13,13,13,13,13,13,13,8,5,3,
1250,800,500,315,200,125,80,50,32,20,13,32,20,20,20,20,20,20,20,20,13,13,13,8,5,3,
1250,800,500,315,200,125,80,50,32,20,50,32,32,32,32,32,32,32,32,20,13,13,13,8,5,3,
1250,800,500,315,200,125,80,50,32,80,50,50,50,50,50,50,50,50,32,20,13,13,13,8,5,3,
1250,800,500,315,200,125,80,50,125,80,80,80,80,80,80,80,80,50,32,20,13,13,13,8,5,3,
1250,800,500,315,200,125,80,200,125,125,125,125,125,125,125,125,80,50,32,20,13,13,13,8,5,3,
1250,800,500,315,200,125,315,200,200,200,200,200,200,200,200,125,80,50,32,20,13,13,13,8,5,3,
1250,800,500,315,200,500,315,315,315,315,315,315,315,315,200,125,80,50,32,20,13,13,13,8,5,3,
1250,800,500,315,800,500,500,500,500,500,500,500,500,315,200,125,80,50,32,20,13,13,13,8,5,3,
1250,800,500,1250,800,800,800,800,800,800,800,800,500,315,200,125,80,50,32,20,13,13,13,8,5,3,
1250,800,2000,1250,1250,1250,1250,1250,1250,1250,1250,800,500,315,200,125,80,50,32,20,13,13,13,8,5,3,
1250,800,2000,2000,2000,2000,2000,2000,2000,2000,1250,800,500,315,200,125,80,50,32,20,13,13,13,8,5,3),
dim=c(26,16))
ANSIASQSingleNormalss <- t(temp)
rownames(ANSIASQSingleNormalss)<-codes
colnames(ANSIASQSingleNormalss)<-AQL
#Create Matrix of Sampling Sizes for Tightened Sampling
temp<-array(c(2000,1250,800,500,315,200,125,80,50,32,20,13,8,5,3,8,5,3,2,2,2,2,2,2,2,2,
2000,1250,800,500,315,200,125,80,50,32,20,13,8,5,3,8,5,3,3,3,3,3,3,3,3,3,
2000,1250,800,500,315,200,125,80,50,32,20,13,8,5,13,8,5,5,5,5,5,5,5,5,5,3,
2000,1250,800,500,315,200,125,80,50,32,20,13,8,20,13,8,8,8,8,8,8,8,8,8,5,3,
2000,1250,800,500,315,200,125,80,50,32,20,13,32,20,13,13,13,13,13,13,13,13,13,8,5,3,
2000,1250,800,500,315,200,125,80,50,32,20,50,32,20,20,20,20,20,20,20,13,13,13,8,5,3,
2000,1250,800,500,315,200,125,80,50,32,80,50,32,32,32,32,32,32,32,20,13,13,13,8,5,3,
2000,1250,800,500,315,200,125,80,50,125,80,50,50,50,50,50,50,50,32,20,13,13,13,8,5,3,
2000,1250,800,500,315,200,125,80,200,125,80,80,80,80,80,80,80,50,32,20,13,13,13,8,5,3,
2000,1250,800,500,315,200,125,315,200,125,125,125,125,125,125,125,80,50,32,20,13,13,13,8,5,3,
2000,1250,800,500,315,200,500,315,200,200,200,200,200,200,200,125,80,50,32,20,13,13,13,8,5,3,
2000,1250,800,500,315,800,500,315,315,315,315,315,315,315,200,125,80,50,32,20,13,13,13,8,5,3,
2000,1250,800,500,1250,800,500,500,500,500,500,500,500,315,200,125,80,50,32,20,13,13,13,8,5,3,
2000,1250,800,2000,1250,800,800,800,800,800,800,800,500,315,200,125,80,50,32,20,13,13,13,8,5,3,
2000,1250,3150,2000,1250,1250,1250,1250,1250,1250,1250,800,500,315,200,125,80,50,32,20,13,13,13,8,5,3,
2000,1250,3150,2000,2000,2000,2000,2000,2000,2000,1250,800,500,315,200,125,80,50,32,20,13,13,13,8,5,3),
dim=c(26,16))
ANSIASQSingleTightenedss<-t(temp)
rownames(ANSIASQSingleTightenedss)<-codes
colnames(ANSIASQSingleTightenedss)<-AQL
#Create Matrix of Single Sampling sample Sizes for Reduced Sampling
temp<-array(c(500,315,200,125,80,50,32,20,13,8,5,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
500,315,200,125,80,50,32,20,13,8,5,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
500,315,200,125,80,50,32,20,13,8,5,3,2,2,3,2,2,2,2,2,2,2,2,2,2,2,
500,315,200,125,80,50,32,20,13,8,5,3,2,5,3,3,3,3,3,3,3,3,3,3,2,2,
500,315,200,125,80,50,32,20,13,8,5,3,8,5,5,5,5,5,5,5,5,5,5,3,2,2,
500,315,200,125,80,50,32,20,13,8,5,13,8,8,8,8,8,8,8,8,5,5,5,3,2,2,
500,315,200,125,80,50,32,20,13,8,20,13,13,13,13,13,13,13,13,8,5,5,5,3,2,2,
500,315,200,125,80,50,32,20,13,32,20,20,20,20,20,20,20,20,13,8,5,5,5,3,2,2,
500,315,200,125,80,50,32,20,50,32,32,32,32,32,32,32,32,20,13,8,5,5,5,3,2,2,
500,315,200,125,80,50,32,80,50,50,50,50,50,50,50,50,32,20,13,8,5,5,5,3,2,2,
500,315,200,125,80,50,125,80,80,80,80,80,80,80,80,50,32,20,13,8,5,5,5,3,2,2,
500,315,200,125,80,200,125,125,125,125,125,125,125,125,80,50,32,20,13,8,5,5,5,3,2,2,
500,315,200,125,315,200,200,200,200,200,200,200,200,125,80,50,32,20,13,8,5,5,5,3,2,2,
500,315,200,500,315,315,315,315,315,315,315,315,200,125,80,50,32,20,13,8,5,5,5,3,2,2,
500,315,800,500,500,500,500,500,500,500,500,315,200,125,80,50,32,20,13,8,5,5,5,3,2,2,
500,315,800,800,800,800,800,800,800,800,500,315,200,125,80,50,32,20,13,8,5,5,5,3,2,2),
dim=c(26,16))
ANSIASQSingleReducedss<-t(temp)
rownames(ANSIASQSingleReducedss)<-codes
colnames(ANSIASQSingleReducedss)<-AQL
# Create Matrix of Single Sampling acceptance numbers for Normal Inspection
c1<-rep(0,16)
c2<-c1
c3<-c(rep(0,14),1,1)
c4<-c(rep(0,13),1,1,2)
c5<-c(rep(0,12),1,1,2,3)
c6<-c(rep(0,11),1,1,2,3,5)
c7<-c(rep(0,10),1,1,2,3,5,7)
c8<-c(rep(0,9),1,1,2,3,5,7,10)
c9<-c(rep(0,8),1,1,2,3,5,7,10,14)
c10<-c(rep(0,7),1,1,2,3,5,7,10,14,21)
c11<-c(rep(0,6),1,1,2,3,5,7,10,14,21,21)
c12<-c(rep(0,5),1,1,2,3,5,7,10,14,21,21,21)
c13<-c(rep(0,4),1,1,2,3,5,7,10,14,21,21,21,21)
c14<-c(rep(0,3),1,1,2,3,5,7,10,14,rep(21,5))
c15<-c(0,0,1,1,2,3,5,7,10,14,21,rep(21,5))
c16<-c(1,1,1,2,3,5,7,10,14,21,rep(21,6))
c17<-c(1,1,2,3,5,7,10,14,21,rep(21,7))
c18<-c(1,2,3,5,7,10,14,21,rep(21,8))
c19<-c(2,3,5,7,10,14,21,rep(21,9))
c20<-c(3,5,7,10,14,21,rep(21,10))
c21<-c(5,7,10,14,21,rep(21,11))
c22<-c(7,10,14,21,30,rep(30,11))
c23<-c(10,14,21,30,44,rep(44,11))
c24<-c(14,21,30,44,rep(44,12))
c25<-c(21,30,44,rep(44,13))
c26<-c(30,44,rep(44,14))
ANSIASQSingleNormalac<-cbind(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26)
rownames(ANSIASQSingleNormalac)<-codes
colnames(ANSIASQSingleNormalac)<-AQL
# Create Matrix of Single Sampling rejection numbers for Normal Inspection
c1<-rep(1,16)
c2<-c1
c3<-c(rep(1,14),2,2)
c4<-c(rep(1,13),2,2,3)
c5<-c(rep(1,12),2,2,3,4)
c6<-c(rep(1,11),2,2,3,4,6)
c7<-c(rep(1,10),2,2,3,4,6,8)
c8<-c(rep(1,9),2,2,3,4,6,8,11)
c9<-c(rep(1,8),2,2,3,4,6,8,11,15)
c10<-c(rep(1,7),2,2,3,4,6,8,11,15,22)
c11<-c(rep(1,6),2,2,3,4,6,8,11,15,22,22)
c12<-c(rep(1,5),2,2,3,4,6,8,11,15,22,22,22)
c13<-c(rep(1,4),2,2,3,4,6,8,11,15,22,22,22,22)
c14<-c(rep(1,3),2,2,3,4,6,8,11,15,rep(22,5))
c15<-c(1,1,2,2,3,4,6,8,11,15,22,rep(22,5))
c16<-c(2,2,2,3,4,6,8,11,15,22,rep(22,6))
c17<-c(2,2,3,4,6,8,11,15,22,rep(22,7))
c18<-c(2,3,4,6,8,11,15,22,rep(22,8))
c19<-c(3,4,6,8,11,15,22,rep(22,9))
c20<-c(4,6,8,11,15,22,rep(22,10))
c21<-c(6,8,11,15,22,rep(22,11))
c22<-c(8,11,15,22,31,rep(31,11))
c23<-c(11,15,22,31,45,rep(45,11))
c24<-c(15,22,31,45,rep(45,12))
c25<-c(22,31,45,rep(45,13))
c26<-c(31,45,rep(45,14))
ANSIASQSingleNormalre<-cbind(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26)
rownames(ANSIASQSingleNormalre)<-codes
colnames(ANSIASQSingleNormalre)<-AQL
#Create Matrix of Single Sampling acceptance numbers for Tightened Inspection
c1<-rep(0,16)
c2<-c1
c3<-c(rep(0,14),1,1)
c4<-c(rep(0,13),1,1,1)
c5<-c(rep(0,12),1,1,1,2)
c6<-c(rep(0,11),1,1,1,2,3)
c7<-c(rep(0,10),1,1,1,2,3,5)
c8<-c(rep(0,9),1,1,1,2,3,5,8)
c9<-c(rep(0,8),1,1,1,2,3,5,8,12)
c10<-c(rep(0,7),1,1,1,2,3,5,8,12,18)
c11<-c(rep(0,6),1,1,1,2,3,5,8,12,18,18)
c12<-c(rep(0,5),1,1,1,2,3,5,8,12,18,18,18)
c13<-c(rep(0,4),1,1,1,2,3,5,8,12,18,18,18,18)
c14<-c(rep(0,3),1,1,1,2,3,5,8,12,rep(18,5))
c15<-c(0,0,1,1,1,2,3,5,8,12,18,rep(18,5))
c16<-c(1,1,1,1,2,3,5,8,12,18,rep(18,6))
c17<-c(1,1,1,2,3,5,8,12,18,rep(18,7))
c18<-c(1,1,2,3,5,8,12,18,rep(18,8))
c19<-c(1,2,3,5,8,12,18,rep(18,9))
c20<-c(2,3,5,8,12,18,rep(18,10))
c21<-c(3,5,8,12,18,rep(18,11))
c22<-c(5,8,12,18,27,rep(27,11))
c23<-c(8,12,18,27,41,rep(41,11))
c24<-c(12,18,27,41,rep(41,12))
c25<-c(18,27,41,rep(41,13))
c26<-c(27,41,rep(41,14))
ANSIASQSingleTightenedac<-cbind(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26)
rownames(ANSIASQSingleTightenedac)<-codes
colnames(ANSIASQSingleTightenedac)<-AQL
#Create Matrix of Single Sampling rejection numbers for Tightened Inspection
c1<-rep(1,16)
c2<-c1
c3<-c(rep(1,14),2,2)
c4<-c(rep(1,13),2,2,2)
c5<-c(rep(1,12),2,2,2,3)
c6<-c(rep(1,11),2,2,2,3,4)
c7<-c(rep(1,10),2,2,2,3,4,6)
c8<-c(rep(1,9),2,2,2,3,4,6,9)
c9<-c(rep(1,8),2,2,2,3,4,6,9,13)
c10<-c(rep(1,7),2,2,2,3,4,6,9,13,19)
c11<-c(rep(1,6),2,2,2,3,4,6,9,13,19,19)
c12<-c(rep(1,5),2,2,2,3,4,6,9,13,19,19,19)
c13<-c(rep(1,4),2,2,2,3,4,6,9,13,19,19,19,19)
c14<-c(rep(1,3),2,2,2,3,4,6,9,13,rep(19,5))
c15<-c(1,1,2,2,2,3,4,6,9,13,19,rep(19,5))
c16<-c(2,2,2,2,3,4,6,9,13,19,rep(19,6))
c17<-c(2,2,2,3,4,6,9,13,19,rep(19,7))
c18<-c(2,2,3,4,6,9,13,19,rep(19,8))
c19<-c(2,3,4,6,9,13,19,rep(19,9))
c20<-c(3,4,6,9,13,19,rep(19,10))
c21<-c(4,6,9,13,19,rep(19,11))
c22<-c(6,9,13,19,28,rep(28,11))
c23<-c(9,13,19,28,42,rep(42,11))
c24<-c(13,19,28,42,rep(42,12))
c25<-c(19,28,42,rep(42,13))
c26<-c(28,42,rep(42,14))
ANSIASQSingleTightenedre<-cbind(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26)
rownames(ANSIASQSingleTightenedre)<-codes
colnames(ANSIASQSingleTightenedre)<-AQL
#Create Matrix of Single Sampling acceptance numbers for Reduced Inspection
c1<-rep(0,16)
c2<-c1
c3<-c(rep(0,16))
c4<-c(rep(0,15),1)
c5<-c(rep(0,14),1,1)
c6<-c(rep(0,13),1,1,2)
c7<-c(rep(0,12),1,1,2,3)
c8<-c(rep(0,11),1,1,2,3,5)
c9<-c(rep(0,10),1,1,2,3,5,7)
c10<-c(rep(0,9),1,1,2,3,5,7,10)
c11<-c(rep(0,8),1,1,2,3,5,7,10,10)
c12<-c(rep(0,7),1,1,2,3,5,7,10,10,10)
c13<-c(rep(0,6),1,1,2,3,5,7,10,10,10,10)
c14<-c(rep(0,5),1,1,2,3,5,7,10,rep(10,4))
c15<-c(0,0,0,0,1,1,2,3,5,7,10,rep(10,5))
c16<-c(0,0,0,1,1,2,3,5,7,10,rep(10,6))
c17<-c(0,0,1,1,2,3,5,7,10,rep(10,7))
c18<-c(1,1,1,2,3,5,7,10,rep(10,8))
c19<-c(2,2,2,3,5,7,10,rep(10,9))
c20<-c(3,3,3,5,7,10,rep(10,10))
c21<-c(5,5,5,7,10,rep(10,11))
c22<-c(7,7,7,10,14,rep(14,11))
c23<-c(10,10,10,14,21,rep(21,11))
c24<-c(14,14,14,21,rep(21,12))
c25<-c(21,21,21,rep(21,13))
c26<-c(rep(30,16))
ANSIASQSingleReducedac<-cbind(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26)
rownames(ANSIASQSingleReducedac)<-codes
colnames(ANSIASQSingleReducedac)<-AQL
#Create Matrix of Single Sampling rejection numbers for Reduced Inspection
c1<-rep(1,16)
c2<-c1
c3<-c(rep(1,14),2,2)
c4<-c(rep(1,13),2,2,3)
c5<-c(rep(1,12),2,2,3,4)
c6<-c(rep(1,11),2,2,3,4,5)
c7<-c(rep(1,10),2,2,3,4,5,6)
c8<-c(rep(1,9),2,2,3,4,5,6,8)
c9<-c(rep(1,8),2,2,3,4,5,6,8,10)
c10<-c(rep(1,7),2,2,3,4,5,6,8,10,13)
c11<-c(rep(1,6),2,2,3,4,5,6,8,10,13,13)
c12<-c(rep(1,5),2,2,3,4,5,6,8,10,13,13,13)
c13<-c(rep(1,4),2,2,3,4,5,6,8,10,13,13,13,13)
c14<-c(rep(1,3),2,2,3,4,5,6,8,10,13,rep(13,4))
c15<-c(1,1,2,2,3,4,5,6,8,10,13,rep(13,5))
c16<-c(2,2,2,3,4,5,6,8,10,13,rep(13,6))
c17<-c(2,2,3,4,5,6,8,10,13,rep(13,7))
c18<-c(2,3,4,5,6,8,10,13,rep(13,8))
c19<-c(3,4,5,6,8,10,13,rep(13,9))
c20<-c(4,5,6,8,10,13,rep(13,10))
c21<-c(6,6,8,10,13,rep(13,11))
c22<-c(8,8,10,13,17,rep(17,11))
c23<-c(11,11,13,17,24,rep(24,11))
c24<-c(15,15,17,24,rep(24,12))
c25<-c(22,22,24,rep(24,13))
c26<-c(rep(31,16))
ANSIASQSingleReducedre<-cbind(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26)
rownames(ANSIASQSingleReducedre)<-codes
colnames(ANSIASQSingleReducedre)<-AQL
# Get Code letter from SSCodeLetters
codelet<-SSCodeLetters[dLOTS,dINSL]
if(PLAN == 1) {
ac<-ANSIASQSingleNormalac[codelet,dAQL]
re<-ANSIASQSingleNormalre[codelet,dAQL]
ss<-ANSIASQSingleNormalss[codelet,dAQL]
} else if(PLAN == 2) {
ac<-ANSIASQSingleTightenedac[codelet,dAQL]
re<-ANSIASQSingleTightenedre[codelet,dAQL]
ss<-ANSIASQSingleTightenedss[codelet,dAQL]
} else if(PLAN == 3) {
ac<-ANSIASQSingleReducedac[codelet,dAQL]
re<-ANSIASQSingleReducedre[codelet,dAQL]
ss<-ANSIASQSingleReducedss[codelet,dAQL]
}
plan<-data.frame(n=ss,c=ac,r=re)
return(plan)
}
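# Minimal usage sketch (illustrative; the lookup itself is driven by the
# interactive menus, with PLAN 1 = Normal, 2 = Tightened, 3 = Reduced):
# plan <- AAZ14Single(1, "II", "3201-10,000", "1.5")
# # answering level II, lot size 3201-10,000, AQL 1.5 gives n = 200, c = 7, r = 8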
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/R/AAZ14Single.R |
AAZ19<-function(type="Normal",stype="unknown",INSL="II",LOTS="2-8",AQL="1.0"){
newvalue<-NULL
op <- options(changedop=newvalue)
message("ANSI/ASQ Z1.9 converted from MIL-STD-414 using Gasciogne Method")
dINSL <- menu(c("S-3", "S-4","I", "II", "III"), title = "\nWhat is the Inspection Level?")
INSL
# Get the lot size
dLOTS <- menu(c("2-8", "9-15", "16-25", "26-50",
"51-90", "91-150", "151-280", "281-400",
"401-500", "501-1200", "1201-3200","3201-10,000",
"10,001-35,000", "35,001-150,000", "150,001-500,000",
"500,001 and over"), title = "\nWhat is the Lot Size?")
LOTS
# Get the AQL
dAQL <- menu(c("0.10","0.15","0.25",
"0.40","0.65","1.0","1.5","2.5","4.0","6.5","10"),
title = "\nWhat is the AQL in percent nonconforming per 100 items?")
AQL
#Create matrix of Code Letters
codes<-c("B","C","D","E","F","G","H","I","J","K","L","M","N","P")
InspLev<-c("S-3","S-4","I","II","III")
LotSize<-c("2-8", "9-15", "16-25", "26-50","51-90", "91-150", "151-280", "281-400","401-500", "501-1200", "1201-3200","3201-10,000","10,001-35,000", "35,001-150,000", "150,001-500,000","500,001 and over")
aql<-c("0.10","0.15","0.25","0.40","0.65","1.0","1.5","2.5","4.0","6.5","10")
letters<-c("B","B","B","B","C",
"B","B","B","B","D",
"B","B","B","C","E",
"B","B","C","D","F",
"B","B","D","E","G",
"B","C","E","F","H",
"B","D","F","G","I",
"C","E","G","H","J",
"C","E","G","I","J",
"D","F","H","J","K",
"E","G","I","K","L",
"F","H","J","L","M",
"G","I","K","M","N",
"H","J","L","N","P",
"H","K","M","P","P",
"H","K","N","P","P")
SSCodeLetters<-matrix(letters,nrow=16, byrow=TRUE)
rownames(SSCodeLetters)<-LotSize
colnames(SSCodeLetters)<-InspLev
# get the code letter
Codeletter<-SSCodeLetters[dLOTS,dINSL]
while(stype=="unknown") {
#Create Matrix of M values for Normal and Tightened Sampling standard deviation unknown
temp<-array(c(0.077,0.077,0.077,0.077,0.077,0.186,0.228,0.250,0.253,0.243,0.225,0.218,0.202,0.204,
0.005,0.005,0.005,0.005,0.179,0.311,0.356,0.378,0.373,0.355,0.326,0.315,0.292,0.294,
0.087,0.087,0.087,0.087,0.349,0.491,0.531,0.551,0.534,0.503,0.461,0.444,0.412,0.414,
0.421,0.421,0.421,0.421,0.714,0.839,0.864,0.874,0.833,0.778,0.711,0.684,0.636,0.637,
0.041,0.041,0.041,1.050,1.270,1.330,1.330,1.320,1.240,1.160,1.060,1.020,0.946,0.945,
1.340,1.340,1.340,2.130,2.140,2.090,2.030,2.000,1.870,1.730,1.590,1.520,1.420,1.420,
1.490,1.490,3.330,3.540,3.270,3.060,2.930,2.860,2.660,2.470,2.270,2.180,2.050,2.040,
5.460,5.460,5.820,5.340,4.720,4.320,4.100,3.970,3.700,3.440,3.170,3.060,2.880,2.860,
7.590,10.88,9.800,8.400,7.260,6.550,6.180,5.980,5.580,5.210,4.830,4.670,4.420,4.390,
18.86,16.41,14.37,12.19,10.53,9.480,8.950,8.650,8.110,7.610,7.100,6.880,6.560,6.520,
26.94,22.84,20.19,17.34,15.17,13.74,13.01,12.60,11.89,11.23,10.58,10.29,9.860,9.800,
33.69,29.43,26.55,23.30,20.73,18.97,18.07,17.55,16.67,15.87,15.07,14.71,14.18,14.11),
dim=c(14,12))
ANSIASQZ19M <- temp
rownames(ANSIASQZ19M)<-codes
#Create Matrix of M values for Reduced Sampling standard deviation unknown
temp<-array(c(0.087,0.087,0.087,0.087,0.087,0.087,0.087,0.349,0.491,0.531,0.551,0.567,0.503,0.461,
0.421,0.421,0.421,0.421,0.421,0.421,0.421,0.714,0.839,0.864,0.874,0.885,0.778,0.711,
0.041,0.041,0.041,0.041,0.041,0.041,1.060,1.270,1.330,1.330,1.320,1.320,1.160,1.060,
1.340,1.340,1.340,1.340,1.340,1.340,2.130,2.140,2.090,2.030,2.000,1.980,1.730,1.590,
1.490,1.490,1.490,1.490,1.490,3.330,3.540,3.270,3.060,2.930,2.860,2.820,2.470,2.270,
5.460,5.460,5.460,5.460,5.460,5.820,5.340,4.720,4.320,4.100,3.970,3.910,3.440,3.170,
7.590,7.590,7.590,7.590,10.88,9.800,8.400,7.260,6.550,6.180,5.980,5.870,5.210,4.830,
18.86,18.86,18.86,18.86,16.41,14.37,12.19,10.53,9.480,8.950,8.650,8.480,7.610,7.100,
26.94,26.94,26.94,26.94,22.84,20.19,17.34,15.17,13.74,13.01,12.60,12.37,11.23,10.58,
33.69,33.69,33.69,33.69,29.43,26.55,23.30,20.73,18.97,18.07,17.55,17.25,15.87,15.07,
40.47,40.47,40.47,40.47,36.79,33.94,30.50,27.65,25.63,24.58,23.97,23.61,21.99,21.05),
dim=c(14,11))
ANSIASQZ19RM<-temp
rownames(ANSIASQZ19RM)<-codes
#Create Matrix of sample sizes for Normal and Tightened Sampling standard deviation unknown
ss<-array(c(10,10,10,10,10,15,20,25,35,50,75,100,150,200,
7, 7, 7, 7,10,15,20,25,35,50,75,100,150,200,
7, 7, 7, 7,10,15,20,25,35,50,75,100,150,200,
7, 7, 7, 7,10,15,20,25,35,50,75,100,150,200,
5, 5, 5, 7,10,15,20,25,35,50,75,100,150,200,
5, 5, 5, 7,10,15,20,25,35,50,75,100,150,200,
4, 4, 5, 7,10,15,20,25,35,50,75,100,150,200,
4, 4, 5, 7,10,15,20,25,35,50,75,100,150,200,
3, 4, 5, 7,10,15,20,25,35,50,75,100,150,200,
3, 4, 5, 7,10,15,20,25,35,50,75,100,150,200,
3, 4, 5, 7,10,15,20,25,35,50,75,100,150,200,
3, 4, 5, 7,10,15,20,25,35,50,75,100,150,200),
dim=c(14,12))
rownames(ss)<-codes
#Create Matrix of sample sizes for Reduced Sampling standard deviation unknown
ssR<-array(c(7, 7, 7, 7, 7, 7, 7,10,15,20,25,30,50,75,
7, 7, 7, 7, 7, 7, 7,10,15,20,25,30,50,75,
5, 5, 5, 5, 5, 5, 7,10,15,20,25,30,50,75,
5, 5, 5, 5, 5, 5, 7,10,15,20,25,30,50,75,
4, 4, 4, 4, 4, 5, 7,10,15,20,25,30,50,75,
4, 4, 4, 4, 4, 5, 7,10,15,20,25,30,50,75,
3, 3, 3, 3, 4, 5, 7,10,15,20,25,30,50,75,
3, 3, 3, 3, 4, 5, 7,10,15,20,25,30,50,75,
3, 3, 3, 3, 4, 5, 7,10,15,20,25,30,50,75,
3, 3, 3, 3, 4, 5, 7,10,15,20,25,30,50,75,
3, 3, 3, 3, 4, 5, 7,10,15,20,25,30,50,75),
dim=c(14,11))
rownames(ssR)<-codes
# AQL for Normal sampling
aqln<-array(c("0.10","0.15","0.25",
"0.40","0.65","1.0","1.5","2.5","4.0","6.5","10"),
dim=c(11,1))
rownames(aqln)<-c(2:12)
# AQL for Tightened sampling
aqlt<-array(c("0.10","0.15","0.25",
"0.40","0.65","1.0","1.5","2.5","4.0","6.5","10"),
dim=c(11,1))
#get M
if (type=="Normal")
{M<-ANSIASQZ19M[Codeletter,dAQL+1]/100 }
else if (type=="Reduced")
{M<-ANSIASQZ19RM[Codeletter,dAQL]/100}
else
{M<-ANSIASQZ19M[Codeletter,dAQL]/100}
format(M,digits=8)
options(scipen=999)
# Get Sample size
if (type=="Normal")
{S<-ss[Codeletter,dAQL+1] }
else if (type=="Reduced")
{S<-ssR[Codeletter,dAQL]}
else
{S<-ss[Codeletter,dAQL]
format(M,digits=8)
options(scipen=999)}
Codeletter
BBM<-qbeta(M,(S-2)/2,(S-2)/2)
k<-(-1)*((BBM/.5)-1)/(sqrt(S)/(S-1))
result<-c(S,k,M)
names(result)<-c("n","k","M")
return(result)
stype="done" }
while(stype=="known") {
#Create Matrix of M values for Normal and Tightened Sampling standard deviation known ****
temp<-array(c(0.114,0.114,0.114,0.114,0.114,0.114,0.161,0.230,0.226,0.217,0.211,0.207,0.193,0.196,
0.290,0.290,0.290,0.290,0.290,0.290,0.296,0.321,0.330,0.326,0.308,0.296,0.283,0.285,
0.369,0.369,0.369,0.369,0.369,0.399,0.445,0.478,0.469,0.461,0.438,0.423,0.397,0.402,
0.310,0.310,0.310,0.310,0.568,0.681,0.721,0.756,0.760,0.721,0.673,0.655,0.615,0.620,
0.510,0.510,0.510,0.510,0.959,1.090,1.140,1.140,1.140,1.080,1.000,0.980,0.921,0.920,
1.28,1.28,1.28,1.94,1.88,1.76,1.75,1.80,1.73,1.62,1.51,1.47,1.39,1.39,
2.73,2.73,2.23,2.76,2.58,2.57,2.62,2.59,2.49,2.35,2.19,2.12,2.00,2.00,
3.90,3.90,3.00,3.85,3.87,3.77,3.68,3.63,3.43,3.28,3.05,2.99,2.82,2.82,
6.11,6.11,7.56,6.99,6.05,5.83,5.68,5.60,5.34,4.98,4.68,4.55,4.35,4.34,
9.27,9.27,10.79,9.97,8.92,8.62,8.43,8.13,7.72,7.34,6.95,6.75,6.48,6.46,
17.74,17.74,15.60,15.21,13.89,12.88,12.35,12.04,11.57,10.93,10.40,10.17,9.76,9.73,
24.22,24.22,22.97,20.80,19.46,17.88,17.36,17.05,16.23,15.61,14.87,14.58,14.09,14.02),
dim=c(14,12))
ANSIASQZ19M <- temp
rownames(ANSIASQZ19M)<-codes
#Create Matrix of M values for Reduced Sampling standard deviation known **************
temp<-array(c(.369,.369,.369,.369,.369,.369,.369,.369,.399,.445,.478,.507,.461,.438,
.310,.310,.310,.310,.310,.310,.310,.568,.681,.721,.756,.791,.721,.673,
.510,.510,.510,.510,.510,.510,.510,.959,1.09,1.14,1.14,1.18,1.08,1.00,
1.28,1.28,1.28,1.28,1.28,1.28,1.94,1.88,1.76,1.75,1.80,1.79,1.62,1.51,
2.73,2.73,2.73,2.73,2.73,2.23,2.76,2.58,2.57,2.62,2.59,2.57,2.35,2.19,
3.90,3.90,3.90,3.90,3.90,3.00,3.85,3.87,3.77,3.68,3.63,3.61,3.28,3.05,
6.11,6.11,6.11,6.11,6.11,7.56,6.99,6.05,5.83,5.68,5.60,5.58,4.98,4.68,
9.27,9.27,9.27,9.27,9.27,10.79,9.97,8.92,8.62,8.43,8.13,8.13,7.34,6.95,
17.74,17.74,17.74,17.74,17.74,15.60,15.21,13.89,12.88,12.35,12.04,11.88,10.93,10.40,
24.22,24.22,24.22,24.22,24.22,22.97,20.80,19.46,17.88,17.36,17.05,16.71,15.61,14.87,
33.67,33.67,33.67,33.67,33.67,31.01,28.64,26.64,24.88,23.96,23.43,23.13,21.77,20.90),
dim=c(14,11))
ANSIASQZ19RM<-temp
rownames(ANSIASQZ19RM)<-codes
#Create Matrix of sample sizes for Normal and Tightened Sampling standard deviation known ******
ss<-array(c(3,3,3,3,3,3,4,6,7,11,15,20,30,40,
4,4,4,4,4,4,5,6,8,11,16,22,31,42,
3,3,3,3,3,4,5,6,9,12,17,23,34,45,
2,2,2,2,3,4,6,7,9,13,19,25,37,49,
2,2,2,2,3,5,6,8,10,14,21,27,40,54,
2,2,2,4,4,5,7,8,11,16,23,30,44,59,
2,2,2,3,4,6,7,9,12,17,25,33,49,65,
2,2,2,3,4,6,8,10,14,19,28,36,54,71,
2,2,3,4,5,7,9,11,15,22,32,42,61,81,
2,2,3,4,5,8,10,13,18,25,36,48,73,93,
3,3,3,5,6,9,12,15,20,29,42,55,82,109,
3,3,4,5,7,11,14,17,24,33,49,64,95,127),
dim=c(14,12))
rownames(ss)<-codes
#Create Matrix of sample sizes for Reduced Sampling standard deviation known ****
ssR<-array(c(3,3,3,3,3,3,3,3,4,5,6,7,12,17,
2,2,2,2,2,2,2,3,4,6,7,8,13,19,
2,2,2,2,2,2,2,3,5,6,8,9,14,21,
2,2,2,2,2,2,3,4,5,7,8,10,16,23,
2,2,2,2,2,2,3,4,6,7,9,11,17,25,
2,2,2,2,2,2,3,4,6,8,10,12,19,28,
2,2,2,2,2,3,4,5,7,9,11,13,22,32,
2,2,2,2,2,3,4,5,8,10,13,15,25,36,
3,3,3,3,3,3,5,6,9,12,15,18,29,42,
3,3,3,3,3,4,5,7,11,14,17,21,33,49,
4,4,4,4,4,4,6,8,12,16,20,24,38,56),
dim=c(14,11))
rownames(ssR)<-codes
# AQL for Normal sampling
aqln<-array(c("0.10","0.15","0.25",
"0.40","0.65","1.0","1.5","2.5","4.0","6.5","10"),
dim=c(11,1))
rownames(aqln)<-c(2:12)
# AQL for Tightened sampling
aqlt<-array(c("0.10","0.15","0.25",
"0.40","0.65","1.0","1.5","2.5","4.0","6.5","10"),
dim=c(11,1))
#get M
if (type=="Normal")
{M<-ANSIASQZ19M[Codeletter,dAQL+1]/100 }
else if (type=="Reduced")
{M<-ANSIASQZ19RM[Codeletter,dAQL]/100}
else
{M<-ANSIASQZ19M[Codeletter,dAQL]/100}
format(M,digits=8)
options(scipen=999)
# Get Sample size
if (type=="Normal")
{S<-ss[Codeletter,dAQL+1] }
else if (type=="Reduced")
{S<-ssR[Codeletter,dAQL]}
else
{S<-ss[Codeletter,dAQL]
format(M,digits=8)
options(scipen=999)}
Codeletter
k<-sqrt((S-1)/S)*qnorm(M,0,1,lower.tail=F)
result<-c(S,k,M)
names(result)<-c("n","k","M")
on.exit(options(op))
return(result)
stype="done" }
}
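# Minimal usage sketch (interactive; the menu answers below are illustrative):
# vp <- AAZ19(type = "Normal", stype = "unknown")
# # level II, lot size 3201-10,000, AQL 1.5 gives n = 75, k = 1.84097, M = 0.0317;
# # accept the lot when the estimated fraction nonconforming (see EPn) is <= vp["M"]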
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/R/AAZ19.R |
EPn<-function( sample=c(1), sided="one", stype="unknown", LSL=-1, USL=-1, sigma=-1 ,xbar=1E9, s=1E9, n=1E9 )
{
# Calculate the estimated proportion non-conforming
# using the standardized Beta CDF as shown on
# pages 45-48 Acceptance Sampling and SPC
if(sigma<0 && stype=="known") {stop("When stype='known', a known value of sigma must be supplied")}
ns<-length(sample)
case<-2
# First case is where sigma is unknown
if (stype=="unknown") {case<-1}
# Second case is where sigma is known
if (stype=="known") {case<-2}
while (case==1) {
if(ns==1 && stype=="unknown") {if(xbar>.9E9) stop("You must supply either a vector of sample values or xbar, s, and n")}
if(ns==1 && stype=="unknown") {if(s>.9E9) stop("You must supply either a vector of sample values or xbar, s, and n")}
if(ns==1 && stype=="unknown") {if(n>.9E9) stop("You must supply either a vector of sample values or xbar, s, and n")}
if(ns>1 && stype=="unknown") {xb<-mean(sample)}
if(ns>1 && stype=="unknown") {sdev<-sd(sample)}
if(ns>1 && stype=="unknown") {n<-ns}
if(ns==1 && stype=="unknown") {xb<-xbar}
if(ns==1 && stype=="unknown") {sdev<-s}
a<-(n/2)-1
b<-(n/2)-1
P1<-0
P2<-0
P<-0
# Calculate the proportion below the LSL if there is one
if(LSL>=0) {
Q1<-(abs(xb-LSL)/sdev)
x1<-max(0,.5-.5*Q1*(sqrt(n)/(n-1)))
P1<-pbeta(x1,a,b)
}
P<-P1
# Calculate the proportion above the USL if there is one
if(USL>=0) {
Q2<-(abs(USL-xb)/sdev)
x2<-max(0,.5-.5*Q2*(sqrt(n)/(n-1)))
P2<-pbeta(x2,a,b)
}
if(sided=="two") {P<-P+P2} else
{P<-max(P1,P2)}
format(P,digits=8)
case<-3
}
if(ns==1 && stype=="known") {if(xbar>.9E9) stop("You must supply either a vector of sample values or xbar and n")}
if(ns==1 && stype=="known") {if(n>.9E9) stop("You must supply either a vector of sample values or xbar and n")}
if(ns==1 && stype=="known"){xb<-xbar} else {xb<-mean(sample)}
if(ns==1 && stype=="known") {sdev<-s} else {sdev<-sd(sample)}
if(ns==1 && stype=="known") {n<-n} else {n<-ns}
while (case==2) {
# Second case is where sigma is known
a<-(n/2)-1
b<-(n/2)-1
P1<-0
P2<-0
P<-0
if(LSL>=0) {
QL<-((LSL-xb)/sigma)*sqrt(n/(n-1))
P1<-pnorm(QL,lower.tail=T)
P<-P1 }
# Calculate the proportion above the USL if there is one
if(USL>=0) {
ZU<-(USL-xb)/sigma
QU<-ZU*sqrt(n/(n-1))
P2<-pnorm(QU,lower.tail=F)
}
if(sided=="two")
{P<-P+P2}
else
{P<-max(P1,P2)}
format(P,digits=8)
case<-3
}
return(P)
}
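# Example sketch with made-up measurements (the data and LSL are illustrative only):
# x <- c(9.9, 10.1, 10.3, 9.8, 10.0, 10.2, 9.7)
# EPn(sample = x, sided = "one", stype = "unknown", LSL = 9.5)
# # equivalently, from summary statistics:
# # EPn(sided = "one", stype = "unknown", LSL = 9.5, xbar = mean(x), s = sd(x), n = 7)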
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/R/EPn.R |
MPn<-function(k=-1,n=-1,stype="unknown") {
if(k<0){stop("You must supply a value for k","\n")}
if(n<0){stop("You must supply a value for n","\n")}
if(stype=="known") {M<-1-pnorm(k*sqrt(n/(n-1)))}
else
{BM<-.5*(1 - k* sqrt(n)/(n-1));
M<-pbeta(BM,((n-2)/2),((n-2)/2)) }
return(M)
}
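# Example sketch: maximum fraction nonconforming for the n = 75, k = 1.84097
# plan returned by AAZ19() in the package vignette; the result is approximately
# 0.0317, matching M for that plan:
# MPn(k = 1.84097, n = 75, stype = "unknown")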
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/R/MPn.R |
OCASNZ4D<-function(plan,pd) {
# Here is where the function OCASN starts
x<-length(pd)
n<-plan[ ,1]
c<-plan[ ,2]
r<-plan[ ,3]
ns<-length(n)
# First Sample
pr1<-array(rep(0,(r[1]-c[1]+1)*x),dim=c((r[1]-c[1]+1),x))
nd1<-array(rep(0,(r[1]-c[1]+1)*x),dim=c((r[1]-c[1]+1),x))
nd1[1]<-c[1]
for (j in 2:(r[1]-c[1]+1)) {nd1[j]<-nd1[j-1]+1}
pr1[1,1:x]<-pbinom(c[1],n[1],pd)
for (i in 2:(r[1]-c[1]) ) {pr1[i,1:x]<-dbinom(c[1]+i-1,n[1],pd)
}
pr1[r[1]-c[1]+1,1:x]<-1-pbinom(r[1]-1,n[1],pd)
# Second Sample
pr2<-array(rep(0,(r[2]-c[2]+1)*x),dim=c((r[2]-c[2]+1),x))
nd2<-array(rep(0,(r[2]-c[2]+1)*x),dim=c((r[2]-c[2]+1),x))
nd2[1]<-c[2]
for (j in 2:(r[2]-c[2]+1)) {nd2[j]<-nd2[j-1]+1}
pr2[1]<-0
# Case where accept on 2
## Note: if r[2]>c[2]+1 (reduced sampling)
## then accept for any nc count less than or
## equal to r[2]-1, but return to normal inspection
## for the next lot
ce<-r[2]-1
for (j in 2:(r[1]-c[1])) {
# bin<-pbinom(c[2]-nd1[j],n[2],pd)
bin<-pbinom(ce-nd1[j],n[2],pd)
pr2[1,]<-pr2[1,]+pr1[j,]*bin
}
# Case where reject on 2
pr2[r[2]-c[2]+1]<-0
for (j in 2:(r[1]-c[1])) {
bin<-1-(pbinom(r[2]-nd1[j]-1,n[2],pd))
pr2[r[2]-c[2]+1,]<-pr2[r[2]-c[2]+1,]+pr1[j,]*bin
}
#prob accept OC
OC<-pr1[1, ]+pr2[1, ]
#Prob reject
Rej<-pr1[r[1]-c[1]+1, ]+pr2[r[2]-c[2]+1, ]
# Prob of Decision
P1<-pr1[1, ]+pr1[r[1]-c[1]+1, ]
P2<-pr2[1, ]+pr2[r[2]-c[2]+1, ]
ASN<-P1*n[1]+P2*(n[1]+n[2])
data.frame(pd,OC,ASN)
}
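# Example sketch using the Z1.4 double sampling plan shown in the package
# vignette (normal, level II, lot size 3201-10,000, AQL 1.5):
# pland <- data.frame(n = c(125, 125), c = c(3, 8), r = c(7, 9))
# OCASNZ4D(pland, pd = seq(0.005, 0.05, by = 0.005))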
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/R/OCASNZ4D.R |
OCASNZ4M<-function(plan,pd) {
# Here is where the function OCASN starts
x<-length(pd)
n<-plan[ ,1]
c<-plan[ ,2]
r<-plan[ ,3]
ns<-length(n)
# First Sample
if (c[1]<0) {c1<-0} else {c1<-c[1]}
pr1<-array(rep(0,(r[1]-c1+1)*x),dim=c((r[1]-c1+1),x))
nd1<-array(rep(0,(r[1]-c1+1)*x),dim=c((r[1]-c1+1),x))
if (c[1]<0) {nd1[1]<-c1 } else {nd1[1]<-c[1]}
for (j in 2:(r[1]-c1+1)) {nd1[j]<-nd1[j-1]+1}
for (i in 2:(r[1]-c1)) {pr1[i,1:x]<-dbinom(c1+i-1,n[1],pd)}
pr1[r[1]-c1+1,1:x]<-1-pbinom(r[1]-1,n[1],pd)
pr1[1,1:x]<-dbinom(c1,n[1],pd)
# Second Sample
if (c[2]<0) {c2<-0} else {c2<-c[2]}
pr2<-array(rep(0,(r[2]-c2+1)*x),dim=c((r[2]-c2+1),x))
nd2<-array(rep(0,(r[2]-c2+1)*x),dim=c((r[2]-c2+1),x))
nd2[1]<-c2
for (j in 2:(r[2]-c2+1)) {nd2[j]<-nd2[j-1]+1}
pr2[1]<-0
# Case where accept on 2
for (j in 2:(r[1]-c1)) {
bin<-pbinom(c2-nd1[j],n[2],pd)
pr2[1,]<-pr2[1,]+pr1[j,]*bin
}
if (c[1]<0) pr2[1,]<-pr2[1,]+pr1[1, ]*pbinom(c2-nd1[1],n[2],pd)
# Case where no decision on 2
for (i in 2:(r[2]-c2)) {
pr2[i]<-0
for (j in 2:(r[1]-c[1])) {
if (c[1]<0) {jnew<-j-1} else {jnew<-j}
bin<-dbinom(nd2[i]-nd1[jnew],n[2],pd)
pr2[i,]<-pr2[i,]+pr1[jnew,]*bin
}
}
# Case where reject on 2
pr2[r[2]-c2+1]<-0
for (j in 2:(r[1]-c[1])) {
if (c[1]<0) {jnew<-j-1} else {jnew<-j}
bin<-1-(pbinom(r[2]-nd1[jnew]-1,n[2],pd))
pr2[r[2]-c2+1,]<-pr2[r[2]-c2+1,]+pr1[jnew,]*bin
}
# Third Sample
v<-r[3]-c[3]+1
pr3<-array(rep(0,(r[3]-c[3]+1)*x),dim=c((r[3]-c[3]+1),x))
nd3<-array(rep(0,(r[3]-c[3]+1)*x),dim=c((r[3]-c[3]+1),x))
nd3[1]<-c[3]
for (j in 2:(r[3]-c[3]+1)) {nd3[j]<-nd3[j-1]+1}
# Case where accept on 3
for (j in 2:(r[2]-c2)) {
bin<-pbinom(c[3]-nd2[j],n[3],pd)
pr3[1,]<-pr3[1,]+pr2[j,]*bin
}
if (c[2]<0) pr3[1,]<-pr3[1,]+pr2[1, ]*pbinom(c[3]-nd2[1],n[2],pd)
# Case where no decision on 3
for (i in 2:(r[3]-c[3])) {
pr3[i]<-0
for (j in 2:(r[2]-c[2])) {
if (c[2]<0) {jnew<-j-1} else {jnew<-j}
bin<-dbinom(nd3[i]-nd2[jnew],n[3],pd)
pr3[i,]<-pr3[i,]+pr2[jnew,]*bin
}
}
# Case where reject on 3 ####
pr3[r[3]-c[3]+1]<-0
for (j in 2:(r[2]-c[2])) {
if (c[2]<0) {jnew<-j-1} else {jnew<-j}
bin<-1-(pbinom(r[3]-nd2[jnew]-1,n[3],pd))
pr3[r[3]-c[3]+1,]<-pr3[r[3]-c[3]+1,]+pr2[jnew,]*bin
}
# Forth sample
pr4<-array(rep(0,(r[4]-c[4]+1)*x),dim=c((r[4]-c[4]+1),x))
nd4<-array(rep(0,(r[4]-c[4]+1)*x),dim=c((r[4]-c[4]+1),x))
nd4[1]<-c[4]
for (j in 2:(r[4]-c[4]+1)) {nd4[j]<-nd4[j-1]+1}
# Case where accept on 4
for (j in 2:(r[3]-c[3])) {
bin<-pbinom(c[4]-nd3[j],n[4],pd) #### This loop works
pr4[1,]<-pr4[1,]+pr3[j,]*bin
}
# Case where no decision on 4
for (i in 2:(r[4]-c[4])) {
pr4[i]<-0
for (j in 2:(r[3]-c[3])) {
bin<-dbinom(nd4[i]-nd3[j],n[4],pd)
pr4[i,]<-pr4[i,]+pr3[j,]*bin
}
}
# Case where reject on 4
pr4[r[4]-c[4]+1]<-0
for (j in 2:(r[3]-c[3])) {
bin<-1-(pbinom(r[4]-nd3[j]-1,n[4],pd))
pr4[r[4]-c[4]+1,]<-pr4[r[4]-c[4]+1,]+pr3[j,]*bin
}
# Fifth sample
pr5<-array(rep(0,(r[5]-c[5]+1)*x),dim=c((r[5]-c[5]+1),x))
nd5<-array(rep(0,(r[5]-c[5]+1)*x),dim=c((r[5]-c[5]+1),x))
nd5[1]<-c[5]
for (j in 2:(r[5]-c[5]+1)) {nd5[j]<-nd5[j-1]+1}
# Case where accept on 5
for (j in 2:(r[4]-c[4])) {
bin<-pbinom(c[5]-nd4[j],n[5],pd)
pr5[1,]<-pr5[1,]+pr4[j,]*bin
}
# Case where no decision on 5
for (i in 2:(r[5]-c[5])) {
pr5[i]<-0
for (j in 2:(r[4]-c[4])) {
bin<-dbinom(nd5[i]-nd4[j],n[5],pd)
pr5[i,]<-pr5[i,]+pr4[j,]*bin
}
}
# Case where reject on 5
pr5[r[5]-c[5]+1]<-0
for (j in 2:(r[4]-c[4])) {
bin<-1-(pbinom(r[5]-nd4[j]-1,n[5],pd))
pr5[r[5]-c[5]+1,]<-pr5[r[5]-c[5]+1,]+pr4[j,]*bin
}
# Sixth sample
pr6<-array(rep(0,(r[6]-c[6]+1)*x),dim=c((r[6]-c[6]+1),x))
nd6<-array(rep(0,(r[6]-c[6]+1)*x),dim=c((r[6]-c[6]+1),x))
nd6[1]<-c[6]
for (j in 2:(r[6]-c[6]+1)) {nd6[j]<-nd6[j-1]+1}
# Case where accept on 6
for (j in 2:(r[5]-c[5])) {
bin<-pbinom(c[6]-nd5[j],n[6],pd)
pr6[1,]<-pr6[1,]+pr5[j,]*bin
}
# Case where no decision on 6
for (i in 2:(r[6]-c[6])) {
pr6[i]<-0
for (j in 2:(r[5]-c[5])) {
bin<-dbinom(nd6[i]-nd5[j],n[6],pd)
pr6[i,]<-pr6[i,]+pr5[j,]*bin
}
}
# Case where reject on 6
pr6[r[6]-c[6]+1]<-0
for (j in 2:(r[5]-c[5])) {
bin<-1-(pbinom(r[6]-nd5[j]-1,n[6],pd))
pr6[r[6]-c[6]+1,]<-pr6[r[6]-c[6]+1,]+pr5[j,]*bin
}
# Seventh sample
pr7<-array(rep(0,(r[7]-c[7]+1)*x),dim=c((r[7]-c[7]+1),x))
nd7<-array(rep(0,(r[7]-c[7]+1)*x),dim=c((r[7]-c[7]+1),x))
nd7[1]<-c[7]
for (j in 2:(r[7]-c[7]+1)) {nd7[j]<-nd7[j-1]+1}
# Case where accept on 7
## Note if r[7]>C[7]+1 using reduced samplng
## then accept for any nc count less than or
## equal to r[7]-1, but return to normal inspection
## for the next lot
ce<-r[7]-1
for (j in 2:(r[6]-c[6])) {
# bin<-pbinom(c[7]-nd6[j],n[7],pd)
bin<-pbinom(ce-nd6[j],n[7],pd)
pr7[1,]<-pr7[1,]+pr6[j,]*bin
}
# Case where reject on 7
pr7[r[7]-c[7]+1]<-0
for (j in 2:(r[6]-c[6])) {
bin<-1-(pbinom(r[7]-nd6[j]-1,n[7],pd))
pr7[r[7]-c[7]+1,]<-pr7[r[7]-c[7]+1,]+pr6[j,]*bin
}
#prob accept OC
OC<-pr1[1, ]+pr2[1, ]+pr3[1, ]+pr4[1, ]+pr5[1, ]+pr6[1, ]+pr7[1, ]
if (c[1]<0) OC<-OC-pr1[1, ]
if (c[2]<0) OC<-OC-pr2[1, ]
#Prob reject
Rej<-pr1[r[1]-c1+1, ]+pr2[r[2]-c2+1, ]+pr3[r[3]-c[3]+1, ]+pr4[r[4]-c[4]+1, ]+pr5[r[5]-c[5]+1, ]+pr6[r[6]-c[6]+1, ]+pr7[r[7]-c[7]+1, ]
# Prob of Decision
P1<-pr1[1, ]+pr1[r[1]-c1+1, ]
if (c[1]<0) P1<-P1-pr1[1, ]
P2<-pr2[1, ]+pr2[r[2]-c2+1, ]
if (c[2]<0) P2<-P2-pr2[1, ]
P3<-pr3[1, ]+pr3[r[3]-c[3]+1, ]
P4<-pr4[1, ]+pr4[r[4]-c[4]+1, ]
P5<-pr5[1, ]+pr5[r[5]-c[5]+1, ]
P6<-pr6[1, ]+pr6[r[6]-c[6]+1, ]
P7<-pr7[1, ]+pr7[r[7]-c[7]+1, ]
ASN<-P1*n[1]+P2*(n[1]+n[2])+P3*(n[1]+n[2]+n[3])+P4*(n[1]+n[2]+n[3]+n[4])+P5*(n[1]+n[2]+n[3]+n[4]+n[5])+P6*(n[1]+n[2]+n[3]+n[4]+n[5]+n[6])+P7*(n[1]+n[2]+n[3]+n[4]+n[5]+n[6]+n[7])
data.frame(pd,OC,ASN)
}
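# Example sketch using the Z1.4 multiple sampling plan shown in the package
# vignette (normal, level II, lot size 3201-10,000, AQL 1.5):
# planm <- data.frame(n = rep(50, 7), c = c(0, 1, 3, 5, 7, 10, 13),
#                     r = c(4, 6, 8, 10, 11, 12, 14))
# OCASNZ4M(planm, pd = seq(0.005, 0.05, by = 0.005))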
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/R/OCASNZ4M.R |
OCASNZ4S<-function(plan,pd) {
# Here is where the function OCASN starts
x<-length(pd)
n<-plan[ ,1]
c<-plan[ ,2]
r<-plan[ ,3]
ns<-length(n)
# First Sample
pr1<-array(rep(0,(r[1]-c[1]+1)*x),dim=c((r[1]-c[1]+1),x))
nd1<-array(rep(0,(r[1]-c[1]+1)*x),dim=c((r[1]-c[1]+1),x))
nd1[1]<-c[1]
for (j in 2:(r[1]-c[1]+1)) {nd1[j]<-nd1[j-1]+1}
if(c[1]<0) pr1[1,1:x]<-rep(0,x) else pr1[1,1:x]<-pbinom(c[1],n[1],pd)
for (i in 2:(r[1]-c[1])) {pr1[i,1:x]<-dbinom(c[1]+i-1,n[1],pd)}
pr1[r[1]-c[1]+1,1:x]<-1-pbinom(r[1]-1,n[1],pd)
#prob accept OC
OC<-pbinom(c[1],n[1],pd)
ASN<-rep(n[1], x)
data.frame(pd,OC,ASN)
}
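# Example sketch using the Z1.4 single sampling plan shown in the package
# vignette (normal, level II, lot size 3201-10,000, AQL 1.5):
# plans <- data.frame(n = 200, c = 7, r = 8)
# OCASNZ4S(plans, pd = seq(0.005, 0.05, by = 0.005))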
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/R/OCASNZ4S.R |
---
title: "AQL Based Acceptance Sampling Schemes"
output:
pdf_document: default
fig_width: 10
fig_height: 4
vignette: >
\usepackage[utf8]{inputenc}
---
<!-- rmarkdown::html_vignette
%\VignetteIndexEntry{AQL Based Acceptance Sampling Schemes}
%\VignetteEngine{knitr::rmarkdown}-->
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
The R package $\verb!AQLSchemes!$ has functions for retrieving individual sampling plans from the MIL-STD-105E - ANSI/ASQ Z1.4 Standard and the MIL-STD-414 - ANSI/ASQ Z1.9 Standard. The ISO 2859-1 single and double sampling plans are also equivalent to the MIL-STD-105E, and the ISO 3951-1 plans are equivalent to the MIL-STD-414. The functions are interactive and query the user for the inspection level, lot size, and AQL. The $\verb!AASingle()!$, $\verb!AADouble()!$, and $\verb!AAMultiple()!$ functions recall sampling plans from the ANSI/ASQ Z1.4 Standard and produce a data frame with columns for the sample size (n), the acceptance number (c), and the rejection number (r). The $\verb!AAZ19()!$ function recalls plans from the ANSI/ASQ Z1.9 Standard and prints the sample size (n), the acceptability constant (k), and the maximum proportion nonconforming (M). This vignette gives examples of the function calls and interactive dialogs used by $\verb!AASingle()!$, $\verb!AADouble()!$, $\verb!AAMultiple()!$, and $\verb!AAZ19()!$ to retrieve the sampling plans. The central table entries in ANSI/ASQ Z1.4 - ISO 2859-1 are the same as in the MIL-STD-105E, and the central table entries for ANSI/ASQ Z1.9 - ISO 3951-1 are the same as in MIL-STD-414. The MIL-STD tables are in the public domain.
This package was originally written for Stat 462 (Quality Control) [(course description)](https://jlawson.byu.edu/docs/files/CQPAedgeforGrads.pdf), taught in the Statistics Department at Brigham Young University.
One of the objectives of Stat 462 is to prepare students to pass the ASQ Certified Quality Process Analyst exam. The book *The Certified Quality Process Analyst Handbook* by Christensen et al. (2013) prepares students for the exam, which is given by the American Society for Quality through Prometric. That handbook shows the mechanics of using the ANSI/ASQ Z1.4 and Z1.9 tables of acceptance sampling plans; however, it only contains the plans for normal inspection.
In industry, the ANSI/ASQ sampling plans and their associated OC curves are retrieved with the help of computers. Commercial software such as StatGraphics$^{TM}$ and commercial online calculators such as the SQC Online calculator [https://www.sqconline.com/](https://www.sqconline.com/) contain functions that will retrieve the normal, tightened, or reduced single and double sampling plans from the ANSI/ASQ Z1.4 and Z1.9 Standards.
The open source software R and the $\verb!AQLSchemes!$ package make computer retrieval of these plans available at no cost for educational purposes. The $\verb!AQLSchemes!$ package, together with the $\verb!AcceptanceSampling!$ package and basic R programming and graphics functions, is very useful for those seeking to understand the benefits of ANSI/ASQ Z1.4 and Z1.9.
The ANSI/ASQ Z1.4 Standard is the American national standard for AQL-based Attribute Acceptance Sampling plans. They are derived from MIL-STD-105E, which is no longer supported by the U.S. Department of Defense. ANSI/ASQ Z1.4 is a scheme of sampling plans that includes normal, tightened, and reduced plans and associated switching rules for single, double, and multiple sampling. The switching rules provide maximum protection for consumer and supplier for a given sample size, and they must be used if the standard is to be properly applied.
The civilian standard for AQL-based variables sampling plans is ANSI/ASQ Z1.9, which is similar to the MIL-STD-414. It is also a scheme of sampling plans including normal, tightened, and reduced sampling. ANSI/ASQ Z1.9 matches the OC performance of the attribute plans in ANSI/ASQ Z1.4. Therefore, it is possible to switch back and forth between an attribute plan in ANSI/ASQ Z1.4 and a variables plan from ANSI/ASQ Z1.9 and keep essentially the same operating characteristic (for schemes with the same lot size, inspection level, and AQL).
The ANSI/ASQ Z1.4 and Z1.9 standards are recommended for in-house use or for U.S. domestic trade partners. The ASQ Quality Engineer and Quality Process Analyst certifications require knowledge of ANSI/ASQ Z1.4 and Z1.9. However, the training materials and certification exams focus on the mechanics of looking up a specific plan in the tables of the published standard based on the lot size, inspection level, and AQL, and on the mechanics of manually calculating specific points on the OC curve or ASN curve for a particular plan. These exercises do not help those preparing for the exam to understand the overall benefits of the ANSI/ASQ standards over custom-derived sampling plans that can be obtained from commercial software products such as Minitab\textsuperscript{\textcopyright} or StatGraphics\textsuperscript{\textcopyright}, or open source software such as the $\verb!AcceptanceSampling!$ package in R.
The R package $\verb!AQLSchemes!$ automates the process of retrieving particular sampling plans, and the programming features in R and the availability of the complementary $\verb!AcceptanceSampling!$ package make it easy to compute and graph OC, ASN, and AOQ curves for comparing more than one plan on the same graph. Comparing several of these curves on the same graph can be easily done using the graphical capabilities of R. These graphs clarify the advantages of an ANSI/ASQ AQL scheme over a custom-developed sampling plan. For example, the near OC equivalence of the ANSI/ASQ Z1.4 plans and the ANSI/ASQ Z1.9 plan can be visualized by plotting the OC curves on the same graph as shown in Figure 1.
The ANSI/ASQ Z1.4 single sampling, normal, level II inspection plan for lots of size 3201-10,000 with an AQL of 1.5\% is n=200 with an acceptance number of c=7. The R code below illustrates how this plan can be retrieved using the $\verb!AASingle()!$ function. This code shows the function call and the interactive dialog used to produce the result. The output of the function call is a data frame with one row and three columns, which is stored in the object $\verb!plans!$ and then retrieved at the end of the dialog.
```{r eval=FALSE}
> library(AQLSchemes)
> plans<-AASingle('Normal')
[1] "MIL-STD-105E ANSI/ASQ Z1.4"
What is the Inspection Level?
1: S-1
2: S-2
3: S-3
4: S-4
5: I
6: II
7: III
Selection: 6
What is the Lot Size?
1: 2-8 2: 9-15 3: 16-25
4: 26-50 5: 51-90 6: 91-150
7: 151-280 8: 281-500 9: 501-1200
10: 1201-3200 11: 3201-10,000 12: 10,001-35,000
13: 35,001-150,000 14: 150,001-500,000 15: 500,001 and over
Selection: 11
What is the AQL in percent nonconforming per 100 items?
1: 0.010 2: 0.015 3: 0.025 4: 0.040 5: 0.065 6: 0.10
7: 0.15 8: 0.25 9: 0.40 10: 0.65 11: 1.0 12: 1.5
13: 2.5 14: 4.0 15: 6.5 16: 10 17: 15 18: 25
19: 40 20: 65 21: 100 22: 150 23: 250 24: 400
25: 650 26: 1000
Selection: 12
> plans
n c r
1 200 7 8
```
The ANSI/ASQ Z1.4 double sampling plan for the same situation can be recalled with the $\verb!AADouble()!$ function, as shown in the function call and interactive dialog below. Again, the output of this function is a data frame, with two rows and three columns, that is stored in the object $\verb!pland!$ and then retrieved at the end of the dialog.
```{r eval=FALSE}
> library(AQLSchemes)
> pland<-AADouble('Normal')
[1] "MIL-STD-105E ANSI/ASQ Z1.4"
What is the Inspection Level?
1: S-1
2: S-2
3: S-3
4: S-4
5: I
6: II
7: III
Selection: 6
What is the Lot Size?
1: 2-8 2: 9-15 3: 16-25
4: 26-50 5: 51-90 6: 91-150
7: 151-280 8: 281-500 9: 501-1200
10: 1201-3200 11: 3201-10,000 12: 10,001-35,000
13: 35,001-150,000 14: 150,001-500,000 15: 500,001 and over
Selection: 11
What is the AQL in percent nonconforming per 100 items?
1: 0.010 2: 0.015 3: 0.025 4: 0.040 5: 0.065 6: 0.10
7: 0.15 8: 0.25 9: 0.40 10: 0.65 11: 1.0 12: 1.5
13: 2.5 14: 4.0 15: 6.5 16: 10 17: 15 18: 25
19: 40 20: 65 21: 100 22: 150 23: 250 24: 400
25: 650 26: 1000
Selection: 12
> pland
n c r
first 125 3 7
second 125 8 9
```
The ANSI/ASQ Z1.4 multiple sampling plan for the same situation is recalled using the $\verb!AAMultiple()!$ function, as shown in the function call and interactive dialog below. The resulting data frame with seven rows and three columns is stored in $\verb!planm!$ and retrieved at the end of the dialog.
```{r eval=FALSE}
> library(AQLSchemes)
> planm<-AAMultiple('Normal')
[1] "MIL-STD-105E ANSI/ASQ Z1.4"
What is the Inspection Level?
1: S-1
2: S-2
3: S-3
4: S-4
5: I
6: II
7: III
Selection: 6
What is the Lot Size?
1: 2-8 2: 9-15 3: 16-25
4: 26-50 5: 51-90 6: 91-150
7: 151-280 8: 281-500 9: 501-1200
10: 1201-3200 11: 3201-10,000 12: 10,001-35,000
13: 35,001-150,000 14: 150,001-500,000 15: 500,001 and over
Selection: 11
What is the AQL in percent nonconforming per 100 items?
1: 0.010 2: 0.015 3: 0.025 4: 0.040 5: 0.065 6: 0.10
7: 0.15 8: 0.25 9: 0.40 10: 0.65 11: 1.0 12: 1.5
13: 2.5 14: 4.0 15: 6.5 16: 10 17: 15 18: 25
19: 40 20: 65 21: 100 22: 150 23: 250 24: 400
25: 650 26: 1000
Selection: 12
> planm
n c r
first 50 0 4
second 50 1 6
third 50 3 8
fourth 50 5 10
fifth 50 7 11
sixth 50 10 12
seventh 50 13 14
```
Finally, the ANSI/ASQ Z1.9 variables sampling plan for this situation
can be retrieved with the $\verb!AAZ19()!$ function as shown in the function call below.
```{r eval=FALSE}
> library(AQLSchemes)
> AAZ19('Normal')
[1] "MIL-STD-414 ANSI/ASQ Z1.9"
What is the Inspection Level?
1: S-3
2: S-4
3: I
4: II
5: III
Selection: 4
What is the Lot Size?
1: 2-8 2: 9-15 3: 16-25
4: 26-50 5: 51-90 6: 91-150
7: 151-280 8: 281-400 9: 401-500
10: 501-1200 11: 1201-3200 12: 3201-10,000
13: 10,001-35,000 14: 35,001-150,000 15: 150,001-500,000
16: 500,001 and over
Selection: 12
What is the AQL in percent nonconforming per 100 items?
1: 0.10 2: 0.15 3: 0.25 4: 0.40 5: 0.65 6: 1.0 7: 1.5
8: 2.5 9: 4.0 10: 6.5 11: 10
Selection: 7
Sample size n = 75
Acceptability constant k = 1.84097
Maximum proportion non-conforming M = 0.0317
```
In Figure 1, the OC and ASN (average sample number) curves for the ANSI/ASQ single sampling plan are labeled S, the double sampling plan D, the multiple sampling plan M, and the ANSI/ASQ variables sampling plan V. Points on the OC and ASN curves can be obtained for each of these plans using the $\verb!AQLSchemes::OCASNZ4S()!$, $\verb!AQLSchemes::OCASNZ4D()!$, $\verb!AQLSchemes::OCASNZ4M()!$, and $\verb!AcceptanceSampling::OCvar()!$ functions. The OC and ASN curves for each plan are plotted on the same graph using the R $\verb!plot()!$ function as shown in the block of code below. In this block of code, data frames matching those produced by the $\verb!AASingle()!$, $\verb!AADouble()!$, and $\verb!AAMultiple()!$ functions were created with assignment statements to avoid repeating the interactive dialogs shown above.
```{r fig1, fig.height = 4, fig.width = 7}
library(AQLSchemes)
library(AcceptanceSampling)
par(mfcol=c(1,2))
# Use the AQLSchemes and AcceptanceSampling packages to get points on the OC
# and ASN curves for the attribute single (S), double (D), and multiple (M)
# sampling plans and the variables sampling plan, and store these points in
# OCASNS$OC, OCASNS$ASN, OCASND$OC, OCASND$ASN, OCASNM$OC, OCASNM$ASN, OCV and ASNV
# The next statement creates a data frame like the output of the AASingle() function
plans<-data.frame(n=c(200),c=c(7),r=c(8))
# The next statement creates the coordinates of the x-axis for OC and ASN curves
Pnc<-seq(0,.08,.005)
# The AQLSchemes function OCASNZ4S() creates the y-axis coordinates for
# the OC and ASN curves in a data frame stored in OCASNS
OCASNS<-OCASNZ4S(plans,Pnc)
# Next statement creates a data frame like the output of the AADouble() function
pland<-data.frame(n=c(125,125),c=c(3,8), r=c(7,9))
# The AQLSchemes function OCASNZ4D() creates the y-axis coordinates for
# the OC and ASN curves in a data frame stored in OCASND
OCASND<-OCASNZ4D(pland,Pnc)
# Next statement creates a data frame like the output of the AAMultiple() function
planm<-data.frame(n=c(50,50,50,50,50,50,50),c=c(0,1,3,5,7,10,13),r=c(4,6,8,10,11,12,14))
# The AQLSchemes function OCASNZ4M() creates the y-axis coordinates for
# the OC and ASN curves in a data frame stored in OCASNM
OCASNM<-OCASNZ4M(planm,Pnc)
# Next statement creates the y-axis coordinates for the variables sampling plan
# using the OCvar() function in the AcceptanceSampling package
V<-OCvar(n=75,k=1.84097,s.type="unknown",pd=Pnc)
OCV<-V@paccept
# Next statement creates the constant ASN (sample size) for the variables plan
ASNV<-rep(75,length(OCASNS$pd))
# Plot all four OC curves on the same graph
par(mfcol=c(1,2))
plot(OCASNS$pd,OCASNS$OC,type='l',xlab='Proportion Nonconforming',ylab="OC Curves",
main="Figure 1",lty=1)
lines(OCASND$pd,OCASND$OC,type='l',lty=2,col=2)
lines(OCASNM$pd,OCASNM$OC,type='l',lty=4,col=4)
lines(OCASNM$pd,OCV,type='l',lty=2,col=3)
legend(.04,.95,c("S","D","M","V"),lty=c(1,2,4,2),col=c(1,2,4,3))
# Plot all four ASN curves on the same graph
plot(OCASNS$pd,OCASNS$ASN,type='l',lty=1,xlab='Proportion Nonconforming',
ylab="ASN Curves",ylim=c(50,200))
lines(OCASND$pd,OCASND$ASN,type='l',lty=2,col=2)
lines(OCASNM$pd,OCASNM$ASN,type='l',lty=4,col=4)
lines(OCASNM$pd,ASNV,type='l',lty=2,col=3)
par(mfcol=c(1,1))
```
The average sample number (ASN) curves for the double and multiple sampling plans are plotted on the right side of Figure 1, where they are compared to the constant sample sizes required by the ANSI/ASQ Z1.4 single sampling plan and the ANSI/ASQ Z1.9 variables sampling plan. The ASN curve for the double sampling plan is calculated by the equation
\begin{equation*}
ASN(p)=n_1+n_2 \times P(p)
\end{equation*}
where $P(p)$ is the probability that no decision is reached on the first sample. The ASN curve for the multiple sampling plan can be calculated similarly, but the calculation is more involved since there are more steps. The $\verb!AQLSchemes::OCASNZ4D()!$ and $\verb!AQLSchemes::OCASNZ4M()!$ functions calculate both the OC and ASN curves for the double and multiple plans.
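As a quick numerical check of this equation, the sketch below (with an assumed value of p = 0.02) computes the double-plan ASN directly with the base R $\verb!pbinom()!$ function; a second sample is drawn only when the first-sample count falls strictly between the acceptance number c=3 and the rejection number r=7 of the plan retrieved above.
```{r eval=FALSE}
# probability that the first sample of 125 is inconclusive
# (4, 5, or 6 nonconforming items found)
p <- 0.02
P.nd <- pbinom(6, size = 125, prob = p) - pbinom(3, size = 125, prob = p)
125 + 125 * P.nd # ASN of the double plan at p = 0.02
```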
Considering both graphs in Figure 1 together, it can be seen that the OC curves are roughly equivalent. They all pass through the same producer risk point (AQL=0.013, 1-$\alpha$=.975) and the same consumer risk point (LTPD=0.064, $\beta$=.054) as shown in the figure. This clarifies the point made in the ANSI/ASQ Z1.4 document (p. 6), which says "The curves for single sampling...double sampling and multiple sampling are matched as close as possible". While the curve for the variables sampling plan provides the same AQL protection for the producer, it decreases more rapidly than the attribute plans for proportions nonconforming greater than the AQL. Therefore, it provides slightly more protection to the customer for intermediate levels of lot quality.
Although the four plans have very similar OC curves, the number of samples required decreases as you move from the single $\rightarrow$ double $\rightarrow$ multiple $\rightarrow$ variables sampling plan. The reduced number of samples must be balanced against the extra administrative effort required for double and multiple sampling plans and the precision required to obtain numerical measurements for a variables sampling plan. However, the user can switch between the four plans at any time and keep essentially the same producer and consumer protection.
As another example, consider an ANSI/ASQ sampling scheme compared to a custom-made sampling plan. Using the $\verb!AASingle()!$ function, it can be shown that the ANSI/ASQ Z1.4 normal, level II inspection plan for lots of size 3201-10,000 with an AQL of 1.5\% is n=200 with an acceptance number of c=7. The same function shows that the tightened, level II plan for the same lot size and AQL is n=200 with acceptance number c=5. The probability of acceptance for these two plans can be easily calculated using the $\verb!pbinom()!$ function in R.
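As a sketch, the two acceptance probabilities at the 1.5\% AQL can be computed directly:
```{r eval=FALSE}
pbinom(7, size = 200, prob = 0.015) # normal plan (n=200, c=7)
pbinom(5, size = 200, prob = 0.015) # tightened plan (n=200, c=5)
```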
Ignoring the reduced sampling plan (the use of which requires approval by the responsible authority) and considering the switching rules for the normal-tightened scheme, Stephens and Larson (1967) showed that the scheme can be viewed as a two-state Markov chain. They showed that the probability of acceptance under this scheme is given by:
\begin{equation*}
Pr(accept)=\frac{aP_N+bP_T}{a+b}
\end{equation*}
where $P_N$ is the probability of accepting under normal inspection, $P_T$ is the probability of accepting under tightened inspection, and
\begin{equation*} \begin{split}
a & = \frac{2-P_N^4}{(1-P_N)(1-P_N^4)} \\
\\
b & = \frac{1-P_T^5}{(1-P_T)P_T^5}.
\end{split}
\end{equation*}
The R code below creates and plots the OC curves for the normal plan, the tightened plan, and the scheme on the same graph.
```{r fig2, fig.height = 4.5, fig.width = 4.5}
# Comparison of Normal, Tightened, and Scheme OC Curves
par(mfcol=c(1,1))
library(AcceptanceSampling)
pd<-seq(0,.1,.001)
# Computes points on the OC curves for the Normal and Tightened Plan
# using the R function pbinom
PN=pbinom(7,size=200,prob=seq(0,.1,.001),lower.tail=TRUE)
PT=pbinom(5,size=200,prob=seq(0,.1,.001),lower.tail=TRUE)
# Computes points on the OC curve for the Normal-Tightened Scheme using
# the Markov chain steady state formulas from Stephens and Larson(1967)
# the tiny offset 1.0000000000001 avoids division by zero when PN or PT
# equals 1 (i.e. at p = 0)
a=(2-PN^4)/((1.0000000000001-PN)*(1.0000000000001-PN^4))
b=(1.0000000000001-PT^5)/((1.0000000000001-PT)*(PT^5))
PS<-(a*PN+b*PT)/(a+b)
plot(pd,PS,type='l',lty=1,xlim=c(0,.1),xlab='Probability of nonconforming',
ylab="OC",main="Figure 2")
lines(pd,PN,type='l',lty=2,col=2)
lines(pd,PT,type='l',lty=2,col=3)
lines(c(.04,.05),c(.95,.95),lty=1, col=1)
lines(c(.04,.05),c(.89,.89),lty=2, col=2)
lines(c(.04,.05),c(.83,.83),lty=2, col=3)
text(.07,.95,'Scheme', col=1)
text(.07,.89,'Normal', col=2)
text(.07,.83,'Tightened',col=3)
```
The comparison of the OC curves is shown in Figure 2. There it can be seen that the scheme has a steeper OC curve that offers as much protection to the customer as the tightened plan, yet the shoulder on the scheme OC curve offers as much protection to the producer (who supplies lots with the percent nonconforming at or below the AQL) as the normal plan. The producer risk point for the scheme OC curve is (AQL=.019296, 1-$\alpha$=0.95), and the customer risk point is (LTPD=.045884, $\beta$=.10). A custom-made single sampling plan with an OC curve that passes through these two points can be obtained using the $\verb!find.plan()!$ function in the R package $\verb!AcceptanceSampling!$. A comparison of the scheme OC curve to the OC curve for the custom single sampling plan is shown in Figure 3.
```{r fig3, fig.height = 4.5, fig.width = 4.5}
library(AcceptanceSampling)
Cs<-find.plan(PRP=c(.019296,.95),CRP=c(.045884,.10),type="binomial")
Cs
ocCs<-OC2c(Cs$n,Cs$c,type='binomial',pd=pd)
plot(ocCs,type='l',main="Figure 3")
lines(pd,PS,type='l',lty=2,col=2)
lines(c(.04,.05),c(.95,.95),lty=1, col=1)
lines(c(.04,.05),c(.89,.89),lty=2, col=2)
points(c(.019296,.045884),c(.95,.10))
text(.07,.95,'Custom Single', col=1)
text(.07,.89,'Scheme', col=2)
```
Although the two OC curves in Figure 3 are very similar, the custom single sampling plan requires a sample of n=359 (a 79.5\% increase over the average sample size for the ANSI/ASQ normal-tightened scheme). This clearly shows the advantage of the ANSI/ASQ Z1.4 scheme for producer and customer protection.
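This percentage can be verified with one line of arithmetic, under the simplifying assumption that the average sample number of the scheme is essentially the single-plan sample size of n=200.
```{r eval=FALSE}
(359 - 200) / 200 # approximately 0.795, a 79.5% increase
```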
The published ANSI/ASQ Z1.4 Standard contains tables for single, double, and multiple sampling plans indexed by code letter and AQL. The code letter is a function of the lot size and inspection level. The published standard also includes operating characteristic (OC) curves, ASN curves, and other measures of performance for the individual plans, as well as composite OC curves for the scheme consisting of the normal, tightened, and reduced sampling plans and the associated switching rules for each code letter and AQL combination. However, the tables do not include comparisons of OC curves that clearly show the advantage of using the ANSI/ASQ scheme. Using the $\verb!AQLSchemes!$ and $\verb!AcceptanceSampling!$ R packages to complete exercises similar to the examples above does make these advantages clear.
The ANSI/ASQ Z1.9 Standard plans assume that measurements of a single quality characteristic are independent and normally distributed. The standard includes plans for single or double specification limits with known or unknown variability. Plans use either Form 1 or Form 2 for the acceptability criterion; Form 2 requires estimating the lot percent nonconforming from the sample data, while Form 1 does not. When the variability is unknown, there are plans for the cases where variability is estimated by the sample standard deviation or by the average range. The plans based on estimating the unknown variability with the sample standard deviation require fewer sample units than the plans that use the average range.
For the ANSI/ASQ Z1.9 sampling plans, the $\verb!AAZ19()!$ function in the $\verb!AQLSchemes!$ package only retrieves plans for the unknown-variability case that are based on the sample standard deviation, since they require smaller sample sizes. After collecting the data for a plan, the $\verb!Epn()!$ function in the package can be used to quickly calculate the estimated lot percent nonconforming (for Form 2) from the sample data, for either single or double specification limits with known or unknown variability.
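Although $\verb!Epn()!$ handles the Form 2 calculation, the Form 1 decision rule is easy to express directly in R. The sketch below applies the Form 1 criterion for a single upper specification limit with unknown variability, using the plan retrieved earlier (n=75, k=1.84097); the data vector $\verb!x!$ and the specification limit $\verb!U!$ are hypothetical.
```{r eval=FALSE}
set.seed(101)
x <- rnorm(75, mean = 10, sd = 0.5) # hypothetical sample measurements
U <- 12                             # hypothetical upper specification limit
(U - mean(x)) / sd(x) >= 1.84097    # TRUE -> accept the lot under Form 1
```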
**References**
Christensen, C., Betz, K. M. and Stein, M. S. (2013). *The Certified Quality Process Analyst Handbook*, 2nd Ed., ASQ Quality Press, Milwaukee, Wisconsin.
Stephens, K. and Larson, K. (1967). An evaluation of the MIL-STD-105D system of sampling plans. *Industrial Quality Control* 23(7).
| /scratch/gouwar.j/cran-all/cranData/AQLSchemes/vignettes/AQLSchemes.Rmd |
#' @title Class "AQuadtree".
#' @description
#' An S4 class representing a Quadtree hierarchical geographic Grid
#' to anonymise spatial point data.
#'
#' Objects can be created by calls to the function \link{AQuadtree}
#'
#' @details
#' Given a set of points, the \code{AQuadtree class} represents a
#' varying size Quadtree grid created performing a
#' bottom-up aggregation considering a minimum threshold for each cell.
#' Cells with a value under the threshold for the \code{thresholdField} are
#' aggregated to the upper level in a quadtree manner.\cr
#' When no \code{thresholdField} is given, total number of points in the cell
#' will be used, and so, given a threshold of k, none of the cells in the
#' resulting grid have a value less than k individuals as in a k-anonymity model.\cr
#' The Quadtree produced balances information loss and accuracy. For instance,
#' for the set of cells in the left image, where numbers in the cells represent
#' the values in the \code{thresholdField}, using a \code{threshold} value of 100,
#' the resulting Quadtree will be the one on the right. As we can see, some cells
#' will be discarded, and some aggregated to maintain as much information as
#' possible, keeping at the same time as much disaggregation as possible.\cr
#' \if{html}{\figure{QTexampleA.png}{options: width=260 alt="62.5m2 cells"}}
#' \if{latex}{\figure{QTexampleA.png}{options: width=4.5cm}}
#' \if{html}{\figure{QTexampleB.png}{options: width=250 alt="resulting Quadtree"}}
#' \if{latex}{\figure{QTexampleB.png}{options: width=4.4cm}}\cr
#' The INSPIRE coding system for cell identifiers will be used to generate a
#' CellCode and CellNum for each cell in the Quadtree.
#' The objective of the coding system is to generate unique
#' identifiers for each cell, for any of the resolutions.\cr
#' The cellCode is a text string, composed of cell size and cell coordinates.
#' Cell codes start with a cell size prefix. The cell size is denoted in meter (m)
#' for cell sizes below 1000 m and kilometre (km) for cell sizes from 1000 m and
#' above.\cr
#' Example: a 100 meter cell has an identifier starting with “100m”, the
#' identifier of a 10000 meter cell starts with “10km”.\cr
#' The coordinate part of the cell code reflects the distance of the lower left
#' grid cell corner from the false origin of the CRS. In order to reduce the
#' length of the string, Easting (E) and Northing (N) values are divided by
#' 10^n (n is the number of zeros in the cell size value). Example for a cell
#' size of 10000 meters: The number of zeros in the cell size value is 4.
#' The resulting divider for Easting and Northing values is 10^4 = 10000.\cr
#' The CellNum is a sequence of concatenated integers identifying all the
#' hierarchical partitions of the main cell in which the point resides.
#' For instance, the CellNum of the top right cell would be 416 (fourth
#' in first partition, sixteenth in second partition)\cr
#' The input object must be projected and units should be in 'meters'
#' because the system uses the INSPIRE coding system.
#'
#' @seealso
#' \itemize{
#' \item{
#' D2.8.I.2 INSPIRE Specification on Geographical Grid Systems – Guidelines
#' \url{https://inspire.ec.europa.eu/documents/Data_Specifications/INSPIRE_Specification_GGS_v3.0.1.pdf}
#' }
#' \item{
#' EEA reference grid dataset
#' \url{https://data.europa.eu/euodp/data/dataset/data_eea-reference-grids-2}
#' }
#' }
#' @importFrom methods as as<- slot slot<- callGeneric
#' @name AQuadtree-class
#' @aliases AQuadtree-class
#' @slot dim numeric.
#' @slot layers numeric.
#' @slot colnames character.
#' @slot threshold numeric.
#' @slot thresholdField character.
#' @slot loss numeric.
#' @exportClass AQuadtree
#'
#' @examples
#' data("BarcelonaPop", "BarcelonaCensusTracts")
#' aquadtree.Barcelona<-AQuadtree(BarcelonaPop, layers = 3)
#' plot(aquadtree.Barcelona)
#'
#' data("CharlestonPop")
#' aQuadtree.Charleston<-AQuadtree(CharlestonPop, colnames="sex", threshold=17,
#'      thresholdField=c("sex.male", "sex.female"))
#'
#' \dontrun{
#' ## spatial object not projected
#' sp.not.projected<-spTransform(CharlestonPop,CRS("+proj=longlat +datum=NAD27"))
#' is.projected(sp.not.projected)
#' aqt<-AQuadtree(sp.not.projected)
#'
#' ## not an SpatialPoints object
#' aqt<-AQuadtree(CharlestonCensusTracts)
#'
#' ## too many subdivisions
#' aqt<-AQuadtree(CharlestonPop, layers=15)
#'
#' }
setClass(
Class="AQuadtree",
contains = "SpatialPolygonsDataFrame",
slots = c(
dim = "numeric",
layers = "numeric",
colnames = "character",
threshold = "numeric",
thresholdField = "character",
loss = "numeric"
)
)
#' Wrapper function AQuadtree.
#'
#' @rdname AQuadtree-class
#' @title AQuadtree
#' @details function to create an object of class AQuadtree
#'
#' @param points object of class "SpatialPoints" or "SpatialPointsDataFrame".
#' @param dim a single integer specifying the initial cell sizes in meters, defaults to 1000.
#' @param layers a single integer specifying the number of divisions of the
#' initial cells, defaults to 5.
#' @param colnames character string or character string vector specifying the
#' columns to summarise in the resulting quadtree.
#' @param threshold number. The threshold minimum value each cell must have
#' in the column \code{thresholdField}.
#' @param thresholdField character string specifying the column at which the
#' \code{threshold} value will apply.
#' @param funs character string or character string vector specifying the summary
#' functions for each of the \code{colnames}. If vector, the size must be the
#' same as colnames.
#' @param ineq.threshold inequality threshold value to be considered on the
#' disaggregation process. Forces disaggregation under the given inequality
#' threshold.
#' @param loss.threshold loss threshold value to be considered on the
#' disaggregation process. Forces aggregation when there is too much loss
#' (i.e. loss rate > loss.threshold).
#' @return AQuadtree object representing a varying size Quadtree
#' aggregation for the given points.
#' @export
#'
AQuadtree<-function(points, dim=1000, layers=5, colnames=NULL, threshold=100, thresholdField=NULL, funs=NULL, ineq.threshold=0.25, loss.threshold=0.4) {
  createAQuadtree(points, dim, layers, colnames, threshold, thresholdField, funs, as="AQuadtree", ineq.threshold, loss.threshold)
}
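## Commented usage sketch (not run at package load): the wrapper returns an
## "AQuadtree" object whose slots can be read with the "[" method defined
## below; the CharlestonPop sample dataset shipped with the package is assumed.
# data("CharlestonPop")
# aqt <- AQuadtree(CharlestonPop, threshold = 50)
# aqt["dim"]; aqt["layers"]; aqt["loss"]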
#'
#'
#' Method show.
#' @rdname show
#' @title show AQuadtree-method
#' @details Display the AQuadtree object
#' @aliases show
#'
#' @param object an object of class AQuadtree.
#' @return A data.frame showing the information attributes contained in the AQuadtree object.
#'
setMethod("show", "AQuadtree",
function(object){
cat("An object of class \"",class(object),"\" with ", sep="")
cat(length(object),
"grid cells with sizes between",
ifelse(object@dim>=1000, paste0(object@dim/1000, "km"), paste0(object@dim, "m")), "and",
ifelse(object@dim/(2^(object@layers-1))>=1000, paste0(object@dim/(2^(object@layers-1))/1000, "km"), paste0(object@dim/(2^(object@layers-1)), "m")),
"\n")
print(object@data)
}
)
#'
#'
#' Method print.
#' @rdname print
#' @title print AQuadtree-method
#' @details Prints the AQuadtree object
#' @aliases print
#'
#' @param x an object of class AQuadtree.
#' @param ... passed through.
#' @return none
#'
setMethod("print", "AQuadtree",
function(x, ...){
cat("* dim: "); print(x@dim)
cat("* layers: "); print(x@layers)
cat("* threshold: "); print(x@threshold)
cat("* colnames: "); print(x@colnames)
cat("* loss: "); print(x@loss)
cat("* residual cells: "); print(length(x$residual))
cat("* grid cells: "); print(length(x))
print(x@data)
}
)
#'
#'
#' Method summary.
#' @rdname summary
#' @title summary AQuadtree-method
#' @details summarize information of an object of class AQuadtree
#' @aliases summary
#'
#' @param object an object of class AQuadtree.
#' @param ... passed through.
#' @return An object of class "table" with summarising information in the AQuadtree input object
#'
setMethod("summary", "AQuadtree",
function(object, ...) {
cat("Object of class \"", class(object), "\"\n", sep="")
cat(length(object),
"grid cells with sizes between",
ifelse(object@dim>=1000, paste0(object@dim/1000, "km"), paste0(object@dim, "m")),
"and",
ifelse(object@dim/(2^(object@layers-1))>=1000, paste0(object@dim/(2^(object@layers-1))/1000, "km"), paste0(object@dim/(2^(object@layers-1)), "m")),
"\n")
cat("Coordinates:\n")
print(bbox(object))
if (!is.na(is.projected(object))) cat("Is projected:", is.projected(object), "\n")
else cat("Is projected: NA", "\n")
if (!is.na(proj4string(object))) cat("proj4string:\n", proj4string(object), "\n")
cat("Initial Cell Size:", ifelse(object@dim>=1000, paste0(object@dim/1000, "km"), paste0(object@dim, "m")), "\n")
if (!is.null(object@data$residual)){
cat("Number of valid grid Cells:", length(object[!object@data$residual,]), "\n")
cat("Number of residual grid Cells:", length(object[object@data$residual,]), "\n")
}
cat("Data attributes:\n")
print(summary(object@data[object@colnames]))
}
)
#'
#'
#' Method [
#' @rdname extract.aquadtree.data
#' @title [ AQuadtree-method
#' @details Extract a part of a AQuadtree object
#' @aliases [
#'
#' @param x an object of class AQuadtree.
#' @param i,j elements to extract.
#' @param ... passed through.
#' @param drop passed on to [ indexing operator.
#' @return An AQuadtree object with the selected subset of rows or columns from the input object.
#'
setMethod("[", "AQuadtree",
function(x, i, j, ..., drop){
if (missing(j)){
if (length(i)==1 && is.character(i)) {
if(i=="dim"){return(x@dim)}
else if(i=="layers"){return(x@layers)}
else if(i=="threshold"){return(x@threshold)}
else if(i=="thresholdField"){return(x@thresholdField)}
else if(i=="colnames"){return(x@colnames)}
else if(i=="loss"){return(x@loss)}
} else {
x.SP<-as(x, "SpatialPolygonsDataFrame")
x.SP<-x.SP[i, , ...]
}
} else if (missing(i)) {
x.SP<-as(x, "SpatialPolygonsDataFrame")
x.SP<-x.SP[, j, ...]
} else{
x.SP<-as(x, "SpatialPolygonsDataFrame")
x.SP<-x.SP[i, j, ...]
}
return(
new("AQuadtree",
x.SP,
dim=x@dim,
layers=x@layers,
threshold=x@threshold,
thresholdField = x@thresholdField,
colnames= x@colnames[x@colnames %in% names(x.SP)],
loss=x@loss
)
)
}
)
#'
#'
#' Method [<-
#' @rdname replace.aquadtree.data
#' @title [<- AQuadtree-method
#' @details An AQuadtree object cannot be modified by direct assignment
#' @aliases [<-
#'
#' @param x an object of class AQuadtree.
#' @param i,j elements to extract or replace.
#' @param ... passed through.
#' @param value value to set.
#' @return none
#'
setReplaceMethod("[", "AQuadtree",
function(x, ...){
stop("Error: quadtree slots cannot be changed", call.=FALSE)
}
)
#'
#'
#' Method plot
#' @rdname plot
#' @title plot AQuadtree-method
#' @details Plot an object of class AQuadtree.
#' @aliases plot
#' @importFrom sp plot
#' @export
#'
#' @param x an object of class AQuadtree.
#' @param residual logical; if TRUE cells marked as residual cells are included
#' @param add logical. TRUE to add plot to the current existing plot
#' @param col default plotting color
#' @param ... passed through.
#' @return none
#'
setMethod("plot", signature = c(x="AQuadtree", y="missing"),
function(x, ..., residual=TRUE, add=FALSE, col){
if (residual) {
if (missing(col)) {
callGeneric(obj.SP<-as(x, "SpatialPolygonsDataFrame")[(x$residual),], add=add, col="red", xlim=x@bbox[1,], ylim=x@bbox[2,], ...)
callGeneric(obj.SP<-as(x, "SpatialPolygonsDataFrame")[!(x$residual),], add=TRUE, col="green", ...)
} else {
callGeneric(obj.SP<-as(x, "SpatialPolygonsDataFrame"), add=add, col=col, ...)
}
} else {
if (missing(col)) col="green"
callGeneric(obj.SP<-as(x, "SpatialPolygonsDataFrame")[!(x$residual),], add=add, col=col, ...)
}
}
)
#'
#'
#' Method spplot
#' @rdname spplot
#' @title spplot AQuadtree-method
#' @details Plots a AQuadtree object as a spatial object with its data
#' @aliases spplot
#' @importFrom sp spplot
#' @export
#'
#' @param obj an object of class AQuadtree.
#' @param by.density logical; if TRUE cell values specified in zcol are divided by cell areas
#' @param zcol character; attribute name(s) or column number(s) in attribute table
#' @param residual logical; if TRUE cells marked as residual cells are included
#' @param ... passed through.
#' @return Creates a lattice plot of class "trellis" created with the spplot method in the sp package
#'
setMethod("spplot", "AQuadtree",
function(obj, zcol=NULL, by.density=TRUE, residual=TRUE, ...){
if (is.null(zcol)) zcol=obj@colnames
if (residual) obj.SP<-as(obj, "SpatialPolygonsDataFrame")
else obj.SP<-as(obj[!obj$residual], "SpatialPolygonsDataFrame")
if (by.density) {
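              # convert cell counts to densities per square kilometre: the cell
              # side is obj@dim / 2^(level-1) meters, so counts are scaled by
              # (1000 / side)^2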
obj.SP@data[,zcol]<-obj.SP@data[,zcol]*(1000*2^(obj.SP@data$level-1)/obj@dim)^2
callGeneric(obj.SP, zcol=zcol, ...)
} else callGeneric(obj.SP, zcol=zcol, ...)
}
)
#'
#'
#' Method merge.
#' @rdname merge
#' @title Merge an AQuadtree object with a data.frame
#' @details Merges the AQuadtree object data with the data.frame on the columns "cellCode" and "cellNum"
#' @aliases merge
#' @importFrom sp merge
#' @export
#'
#' @param x an object of class AQuadtree.
#' @param y an object of class data.frame
#' @return An AQuadtree object where the data is extended with the input data.frame
#'
setMethod("merge", signature = c(x="AQuadtree", y="data.frame"),
function(x, y){
if (!(all(c("cellCode","cellNum") %in% names(x))))
stop("first object does not contain 'cellcode' and 'cellNum' attributes", call.=FALSE)
if (!(all(c("cellCode","cellNum") %in% names(y))))
stop("second object does not contain 'cellcode' and 'cellNum' attributes", call.=FALSE)
return (
new("AQuadtree",
sp::merge(as(x, "SpatialPolygonsDataFrame"), y, by=c("cellCode","cellNum"), sort=F),
dim = x@dim,
layers = x@layers,
threshold = as.numeric(NA),
thresholdField = as.character(NA),
colnames = c(x@colnames, names(y)[!names(y) %in% c('cellCode', 'cellNum')]),
loss = as.numeric(NA)
)
)
}
)
#' Method area.QT
#' @rdname area.QT
#' @title area.QT AQuadtree-method
#' @details Get the areas of the Quadtree grid cells in square meters
#'
#' @aliases area.QT
#' @export
#'
#' @param obj an object of class AQuadtree.
#' @param residual logical; if TRUE cells marked as residual cells are included
#' @param ... passed through.
#'
setGeneric("area.QT", function(obj, residual=TRUE, ...) standardGeneric("area.QT"))
#' @return area of Quadtree grid cells in square meters
#' @rdname area.QT
#' @export
setMethod("area.QT", "AQuadtree",
function(obj, residual=FALSE, ...){
obj.SP<-as(obj, "SpatialPolygonsDataFrame")
if (!residual) obj.SP<-obj.SP[!obj.SP$residual,]
return((obj@dim/2^(obj.SP@data$level-1))^2)
}
)
| /scratch/gouwar.j/cran-all/cranData/AQuadtree/R/AQuadtree-class.R |
#' AQuadtree: A package to anonymise spatial points data.
#'
#' @description
#' This package provides an S4 class for creating, manipulating
#' and exporting varying-size spatial quadtree grids, together with methods
#' including print/show, plot, spplot, subset, [, [[, names,
#' dim, summary, and write.
#'
#' @section Introduction:
#' The quadtree functions and class provide the tools to build a varying-size
#' quadtree grid, performing a bottom-up aggregation considering a minimum
#' threshold for each cell.
#' The main goal of the package is the anonymization of a set of spatial
#' point data by an aggregation process as in a k-anonymity model. The grid
#' created follows the INSPIRE Specification on Geographical Grid Systems.
#'
#' @references
#' D2.8.I.2 INSPIRE Specification on Geographical Grid Systems – Guidelines
#' \url{https://inspire.ec.europa.eu/documents/Data_Specifications/INSPIRE_Specification_GGS_v3.0.1.pdf}
#'
#' EEA reference grid dataset
#' \url{https://data.europa.eu/euodp/data/dataset/data_eea-reference-grids-2}
#'
#' @docType package
#' @aliases AQuadtree-package
"_PACKAGE"
| /scratch/gouwar.j/cran-all/cranData/AQuadtree/R/AQuadtree.R |
#' @title Create a Quadtree grid to anonymise spatial point data
#' @description
#' \code{createAQuadtree} returns a SpatialPointsDataFrame representing a Quadtree
#' hierarchical geographic dataset. The resulting grid contains varying size cells
#' depending on a given threshold and column.
#' with identifiers
#' A \code{cellCode} and \code{cellNum} is created for each cell as in INSPIRE
#' Specification on Geographical Grid Systems.
#' @importFrom stats setNames
#' @importFrom methods new
#' @importFrom sp coordinates SpatialPolygons Polygons Polygon CRS proj4string
#' SpatialPolygonsDataFrame
#' @importFrom dplyr %>% summarise_ group_by summarise_at
#' @details
#' Given a set of points a varying size Quadtree grid is created performing a
#' bottom-up aggregation considering a minimum threshold for each cell.
#' Cells with a value under the threshold for the \code{thresholdField} are
#' aggregated to the upper level in a quadtree manner.\cr
#' When no \code{thresholdField} is given, total number of points in the cell
#' will be used, and so, given a threshold of k, none of the cells in the
#' resulting grid have a value less than k individuals as in a k-anonymity model.\cr
#' The Quadtree produced balances information loss and accuracy. For instance,
#' for the set of cells in the left image, where numbers in the cells represent
#' the values in the \code{thresholdField}, using a \code{threshold} value of 100,
#' the resulting Quadtree will be the one on the right. As we can see, some cells
#' will be discarded, and some aggregated to maintain as much information as
#' possible, keeping at the same time as much disaggregation as possible.\cr
#' \if{html}{\figure{QTexampleA.png}{options: width=260 alt="62.5m2 cells"}}
#' \if{latex}{\figure{QTexampleA.png}{options: width=4.5cm}}
#' \if{html}{\figure{QTexampleB.png}{options: width=250 alt="resulting Quadtree"}}
#' \if{latex}{\figure{QTexampleB.png}{options: width=4.4cm}}\cr
#' The INSPIRE coding system for cell identifiers will be used to generate a
#' cellCode and cellNum for each cell in the Quadtree.
#' The objective of the coding system is to generate unique
#' identifiers for each cell, for any of the resolutions.\cr
#' The cellCode is a text string, composed of cell size and cell coordinates.
#' Cell codes start with a cell size prefix. The cell size is denoted in meter (m)
#' for cell sizes below 1000 m and kilometre (km) for cell sizes from 1000 m and
#' above.\cr
#' Examples: a 100 meter cell has an identifier starting with “100m”, the
#' identifier of a 10000 meter cell starts with “10km”.\cr
#' The coordinate part of the cell code reflects the distance of the lower left
#' grid cell corner from the false origin of the CRS. In order to reduce the
#' length of the string, Easting (E) and Northing (N) values are divided by
#' 10^n (n is the number of zeros in the cell size value). Example for a cell
#' size of 10000 meters: The number of zeros in the cell size value is 4.
#' The resulting divider for Easting and Northing values is 10^4 = 10000.\cr
#' The cellNum is a sequence of concatenated integers identifying all the
#' hierarchical partitions of the main cell in which the point resides.
#' For instance, the cellNum of the top right cell would be 416 (fourth
#' in first partition, sixteenth in second partition)\cr
#' The input object must be projected and units should be in 'meters'
#' because the system uses the INSPIRE coding system.
#'
#' @seealso
#' \itemize{
#' \item{
#' D2.8.I.2 INSPIRE Specification on Geographical Grid Systems – Guidelines
#' \url{https://inspire.ec.europa.eu/documents/Data_Specifications/INSPIRE_Specification_GGS_v3.0.1.pdf}
#' }
#' \item{
#' EEA reference grid dataset
#' \url{https://data.europa.eu/euodp/data/dataset/data_eea-reference-grids-2}
#' }
#' }
#'
#' @param points object of class "SpatialPoints" or "SpatialPointsDataFrame".
#' @param dim a single integer specifying the initial cell sizes in meters, defaults to 1000.
#' @param layers a single integer specifying the number of divisions of the
#' initial cells, defaults to 5.
#' @param colnames character or character vector specifying the
#' columns to summarise in the resulting quadtree. For columns of class factor,
#' a column for each factor level will be created.
#' @param threshold number. The threshold minimum value each cell must have
#' in the column \code{thresholdField}.
#' @param thresholdField character or character vector specifying the
#' columns to which the \code{threshold} value will apply. If not specified,
#' threshold value will be applied over the total cell points number.
#' ThresholdField must be one of the colnames.
#' @param funs character or character vector specifying the summary
#' functions for each of the \code{colnames}. If vector, the size must be the
#' same as colnames.
#' @param as character indicating the return type: if "AQuadtree", an object
#' of class AQuadtree will be returned, otherwise a SpatialPolygonsDataFrame
#' will be returned. Defaults to "Spatial".
#' @param ineq.threshold inequality threshold value to be considered on the
#' disaggregation process. Forces disaggregation under the given inequality
#' threshold.
#' @param loss.threshold loss threshold value to be considered on the
#' disaggregation process. Stops disaggregation when there is too much loss
#' (i.e. loss rate > loss.threshold).
#' @return SpatialPolygonsDataFrame representing a varying size Quadtree
#' aggregation for the given points.
#' @export
#' @examples
#' data("CharlestonPop")
#' aQuadtree.Charleston<-createAQuadtree(CharlestonPop, threshold=10,
#' colnames="sex", thresholdField=c("sex.male", "sex.female"))
#'
createAQuadtree <- function(points, dim=1000, layers=5, colnames=NULL, threshold=100, thresholdField=NULL, funs=NULL, as="Spatial", ineq.threshold=0.25, loss.threshold=0.4) {
cellCode<-NULL
ContainerID<-NULL
## aux function to determine the number of trailing zeros
trailingZeros<-function(x){
i<-0
while (x %% 10 == 0 ) {
x<-x%/%10
i<-i+1
}
return(i)
}
  ## f.Ineq: Theil inequality index of the non-zero cell values
f.Ineq<-function(x){
x <- x[!(x == 0)]
Th <- x/mean(x)
Th <- sum(x * log(Th))
Th <- Th/sum(x)
}
  ## f.Loss: proportion of individuals falling in cells under the threshold
f.Loss<-function(x){
L <- sum(x[x<threshold])/sum(x)
}
#stopifnot(require("sp"), require("dplyr"))
if (missing(points)) stop("argument 'points' is missing, with no default", call.="FALSE")
if (length(points)==0) stop("argument 'points' has length 0", call.="FALSE")
stopifnot(dim>0, layers>=2)
if (!inherits(points, "SpatialPoints")) stop("argument 'points' is not a 'SpatialPoints' or 'SpatialPointsDataFrame' object", call.="FALSE")
if (!is.projected(points)) stop("spatial data must be projected", call.="FALSE")
if (layers>10) stop("maximum 10 layers allowed", call.="FALSE")
if (any(bbox(points)<0)) stop("negative bbox not permited, use a different projection", call.="FALSE")
if (!(all(colnames %in% names(points)))) {
stop(sprintf("some colnames (%s) not in object names (%s)", paste(colnames[!(colnames %in% names(points))] , collapse=", "), paste(names(points), collapse=", ")), call.="FALSE")
}
if (is.null(thresholdField)) {
thresholdField<-"total"
} else {
# add thresholdField fields to selection (colnames)
colnamesToAdd<-NULL
for (f in thresholdField) {
if (!(f %in% names(points))) {
# control if fieldsToAdd come from factors
f_<-unlist(strsplit(f, ".", fixed = TRUE))[1]
if ((f_ %in% names(points))){
colnamesToAdd<-c(colnamesToAdd, f_)
} else {
stop(sprintf("thresholdField (%s) not in object names (%s)", f_, paste(names(points), collapse=", ")), call.="FALSE")
}
} else {
if (is.factor(points@data[,f])) stop(sprintf("thresholdField (%s) is a factor", f), call.="FALSE")
if (!(f %in% colnames)) colnamesToAdd<-c(colnamesToAdd, f)
}
}
colnamesToAdd<-colnamesToAdd[!duplicated(colnamesToAdd)]
if (!is.null(funs) & length(funs)>1) funs<-c(funs, rep("sum", length(colnamesToAdd)))
colnames<-c(colnames, colnamesToAdd)
}
if (length(funs)>1 & length(funs)!=length(colnames)) {
stop("colnames and funs parameters do not have same length", call.="FALSE")
}
#create summarising expression
summariseExpr<-c("total"="n()")
if (!is.null(colnames)) {
if (is.null(funs)) funs <- "sum"
# treat possible factors within colnames
summariseCols<-
unlist(mapply(function(col, f){
if (is.factor(points@data[,col])){
lev<-levels(points@data[,col])
newCols<-paste0(col, ".", lev)
if (length(lev)>5) stop(sprintf("factor column %s has more than 5 levels", col), call.="FALSE")
# decompose factor creating a new column for factor level
points@data[newCols]<<-1*(points@data[rep(col, length(lev))]==as.list(lev))
colnames<<-c(colnames, newCols) # add new created columns to colnames
return (setNames(paste0(f, "(", newCols, ")"), newCols))
}else{
return(setNames(paste0(f, "(", col, ")"), col))
}
}, colnames, funs, USE.NAMES = FALSE, SIMPLIFY = FALSE))
colnames<-colnames[sapply(colnames, function(c) !is.factor(points@data[,c]))]
colnames<-colnames[!duplicated(colnames)]
summariseExpr<-c(summariseExpr, summariseCols)
summariseExpr<-summariseExpr[!duplicated(summariseExpr)]
}
sizePrefix<-ifelse(dim>=1000, paste0(dim/1000, "km"), paste0(dim, "m"))
# create points data.frame from input points' coordinates
if (is.null(colnames)) {
pts<-data.frame(x=coordinates(points)[,1], y=coordinates(points)[,2]) # there's no extra columns to keep
} else {
pts<-data.frame(x=coordinates(points)[,1], y=coordinates(points)[,2], points[colnames]@data) # keep extra columns given by colnames
}
# add x, y cell origin to each point
pts$CellOrigin.x<-as.integer(pts$x%/%dim*dim)
pts$CellOrigin.y<-as.integer(pts$y%/%dim*dim)
# calculate string CellCode of the form "1kmNyyyyExxxx"
zerosToRemove<-as.integer(10^trailingZeros(dim))
cellCodeE<-pts$CellOrigin.x/zerosToRemove
cellCodeN<-pts$CellOrigin.y/zerosToRemove
lenCellCode<-nchar(max(cellCodeE, cellCodeN))
pts$cellCode<-paste0(sizePrefix, "N",formatC(cellCodeN, width=lenCellCode, flag=0, mode = 'integer'), "E",formatC(cellCodeE, width=lenCellCode, flag=0, mode = 'integer'))
# cellNumPos stores the number of digits for each subpart of the cell codes
# cellNumPosStart<-c( 1,14,15,17,19,22)
# cellNumPosStop <-c(13,14,16,18,21,25)
cellNumPosStart<-c(1)
cellNumPosStop<-c(nchar(pts$cellCode[1]))
# calculate cellNum of the form "nmmooopppp..." with n 1:4 mm 01:16 ooo 001:256 pppp 0001:1024 ...
pts$cellNum<-''
for (i in 2:layers){
zeros<-ceiling(log10(2^(2*(i-1))))
cellNumPosStop[i]<-cellNumPosStop[i-1]+zeros
cellNumPosStart[i]<-cellNumPosStop[i-1]+1
size<-dim/2^(i-1)
pts$cellNum<-paste0(pts$cellNum, formatC((pts$x-pts$CellOrigin.x)%/%size + (2^(i-1))*(pts$y-pts$CellOrigin.y)%/%size+1, width=zeros, flag=0, mode = 'integer'))
}
pts[c("x", "y", "CellOrigin.x", "CellOrigin.y")]<-NULL
pts$cellCodesStr<-paste0(pts$cellCode, pts$cellNum)
## message("layer:1\n")
prevGrid <- pts %>% group_by(cellCode) %>% summarise_(.dots=summariseExpr)
prevGrid<-prevGrid[apply(prevGrid[,thresholdField]>=threshold,1, all),] # remove high level cells with underthreshold population
if (nrow(prevGrid)==0) stop("empty set, try a smaller threshold", call.="FALSE")
prevGrid$level<-1
#removed.Points<-pts[!(pts$cellCode %in% prevGrid$cellCode),] # keep points before removing them
removed.Points<-data.frame()
pts<-pts[pts$cellCode %in% prevGrid$cellCode,] # remove points corresponding to cells with underthreshold population
maxLayer<-1
# create elements of the quadtree aggregating at each level
quadtree.Elements<-data.frame()
for (i in 2:layers){
## message("layer:", i, "\n")
actualcellNumPos<-cellNumPosStop[i]
pts$cellCode<-substr(pts$cellCodesStr, 1, cellNumPosStop[i])
actualGrid <- pts %>% group_by(cellCode) %>% summarise_(.dots=summariseExpr)
actualGrid$level<-i
maxLayer<-i
actualGrid$ContainerID<-substr(actualGrid$cellCode, 1, cellNumPosStop[i-1])
actualGrid<-actualGrid[actualGrid$ContainerID %in% actualGrid[apply(actualGrid[,thresholdField]>=threshold,1, all),]$ContainerID,]
if (nrow(actualGrid)==0) {
break
}
codes.Ineq<-as.data.frame(actualGrid %>% group_by(ContainerID) %>% summarise_at(thresholdField,f.Ineq))
## do not aggregate when there's much inequality between cell population (i.e inequality index > ineq.threshold )
if (length(thresholdField)>1)
codes.Ineq.high<-codes.Ineq[apply(codes.Ineq[,thresholdField]>ineq.threshold,1, any), 1]
else
codes.Ineq.high<-codes.Ineq[codes.Ineq[,thresholdField]>ineq.threshold, 1]
codes.Loss<-as.data.frame(actualGrid %>% group_by(ContainerID) %>% summarise_at(thresholdField,f.Loss))
## do not disaggregate when there's much Loss
if (length(thresholdField)>1)
codes.Loss.high<-codes.Loss[apply(codes.Loss[,thresholdField]>loss.threshold,1, any), 1]
else
codes.Loss.high<-codes.Loss[codes.Loss[,thresholdField]>loss.threshold, 1]
codesToAggregate<-actualGrid[apply(actualGrid[, thresholdField] < threshold, 1, any),]$ContainerID
codesToAggregate<-codesToAggregate[!(codesToAggregate %in% codes.Ineq.high) | (codesToAggregate %in% codes.Loss.high)]
#keep cells discarded before removing them
removed.Points<-rbind(removed.Points, pts[pts$cellCode %in% actualGrid[ !(actualGrid$ContainerID %in% codesToAggregate) & apply(actualGrid[, thresholdField] < threshold, 1, any),]$cellCode,])
actualGrid<-actualGrid[ !(actualGrid$ContainerID %in% codesToAggregate) & apply(actualGrid[, thresholdField]>=threshold, 1, all), ]
prevGrid<-prevGrid[!(prevGrid$cellCode %in% actualGrid$ContainerID),]
actualGrid$ContainerID<-NULL
quadtree.Elements<-rbind(quadtree.Elements, prevGrid)
#remove all the points already in quadtree.Elements
pts<-pts[pts$cellCode %in% actualGrid$cellCode,]
prevGrid<-actualGrid
if (nrow(pts)==0) break # finish when there's no points left
}
# add remaining elements from last layer
quadtree.Elements<-rbind(quadtree.Elements, prevGrid)
quadtree.Elements$residual<-FALSE
if (nrow(removed.Points)>0) {
# aggregate removed cells and add them to the quadtree
removed.Points$cellCode<-substr(removed.Points$cellCode, 1, cellNumPosStop[1])
removed.Points$ContainerID<-NULL
removed.Points<-removed.Points %>% group_by(cellCode) %>% summarise_(.dots=summariseExpr)
## keep only aggregation of removed point over the threshold value
removed.Points<-removed.Points[apply(removed.Points[thresholdField]>=threshold, 1, all),]
if (nrow(removed.Points)>0) {
removed.Points$level<-1
removed.Points$residual<-TRUE
quadtree.Elements<-rbind(quadtree.Elements, removed.Points)
}
}
# convert quadtree elements to SpatialPolygons
i<--1
quadtree.SP<-SpatialPolygons(
sapply(quadtree.Elements$cellCode,
function(cellCode){
i<<-i+1
IDs<-substring(cellCode, cellNumPosStart, cellNumPosStop)
IDs<-IDs[IDs!=""]
IDs.length<-length(IDs)
actualDim<-dim / 2^(IDs.length-1)
elemNum<-as.integer(dim/actualDim)
if (IDs.length==1){
coords.x<-as.integer(strsplit(strsplit(IDs, "N")[[1]][2], "E")[[1]][2])*zerosToRemove
coords.y<-as.integer(strsplit(strsplit(IDs, "N")[[1]][2], "E")[[1]][1])*zerosToRemove
} else {
coords.x<-as.integer(strsplit(strsplit(IDs, "N")[[1]][2], "E")[[1]][2]) * zerosToRemove + ((as.integer(IDs[IDs.length]) - 1) %% elemNum) * actualDim
coords.y<-as.integer(strsplit(strsplit(IDs, "N")[[1]][2], "E")[[1]][1]) * zerosToRemove + ((as.integer(IDs[IDs.length]) - 1) %/% elemNum) * actualDim
}
pol.points<-cbind(
x=c(coords.x, coords.x+actualDim, coords.x+actualDim, coords.x, coords.x),
y=c(coords.y, coords.y, coords.y+actualDim, coords.y+actualDim, coords.y)
)
return (list( Polygons(list(Polygon(pol.points)), i)))
}, USE.NAMES = FALSE), proj4string=slot(points, "proj4string"))
quadtree.Elements$cellNum<-substr(quadtree.Elements$cellCode, cellNumPosStart[2], cellNumPosStop[length(cellNumPosStop)])
quadtree.Elements$cellCode<-substr(quadtree.Elements$cellCode, 1, cellNumPosStop[1])
colOrder<-c("cellCode", "cellNum", "level", "residual", setdiff(colnames(quadtree.Elements), c("cellCode", "cellNum", "level", "residual")))
quadtree.Elements<-quadtree.Elements[colOrder]
quadtree<-SpatialPolygonsDataFrame(quadtree.SP, as.data.frame(quadtree.Elements), match.ID = FALSE)
quadtree@bbox<-bbox(quadtree)
if (as=="AQuadtree") {
return(
new("AQuadtree",
quadtree,
dim=dim,
layers=maxLayer,
colnames=c("total", colnames),
threshold=threshold,
thresholdField=thresholdField,
loss=nrow(points)-sum(quadtree$total)
)
)
} else {
return(quadtree)
}
}
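## Worked sketch (comments only) of the INSPIRE cellCode arithmetic
## implemented above, for a hypothetical point at Easting 345678 and
## Northing 234567 with dim = 1000 (1km cells):
# zerosToRemove <- 10^3                        # three trailing zeros in 1000
# origin <- c(345678, 234567) %/% 1000 * 1000  # lower-left corner 345000, 234000
# paste0("1km", "N", 234000 %/% 1000, "E", 345000 %/% 1000)  # "1kmN234E345"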
| /scratch/gouwar.j/cran-all/cranData/AQuadtree/R/createAQuadtree.R |
#' @title Create a Grid grid covering a given geographic zone.
#' @description
#' \code{createGrid} returns a SpatialPolygons object representing a grid
#' covering a given geographic zone following the INSPIRE Specification on
#' Geographical Grid Systems. Each polygon will be identified with its
#' cellCode.
#' @importFrom sp bbox GridTopology is.projected spChFIDs CRS proj4string
#' as.SpatialPolygons.GridTopology
#' @details
#' INSPIRE Specification on Geographical Grid Systems\cr
#' The objective of the coding system is to generate unique identifiers for each
#' cell, for any of the recommended resolutions.\cr
#' The cellCode is a text string, composed of cell size and cell coordinates.
#' Cell codes start with the cell's size prefix. The cell size is denoted in meter (m)
#' for cell sizes below 1000m and kilometre (km) for cell sizes from 1000m and
#' above.\cr
#' Examples: a 100 meter cell has an identifier starting with “100m”, the
#' identifier of a 10000 meter cell starts with “10km”.\cr
#' The coordinate part of the cell code reflects the distance of the lower left
#' grid cell corner from the false origin of the CRS. In order to reduce the
#' length of the string, Easting (E) and Northing (N) values are divided by
#' 10^n (n is the number of zeros in the cell size value). Example for a cell
#' size of 10000 meters: The number of zeros in the cell size value is 4.
#' The resulting divider for Easting and Northing values is 10^4 = 10000.\cr
#' @seealso
#' \itemize{
#' \item{
#' D2.8.I.2 INSPIRE Specification on Geographical Grid Systems – Guidelines
#' \url{https://inspire.ec.europa.eu/documents/Data_Specifications/INSPIRE_Specification_GGS_v3.0.1.pdf}
#' }
#' \item{
#' EEA reference grid dataset
#' \url{https://data.europa.eu/euodp/data/dataset/data_eea-reference-grids-2}
#' }
#' }
#'
#' @param zone object of class "SpatialPoints", "SpatialPointsDataFrame",
#' "SpatialPolygons" or "SpatialPolygonsDataFrame" specifying the zone to
#' be covered by the grid.
#' @param dim a single integer specifying the initial cell sizes in meters, defaults to 1000.
#' @param intersect logical, if TRUE the resulting grid will be
#' intersected with the given zone. If zone is of class SpatialPoints, only cells
#' containing points will be kept on the resulting grid. If zone is of
#' class SpatialPolygons, only cells inside or partially inside polygons
#' in zone will be kept on the resulting grid.
#' Defaults to TRUE
#' @param outline logical, if TRUE the resulting grid will be
#' clipped with the outlines of the given zone. Only applicable if zone is of
#' class SpatialPolygons.
#' Defaults to FALSE
#' @return SpatialPolygons dataset representing a grid with squared cells of
#' the given size.
#' @export
#' @examples
#'
#' data("BarcelonaPop")
#' BarcelonaPop.INSPIRE_GRID<-createGrid(BarcelonaPop)
#' plot(BarcelonaPop.INSPIRE_GRID)
#'
#' \dontrun{
#' BarcelonaPop.INSPIRE_GRID.10km<-createGrid(BarcelonaPop, 10000, intersect=FALSE)
#' plot(BarcelonaPop.INSPIRE_GRID.10km)
#'
#' data("BarcelonaCensusTracts")
#' Barcelona.INSPIRE_GRID<-createGrid(BarcelonaCensusTracts, outline=TRUE)
#' plot(Barcelona.INSPIRE_GRID)
#' }
#'
createGrid <- function(zone, dim=1000, intersect=TRUE, outline=FALSE) {
## aux function to determine the number of trailing zeros
trailingZeros<-function(x){
i<-0
while (x %% 10 == 0 ) {
x<-x%/%10
i<-i+1
}
return(i)
}
  # sf is used for the grid conversion and the intersect/outline operations
  requireNamespace("sf", quietly = TRUE)
if (missing(zone)) stop("argument 'zone' is missing, with no default", call.="FALSE")
stopifnot(dim>0, inherits(zone, c("SpatialPolygons", "SpatialPoints")))
zoneBbox<-bbox(zone)
if (zoneBbox[1,"min"]<0 || zoneBbox[2,"min"]<0){
stop("zone outside limits", call.="FALSE")
}
gridTop<-GridTopology(
c(zoneBbox[1,"min"]%/%dim*dim+dim/2,zoneBbox[2,"min"]%/%dim*dim+dim/2),
c(dim,dim),
c((zoneBbox[1,"max"]-zoneBbox[1,"min"])%/%dim+2,(zoneBbox[2,"max"]-zoneBbox[2,"min"])%/%dim+2))
if (!is.na(is.projected(zone)) && is.projected(zone)) {
SPGrid.polygons<-as.SpatialPolygons.GridTopology(gridTop, slot(zone, "proj4string"))
} else {
SPGrid.polygons<-as.SpatialPolygons.GridTopology(gridTop)
}
SPGrid.sf <- sf::st_as_sf(SPGrid.polygons)
if (intersect) {
message("intersecting...\n")
zone.sf<-sf::st_as_sf(zone)
    SPGrid.sf<-SPGrid.sf[sapply(sf::st_intersects(SPGrid.sf, zone.sf, sparse = TRUE), any),]
}
  if (outline & inherits(zone, "SpatialPolygons")){
    message("creating outline...\n")
    # zone.sf is only created in the intersect branch above; ensure it exists
    if (!intersect) zone.sf<-sf::st_as_sf(zone)
    SPGrid.sf<-sf::st_intersection(sf::st_union(zone.sf), SPGrid.sf)
  }
return(as(SPGrid.sf, "Spatial"))
}
| /scratch/gouwar.j/cran-all/cranData/AQuadtree/R/createGrid.R |
#'
#'
#'
#' Randomly created population points for Charleston, SC MSA, USA.
#'
#' A dataset containing randomly created population for Charleston, SC MSA,
#' USA.
#' Population has been created randomly with distributions of census tracts
#' from the dataset Charleston1, 2000 Census Tract Data for Charleston, SC
#' MSA and counties
#' @seealso https://spatial.uchicago.edu/sample-data
#'
#' @format A SpatialPointsDataFrame with 54619 rows and 3 attributes:
#' \describe{
#' \item{age}{group age the individual as a factor with
#' levels: "under16", "16_65", "over65"}
#' \item{sex}{sex of the individual as a factor with levels: "male", "female"}
#' \item{origin}{origin of the individual as a factor with
#' levels: "asian", "black", "hisp", "multi_ra", "white"}
#' }
#'
"CharlestonPop"
#'
#'
#'
#' Census tract borders of Charleston, SC MSA, USA.
#'
#' A SpatialPolygons object containing the census tract borders of
#' Charleston, SC MSA, USA.
#'
#' @format A SpatialPolygons object with 117 polygons
#'
"CharlestonCensusTracts"
#'
#'
#'
#' Randomly created population points for Barcelona city in Catalonia.
#'
#' A dataset containing randomly created population for the Barcelona city
#' in Catalonia for the year 2018.
#' Population has been created randomly with the real distributions of census
#' tracts from the dataset dividing the total population by 20
#' (\url{https://ajuntament.barcelona.cat/estadistica/catala/Estadistiques_per_temes/Poblacio_i_demografia/Poblacio/Padro_municipal_habitants/a2018/edat/index.htm}).
#'
#' @format A SpatialPointsDataFrame with 81359 rows and 2 attributes:
#' \describe{
#' \item{age}{age the individual}
#' \item{sex}{sex of the individual as a factor with levels: "man", "woman"}
#' }
#'
"BarcelonaPop"
#'
#'
#'
#' Census tract borders of Barcelona city in Catalonia.
#'
#' A SpatialPolygons object containing the census tract borders of
#' Barcelona city in Catalonia.
#'
#' @format A SpatialPolygons object
#'
"BarcelonaCensusTracts"
| /scratch/gouwar.j/cran-all/cranData/AQuadtree/R/data.R |
#' @title Join two AQuadtree objects from the same area, to compare their data
#' @description
#' Given two objects of class AQuadtree for the same area, wich, for instance,
#' may contain data from two different periods, \code{joinAQuadtrees}
#' returns a new object of class AQuadtree with the common zones at the lowest
#' shared level, summarising the data from both AQuadtrees.
#' @importFrom methods new as
#' @importFrom stats weighted.mean
#' @importFrom sp SpatialPolygons Polygons Polygon CRS proj4string identicalCRS
#' SpatialPolygonsDataFrame spChFIDs
#' @importFrom dplyr summarise_at funs
#' @details
#' The function \code{joinAQuadtrees} creates a new AQuadtree object from two
#' given AQuadtree objects with data from the same area. The data of the
#' two given objects is summarised at the smallest possible cells shared by
#' both given objects. All the input data is maintained on the new created
#' object. This function can be used to join the different attributes from
#' the same area or information from different periods.
#' @param qt1 object of class "AQuadtree" containing the first object to join.
#' @param qt2 object of class "AQuadtree" containing the second object to join.
#' @param withResiduals logical indicating if \code{residual} cells should be
#' maintained (TRUE) or not (FALSE, default).
#' @param mean.1 character or character vector specifying the columns in the
#' first AQuadtree for which a weighted mean should be computed. By default
#' the aggregation function used is \code{sum}.
#' @param mean.2 character or character vector specifying the columns in the
#' second AQuadtree for which a weighted mean should be computed. By default
#' the aggregation function used is \code{sum}.
#' @return AQuadtree with the information of the two given objects summarised
#' at the lowest level shared by both objects.
#' @export
#' @examples
#' data("CharlestonPop")
#' CharlestonPop.AQT_1<-AQuadtree(CharlestonPop, layers = 2)
#' CharlestonPop.AQT_2<-AQuadtree(CharlestonPop, colnames="sex",
#' thresholdField=c("sex.male", "sex.female"), layers = 2)
#' CharlestonPop.AQT_1_2<-joinAQuadtrees(CharlestonPop.AQT_1, CharlestonPop.AQT_2)
#'
#' \dontrun{
#' ## non AQuadtree objects
#' joinAQuadtrees(CharlestonPop, CharlestonCensusTracts)
#' }
joinAQuadtrees<-function(qt1, qt2, withResiduals=FALSE, mean.1=NULL, mean.2=NULL){
  .=NULL  # silence R CMD check NOTE for dplyr's '.' placeholder
if (missing(qt1)) stop("argument 'qt1' is missing, with no default", call.="FALSE")
if (missing(qt2)) stop("argument 'qt2' is missing, with no default", call.="FALSE")
stopifnot(class(qt1)=="AQuadtree", class(qt2)=="AQuadtree", class(withResiduals)=="logical")
stopifnot(is.projected(qt1), is.projected(qt2), identicalCRS(qt1, qt2))
if (qt1@dim != qt2@dim) stop("initial dimensions of 'qt1' and 'qt2' differ", call.="FALSE")
if (!(all(mean.1 %in% qt1@colnames))) {
stop(sprintf("some 'mean.1' vars (%s) not in object names (%s)", paste(mean.1[!(mean.1 %in% qt1@colnames)] , collapse=", "), paste(qt1@colnames, collapse=", ")), call.="FALSE")
}
if (!(all(mean.2 %in% qt2@colnames))) {
stop(sprintf("some 'mean.2' vars (%s) not in object names (%s)", paste(mean.2[!(mean.2 %in% qt2@colnames)] , collapse=", "), paste(qt2@colnames, collapse=", ")), call.="FALSE")
}
if (length(intersect(qt1$cellCode, qt2$cellCode))==0) stop("no common cells found", call.="FALSE")
names(qt1)<-sapply(names(qt1), function(n){if (n %in% qt1@colnames) paste0(n,".1") else n}, simplify = TRUE, USE.NAMES = FALSE)
qt1@colnames<-paste0(qt1@colnames,".1")
names(qt2)<-sapply(names(qt2), function(n){if (n %in% qt2@colnames) paste0(n,".2") else n}, simplify = TRUE, USE.NAMES = FALSE)
qt2@colnames<-paste0(qt2@colnames,".2")
if (!is.null(mean.1)) {
mean.1<-paste0(mean.1, ".1")
sum.1<-qt1@colnames[!(qt1@colnames %in% mean.1)]
} else sum.1<-qt1@colnames
if (!is.null(mean.2)) {
mean.2<-paste0(mean.2, ".2")
sum.2<-qt2@colnames[!(qt2@colnames %in% mean.2)]
} else sum.2<-qt2@colnames
qt.act<-SpatialPolygonsDataFrame(SpatialPolygons(list()), data=data.frame())
slot(qt.act, "proj4string") <- slot(qt1, "proj4string")
layerNumber<-max(qt1@layers, qt2@layers)
cellCodes<-unique(union(qt1$cellCode, qt2$cellCode))
for (mainCell in cellCodes) {
qt1.act<-as(qt1[qt1$cellCode==mainCell,], "SpatialPolygonsDataFrame")
qt2.act<-as(qt2[qt2$cellCode==mainCell,], "SpatialPolygonsDataFrame")
if (length(qt1.act)==0 || length(qt2.act)==0) next
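    # if one object keeps the whole main cell undivided (cellNum==""), summarise the other object's cells into it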
if (length(qt1.act[qt1.act$cellNum=="" & !qt1.act$residual,])>0) {
currentCell.sp<-as(qt1.act[qt1.act$cellNum=="" & !qt1.act$residual,], "SpatialPolygons")
df1<-qt1.act[qt1.act$cellNum=="" & !qt1.act$residual,]@data
if (is.null(mean.2))
df2<-summarise_at(qt2.act@data, sum.2, funs(sum))
else
df2<-cbind(summarise_at(qt2.act@data, sum.2, funs(sum)), summarise_at(qt2.act@data, mean.2, funs(weighted.mean(., w=qt2.act$total.2))))
qt.act<-rbind(qt.act, SpatialPolygonsDataFrame(currentCell.sp, data.frame(df1, df2, stringsAsFactors=FALSE), match.ID = FALSE))
next
} else if (length(qt2.act[qt2.act$cellNum=="" & !qt2.act$residual,])>0) {
currentCell.sp<-as(qt2.act[qt2.act$cellNum=="" & !qt2.act$residual,], "SpatialPolygons")
if (is.null(mean.1))
df1<-summarise_at(qt1.act@data, sum.1, funs(sum))
else
df1<-cbind(summarise_at(qt1.act@data, sum.1, funs(sum)), summarise_at(qt1.act@data, mean.1, funs(weighted.mean(., w=qt1.act$total.1))))
df2<-qt2.act[qt2.act$cellNum=="" & !qt2.act$residual,]@data
qt.act<-rbind(qt.act, SpatialPolygonsDataFrame(currentCell.sp, data.frame(df1, df2, stringsAsFactors=FALSE), match.ID = FALSE))
next
}
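    # keep residual cells when requested, filling the attributes missing on one side with zeros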
if (withResiduals) {
if (length(qt1.act[qt1.act$residual,])>0) {
if(length(qt2.act[qt2.act$residual,])>0) {
qt.act<-rbind(qt.act,
SpatialPolygonsDataFrame(
as(qt1.act[qt1.act$residual,], "SpatialPolygons"),
data.frame(qt1.act[qt1.act$residual,]@data, qt2.act[qt2.act$residual,qt2@colnames], stringsAsFactors=FALSE), match.ID = FALSE))
} else {
auxdf<-as.data.frame(t(rep(0, length(qt2@colnames))))
names(auxdf)<-qt2@colnames
qt.act<-rbind(qt.act,
SpatialPolygonsDataFrame(
as(qt1.act[qt1.act$residual,], "SpatialPolygons"),
data.frame(qt1.act[qt1.act$residual,]@data, auxdf, stringsAsFactors=FALSE), match.ID = FALSE))
}
} else if (length(qt2.act[qt2.act$residual,])>0) {
auxdf<-as.data.frame(t(rep(0, length(qt1@colnames))))
names(auxdf)<-qt1@colnames
qt.act<-rbind(qt.act,
SpatialPolygonsDataFrame(
as(qt2.act[qt2.act$residual,], "SpatialPolygons"),
data.frame(auxdf, qt2.act[qt2.act$residual,]@data, stringsAsFactors=FALSE), match.ID = FALSE))
}
}
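    # walk down the levels: each cell of one object absorbs (summarises) the other object's deeper sub-cells sharing its cellNum prefix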
pos<-0
for (i in 2:layerNumber) {
pos<-pos+ceiling(log10(2^(2*(i-1))))
for (currentCell in qt1.act[qt1.act$level==i,]$cellNum) {
currentCell.sp<-as(qt1.act[qt1.act$cellNum==currentCell,], "SpatialPolygons")
df2<-qt2.act@data[substr(qt2.act$cellNum,1,pos)==currentCell, ]
if (nrow(df2)==0) next
df1<-qt1.act[qt1.act$cellNum==currentCell,]@data
if (is.null(mean.2))
df2<-summarise_at(df2, sum.2, funs(sum))
else
df2<-cbind(
summarise_at(df2, sum.2, funs(sum)),
summarise_at(df2, mean.2, funs(weighted.mean(., w=df2[, 'total.2']))))
qt.act<-rbind(qt.act, SpatialPolygonsDataFrame(currentCell.sp, data.frame(df1, df2, stringsAsFactors=FALSE), match.ID = FALSE))
}
qt2.act<-qt2.act[!(substr(qt2.act$cellNum, 1, pos)%in%qt1.act[qt1.act$level==i, ]$cellNum),]
qt1.act<-qt1.act[qt1.act$level!=i, ]
for (currentCell in qt2.act[qt2.act$level==i,]$cellNum) {
currentCell.sp<-as(qt2.act[qt2.act$cellNum==currentCell,], "SpatialPolygons")
df1<-qt1.act@data[substr(qt1.act$cellNum,1,pos)==currentCell, ]
if (nrow(df1)==0) next
df2<-qt2.act[qt2.act$cellNum==currentCell,]@data
if (is.null(mean.1))
df1<-summarise_at(df1, sum.1, funs(sum))
else
df1<-cbind(
summarise_at(df1, sum.1, funs(sum)),
summarise_at(df1, mean.1, funs(weighted.mean(., w=df1[,'total.1']))))
qt.act<-rbind(qt.act, SpatialPolygonsDataFrame(currentCell.sp, data.frame(df1, df2, stringsAsFactors=FALSE), match.ID = FALSE))
}
qt1.act<-qt1.act[!(substr(qt1.act$cellNum, 1, pos)%in%qt2.act[qt2.act$level==i, ]$cellNum), ]
qt2.act<-qt2.act[qt2.act$level!=i, ]
}
}
qt.act<-spChFIDs(qt.act, as.character(1:length(qt.act)))
qt.act@data<-qt.act@data[c("cellCode", "cellNum", "level", "residual", qt1@colnames, qt2@colnames)]
return(
new("AQuadtree",
qt.act,
dim=qt1@dim,
layers=layerNumber,
threshold=as.numeric(NA),
thresholdField = as.character(NA),
colnames= c(qt1@colnames, qt2@colnames),
loss=as.numeric(NA)
)
)
}
| /scratch/gouwar.j/cran-all/cranData/AQuadtree/R/joinAQuadtrees.R |
#' @title Add SpatialPoints to an AQuadtree object.
#' @description
#' Given an object of class AQuadtree and an object of class SpatialPoints or
#' SpatialPointsDataFrame for the same area, \code{pointsToAQuadtree}
#' returns a new object of class AQuadtree aggregating the data from the points
#' to the cells where each point falls.
#' @importFrom methods new as
#' @importFrom sp CRS proj4string identicalCRS
#' @importFrom dplyr %>% summarise_ group_by summarise_at funs
#' @details
#' The function \code{pointsToAQuadtree} returns a new AQuadtree object with
#' the input set of points aggregated to the input AQuadtree object. The function
#' creates a “p.total” attribute to compute the total
#' number of points aggregated to each cell of the input AQuadtree.
#' If points is an object of class SpatialPointsDataFrame, the function
#' summarises numeric attributes in the dataframe using the \code{mean}
#' function, and expands factor attributes, creating a new attribute with the
#' count of points for each level of the factor. The attributes added to the
#' resulting AQuadtree object are prefixed with “p.”.
#' @param qt object of class "AQuadtree".
#' @param points object of class "SpatialPoints" or "SpatialPointsDataFrame".
#' @return AQuadtree with the information of the given set of points aggregated
#' at each corresponding cell of the given AQuadtree.
#' @export
#' @examples
#' data("BarcelonaPop")
#' Barcelona.QT<-AQuadtree(BarcelonaPop)
#' BcnWomen75yPop<-BarcelonaPop[BarcelonaPop$sex=='woman' & BarcelonaPop$age>=75, 'age']
#' Barcelona.extended.QT<-pointsToAQuadtree(Barcelona.QT, BcnWomen75yPop)
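#' ## point-derived attributes are prefixed with "p." (e.g. "p.total")
#' names(Barcelona.extended.QT)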
#'
#' \dontrun{
#' ## not an AQuadtree object
#' pointsToAQuadtree(CharlestonCensusTracts, CharlestonPop)
#'
#' ## spatial object not projected
#' sp.not.projected<-spTransform(CharlestonPop,CRS("+proj=longlat +datum=NAD27"))
#' is.projected(sp.not.projected)
#' pointsToAQuadtree(AQuadtree(CharlestonPop), sp.not.projected)
#'
#' }
pointsToAQuadtree<-function(qt, points){
cellCode<-NULL
if (missing(qt)) stop("argument 'qt' is missing, with no default", call.="FALSE")
stopifnot(class(qt)=="AQuadtree")
if (length(qt)==0) stop("argument 'qt' has length 0", call.="FALSE")
if (missing(points)) stop("argument 'points' is missing, with no default", call.="FALSE")
if (length(points)==0) stop("argument 'points' has length 0", call.="FALSE")
if (!inherits(points, "SpatialPoints")) stop("argument 'points' is not a 'SpatialPoints' or 'SpatialPointsDataFrame' object", call.="FALSE")
stopifnot(is.projected(qt), is.projected(points), identicalCRS(qt, points))
  if (any(bbox(points)<0)) stop("negative bbox not permitted, use a different projection", call.="FALSE")
#create summarising expression
summariseExpr<-c("total"="n()")
colnames<-names(points)
if (!is.null(colnames)) {
# treat possible factors within colnames
summariseCols<-
unlist(mapply(function(col){
if (is.numeric(points@data[,col])){
return(setNames(paste0('mean', "(", col, ")"), col))
} else if (is.factor(points@data[,col])){
lev<-levels(points@data[,col])
newCols<-paste0(col, ".", lev)
if (length(lev)>5) stop(sprintf("factor column %s has more than 5 levels", col), call.="FALSE")
# decompose factor creating a new column for factor level
points@data[newCols]<<-1*(points@data[rep(col, length(lev))]==as.list(lev))
colnames<<-c(colnames, newCols) # add new created columns to colnames
return (setNames(paste0('sum', "(", newCols, ")"), newCols))
} else return(NULL) # only aggregate factor and numeric attributes
}, colnames, USE.NAMES = FALSE, SIMPLIFY = FALSE))
summariseExpr<-c(summariseExpr, summariseCols)
summariseExpr<-summariseExpr[!duplicated(summariseExpr)]
names(summariseExpr)<-paste0('p.', names(summariseExpr))
}
maxLayers<-max(qt$level)
points_ID<-as.data.frame(spatialPointsCellCodes(points, dim=slot(qt,'dim'),layers=maxLayers))
points.agg <- data.frame(
'cellCode' = character(),
'cellNum' = character()
)
len <- 0
for (i in 2:maxLayers) {
len <- len + ceiling(log10(2^(2*(i-1))))
aux <- points_ID %>%
group_by(cellCode, 'cellNum'=substr(points_ID$cellNum, 1, len)) %>%
summarise_(.dots=summariseExpr)
aux<-do.call(data.frame,aux)
aux<-aux[paste(aux$cellCode,aux$cellNum) %in% paste(qt$cellCode,qt$cellNum),]
points_ID <- points_ID[!paste(points_ID$cellCode, substr(points_ID$cellNum,1,len)) %in% paste(aux$cellCode,aux$cellNum),]
points.agg<-rbind(points.agg,aux)
}
  # the remaining points are aggregated to the first-level cells (residual or not)
aux <- points_ID %>%
group_by(cellCode, 'cellNum'=substr(points_ID$cellNum, 1, 0)) %>%
summarise_(.dots=summariseExpr)
aux<-do.call(data.frame,aux)
points.agg<-rbind(points.agg,aux)
return (
new("AQuadtree",
sp::merge(as(qt, "SpatialPolygonsDataFrame"), points.agg, by=c("cellCode","cellNum"), sort=F),
dim = qt@dim,
layers = qt@layers,
threshold = as.numeric(NA),
thresholdField = as.character(NA),
colnames = c(qt@colnames, names(points.agg)[!names(points.agg) %in% c('cellCode', 'cellNum')]),
loss = as.numeric(NA)
)
)
}
| /scratch/gouwar.j/cran-all/cranData/AQuadtree/R/pointsToAQuadtree.R |
#' @title Add cell identifiers to SpatialPoints as in INSPIRE Specification
#' @description
#' \code{spatialPointsCellCodes} returns a SpatialPointsDataFrame with identifiers
#' (CellCode and CellNum) for each point as in INSPIRE Specification on Geographical
#' Grid Systems.
#' @details
#' INSPIRE coding system for point identifiers\cr
#' The objective of the coding system is to generate unique identifiers for each
#' point, for any of the recommended resolutions.\cr
#' The cellCode is a text string, composed of cell size and cell coordinates.
#' Cell codes start with a cell size prefix. The cell size is denoted in meters (m)
#' for cell sizes below 1000 m and kilometres (km) for cell sizes from 1000 m and
#' above.\cr
#' Examples: a 100 meter cell has an identifier starting with “100m”, the
#' identifier of a 10000 meter cell starts with “10km”.\cr
#' The coordinate part of the cell code reflects the distance of the lower left
#' grid cell corner from the false origin of the CRS. In order to reduce the
#' length of the string, Easting (E) and Northing (N) values are divided by
#' 10^n (n is the number of zeros in the cell size value). Example for a cell
#' size of 10000 meters: the number of zeros in the cell size value is 4.
#' The resulting divider for Easting and Northing values is 10^4 = 10000.\cr
#' The cellNum is a sequence of concatenated integers identifying all the
#' hierarchical partitions of the main cell in which the point resides.
#' For instance, the cellNum of the top right cell would be 416 (fourth
#' in first partition, sixteenth in second partition)\cr
#' \if{html}{\figure{CellNum.jpg}{options: width=200 alt="Hierarchical CellNums"}}
#' \if{latex}{\figure{CellNum.jpg}{options: width=4cm}}
#' The input object must be projected and units should be in 'meters'
#' because the system uses the INSPIRE coding system.
#'
#' @seealso
#' \itemize{
#' \item{
#' D2.8.I.2 INSPIRE Specification on Geographical Grid Systems – Guidelines
#' \url{https://inspire.ec.europa.eu/documents/Data_Specifications/INSPIRE_Specification_GGS_v3.0.1.pdf}
#' }
#' \item{
#' EEA reference grid dataset
#' \url{https://data.europa.eu/euodp/data/dataset/data_eea-reference-grids-2}
#' }
#' }
#'
#' @param points object of class "SpatialPoints" or "SpatialPointsDataFrame".
#' @param dim a single integer specifying the initial cell sizes, defaults to
#' 1km.
#' @param layers a single integer specifying the number of divisions of the
#' initial cells, defaults to 1.
#' @return A "SpatialPointsDataFrame" containing all the points given. For
#' each point a cellCode and cellNum identify the cell to which the point belongs.\cr
#' CellCode is a text string, composed of cell size and cell coordinates.
#' Cell codes start with a cell size prefix. The cell size is denoted in meters (m)
#' for cell sizes below 1000 m and kilometres (km) for cell sizes from 1000 m and
#' above.\cr
#' The cellNum is a sequence identifying the different partitions of the main
#' cell in which the point resides.
#' @export
#' @examples
#' data("BarcelonaPop")
#' BarcelonaPop.IDs<-spatialPointsCellCodes(BarcelonaPop)
#' BarcelonaPop.IDs.10km<-spatialPointsCellCodes(BarcelonaPop, 10000, 3)
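#' ## each point gets an INSPIRE-style cellCode plus a hierarchical cellNum
#' head(BarcelonaPop.IDs.10km@data[, c("cellCode", "cellNum")])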
#'
#' \dontrun{
#' ## spatial object not projected
#' sp.not.projected<-spTransform(CharlestonPop,CRS("+proj=longlat +datum=NAD27"))
#' is.projected(sp.not.projected)
#' spatialPointsCellCodes(sp.not.projected)
#' }
spatialPointsCellCodes <- function(points, dim=1000, layers=1){
## aux function to determine the number of trailing zeros
trailingZeros<-function(x){
i<-0
while (x %% 10 == 0 ) {
x<-x%/%10
i<-i+1
}
return(i)
}
  if (missing(points)) stop("argument 'points' is missing, with no default", call.="FALSE")
if (length(points)==0) stop("argument 'points' has length 0", call.="FALSE")
#stopifnot(require("sp"))
stopifnot(dim>0)
if (!inherits(points, "SpatialPoints")) stop("argument 'points' is not a 'SpatialPoints' or 'SpatialPointsDataFrame' object", call.="FALSE")
if (!is.projected(points)) stop("spatial data must be projected", call.="FALSE")
  # build the cell size prefix of the cell code, e.g. "1km" or "100m"
sizePrefix<-ifelse(dim>=1000, paste0(dim/1000, "km"), paste0(dim, "m"))
# add x, y cell origin to each point
points$CellOrigin.x<-points@coords[,1]%/%dim*dim
points$CellOrigin.y<-points@coords[,2]%/%dim*dim
# calculate string CellCode of the form "1kmNyyyyExxxx"
zerosToRemove<-as.integer(10^trailingZeros(dim))
cellCodeE<-points$CellOrigin.x/zerosToRemove
cellCodeN<-points$CellOrigin.y/zerosToRemove
lenCellCode<-nchar(max(cellCodeE, cellCodeN))
points$cellCode<-paste0(sizePrefix, "N",formatC(cellCodeN, width=lenCellCode, flag=0, mode = 'integer'), "E",formatC(cellCodeE, width=lenCellCode, flag=0, mode = 'integer'))
points$cellNum<-''
if (layers>1) {
# cellNumPos stores the number of digits for each subpart of the cell codes
# cellNumPosStart<-c( 1,14,15,17,19,22)
# cellNumPosStop <-c(13,14,16,18,21,25)
cellNumPosStart<-c(1)
    cellNumPosStop<-c(nchar(points$cellCode[1]))
# calculate CellNum of the form "nmmooopppp..." with n 1:4 mm 01:16 ooo 001:256 pppp 0001:1024 ...
for (i in 2:layers){
zeros<-ceiling(log10(2^(2*(i-1))))
cellNumPosStop[i]<-cellNumPosStop[i-1]+zeros
cellNumPosStart[i]<-cellNumPosStop[i-1]+1
size<-dim/2^(i-1)
      points$cellNum<-paste0(points$cellNum, formatC((points@coords[,1]-points$CellOrigin.x)%/%size + (2^(i-1))*(points@coords[,2]-points$CellOrigin.y)%/%size+1, width=zeros, flag=0, mode = 'integer'))
}
}
points$CellOrigin.x<-NULL
points$CellOrigin.y<-NULL
return(points)
}
| /scratch/gouwar.j/cran-all/cranData/AQuadtree/R/spatialPointsCellCodes.R |
#' @title Split CellNum sequence into a vector
#' @description
#' \code{splitCellNum} returns a vector decomposing the given CellNum into the
#' sequence of the different cell numbers for each level.
#' @details
#' CellNum is a character string with the concatenated sequence of hierarchical cell positions
#' inside a main cell. \code{splitCellNum} splits that sequence into a vector.
#' For instance, the CellNum of the top right cell would be 416 (fourth
#' in first partition, sixteenth in second partition)\cr
#' \if{html}{\figure{CellNum.jpg}{options: width=200 alt="Hierarchical CellNums"}}
#' \if{latex}{\figure{CellNum.jpg}{options: width=4cm}}
#' @seealso
#' \itemize{
#' \item{
#' D2.8.I.2 INSPIRE Specification on Geographical Grid Systems – Guidelines
#' \url{https://inspire.ec.europa.eu/documents/Data_Specifications/INSPIRE_Specification_GGS_v3.0.1.pdf}
#' }
#' \item{
#' EEA reference grid dataset
#' \url{https://data.europa.eu/euodp/data/dataset/data_eea-reference-grids-2}
#' }
#' }
#'
#' @param x a character or character vector containing a sequence of cell numbers or
#' an R object with a field named 'cellNum'
#' @return integer vector or list of integer vectors with the CellNum
#' sequences split
#' @export
#' @examples
#' data("CharlestonPop")
#' CharlestonPop.IDs<-spatialPointsCellCodes(CharlestonPop, layers=2)
#' splitCellNum(CharlestonPop.IDs)
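#' splitCellNum("416")  # c(4, 16): fourth cell in the first partition, sixteenth in the second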
#'
splitCellNum<-function(x){
if ('cellNum' %in% names(x)) {
x<-x$cellNum
}
if (length(x)>1){
sapply(x, function(s){
IDs<-c()
t<-1
while (nchar(s) > 0) {
pos<-ceiling(log10(2^(2*t)))
IDs<-c(IDs, as.integer(substr(s, 1, pos )))
s<-substr(s, pos + 1 , nchar(s))
t<-t+1
}
return(IDs)
}, simplify = TRUE, USE.NAMES=FALSE)
} else {
IDs<-c()
t<-1
while (nchar(x) > 0) {
pos<-ceiling(log10(2^(2*t)))
IDs<-c(IDs, as.integer(substr(x, 1, pos )))
x<-substr(x, pos + 1 , nchar(x))
t<-t+1
}
return(IDs)
}
}
| /scratch/gouwar.j/cran-all/cranData/AQuadtree/R/splitCellNum.R |
## ----setup, include = FALSE---------------------------------------------------
options(width=80)
knitr::opts_chunk$set(
collapse = TRUE,
warning=FALSE,
message=FALSE,
fig.show='hold',
tidy.opts=list(width.cutoff=80),
tidy=TRUE,
comment = "##"
)
library(knitr)
hook_output = knit_hooks$get('output')
knit_hooks$set(output = function(x, options) {
# this hook is used only when the linewidth option is not NULL
if (!is.null(n <- options$linewidth)) {
x = knitr:::split_lines(x)
# any lines wider than n should be wrapped
x = unlist(sapply(x, function(x){
if (nchar(x) > n) {
paste(strwrap(x, width = n), collapse = paste0('\n', options$comment, ' '))
} else {
x
}
}, simplify = T, USE.NAMES = FALSE))
}
hook_output(x, options)
})
library(AQuadtree)
## ----echo=FALSE, fig.align='center', out.width="60%", fig.cap="\\label{fig:Figure 1}Three level quadtree splitting cell numbering example. Initial cell on the (left); first quadtree subdivision (center); second quadtree subdivision (right)", fig.show='hold'----
knitr::include_graphics('images/Fig1.png')
## ----echo=FALSE, fig.align='center', out.width="25%", fig.cap="\\label{fig:Figure 2}Set of spatial points (a) and the corresponding 62.5m grid with no threshold restrictions (b) (the numbers indicate the points aggregated in each cell).", fig.subcap=rep("", 4), fig.show='hold'----
knitr::include_graphics(c('images/Fig2a.png','images/Fig2b.png'))
## ----echo=FALSE, fig.align='center', out.width="24%", fig.cap="\\label{fig:Figure 3}Disaggregation examples with threshold value 17. No disaggregation and no loss (a); disaggregation with suppression of 4 points (b) ; more disaggregation with suppression of 12 points (c); maximum disaggregation with suppression of 29 points (d).", fig.subcap=rep("", 4), fig.show='hold'----
knitr::include_graphics(c('images/Fig3a.png','images/Fig3b.png','images/Fig3c.png','images/Fig3d.png'))
## ----echo=FALSE, fig.align='center', out.width="28%", fig.cap="\\label{fig:Figure 4}Example of a residual cell.", fig.show='hold'----
knitr::include_graphics('images/Fig4.png')
## -----------------------------------------------------------------------------
example.QT<-AQuadtree(CharlestonPop)
class(example.QT)
## ----echo=2:4, fig.align='center', out.width="40%", fig.cap="AQuadtree plot and spplot"----
oldpar<-par(mar = c(0,0,0,0))
bcn.QT<-AQuadtree(BarcelonaPop)
plot(bcn.QT)
spplot(bcn.QT, by.density=TRUE)
par(oldpar)
## ---- linewidth=90------------------------------------------------------------
charleston.QT<-AQuadtree(CharlestonPop, dim = 10000, layers = 4)
summary(charleston.QT)
## ---- linewidth=90------------------------------------------------------------
class(BarcelonaPop$sex)
levels(BarcelonaPop$sex)
bcn.QT<-AQuadtree(BarcelonaPop, colnames = names(BarcelonaPop), funs = c('mean', 'sum'))
summary(bcn.QT)
## ---- linewidth=90------------------------------------------------------------
bcn.QT<-AQuadtree(BarcelonaPop, colnames = c('age','sex'),
funs = c('mean', 'sum'), threshold=17,
thresholdField=c("sex.man", "sex.woman"))
summary(bcn.QT)
## ----echo=2:5, fig.align='center', out.width="40%", fig.cap="\\label{fig:Figure 6}Examples of the effect of the ineq.threshold parameter."----
oldpar<-par(mar = c(0,0,0,0))
bcn.QT <- AQuadtree(BarcelonaPop, threshold = 5, ineq.threshold = 0.01)
plot(bcn.QT)
bcn.QT <- AQuadtree(BarcelonaPop, threshold = 5, ineq.threshold = 0.5)
plot(bcn.QT)
par(oldpar)
## ---- linewidth=90------------------------------------------------------------
bcn.QT<-AQuadtree(BarcelonaPop)
slotNames(bcn.QT)
## ---- linewidth=90------------------------------------------------------------
names(bcn.QT)
head(bcn.QT)
## ---- linewidth=90------------------------------------------------------------
data("BarcelonaPop", package = "AQuadtree")
summary(BarcelonaPop)
## ---- linewidth=90------------------------------------------------------------
data("CharlestonPop", package = "AQuadtree")
summary(CharlestonPop)
## -----------------------------------------------------------------------------
devtools::session_info("AQuadtree")
| /scratch/gouwar.j/cran-all/cranData/AQuadtree/inst/doc/AQuadtreeUse.R |
---
title: "Quadtree anonymization of point data"
author: "Raymond Lagonigro, Ramon Oller, Joan Carles Martori"
date: '`r Sys.Date()`'
output:
pdf_document:
fig_caption: yes
fig_crop: no
fig_width: 5
number_sections: yes
toc: yes
geometry: left=2.54cm,right=2.54cm,top=2.54cm,bottom=2.54cm
header-includes: \usepackage{subfig}
classoption: a4paper
bibliography: References.bib
vignette: >
%\VignetteIndexEntry{Quadtree anonymization of point data}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
options(width=80)
knitr::opts_chunk$set(
collapse = TRUE,
warning=FALSE,
message=FALSE,
fig.show='hold',
tidy.opts=list(width.cutoff=80),
tidy=TRUE,
comment = "##"
)
library(knitr)
hook_output = knit_hooks$get('output')
knit_hooks$set(output = function(x, options) {
# this hook is used only when the linewidth option is not NULL
if (!is.null(n <- options$linewidth)) {
x = knitr:::split_lines(x)
# any lines wider than n should be wrapped
x = unlist(sapply(x, function(x){
if (nchar(x) > n) {
paste(strwrap(x, width = n), collapse = paste0('\n', options$comment, ' '))
} else {
x
}
}, simplify = T, USE.NAMES = FALSE))
}
hook_output(x, options)
})
library(AQuadtree)
```
# Introduction
The AQuadtree package provides an automatic aggregation tool to anonymise point data. The proposed framework seeks data accuracy at the smallest possible areas while preventing the disclosure of individual information. Aggregation and local suppression of point data is performed using a methodology based on hierarchical geographic data structures. The final result is a varying size grid adapted to local area population densities, as described in @Lagonigro2017.
The grid is created following the guidelines for grid datasets of the GEOSTAT project [@GEOSTAT1B2014] and the INSPIRE grid coding system is adopted as defined in the INSPIRE Data specifications [@INSPIRE2010]. Geospatial specifications use the European Terrestrial Reference System 89, Lambert Azimuthal Equal Area (ETRS89-LAEA) projection [@Annoni2003], although other Coordinate Reference Systems (CRS) and projections can also be used with the package. In the definition of the grid dataset, each cell is identified by a code composed of the cell's size and the coordinates of the lower left cell corner in the ETRS89-LAEA system. The cell's size is denoted in meters (“m”) for cells' sizes up to 1000 meters, or kilometers (“km”) for cells' sizes from 1000 meters and above. To reduce the length of the string, values for northing and easting are divided by 10^n (where “n” is the number of zeros in the cell size value measured in meters).
> The cell code “1kmN2599E4695” identifies the 1km grid cell with coordinates of the lower left corner: Y=2599000m, X=4695000m.
The aggregation algorithm implemented in the package builds an initial regular grid of a given cell size, identifying each cell with the corresponding cell code. Each initial cell is recursively subdivided in quadrants where each new cell is assigned a second identifier containing a sequence of numbers to indicate the position of the cell in the disaggregation scheme. For instance, the sequence identifier corresponding to the right top cell in the right image in Figure \ref{fig:Figure 1} would be 416, i.e. fourth cell in the first division, and sixteenth cell in the second division.
```{r echo=FALSE, fig.align='center', out.width="60%", fig.cap="\\label{fig:Figure 1}Three level quadtree splitting cell numbering example. Initial cell on the (left); first quadtree subdivision (center); second quadtree subdivision (right)", fig.show='hold'}
knitr::include_graphics('images/Fig1.png')
```
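The exported helper function `splitCellNum` decomposes such a sequence identifier into its per-level positions; a minimal sketch (not evaluated when building this vignette):
```{r, eval=FALSE}
## sequence identifier of the top right cell in Figure 1:
## fourth cell in the first division, sixteenth in the second
splitCellNum("416")
## [1]  4 16
```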
To ensure data privacy, a cell is only split if all the resulting subdivisions satisfy the threshold restriction on the number of points. In cases of very irregular point patterns, this restriction results in less accuracy in the cell resolution. For instance, Figure \ref{fig:Figure 2}a presents a pattern of 932 points unevenly distributed on a 1km cell and Figure \ref{fig:Figure 2}b shows the corresponding grid of 62.5m cells with no threshold restrictions (the total number of points aggregated in each cell is shown).
```{r echo=FALSE, fig.align='center', out.width="25%", fig.cap="\\label{fig:Figure 2}Set of spatial points (a) and the corresponding 62.5m grid with no threshold restrictions (b) (the numbers indicate the points aggregated in each cell).", fig.subcap=rep("", 4), fig.show='hold'}
knitr::include_graphics(c('images/Fig2a.png','images/Fig2b.png'))
```
If we define an anonymity threshold of 17, the cell in Figure \ref{fig:Figure 2}a cannot be subdivided because one of the four resulting quadrants contains only 4 points. The privacy mechanism aggregates all the points, as presented in Figure \ref{fig:Figure 3}a, and obscures an irregular spatial distribution. The AQuadtree algorithm contemplates the suppression of some points before continuing the disaggregation. For instance, suppressing the 4 points in the top right quadrant of Figure \ref{fig:Figure 2}b results in the disaggregation shown in Figure \ref{fig:Figure 3}b, which is clearly much more faithful to the underlying spatial distribution. Moreover, the elimination of more data points would lead to further disaggregation (Figure \ref{fig:Figure 3}c and Figure \ref{fig:Figure 3}d).
```{r echo=FALSE, fig.align='center', out.width="24%", fig.cap="\\label{fig:Figure 3}Disaggregation examples with threshold value 17. No disaggregation and no loss (a); disaggregation with suppression of 4 points (b) ; more disaggregation with suppression of 12 points (c); maximum disaggregation with suppression of 29 points (d).", fig.subcap=rep("", 4), fig.show='hold'}
knitr::include_graphics(c('images/Fig3a.png','images/Fig3b.png','images/Fig3c.png','images/Fig3d.png'))
```
In order to balance information loss and resolution accuracy in the process of splitting a cell, the method computes the Theil inequality measure [@theil1972statistical] for the number of points in the possible quadrants as well as the percentage of points needed to be suppressed to force the division. In those cases where the anonymity threshold value prevents disaggregation, high values on the inequality measure may suggest the need for further subdivision, while high values on the loss rate may suggest to stop this subdivision. The algorithm uses default limits for both measures: 0.25 and 0.4, respectively (both values can be defined between 0 and 1). Thus, if there exists any sub-cell with a number of points lower than the anonymity threshold and the inequality measure is higher than 0.25, then the disaggregation process continues by suppressing those points as long as the loss rate is lower than 0.4. Hence, following the example in Figure \ref{fig:Figure 2}, the default disaggregation produced by the method would be the one shown in Figure \ref{fig:Figure 3}b.
All the points suppressed during the process are aggregated in a cell with the initial dimension so their information does not disappear. This cell is marked as a residual cell. Following the example in Figure \ref{fig:Figure 2}, if the number of suppressed points overcomes the anonymity threshold, as, for instance, in Figure \ref{fig:Figure 3}d, the 29 suppressed points are aggregated in a cell of the initial given dimension, which will be marked as a residual cell (see Figure \ref{fig:Figure 4}).
```{r echo=FALSE, fig.align='center', out.width="28%", fig.cap="\\label{fig:Figure 4}Example of a residual cell.", fig.show='hold'}
knitr::include_graphics('images/Fig4.png')
```
# The AQuadtree Class
An AQuadtree class object is a spatial dataset representing a varying size grid and is created performing an aggregation of a given set of points considering a minimum threshold for the number of points in each cell. The AQuadtree main function of the package creates the AQuadtree object from _`SpatialPoints`_ or _`SpatialPointsDataFrame`_ objects.
```{r}
example.QT<-AQuadtree(CharlestonPop)
class(example.QT)
```
The AQuadtree class proposes a collection of methods to manage the generated objects and overrides the generic methods _`show`_, _`print`_, _`summary`_ and _`[`_ (subsetting) for the AQuadtree signature. The _`plot`_ method overrides the generic function for plotting R objects with an extra parameter to specify if residual cells should be plotted. The _`spplot`_ function overrides the lattice-based plot method from the sp package [@Pebesma2005], with two extra parameters to control whether residual cells should be displayed, and whether attributes should be divided by the cell areas to make different zones comparable. The _`merge`_ method merges data from an input data frame to the given AQuadtree object. An AQuadtree object can be coerced to a SpatialPolygonsDataFrame using the generic method _`as`_ from the methods package.
```{r echo=2:4, fig.align='center', out.width="40%", fig.cap="AQuadtree plot and spplot"}
oldpar<-par(mar = c(0,0,0,0))
bcn.QT<-AQuadtree(BarcelonaPop)
plot(bcn.QT)
spplot(bcn.QT, by.density=TRUE)
par(oldpar)
```
## Controlling the grid resolution
The characteristics of the AQuadtree object can be adjusted with various parameters. First, the _`dim`_ parameter defines the size in meters of the highest scale cells and the _`layers`_ parameter indicates the number of disaggregation levels. Thus, specifying the parameters _`dim=10000`_ and _`layers=4`_ would create a grid with cells of sizes between 10km and 1.25km. The default values establish an initial size of 1000 meters and 3 levels of disaggregation.
```{r, linewidth=90}
charleston.QT<-AQuadtree(CharlestonPop, dim = 10000, layers = 4)
summary(charleston.QT)
```
## Summarizing data
The _`colnames`_ parameter specifies the columns on the original dataset to summarize in the resulting grid. An extra attribute _`total`_, containing the number of points in each cell, is automatically created and added to the dataframe. In the aggregation process, attributes specified in the _`colnames`_ parameter will be summarized using the _'sum'_ function. A list of alternative summarizing functions can be provided with the _`funs`_ parameter. If any attribute indicated in the _`colnames`_ parameter is a factor, the function creates a new attribute for each label of the factor. For instance, an attribute sex with two labels, _`man`_ and _`woman`_, would be deployed into the two attributes _`sex.man`_ and _`sex.woman`_.
```{r, linewidth=90 }
class(BarcelonaPop$sex)
levels(BarcelonaPop$sex)
bcn.QT<-AQuadtree(BarcelonaPop, colnames = names(BarcelonaPop), funs = c('mean', 'sum'))
summary(bcn.QT)
```
## Specifying a threshold and threshold fields
The package applies a default anonymity threshold value of 100 and it can be changed with the _`threshold`_ parameter. If nothing else is indicated, the threshold restriction is applied only to the total number of points aggregated in each cell (i.e. the _`total`_ attribute added to the resulting dataset). When some of the attributes include confidential information, the threshold restriction can be applied to various properties with the _`thresholdField`_ parameter, indicating the list of attributes from the resulting dataset that must satisfy that given threshold.
```{r, linewidth=90 }
bcn.QT<-AQuadtree(BarcelonaPop, colnames = c('age','sex'),
funs = c('mean', 'sum'), threshold=17,
thresholdField=c("sex.man", "sex.woman"))
summary(bcn.QT)
```
## Balancing information loss and accuracy
In order to control the disaggregation process, two more parameters set thresholds on the inequality and loss rates. The extra parameter _`ineq.threshold`_, a rate between 0 and 1, specifies a threshold to force disaggregation when there is high inequality between sub-cells. The Theil entropy measure as computed in the _`ineq`_ package [@zeileis2009package] is used to measure inequality for each cell. The _`ineq.threshold`_ parameter defaults to 0.25. Lower values in the _`ineq.threshold`_ produce grids with smaller cells (see Figure \ref{fig:Figure 6}).
```{r echo=2:5, fig.align='center', out.width="40%", fig.cap="\\label{fig:Figure 6}Examples of the effect of the ineq.threshold parameter."}
oldpar<-par(mar = c(0,0,0,0))
bcn.QT <- AQuadtree(BarcelonaPop, threshold = 5, ineq.threshold = 0.01)
plot(bcn.QT)
bcn.QT <- AQuadtree(BarcelonaPop, threshold = 5, ineq.threshold = 0.5)
plot(bcn.QT)
par(oldpar)
```
On the other hand, the parameter _`loss.threshold`_, also a rate between 0 and 1, indicates the rate of loss allowed before preventing further disaggregation of cells. A low value states that lower loss is preferred on the resulting grid, so less disaggregation is obtained.
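As a small sketch (not evaluated here), a strict loss limit can be combined with a low anonymity threshold; the `loss` slot of the result records how many points were suppressed:
```{r, eval=FALSE}
## a lower loss.threshold discards fewer points, so cells split less often
bcn.QT <- AQuadtree(BarcelonaPop, threshold = 5, loss.threshold = 0.1)
bcn.QT@loss
```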
## AQuadtree object structure
A call to the AQuadtree function will return an AQuadtree class object with six slots indicating the parameters used on the creation of the grid:
* _`dim`_: scale in meters of the highest level cells
* _`layers`_: number of subdivision levels
* _`colnames`_: attribute names summarized in the resulting grid
* _`threshold`_: the value used for anonymization
* _`thresholdField`_: attribute names to which the threshold restriction has been applied
* _`loss`_: number of points discarded during the process of disaggregation because of the threshold
```{r, linewidth=90}
bcn.QT<-AQuadtree(BarcelonaPop)
slotNames(bcn.QT)
```
The data slot contains a dataframe with the information comprised in each cell:
* _`total`_: number of points grouped in the cell.
* _`level`_: scale of disaggregation of the cell.
* _`residual`_: logical value indicating if the cell contains only residual points. Residual points are those that have been suppressed on the disaggregation process to get better accuracy, but can be grouped at the highest scale cell as it overcomes the given threshold.
* _`cellCode`_: cell's size and the coordinates of the lower left cell corner in the ETRS89-LAEA system at the highest aggregation level.
* _`cellNum`_: sequence of numbers indicating the position of the cell in the disaggregation scheme.
```{r, linewidth=90}
names(bcn.QT)
head(bcn.QT)
```
# Provided data
The package includes two _`SpatialPointsDataFrame`_ objects: _`BarcelonaPop`_ for the city of Barcelona (Spain) and _`CharlestonPop`_ for the Charleston, SC metropolitan area (USA). Both objects contain random point data with the distributions of real data acquired at census scale from different sources.
The package also provides two _`SpatialPolygons`_ objects with the spatial boundaries for each region. _`BarcelonaCensusTracts`_ and _`CharlestonCensusTracts`_ contain, respectively, the census tracts spatial limits for the city of Barcelona, and the census tracts spatial limits for the Charleston, SC metropolitan area.
_`BarcelonaPop`_ comprises 81,359 sample points in the city of Barcelona, Spain. The original information was obtained from the statistics department of the Ajuntament de Barcelona, providing population data at the census tract level for the year 2018 [@AjuntamentdeBarcelona.DepartamentdEstadistica2018]. The points were generated and distributed randomly in space, keeping the information at each census tract unchanged. To reduce the file size, only a sample of 7% of the points has been kept.
```{r, linewidth=90}
data("BarcelonaPop", package = "AQuadtree")
summary(BarcelonaPop)
```
In a similar way, the _`CharlestonPop`_ object, with 54,619 random sample points, was created using the information in the dataset Charleston1 from the 2000 Census Tract Data for the Charleston, SC metropolitan area (USA) [@GeodaDataandLab2019]. To reduce the file size, only a sample of 10% of the points has been kept.
```{r, linewidth=90}
data("CharlestonPop", package = "AQuadtree")
summary(CharlestonPop)
```
# Session info
Here is the output of session_info("AQuadtree") on the system on which this document was compiled:
```{r}
devtools::session_info("AQuadtree")
```
# References
| /scratch/gouwar.j/cran-all/cranData/AQuadtree/inst/doc/AQuadtreeUse.Rmd |
---
title: "Quadtree anonymization of point data"
author: "Raymond Lagonigro, Ramon Oller, Joan Carles Martori"
date: '`r Sys.Date()`'
output:
pdf_document:
fig_caption: yes
fig_crop: no
fig_width: 5
number_sections: yes
toc: yes
geometry: left=2.54cm,right=2.54cm,top=2.54cm,bottom=2.54cm
header-includes: \usepackage{subfig}
classoption: a4paper
bibliography: References.bib
vignette: >
%\VignetteIndexEntry{Quadtree anonymization of point data}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
options(width=80)
knitr::opts_chunk$set(
collapse = TRUE,
warning=FALSE,
message=FALSE,
fig.show='hold',
tidy.opts=list(width.cutoff=80),
tidy=TRUE,
comment = "##"
)
library(knitr)
hook_output = knit_hooks$get('output')
knit_hooks$set(output = function(x, options) {
# this hook is used only when the linewidth option is not NULL
if (!is.null(n <- options$linewidth)) {
x = knitr:::split_lines(x)
# any lines wider than n should be wrapped
x = unlist(sapply(x, function(x){
if (nchar(x) > n) {
paste(strwrap(x, width = n), collapse = paste0('\n', options$comment, ' '))
} else {
x
}
}, simplify = T, USE.NAMES = FALSE))
}
hook_output(x, options)
})
library(AQuadtree)
```
# Introduction
The AQuadtree package provides an automatic aggregation tool to anonymise point data. The proposed framework seeks data accuracy at the smallest possible areas while preventing the disclosure of individual information. Aggregation and local suppression of point data is performed using a methodology based on hierarchical geographic data structures. The final result is a varying size grid adapted to local area population densities, as described in @Lagonigro2017.
The grid is created following the guidelines for grid datasets of the GEOSTAT project [@GEOSTAT1B2014] and the INSPIRE grid coding system is adopted as defined in the INSPIRE Data specifications [@INSPIRE2010]. Geospatial specifications use the European Terrestrial Reference System 89, Lambert Azimuthal Equal Area (ETRS89-LAEA) projection [@Annoni2003], although other Coordinate Reference Systems (CRS) and projections can also be used with the package. In the definition of the grid dataset, each cell is identified by a code composed of the cell's size and the coordinates of the lower left cell corner in the ETRS89-LAEA system. The cell's size is denoted in meters (“m”) for cells' sizes up to 1000 meters, or kilometers (“km”) for cells' sizes from 1000 meters and above. To reduce the length of the string, values for northing and easting are divided by 10^n (where “n” is the number of zeros in the cell size value measured in meters).
> The cell code “1kmN2599E4695” identifies the 1km grid cell with coordinates of the lower left corner: Y=2599000m, X=4695000m.
The aggregation algorithm implemented in the package builds an initial regular grid of a given cell size, identifying each cell with the corresponding cell code. Each initial cell is recursively subdivided in quadrants where each new cell is assigned a second identifier containing a sequence of numbers to indicate the position of the cell in the disaggregation scheme. For instance, the sequence identifier corresponding to the right top cell in the right image in Figure \ref{fig:Figure 1} would be 416, i.e. fourth cell in the first division, and sixteenth cell in the second division.
```{r echo=FALSE, fig.align='center', out.width="60%", fig.cap="\\label{fig:Figure 1}Three level quadtree splitting cell numbering example. Initial cell on the (left); first quadtree subdivision (center); second quadtree subdivision (right)", fig.show='hold'}
knitr::include_graphics('images/Fig1.png')
```
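The exported helper function `splitCellNum` decomposes such a sequence identifier into its per-level positions; a minimal sketch (not evaluated when building this vignette):
```{r, eval=FALSE}
## sequence identifier of the top right cell in Figure 1:
## fourth cell in the first division, sixteenth in the second
splitCellNum("416")
## [1]  4 16
```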
To ensure data privacy, a cell is only split if all the resulting subdivisions satisfy the threshold restriction on the number of points. In cases of very irregular point patterns, this restriction results in less accuracy in the cell resolution. For instance, Figure \ref{fig:Figure 2}a presents a pattern of 932 points unevenly distributed on a 1km cell and Figure \ref{fig:Figure 2}b shows the corresponding grid of 62.5m cells with no threshold restrictions (the total number of points aggregated in each cell is shown).
```{r echo=FALSE, fig.align='center', out.width="25%", fig.cap="\\label{fig:Figure 2}Set of spatial points (a) and the corresponding 62.5m grid with no threshold restrictions (b) (the numbers indicate the points aggregated in each cell).", fig.subcap=rep("", 4), fig.show='hold'}
knitr::include_graphics(c('images/Fig2a.png','images/Fig2b.png'))
```
If we define an anonymity threshold of 17, the cell in Figure \ref{fig:Figure 2}a cannot be subdivided because one of the four resulting quadrants contains only 4 points. The privacy mechanism aggregates all the points, as presented in Figure \ref{fig:Figure 3}a, and obscures an irregular spatial distribution. The AQuadtree algorithm contemplates the suppression of some points before continuing the disaggregation. For instance, suppressing the 4 points in the top right quadrant of Figure \ref{fig:Figure 2}b results in the disaggregation shown in Figure \ref{fig:Figure 3}b, which is clearly much more faithful to the underlying spatial distribution. Moreover, the elimination of more data points would lead to further disaggregation (Figure \ref{fig:Figure 3}c and Figure \ref{fig:Figure 3}d).
```{r echo=FALSE, fig.align='center', out.width="24%", fig.cap="\\label{fig:Figure 3}Disaggregation examples with threshold value 17. No disaggregation and no loss (a); disaggregation with suppression of 4 points (b) ; more disaggregation with suppression of 12 points (c); maximum disaggregation with suppression of 29 points (d).", fig.subcap=rep("", 4), fig.show='hold'}
knitr::include_graphics(c('images/Fig3a.png','images/Fig3b.png','images/Fig3c.png','images/Fig3d.png'))
```
In order to balance information loss and resolution accuracy in the process of splitting a cell, the method computes the Theil inequality measure [@theil1972statistical] for the number of points in the possible quadrants as well as the percentage of points needed to be suppressed to force the division. In those cases where the anonymity threshold value prevents disaggregation, high values on the inequality measure may suggest the need for further subdivision, while high values on the loss rate may suggest to stop this subdivision. The algorithm uses default limits for both measures: 0.25 and 0.4, respectively (both values can be defined between 0 and 1). Thus, if there exists any sub-cell with a number of points lower than the anonymity threshold and the inequality measure is higher than 0.25, then the disaggregation process continues by suppressing those points as long as the loss rate is lower than 0.4. Hence, following the example in Figure \ref{fig:Figure 2}, the default disaggregation produced by the method would be the one shown in Figure \ref{fig:Figure 3}b.
All the points suppressed during the process are aggregated in a cell with the initial dimension so their information does not disappear. This cell is marked as a residual cell. Following the example in Figure \ref{fig:Figure 2}, if the number of suppressed points overcomes the anonymity threshold, as, for instance, in Figure \ref{fig:Figure 3}d, the 29 suppressed points are aggregated in a cell of the initial given dimension, which will be marked as a residual cell (see Figure \ref{fig:Figure 4}).
```{r echo=FALSE, fig.align='center', out.width="28%", fig.cap="\\label{fig:Figure 4}Example of a residual cell.", fig.show='hold'}
knitr::include_graphics('images/Fig4.png')
```
# The AQuadtree Class
An AQuadtree class object is a spatial dataset representing a varying size grid and is created performing an aggregation of a given set of points considering a minimum threshold for the number of points in each cell. The AQuadtree main function of the package creates the AQuadtree object from _`SpatialPoints`_ or _`SpatialPointsDataFrame`_ objects.
```{r}
example.QT<-AQuadtree(CharlestonPop)
class(example.QT)
```
The AQuadtree class proposes a collection of methods to manage the generated objects and overrides the generic methods _`show`_, _`print`_, _`summary`_ and _`[`_ (subsetting) for the AQuadtree signature. The _`plot`_ method overrides the generic function for plotting R objects with an extra parameter to specify if residual cells should be plotted. The _`spplot`_ function overrides the lattice-based plot method from the sp package [@Pebesma2005], with two extra parameters to control whether residual cells should be displayed, and whether attributes should be divided by the cell areas to make different zones comparable. The _`merge`_ method merges data from an input data frame to the given AQuadtree object. An AQuadtree object can be coerced to a SpatialPolygonsDataFrame using the generic method _`as`_ from the methods package.
```{r echo=2:4, fig.align='center', out.width="40%", fig.cap="AQuadtree plot and spplot"}
oldpar<-par(mar = c(0,0,0,0))
bcn.QT<-AQuadtree(BarcelonaPop)
plot(bcn.QT)
spplot(bcn.QT, by.density=TRUE)
par(oldpar)
```
## Controlling the grid resolution
The characteristics of the AQuadtree object can be adjusted with various parameters. First, the _`dim`_ parameter defines the size in meters of the highest scale cells and the _`layers`_ parameter indicates the number of disaggregation levels. Thus, specifying the parameters _`dim=10000`_ and _`layers=4`_ would create a grid with cells of sizes between 10km and 1.25km. The default values establish an initial size of 1000 meters and 3 levels of disaggregation.
```{r, linewidth=90}
charleston.QT<-AQuadtree(CharlestonPop, dim = 10000, layers = 4)
summary(charleston.QT)
```
## Summarizing data
The _`colnames`_ parameter specifies the columns on the original dataset to summarize in the resulting grid. An extra attribute _`total`_, containing the number of points in each cell, is automatically created and added to the dataframe. In the aggregation process, attributes specified in the _`colnames`_ parameter will be summarized using the _'sum'_ function. A list of alternative summarizing functions can be provided with the _`funs`_ parameter. If any attribute indicated in the _`colnames`_ parameter is a factor, the function creates a new attribute for each label of the factor. For instance, an attribute sex with two labels, _`man`_ and _`woman`_, would be deployed into the two attributes _`sex.man`_ and _`sex.woman`_.
```{r, linewidth=90 }
class(BarcelonaPop$sex)
levels(BarcelonaPop$sex)
bcn.QT<-AQuadtree(BarcelonaPop, colnames = names(BarcelonaPop), funs = c('mean', 'sum'))
summary(bcn.QT)
```
## Specifying a threshold and threshold fields
The package applies a default anonymity threshold value of 100 and it can be changed with the _`threshold`_ parameter. If nothing else is indicated, the threshold restriction is applied only to the total number of points aggregated in each cell (i.e. the _`total`_ attribute added to the resulting dataset). When some of the attributes include confidential information, the threshold restriction can be applied to various properties with the _`thresholdField`_ parameter, indicating the list of attributes from the resulting dataset that must satisfy that given threshold.
```{r, linewidth=90 }
bcn.QT<-AQuadtree(BarcelonaPop, colnames = c('age','sex'),
funs = c('mean', 'sum'), threshold=17,
thresholdField=c("sex.man", "sex.woman"))
summary(bcn.QT)
```
## Balancing information loss and accuracy
In order to control the disaggregation process, two more parameters set thresholds on the inequality and loss rates. The extra parameter _`ineq.threshold`_, a rate between 0 and 1, specifies a threshold to force disaggregation when there is high inequality between sub-cells. The Theil entropy measure as computed in the _`ineq`_ package [@zeileis2009package] is used to measure inequality for each cell. The _`ineq.threshold`_ parameter defaults to 0.25. Lower values in the _`ineq.threshold`_ produce grids with smaller cells (see Figure \ref{fig:Figure 6}).
```{r echo=2:5, fig.align='center', out.width="40%", fig.cap="\\label{fig:Figure 6}Examples of the effect of the ineq.threshold parameter."}
oldpar<-par(mar = c(0,0,0,0))
bcn.QT <- AQuadtree(BarcelonaPop, threshold = 5, ineq.threshold = 0.01)
plot(bcn.QT)
bcn.QT <- AQuadtree(BarcelonaPop, threshold = 5, ineq.threshold = 0.5)
plot(bcn.QT)
par(oldpar)
```
On the other hand, the parameter _`loss.threshold`_, also a rate between 0 and 1, indicates the rate of loss allowed before preventing further disaggregation of cells. A low value states that lower loss is preferred on the resulting grid, so less disaggregation is obtained.
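As a small sketch (not evaluated here), a strict loss limit can be combined with a low anonymity threshold; the `loss` slot of the result records how many points were suppressed:
```{r, eval=FALSE}
## a lower loss.threshold discards fewer points, so cells split less often
bcn.QT <- AQuadtree(BarcelonaPop, threshold = 5, loss.threshold = 0.1)
bcn.QT@loss
```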
## AQuadtree object structure
A call to the AQuadtree function will return an AQuadtree class object with six slots indicating the parameters used on the creation of the grid:
* _`dim`_: scale in meters of the highest level cells
* _`layers`_: number of subdivision levels
* _`colnames`_: attribute names summarized in the resulting grid
* _`threshold`_: the value used for anonymization
* _`thresholdField`_: attribute names to which the threshold restriction has been applied
* _`loss`_: number of points discarded during the process of disaggregation because of the threshold
```{r, linewidth=90}
bcn.QT<-AQuadtree(BarcelonaPop)
slotNames(bcn.QT)
```
The data slot contains a dataframe with the information comprised in each cell:
* _`total`_: number of points grouped in the cell.
* _`level`_: scale of disaggregation of the cell.
* _`residual`_: logical value indicating if the cell contains only residual points. Residual points are those that have been suppressed on the disaggregation process to get better accuracy, but can be grouped at the highest scale cell as it overcomes the given threshold.
* _`cellCode`_: cell's size and the coordinates of the lower left cell corner in the ETRS89-LAEA system at the highest aggregation level.
* _`cellNum`_: sequence of numbers indicating the position of the cell in the disaggregation scheme.
```{r, linewidth=90}
names(bcn.QT)
head(bcn.QT)
```
# Provided data
The package includes two _`SpatialPointsDataFrame`_ objects: _`BarcelonaPop`_ for the city of Barcelona (Spain) and _`CharlestonPop`_ for the Charleston, SC metropolitan area (USA). Both objects contain random point data with the distributions of real data acquired at census scale from different sources.
The package also provides two _`SpatialPolygons`_ objects with the spatial boundaries for each region. _`BarcelonaCensusTracts`_ and _`CharlestonCensusTracts`_ contain, respectively, the census tracts spatial limits for the city of Barcelona, and the census tracts spatial limits for the Charleston, SC metropolitan area.
_`BarcelonaPop`_ comprises 81,359 sample points in the city of Barcelona, Spain. The original information was obtained from the statistics department of the Ajuntament de Barcelona, providing population data at the census tract level for the year 2018 [@AjuntamentdeBarcelona.DepartamentdEstadistica2018]. The points were generated and distributed randomly in space, keeping the information at each census tract unchanged. To reduce the file size, only a sample of 7% of the points has been kept.
```{r, linewidth=90}
data("BarcelonaPop", package = "AQuadtree")
summary(BarcelonaPop)
```
In a similar way, the _`CharlestonPop`_ object, with 54,619 random sample points, was created using the information in the dataset Charleston1 from the 2000 Census Tract Data for the Charleston, SC metropolitan area (USA) [@GeodaDataandLab2019]. To reduce the file size, only a sample of 10% of the points has been kept.
```{r, linewidth=90}
data("CharlestonPop", package = "AQuadtree")
summary(CharlestonPop)
```
# Session info
Here is the output of session_info("AQuadtree") on the system on which this document was compiled:
```{r}
devtools::session_info("AQuadtree")
```
# References
| /scratch/gouwar.j/cran-all/cranData/AQuadtree/vignettes/AQuadtreeUse.Rmd |
AR.Sim <-
function(n, f_X, Y.dist, Y.dist.par, xlim=c(0,1), S_X=xlim, Rej.Num=TRUE, Rej.Rate=TRUE, Acc.Rate=TRUE ){
    oldw = options(warn=-1)
    on.exit(options(oldw), add=TRUE)   # restore the user's warning setting on exit
x = c()
    k = 0                    # Counter of generated candidates
f_Y <- function(x) pdf(T.dist=Y.dist, T.dist.par=Y.dist.par, x) # pdf is a function from DISTRIB Package
c = optimize(f=function(x) f_X(x)/f_Y(x), interval=S_X, maximum=T)$objective
c.max = optimize(f=function(x) f_X(x)/f_Y(x), interval=S_X, maximum=T)$maximum
max.f_X = optimize(f_X, interval=S_X, maximum=T)$objective #Is need for ylim in figures
max.f_Y = optimize(f_Y, interval=S_X, maximum=T)$objective #Is need for ylim in figures
cat("Optimal c =", round(c,3), fill=TRUE )
par(mfrow=c(3,1), oma=c(1.5, 4, 1.3, 0), mar=rep(1.2, 4), cex=.9, las=1)
curve(f_X(x), col=1, lwd=2, lty=1, xlim=xlim, ylim=c(0, .1+ max(c, max.f_X, max.f_Y)))
curve(f_Y(x), col="coral1", lwd=2, lty=1, add=T)
curve(f_X(x)/f_Y(x), lwd=2, lty=1, col=4, add=T)
abline(h=c, v=c.max, col=4, lty=3)
curve(dunif(x) , xlim=xlim, ylim=c(0,1.1), lwd=2)
curve(f_X(x)/(c*f_Y(x)) , xlim=xlim, col=4, lty=2, lwd=2, add=T)
    Y <- U <- c()            #For drawing Fig 3
while( length(x) < n ) {
k = k + 1
u = runif(1)
y = rd(1, T.dist=Y.dist, T.dist.par=Y.dist.par) # rd is a function from DISTRIB Package
      Y = c(Y,y)    #Only for drawing Fig 3
      U = c(U,u)    #Only for drawing Fig 3
if( u <= f_X(y)/(c*f_Y(y)) )
{ x = c(x,y)
        points(y, u , type="p", pch=20, col=3)   # accepted draws in green
}
else
{
        points(y, u , type="p", pch=20, col=2)   # rejected draws in red
}
}
curve(c * f_Y(x) , col="coral1", xlim=xlim, ylim=c(0, .02+ max(max.f_X, c*max.f_Y)), lty=2, lwd=2)
curve( f_X(x) , xlim=xlim, col=1, lty=2, lwd=2, add=T)
    points(Y, U*c*f_Y(Y) , type="p", pch=20,
           col = 3*(U <= f_X(Y)/(c*f_Y(Y))) + 2*(U > f_X(Y)/(c*f_Y(Y))) )   # green = accepted, red = rejected
title("Graphical Presentation to Acceptance-Rejection Method ", outer=TRUE)
par(mfcol=c(1,1)) #Reset display
#mtext("y", side=1, padj=3, outer=F) #For xlab
mtext("f_X(x) ", font=4, col=1, side=2, padj=-19, outer=T) #For ylab of Fig 1
mtext("f_Y(x) ", font=4, col="coral1", side=2, padj=-17, outer=T) #For ylab of Fig 1
mtext("f_X(x) ", font=4, col=4, side=2, padj=-14, outer=T) #For ylab of Fig 1
mtext("--------- ", font=4, col=4, side=2, padj=-13, outer=T) #For ylab of Fig 1
mtext("f_Y(x) ", font=4, col=4, side=2, padj=-12, outer=T) #For ylab of Fig 1
mtext("f_U(y) ", font=4, col=1, side=2, padj=-2, outer=T) #For ylab of Fig 2
mtext("f_X(y) ", font=4, col=4, side=2, padj=1, outer=T) #For ylab of Fig 2
mtext("---------- ", font=4, col=4, side=2, padj=2, outer=T) #For ylab of Fig 1
mtext("c. f_Y(y) ", font=4, col=4, side=2, padj=3, outer=T) #For ylab of Fig 1
mtext("c. f_Y(y) ", font=4, col="coral1", side=2, padj=14, outer=T) #For ylab of Fig 3
mtext("f_X(y) ", font=4, col=1, side=2, padj=16, outer=T) #For ylab of Fig 3
mtext("Points (y , u. c. f_Y(y)) Points (y , u) Computing the optimum c", 4, 0, outer=F, las=0) #For ylab in right side
if(Rej.Num != "FALSE")
cat( "The numbers of Rejections =", k-n, fill=TRUE )
if(Rej.Rate != "FALSE")
cat( "Ratio of Rejections =", round((k-n)/k, 3), fill=TRUE )
if(Acc.Rate != "FALSE")
cat( "Ratio of Acceptance =", 1-round((k-n)/k, 3), fill=TRUE )
Sim.data <- x
return(Sim.data)
}
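
## Usage sketch (hedged): draw 50 values from a Beta(2.7, 6.3) target using a
## Uniform(0,1) candidate. The token "unif" for 'Y.dist' is an assumption about
## the distribution naming accepted by the DISTRIB package's pdf()/rd() helpers.
# sim <- AR.Sim(n = 50, f_X = function(x) dbeta(x, 2.7, 6.3),
#               Y.dist = "unif", Y.dist.par = c(0, 1))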
| /scratch/gouwar.j/cran-all/cranData/AR/R/AR.Sim.R |
globalVariables(c("z","lag","..density.."))
ARtCensReg = function(cc, lcl=NULL, ucl=NULL, y, x, p=1, M=10, perc=0.25, MaxIter=400,
pc=0.18, nufix=NULL, tol=0.0001, show_se=TRUE, quiet=FALSE){
m = length(y)
if (!is.numeric(y)) stop("y must be a numeric vector")
if (!is.numeric(x)) stop("x must be a numeric matrix")
if (!is.matrix(x)) x = as.matrix(x)
if (det(t(x)%*%x)==0) stop("the columns of x must be linearly independent")
  ## Verify errors in parameter specification
#No data
if ((length(x) == 0) | (length(y) == 0) | (length(cc) == 0)) stop("All parameters must be provided")
  #Validating whether NA values exist
if (sum(cc[1:p]) > 0) stop("The first p values in y must be completely observed")
if (sum(cc%in%c(0,1))< length(cc)) stop("The elements of the vector cc must be 0 or 1")
if (sum(is.na(x)) > 0) stop("There are some NA values in x")
if (sum(is.na(cc)) > 0) stop("There are some NA values in cc")
miss = which(is.na(y))
if (sum(cc[miss]) != length(miss)) stop ("NA values in y must be specified through arguments cc, lcl, and ucl")
#Validating dims data set
if (ncol(as.matrix(y)) > 1) stop("y must have just one column")
if (ncol(as.matrix(cc)) > 1) stop("cc must have just one column")
  if (nrow(as.matrix(x)) != m) stop("x does not have the same number of rows as y")
  if (length(cc) != m) stop("cc does not have the same length as y")
if (sum(cc) > 0){
if (is.null(lcl) | is.null(ucl)) stop("lcl and ucl must be provided for censored data")
if (!is.numeric(lcl) | !is.numeric(ucl)) stop("lcl and ucl must be numeric vectors")
if (length(miss)>0){
censor = (cc==1 & !is.na(y))
if (any(is.infinite(lcl[censor]) & is.infinite(ucl[censor]))) stop("lcl or ucl must be finite for censored data")
} else {
if (any(is.infinite(lcl[cc==1]) & is.infinite(ucl[cc==1]))) stop("lcl or ucl must be finite for censored data")
}
    if (length(lcl) != m) stop("lcl does not have the same length as y")
    if (length(ucl) != m) stop("ucl does not have the same length as y")
if (ncol(as.matrix(lcl)) > 1) stop("lcl must have just one column")
if (ncol(as.matrix(ucl)) > 1) stop("ucl must have just one column")
if (sum(is.na(lcl))>0 | sum(is.na(ucl))>0) stop("There are some NA values in lcl or ucl")
if (!all(lcl[cc==1]<ucl[cc==1])) stop ("lcl must be smaller than ucl")
}
#Validating supports
if (!is.null(nufix)){
if (length(c(nufix)) != 1) stop("nufix must be a positive value or 'NULL'")
if (!is.numeric(nufix)) stop("nufix must be a positive value")
if (nufix <= 2) stop("nufix must be a positive value (greater than 2)")
}
if (length(p) != 1) stop("p must be a positive integer value")
if (!is.numeric(p)) stop("p must be a positive integer value")
if (p!=round(p) | p<=0) stop("p must be a positive integer value")
if (tol <= 0) stop("tolerance must be a positive value (suggested to be small)")
if (!is.numeric(MaxIter)) stop("MaxIter must be a positive integer value")
if (MaxIter<=0 | MaxIter%%1!=0) stop("MaxIter must be a positive integer value")
if (!is.numeric(M)) stop("M must be a positive integer value")
if (M<=1 | M%%1!=0) stop("M must be a positive integer value (greater than 1)")
if (!is.numeric(pc)) stop("pc must be a real number in [0,1]")
if (pc>1 | pc<0) stop("pc must be a real number in [0,1]")
if (!is.numeric(perc)) stop("perc must be a real number in [0,1)")
if (perc>=1 | perc<0) stop("perc must be a real number in [0,1)")
if (!is.logical(show_se)) stop("show_se must be TRUE or FALSE")
if (!is.logical(quiet)) stop("quiet must be TRUE or FALSE")
#Running the algorithm
if (!quiet) {
cat('\n')
call <- match.call()
cat("Call:\n")
print(call)
cat('\n')
}
out = suppressWarnings(SAEM_temporalT(cc, lcl, ucl, y, x, p, tol, M, perc, MaxIter, pc, nufix, show_se, quiet))
q = ncol(x)
if (is.null(nufix)){ lab = numeric(p+q+2); lab[p+q+2] = 'nu' } else { lab = numeric(p+q+1) }
if (sum(abs(x[,1])) == nrow(x)){ for (i in 1:q) lab[i] = paste('beta',i-1,sep='')
} else { for (i in 1:q) lab[i] = paste('beta',i,sep='') }
lab[q+1] = 'sigma2'
for (i in ((q+2):(p+q+1))) lab[i] = paste('phi',i-q-1,sep='')
if (show_se) {
tab = round(rbind(out$res$theta, out$res$SE),4)
colnames(tab) = lab
rownames(tab) = c("","s.e.")
} else {
tab = round(rbind(out$res$theta),4)
colnames(tab) = lab
rownames(tab) = c("")
}
obj.out = out$res
obj.out$call = match.call()
obj.out$tab = tab
if (sum(cc) == 0){ cens = "no censoring"
} else {
if (sum(cc) == length(miss)){ cens = "missing"
} else {
if (all(is.infinite(lcl)) & any(is.finite(ucl))){ cens = "left" }
if (all(is.infinite(ucl)) & any(is.finite(lcl))){ cens = "right" }
if (any(is.finite(ucl-lcl))){ cens = "interval" }
}
}
obj.out$cens = cens
obj.out$nmiss = length(miss)
obj.out$ncens = sum(cc)
obj.out$converge = (out$res$iter < MaxIter)
obj.out$MaxIter = MaxIter
obj.out$M = M
obj.out$pc = pc
obj.out$time = out$time
#plot
obj.out$plot$cpl = pc*MaxIter
obj.out$plot$npar = length(out$res$theta)
obj.out$plot$labels = list()
if (sum(abs(x[,1]))==nrow(x)) { for(i in 1:q){obj.out$plot$labels[[i]] = bquote(beta[.(i-1)])}
} else { for(i in 1:q){obj.out$plot$labels[[i]] = bquote(beta[.(i)])} }
obj.out$plot$labels[[q+1]] = bquote(sigma^2)
for(i in 1:p){obj.out$plot$labels[[i+q+1]] = bquote(phi[.(i)])}
if (obj.out$plot$npar == (p+q+2)) obj.out$plot$labels[[p+q+2]] = bquote(nu)
obj.out$plot$Theta = out$Theta
#class
class(obj.out) = 'ARtpCRM'
invisible(obj.out)
}
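# Illustrative usage (a minimal sketch; the data are simulated with rARCens(),
# defined elsewhere in this package, and the tuning values are arbitrary).
# Guarded with if (FALSE) so the file can be sourced without side effects.
if (FALSE) {
  set.seed(1)
  x = cbind(1, runif(100))
  sim = rARCens(n = 100, beta = c(1, 2), phi = 0.5, sig2 = 1, x = x,
                cens = 'left', pcens = 0.1, innov = 't', nu = 4)
  fit = ARtCensReg(cc = sim$data$cc, lcl = sim$data$lcl, ucl = sim$data$ucl,
                   y = sim$data$y, x = x, p = 1, M = 10, MaxIter = 100)
  summary(fit)
}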
#' @export
print.ARtpCRM = function(x, ...){
cat('---------------------------------------------------\n')
cat(' Censored Linear Regression Model with AR Errors \n')
cat('---------------------------------------------------\n')
cat("Call:\n")
print(x$call)
cat('\n')
cat('Estimated parameters:\n')
print(x$tab)
cat('\n')
cat('Details:\n')
cat('Type of censoring:', x$cens, '\n')
if (x$ncens > 0){ cat('Number of missing values:', x$nmiss, '\n') }
cat("Convergence reached?:", x$converge, '\n')
cat('Iterations:', x$iter,"/",x$MaxIter, '\n')
cat('MC sample:', x$M, '\n')
cat('Cut point:', x$pc, '\n')
cat("Processing time:", x$time, units(x$time), '\n')
}
#' @export
summary.ARtpCRM = function(object, ...){
cat('---------------------------------------------------\n')
cat(' Censored Linear Regression Model with AR Errors \n')
cat('---------------------------------------------------\n')
cat("Call:\n")
print(object$call)
cat('\n')
cat('Estimated parameters:\n')
print(object$tab)
cat('\n')
cat('Details:\n')
cat('Type of censoring:', object$cens, '\n')
if (object$ncens > 0) { cat('Number of missing values:', object$nmiss, '\n') }
cat("Convergence reached?:", object$converge, '\n')
cat('Iterations:', object$iter,"/",object$MaxIter, '\n')
cat('MC sample:', object$M, '\n')
cat('Cut point:', object$pc, '\n')
cat("Processing time:", object$time, units(object$time), '\n')
}
#' @export
plot.ARtpCRM = function(x, ...) {
count = x$iter
npar = x$plot$npar
label = x$plot$labels
myplot = vector("list", npar)
for (i in 1:npar){
data1 = data.frame(z=x$plot$Theta[,i])
myplot[[i]] = ggplot(data1, aes(x=seq(1,count), y=z)) + geom_line() +
geom_vline(xintercept=x$plot$cpl, color="red", linetype="twodash") +
labs(x="Iteration", y=label[[i]]) + theme_bw()
}
nrows = ifelse(npar%%3==0, npar%/%3, (npar%/%3)+1)
grid.arrange(grobs=myplot, nrow=nrows, ncol=3)
}
#' @export
residuals.ARtpCRM = function(object, ...) {
x = object$x
p = length(object$phi)
m = nrow(x)
residuals = numeric(m)
residuals[1:p] = 0
res = object$yest - x%*%object$beta
for (i in (p+1):m) residuals[i] = res[i] - sum(object$phi*res[(i-1):(i-p)])
#
quant = residuals/sqrt(object$sigma2)
quant = qnorm(pt(quant, object$nu))
resid = list(residuals=residuals[-(1:p)], quantile.resid=quant[-(1:p)])
class(resid) = "residARpCRM"
return(resid)
}
#' @export
predict.ARtpCRM = function(object, x_pred, ...) {
# Validation
x_pred = as.matrix(x_pred)
  if (ncol(x_pred)!=ncol(as.matrix(object$x))) stop("x_pred must have the same number of columns as x")
if (sum(is.na(x_pred))>0) stop("There are some NA values in x_pred")
if (!is.numeric(x_pred)) stop("x_pred must be a numeric matrix")
m = nrow(x_pred)
n = length(c(object$yest))
p = length(c(object$phi))
beta = object$beta
phi = object$phi
meanDiff = object$yest - object$x%*%beta
media.pre = x_pred%*%beta
y_pred = matrix(0, ncol=1, nrow=m)
for (k in 1:m){
a1_pred = 0
if (k==1){ a1_pred = t(phi)%*%c(meanDiff[n:(n-p+k)])
} else { if (1<k & k<=p){ a1_pred = t(phi)%*%c((y_pred[(k-1):1] - media.pre[(k-1):1]), meanDiff[n:(n-p+k)])
} else { if (k>p) a1_pred = t(phi)%*%c(y_pred[(k-1):(k-p)] - media.pre[(k-1):(k-p)]) }}
y_pred[k,] = media.pre[k] + a1_pred
}
return (y_pred)
}
| /scratch/gouwar.j/cran-all/cranData/ARCensReg/R/ARtCensReg_final.R |
## The autoregressive AR(p) Student-t model ##
##############################################################
# Random numbers from the multivariate normal distribution #
##############################################################
rtmvnormal = function(n, mean, sigma, lower, upper, thinning){
  if (!is.positive.definite(sigma)){ stop("The variance matrix must be positive definite.",call.=FALSE) }
x0 = ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0))
if (length(mean)==1){ ret = rtnorm(n, mean=mean, sd=sqrt(sigma), lower=lower, upper=upper)
} else { ret = rtmvnorm(n, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="gibbs",
burn.in.samples=0, start.value=x0, thinning=thinning) }
return (ret)
}
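# Minimal sketch of the sampler above (the values are arbitrary): draws from a
# bivariate normal truncated to the box [-1,1] x [-1,1], which exercises the
# Gibbs branch. Guarded with if (FALSE) so the file sources cleanly.
if (FALSE) {
  S = matrix(c(1, 0.5, 0.5, 1), 2, 2)
  rtmvnormal(5, mean = c(0, 0), sigma = S, lower = c(-1, -1),
             upper = c(1, 1), thinning = 2)
}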
##############################################################
# Function to maximize nu #
##############################################################
Maxnu = function(e.nu, SAEMu, SAEMlu){
ni = length(SAEMu) # n-p
fmax = as.numeric()
fmax = 0.5*ni*(e.nu*log(0.5*e.nu) - 2*log(gamma(0.5*e.nu))) + 0.5*e.nu*(SAEMlu - sum(SAEMu))
return(fmax) # Function to be maximized
}
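# Sketch of how Maxnu is used inside SAEM_temporalT below: nu is updated by a
# one-dimensional search over (2, 200), where SAEM.U and SAEM.lU denote the
# current stochastic approximations of E(u) and of the sum of log u.
if (FALSE) {
  nu.hat = optimize(f = Maxnu, lower = 2, upper = 200, maximum = TRUE,
                    SAEMu = SAEM.U, SAEMlu = SAEM.lU)$maximum
}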
##############################################################
# Gibbs sampler #
##############################################################
Gibbs_samplerCLRT = function(X, yobs, SAEMu, cc, LI, LS, phi, beta, sigma2, nu, M1, M0, fixnu, show_ep){
nj = length(yobs) # n
pj = length(phi) # p
diff1 = nj - pj # n - p
drawsY = vector("numeric", length=nj) # Sum Y
drawsU = vector("numeric", length=diff1) # Sum u
drawslU = 0 # Sum i=1^m (sum j=p+1^n log u_j)
drawsUY2 = 0 # Sum i=1^m (sum j=p+1^n u_jy_j^2)
drawsUYi = vector("numeric", length=diff1) # Sum u*Y
drawsUYZ = vector("numeric", length=pj) # Sum i=1^m (sum j=p+1^n u_jy_ijz_j-1)
drawsUZi = matrix(0,nrow=pj, ncol=diff1) # Sum u_i z_i-1
drawsUZ2 = matrix(0,nrow=pj, ncol=pj) # Sum i=1^m (sum j=p+1^n u_jz_i-1t(z_i-1))
sampler.y = yobs
if (show_ep){
if (!fixnu){ deltaM = matrix(0, nrow=(pj+length(beta)+2), ncol=(pj+length(beta)+2)) # The second part of the information matrix
} else { deltaM = matrix(0, nrow=(pj+length(beta)+1), ncol=(pj+length(beta)+1)) } # The second part of the information matrix
}
if (sum(cc)==0){
diff.media = sampler.y - X%*%beta
for (k in 1:M1){
sampler.u = vector("numeric", length=diff1)
samplerUYZ = vector("numeric", length=pj)
samplerUZi = matrix(0, nrow=pj, ncol=diff1)
samplerUZ2 = matrix(0, nrow=pj, ncol=pj)
# Sampling from u|Y,theta
for (i in 1:diff1){
bi = nu + (diff.media[pj+i] - sum(phi*diff.media[(pj+i-1):i]))^2/sigma2
sampler.u[i] = rgamma(1, shape=0.5*(nu+1), rate=0.5*bi)
samplerUYZ = samplerUYZ + sampler.u[i]*sampler.y[pj+i]*sampler.y[(pj+i-1):i]
samplerUZi[,i] = sampler.u[i]*sampler.y[(pj+i-1):i]
samplerUZ2 = samplerUZ2 + sampler.u[i]*(sampler.y[(pj+i-1):i]%*%t(sampler.y[(pj+i-1):i]))
} # End for
if (k > M0){
drawsU = drawsU + sampler.u
drawslU = drawslU + sum(log(sampler.u))
drawsUY2 = drawsUY2 + sum(sampler.u*(sampler.y[-(1:pj)])^2)
drawsUYi = drawsUYi + sampler.u*sampler.y[-(1:pj)]
drawsUYZ = drawsUYZ + samplerUYZ
drawsUZi = drawsUZi + samplerUZi
drawsUZ2 = drawsUZ2 + samplerUZ2
if (show_ep){
score = Gradient(X, sampler.u, sampler.y, phi, beta, sigma2, nu, fixnu)
deltaM = deltaM + score%*%t(score)
}
} # End if
} # End for
EY = sampler.y
} # Sampling without censored observations
if (sum(cc)>=1 & sum(cc)<=nj){
mean.vc = ComputeMean(yobs, X, beta, phi)
mean1 = mean.vc$mvc
cc1 = cc[-(1:pj)]; Li = LI[-c(1:pj)]; Ls = LS[-c(1:pj)]
sampler.u = SAEMu
for (k in 1:M1){
samplerUYZ = vector("numeric", length=pj)
samplerUZi = matrix(0, nrow=pj, ncol=diff1)
samplerUZ2 = matrix(0, nrow=pj, ncol=pj)
# Sampling from Yc|u,Yo,theta
var1 = ComputeVar(sigma2, mean.vc$Pphi, phi, sampler.u)
y.np = sampler.y[-(1:pj)]
inversa.vc = var1[cc1==1,cc1==0]%*%inversa(var1[cc1==0,cc1==0])
mean.c = mean1[cc1==1] + inversa.vc%*%(y.np[cc1==0]-mean1[cc1==0])
mean.c = as.vector(mean.c) # Mean
var.c = var1[cc1==1,cc1==1] - inversa.vc%*%var1[cc1==0,cc1==1]
var.c = (var.c + t(var.c))/2 # Variance matrix
random.y = rtmvnormal(1, mean=mean.c, sigma=var.c, lower=Li[cc1==1], upper=Ls[cc1==1], thinning=2)
y.np[cc1==1] = random.y
sampler.y[-c(1:pj)] = y.np
# Sampling from u|Y,theta
diff.media = sampler.y - X%*%beta
for (i in 1:diff1){
bi = nu + (diff.media[pj+i] - sum(phi*diff.media[(pj+i-1):i]))^2/sigma2
sampler.u[i] = rgamma(1,shape=0.5*(nu+1),rate=0.5*bi)
samplerUYZ = samplerUYZ + sampler.u[i]*sampler.y[pj+i]*sampler.y[(pj+i-1):i]
samplerUZi[,i] = sampler.u[i]*sampler.y[(pj+i-1):i]
samplerUZ2 = samplerUZ2 + sampler.u[i]*(sampler.y[(pj+i-1):i]%*%t(sampler.y[(pj+i-1):i]))
} # End for
if (k > M0){
drawsY = drawsY + sampler.y
drawsU = drawsU + sampler.u
drawslU = drawslU + sum(log(sampler.u))
drawsUY2 = drawsUY2 + sum(sampler.u*(sampler.y[-(1:pj)])^2)
drawsUYi = drawsUYi + sampler.u*sampler.y[-(1:pj)]
drawsUYZ = drawsUYZ + samplerUYZ
drawsUZi = drawsUZi + samplerUZi
drawsUZ2 = drawsUZ2 + samplerUZ2
if (show_ep){
score = Gradient(X, sampler.u, sampler.y, phi, beta, sigma2, nu, fixnu)
deltaM = deltaM + score%*%t(score)
}
} # End if
} # End for
EY = drawsY/(M1 - M0)
} # Sampling censored variables
# Estimating the expectations
m = M1 - M0
EU = drawsU/m
ElU = drawslU/m
EUY2 = drawsUY2/m
EUYi = drawsUYi/m
EUYZ = drawsUYZ/m
EUZi = drawsUZi/m
EUZ2 = drawsUZ2/m
if (show_ep){
EdeltaM = deltaM/m
return (list(EY=EY, EU=EU, ElU=ElU, EUY2=EUY2, EUYi=EUYi, EUYZ=EUYZ, EUZi=EUZi, EUZ2=EUZ2, Edelta=EdeltaM))
} else {
return (list(EY=EY, EU=EU, ElU=ElU, EUY2=EUY2, EUYi=EUYi, EUYZ=EUYZ, EUZi=EUZi, EUZ2=EUZ2))
}
}
##############################################################
# Parameter estimation - Student-t innovations #
##############################################################
SAEM_temporalT = function(cens, LI, LS, y, x, p, tol, M, perc, MaxIter, pc,
nufix, show_ep, quiet){
if (!quiet){ pb = txtProgressBar(min = 0, max = MaxIter, style = 3) }
yobs = y
n = length(yobs)
X = x
p = p
q = ncol(X)
model0 = arima(yobs, order=c(p=p,d=0,q=0), xreg=X, method="ML", include.mean=FALSE)
beta = c((model0$coef)[-(1:p)])
phi = c((model0$coef)[1:p])
sigma2 = model0$sigma2
if (is.null(nufix)) {
nu = 10; fixed.nu = FALSE
theta = c(beta, sigma2, phi, nu)
} else {
nu = nufix; fixed.nu = TRUE
theta = c(beta, sigma2, phi)
}
Theta = theta
# Stop criterion
criterio = criterio2 = 10
count = 0
## SAEM algorithm ################################
MG = round(M/(1 - perc),0) # Number of samples to generate
M0 = MG - M # Number of burn samples
# Sequence of decreasing positive numbers: smoothing parameter
if (pc==1){
seqq = rep(1,MaxIter)
} else {
seqq = c(1/((((pc*MaxIter)+1):MaxIter)-(pc*MaxIter)))
seqq = c(rep(1,MaxIter-length(seqq)),seqq)
}
SAEM.Y = vector("numeric", length=n)
SAEM.U = runif(n-p, 0, 1)
SAEM.lU = 0
SAEM.UY2 = 0
SAEM.UYi = vector("numeric", length=n-p)
SAEM.UYZ = vector("numeric", length=p)
SAEM.UZi = matrix(0, nrow=p, ncol=n-p)
SAEM.UZ2 = matrix(0, nrow=p, ncol=p)
if (show_ep){
if (!fixed.nu){ SAEM.delta = matrix(0,nrow=(p+q+2),ncol=(p+q+2))
} else { SAEM.delta = matrix(0,nrow=(p+q+1),ncol=(p+q+1)) }
}
initime = Sys.time()
while (criterio > tol){
count = count + 1
if (!quiet){ setTxtProgressBar(pb, count) }
# E-1: Sampling step
amostras = Gibbs_samplerCLRT(X, yobs, SAEM.U, cens, LI, LS, phi, beta, sigma2, nu, MG, M0, fixed.nu, show_ep)
# E-2: Stochastic approximation
SAEM.Y = SAEM.Y + seqq[count]*(amostras$EY - SAEM.Y)
SAEM.U = SAEM.U + seqq[count]*(amostras$EU - SAEM.U)
SAEM.lU = SAEM.lU + seqq[count]*(amostras$ElU - SAEM.lU)
SAEM.UY2 = SAEM.UY2 + seqq[count]*(amostras$EUY2 - SAEM.UY2)
SAEM.UYi = SAEM.UYi + seqq[count]*(amostras$EUYi - SAEM.UYi)
SAEM.UYZ = SAEM.UYZ + seqq[count]*(amostras$EUYZ - SAEM.UYZ)
SAEM.UZi = SAEM.UZi + seqq[count]*(amostras$EUZi - SAEM.UZi)
SAEM.UZ2 = SAEM.UZ2 + seqq[count]*(amostras$EUZ2 - SAEM.UZ2)
if (show_ep){ SAEM.delta = SAEM.delta + seqq[count]*(amostras$Edelta - SAEM.delta) }
# CM: Conditional maximization
media1 = X%*%beta
part1 = vector("numeric", length=p)
part2 = matrix(0, ncol=p, nrow=p)
for (i in 1:(n-p)){
part1 = part1 + SAEM.UYi[i]*media1[(p+i-1):i,] + media1[p+i,]*matrix(SAEM.UZi[,i]) - SAEM.U[i]*media1[p+i,]*media1[(p+i-1):i,]
part2 = part2 + SAEM.UZi[,i]%*%t(media1[(p+i-1):i,]) + media1[(p+i-1):i,]%*%t(SAEM.UZi[,i]) - SAEM.U[i]*(media1[(p+i-1):i,]%*%t(media1[(p+i-1):i,]))
}
uy2 = SAEM.UY2 - 2*sum(SAEM.UYi*media1[-(1:p)]) + sum(SAEM.U*(media1[-(1:p)])^2)
uyw = SAEM.UYZ - part1
uw2 = SAEM.UZ2 - part2
phi = as.vector(solve(uw2)%*%uyw) # Update phi
sigma2 = (uy2 - t(phi)%*%uyw - t(uyw)%*%phi + t(phi)%*%uw2%*%phi)/(n-p)
sigma2 = as.numeric(sigma2) # Update sigma2
ciclos = ciclobeta(X,phi,SAEM.U,SAEM.UYi,SAEM.UZi)
Ai = ciclos$Ai
Bi = ciclos$Bi
beta = as.vector(solve(Ai)%*%Bi) # Update beta
if (!fixed.nu){
nu = optimize(f=Maxnu, lower=2, upper=200, maximum=TRUE, SAEMu=SAEM.U, SAEMlu=SAEM.lU)$maximum
nu = as.numeric(nu) # Update nu
theta1 = c(beta, sigma2, phi, nu)
} else { theta1 = c(beta, sigma2, phi) }
# Stopping criteria
criterio2 = sqrt((theta1/theta-1)%*%(theta1/theta-1))
criterio = criterio2
if (count==MaxIter){criterio = 1e-12}
theta = theta1
Theta = rbind(Theta, theta)
} # End SAEM algorithm
if (!quiet) setTxtProgressBar(pb, MaxIter)
endtime = Sys.time()
timediffe = endtime-initime
# Observed information matrix
if (show_ep){
score1 = GradientExp(SAEM.lU, SAEM.U, uy2, uyw, uw2, Ai, Bi, phi, beta, sigma2, nu, fixed.nu)
hessian1 = HessianExp(SAEM.U, SAEM.UZi, SAEM.UYi, uy2, uyw, uw2, Ai, Bi, media1, X, phi, beta,
sigma2, nu, fixed.nu)
ObsInfM = score1%*%t(score1) - hessian1 - SAEM.delta
variancias = diag(inversa(ObsInfM)) # variance error approximation
se.app = sqrt(variancias) # standard error approximation
}
# Results
if (show_ep) resultados = list(beta=beta, sigma2=sigma2, phi=phi, nu=nu, theta=theta, SE=se.app)
else resultados = list(beta=beta, sigma2=sigma2, phi=phi, nu=nu, theta=theta)
resultados$yest = SAEM.Y
resultados$uest = SAEM.U
resultados$x = x
resultados$iter = count
resultados$criteria = criterio2
colnames(Theta) = NULL; rownames(Theta) = NULL
return (list(res=resultados, time=timediffe, Theta=Theta[-1,]))
}
| /scratch/gouwar.j/cran-all/cranData/ARCensReg/R/ARtCensReg_utils.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
inversa <- function(M) {
.Call(`_ARCensReg_inversa`, M)
}
ciclobeta <- function(X, phi, SAEMu, SAEMuyi, SAEMuzi) {
.Call(`_ARCensReg_ciclobeta`, X, phi, SAEMu, SAEMuyi, SAEMuzi)
}
ComputeMean <- function(yobs, Xmatrix, beta, phi) {
.Call(`_ARCensReg_ComputeMean`, yobs, Xmatrix, beta, phi)
}
ComputeVar <- function(sigma2, Pphi, phi, SAEMu) {
.Call(`_ARCensReg_ComputeVar`, sigma2, Pphi, phi, SAEMu)
}
Gradient <- function(x, saemU, saemY, phi, beta, sigma2, nu, fixnu) {
.Call(`_ARCensReg_Gradient`, x, saemU, saemY, phi, beta, sigma2, nu, fixnu)
}
GradientExp <- function(saemLU, saemU, uy2, uyw, uw2, A, B, phi, beta, sigma2, nu, fixnu) {
.Call(`_ARCensReg_GradientExp`, saemLU, saemU, uy2, uyw, uw2, A, B, phi, beta, sigma2, nu, fixnu)
}
HessianExp <- function(saemU, saemUZi, saemUYi, uy2, uyw, uw2, A, B, media, X, phi, beta, sigma2, nu, fixnu) {
.Call(`_ARCensReg_HessianExp`, saemU, saemUZi, saemUYi, uy2, uyw, uw2, A, B, media, X, phi, beta, sigma2, nu, fixnu)
}
| /scratch/gouwar.j/cran-all/cranData/ARCensReg/R/RcppExports.R |
### AUXILIARY FUNCTIONS FOR AR NORMAL MODEL ###
## Covariance matrix (MatArp * sig2)
MatArp = function(phi,n) {
p = length(phi)
if (n==1) Rn = 1
else Rn = toeplitz(ARMAacf(ar=phi, ma=0, lag.max = n-1))
rhos = ARMAacf(ar=phi, ma=0, lag.max = p)[(1:p)+1]
return(Rn/(1-sum(rhos*phi)))
}
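# Minimal sketch (the values are arbitrary): sig2 * MatArp(phi, n) is the n x n
# covariance matrix of n consecutive observations from a stationary AR(p)
# process with innovation variance sig2.
if (FALSE) {
  phi = c(0.5, -0.3)
  Sigma = 2 * MatArp(phi, 10)  # covariance matrix with sig2 = 2
}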
## Transformation function: pi to phi
estphit = function(pit) {
p = length(pit)
Phi = matrix(0, ncol=p, nrow=p)
if (p>1) {
diag(Phi) = pit
for (j in 2:p) {
for (k in 1:(j-1)) {
Phi[j,k] = Phi[j-1,k] - pit[j]*Phi[j-1,j-k]
}
}
return(Phi[p,])
}
else return(pit)
}
## Transformation function: phi to pi
tphitopi = function(phit) {
p = length(phit)
Phi = matrix(0, ncol=p, nrow=p)
Phi[p,] = phit
if (p>1) {
for (k in p:2) {
for (i in 1:(k-1)) {
Phi[k-1,i] = (Phi[k,i] + Phi[k,k]*Phi[k,k-i])/(1-Phi[k,k]^2)
}
}
return(diag(Phi))
}
else return(phit)
}
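# Minimal sketch (the values are arbitrary): estphit maps partial
# autocorrelations in (-1, 1) to AR coefficients via the Durbin-Levinson
# recursion, and tphitopi is its inverse, so the round trip recovers the input.
if (FALSE) {
  pit = c(0.6, -0.2)
  phi = estphit(pit)
  all.equal(tphitopi(phi), pit)  # TRUE
}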
## Estimate pi - case with censoring
lc = function(pi,D,n) {
phi = estphit(pi)
p = length(phi)
lambda = matrix(c(-1,phi))
spi = t(lambda)%*%D%*%lambda
gp = 1
for (i in 1:p) gp = gp*((1-pi[i]^2)^(-i))
sig2hat = spi/n
l = as.numeric(-n/2*log(sig2hat) -1/2*log(gp))
return(-l)
}
## Estimate pi - case without censoring (Normal)
lcc = function(pi,y,x) {
n = length(y)
phi = estphit(pi)
p = length(phi)
lambda = matrix(c(-1,phi))
betahat = solve(t(x)%*%solve(MatArp(phi,n))%*%x)%*%t(x)%*%solve(MatArp(phi,n))%*%y
spi = t(lambda)%*%Dbeta(betahat,y,x,p)%*%lambda
gp = 1
for (i in 1:p) gp = gp*((1-pi[i]^2)^(-i))
l = as.numeric(-n/2*log(spi) -1/2*log(gp))
return(-l)
}
## Log-likelihood Normal model
LogVerosCens = function(cc, y, media, Psi, LI, LS){
m = length(cc)
gammai = media
if(sum(cc)==0){
ver = log(dmvnorm(as.vector(y), as.vector(gammai), Psi))
}
if(sum(cc)>0){
if(sum(cc)==m){
ver = log(pmvnorm(lower=c(LI),upper=c(LS),mean=c(media),sigma=Psi) + .Machine$double.xmin)
} else {
vero = numeric(2)
vero[1] = dmvnorm(y[cc==0],gammai[cc==0,],Psi[cc==0,cc==0])
inverse00 = solve(Psi[cc==0,cc==0])
muc = gammai[cc==1,]+ Psi[cc==1,cc==0]%*%inverse00%*%(y[cc==0]-gammai[cc==0,])
Sc = Psi[cc==1,cc==1]-Psi[cc==1,cc==0]%*%inverse00%*%Psi[cc==0,cc==1]
vero[2] = pmvnorm(lower=c(LI[cc==1]),upper=c(LS[cc==1]),mean=c(muc),sigma=Sc)
if(length(which(vero == 0)) > 0) vero[which(vero == 0)] = .Machine$double.xmin
ver = sum(log(vero))
}
}
obj.out = list(ver = ver)
return(obj.out)
}
## Gibbs sampler (normal model)
amostradordegibbs = function(M, M0, nj, t1, cc1, y1, media, Gama, LI, LS){
draws = matrix(NA, nrow=M, ncol=nj)
draws[1,1:nj] = t1
gammai = media
if(sum(cc1)==0){
for(i in 2:M){
t1 = y1
draws[i,1:nj] = t1
}
} else {
if(sum(cc1)>0 & sum(cc1)==nj){
g = as.vector(gammai)
for(i in 2:M){
t1 = as.vector(rtmvnorm(1, mean=g, sigma=(Gama), lower=LI, upper=LS, algorithm="gibbs", thinning=2))
draws[i,1:nj] = t1
}
}
if(sum(cc1)>0 & sum(cc1)<nj){
if(sum(cc1)==1){
g = gammai
t1[cc1==0] = y1[cc1==0]
inverse0 = solve(Gama[cc1==0,cc1==0])
muc = g[cc1==1]+Gama[cc1==1,cc1==0]%*%inverse0%*%(y1[cc1==0]-g[cc1==0])
muc = as.vector(muc)
Sc = Gama[cc1==1,cc1==1]-Gama[cc1==1,cc1==0]%*%inverse0%*%Gama[cc1==0,cc1==1]
Sc = as.numeric(Sc)
for(i in 2:M){
y_r = rtnorm(1, mean=muc, sd=(sqrt(Sc)), lower=LI[cc1==1], upper=LS[cc1==1])
t1[cc1==1] = y_r
draws[i,1:nj] = t1
}
} else{
g = gammai
t1[cc1==0] = y1[cc1==0]
inverse0 = solve(Gama[cc1==0,cc1==0])
muc = g[cc1==1]+Gama[cc1==1,cc1==0]%*%inverse0%*%(y1[cc1==0]-g[cc1==0])
muc = as.vector(muc)
Sc = Gama[cc1==1,cc1==1]-Gama[cc1==1,cc1==0]%*%inverse0%*%Gama[cc1==0,cc1==1]
for(i in 2:M){
y_r = rtmvnorm(1, mean=muc, sigma=(Sc), lower=LI[cc1==1], upper=LS[cc1==1], algorithm="gibbs", thinning=2)
t1[cc1==1] = y_r
draws[i,1:nj] = t1
}
}
}
}
# Sample with burn-in (M0)
amostragibbs = draws[(M0+1):M,]
obj.out = list(amostragibbs = amostragibbs)
return(obj.out)
}
## Derivatives
#################################
aphi = function(phi) ifelse(length(phi)==1,log(MatArp(phi,length(phi))),
log(det(MatArp(phi,length(phi)))))
Dbeta = function(beta,y,x,p) {
n = length(y)
D = matrix(0,p+1,p+1)
for (ii in 1:(p+1)) {
for (jj in 1:(p+1)) {
D[ii,jj] = sum((y-x%*%beta)[ii:(n+1-jj)]*(y-x%*%beta)[jj:(n+1-ii)])
}
}
return(D)
}
Dphi1 = function(beta,y,xx,p) matrix(Dbeta(beta,y,xx,p)[2:(p+1),1])
Dphiphi2 = function(beta,phi,y,xx,p) (Dbeta(beta,y,xx,p)[2:(p+1),2:(p+1)])%*%phi
einvM = function(phi,e) {
n = length(e)
invM = solve(MatArp(phi,n))
return(t(e)%*%invM)
}
M1 = function(phi,yy){
n= nrow(yy)
return(sum(diag(yy%*%solve(MatArp(phi,n)))))
}
M1i = function(phi,yy,Di){
n= nrow(yy)
return(sum(diag(yy%*%solve(MatArp(phi,n))%*%Di)))
}
M2 = function(phi,vec1,vec2){
n= nrow(vec2)
return(vec1%*%solve(MatArp(phi,n))%*%vec2)
}
M3 = function(phi,vec1){
n= length(vec1)
return(t(vec1)%*%solve(MatArp(phi,n)))
}
Jt = function(theta,y,x) {
l = ncol(x)
n = length(y)
beta = matrix(theta[1:l])
sig2 = theta[l+1]
phi = theta[(l+2):length(theta)]
p = length(phi)
Mn = MatArp(phi,n)
lambda = matrix(c(-1,phi))
spi = t(lambda)%*%Dbeta(beta,y,x,p)%*%lambda
invMn = solve(Mn)
dbeta = 1/sig2*(t(x)%*%invMn%*%y - t(x)%*%invMn%*%x%*%beta)
dsig2 = -n/2/sig2 +1/2/sig2^2*spi
da = matrix(jacobian(aphi,phi))
dphi = -1/sig2*(-Dphi1(beta,y,x,p) + Dphiphi2(beta,phi,y,x,p))-1/2*da
return(rbind(dbeta,dsig2,dphi))
}
Ht = function(theta,y,x) {
l = ncol(x)
n = length(y)
r = length(theta)
beta = matrix(theta[1:l])
sig2 = theta[l+1]
phi = theta[(l+2):r]
p = length(phi)
Mn = MatArp(phi,n)
lambda = matrix(c(-1,phi))
spi = t(lambda)%*%Dbeta(beta,y,x,p)%*%lambda
invMn = solve(Mn)
dbetabeta = -1/sig2*(t(x)%*%invMn%*%x)
dsig2sig2 = n/2/sig2^2 - 1*spi/sig2^3
daa = (hessian(aphi,phi))
dphiphi = -1/sig2*Dbeta(beta,y,x,p)[2:(p+1),2:(p+1)] - 1/2*daa
dbetasig2 = -1/sig2^2*(t(x)%*%invMn%*%y - t(x)%*%invMn%*%x%*%beta )
dD1beta = jacobian(Dphi1,beta,y=y,xx=x,p=p)
dDphibeta = jacobian(Dphiphi2,beta,phi=phi,y=y,xx=x,p=p)
dbetaphi = 1/sig2*(dD1beta - dDphibeta)
dphisig = 1/sig2^2*(-Dphi1(beta,y,x,p)+ Dphiphi2(beta,phi,y,x,p))
H = matrix(0,r,r)
H[1:l,1:l] = dbetabeta
H[l+1,l+1] = dsig2sig2
H[(l+2):r,(l+2):r] = dphiphi
H[l+1,1:l] = H[1:l,l+1] = dbetasig2
H[(l+2):r,1:l] = dbetaphi
H[1:l,(l+2):r] = t(dbetaphi)
H[l+1,(l+2):r] = H[(l+2):r,l+1] = dphisig
return(H)
}
Qt = function(theta,y,yy,x) {
l = ncol(x)
n = length(y)
r = length(theta)
beta = matrix(theta[1:l])
sig2 = theta[l+1]
phi = theta[(l+2):r]
p = length(phi)
Mn = MatArp(phi,n)
invMn = solve(Mn)
delta = sum(diag(yy%*%invMn)) - 2*t(y)%*%invMn%*%x%*%beta+t(beta)%*%t(x)%*%invMn%*%x%*%beta
dbetabeta = -1/sig2*(t(x)%*%invMn%*%x)
dsig2sig2 = n/2/sig2^2 - 1*delta/sig2^3
  daa = (hessian(aphi,phi)) # hessian of log(det(Mn))
dphiphi = - 1/2*daa -1/2/sig2*(hessian(M1,phi,yy=yy)+hessian(M2,phi,vec1=t(-2*y+x%*%beta),vec2=x%*%beta))
dbetasig2 = -1/sig2^2*(t(x)%*%invMn%*%y - t(x)%*%invMn%*%x%*%beta )
dbetaphi = 1/sig2*t(jacobian(M2,phi,vec1=t(y-x%*%beta),vec2=x))
dphisig = 1/2/sig2^2*(jacobian(M1,phi,yy=yy)+ jacobian(M2,phi,vec1=t(-2*y+x%*%beta),vec2=x%*%beta))
H = matrix(0,r,r)
H[1:l,1:l] = dbetabeta
H[l+1,l+1] = dsig2sig2
H[(l+2):r,(l+2):r] = dphiphi
H[l+1,1:l] = H[1:l,l+1] = dbetasig2
H[(l+2):r,1:l] = dbetaphi
H[1:l,(l+2):r] = t(dbetaphi)
H[l+1,(l+2):r] = H[(l+2):r,l+1] = dphisig
return(H)
}
######################################################
#local influence
######################################################
#ytil = y+w
deltaw = function(theta,y,x) {
l = ncol(x)
n = length(y)
r = length(theta)
beta = matrix(theta[1:l])
sig2 = theta[l+1]
phi = theta[(l+2):r]
p = length(phi)
Mn = MatArp(phi,n)
invMn = solve(Mn)
e = y-x%*%beta
dbeta = 1/sig2*t(x)%*%invMn
dsig2 = 1/sig2^2*t(e)%*%invMn
dphi = -1/sig2*t(jacobian(einvM,phi,e=e))
ddelta = rbind(dbeta,dsig2,dphi)
return(ddelta)
}
################################################
#scheme 2 ----> Sigmatil = D(w)*Sigma
# NOTE: this first version of deltaSigi is immediately overwritten by the
# redefinition below, which replaces the dbeta term with a symmetrized form.
deltaSigi = function(theta,y,yy,x,i) {
l = ncol(x)
n = length(y)
r = length(theta)
beta = matrix(theta[1:l])
sig2 = theta[l+1]
phi = theta[(l+2):r]
p = length(phi)
Mn = MatArp(phi,n)
invMn = solve(Mn)
e = y-x%*%beta
vec = rep(0,n);vec[i]=1
Di = diag(vec)
invMDi = invMn%*%Di
dbeta = -1/sig2*t(x)%*%invMDi%*%e
dsig2 = -1/2/sig2^2*(sum(diag(yy%*%invMDi))-2*t(y)%*%invMDi%*%x%*%beta+t(x%*%beta)%*%invMDi%*%x%*%beta)
d1 = jacobian(M1i,phi,yy=yy,Di=Di)
dphi = 1/2/sig2*t((d1)-2*jacobian(M2,phi,vec1 = t(y),vec2=Di%*%x%*%beta)+jacobian(M2,phi,vec1 = t(x%*%beta),vec2=Di%*%x%*%beta))
ddelta = rbind(dbeta,dsig2,dphi)
return(ddelta)
}
deltaSigi = function(theta,y,yy,x,i) {
l = ncol(x)
n = length(y)
r = length(theta)
beta = matrix(theta[1:l])
sig2 = theta[l+1]
phi = theta[(l+2):r]
p = length(phi)
Mn = MatArp(phi,n)
invMn = solve(Mn)
vec = rep(0,n);vec[i]=1
Di = diag(vec)
invMDi = invMn%*%Di
DiinvM= Di%*%invMn
dbeta = -1/2/sig2*t(x)%*%(2*DiinvM%*%y-(invMDi+DiinvM)%*%x%*%beta)
dsig2 = -1/2/sig2^2*(sum(diag(yy%*%invMDi))-2*t(y)%*%invMDi%*%x%*%beta+t(x%*%beta)%*%invMDi%*%x%*%beta)
d1 = jacobian(M1i,phi,yy=yy,Di=Di)
dphi = 1/2/sig2*t((d1)-2*jacobian(M2,phi,vec1 = t(y),vec2=Di%*%x%*%beta)+jacobian(M2,phi,vec1 = t(x%*%beta),vec2=Di%*%x%*%beta))
ddelta = rbind(dbeta,dsig2,dphi)
return(ddelta)
}
#scheme 3 ----> x(w)=x+w*t(1)
deltaxp = function(theta,y,x,indp) {
indp = matrix(indp,ncol=1)
l = ncol(x)
n = length(y)
r = length(theta)
beta = matrix(theta[1:l])
sig2 = theta[l+1]
phi = theta[(l+2):r]
p = length(phi)
Mn = MatArp(phi,n)
invMn = solve(Mn)
dbetaw = 1/sig2*(indp%*%t(y-x%*%beta)-as.numeric(t(indp)%*%beta)*t(x))%*%invMn
dsigw = -as.numeric(t(indp)%*%beta)/sig2^2*(t(y-x%*%beta)%*%invMn)
dphiw = as.numeric(t(indp)%*%beta)/sig2*t(jacobian(M3,phi,vec1=(y-x%*%beta)))
ddelta = rbind(dbetaw,dsigw,dphiw)
return(ddelta)
}
| /scratch/gouwar.j/cran-all/cranData/ARCensReg/R/auxfunctions.R |
ARCensReg = function(cc, lcl=NULL, ucl=NULL, y, x, p=1, M=10, perc=0.25, MaxIter=400,
pc=0.18, tol=0.0001, show_se=TRUE, quiet=FALSE){
m = length(y)
if (!is.numeric(y)) stop("y must be a numeric vector")
if (!is.numeric(x)) stop("x must be a numeric matrix")
if (!is.matrix(x)) x = as.matrix(x)
if (det(t(x)%*%x)==0) stop("the columns of x must be linearly independent")
## Verify error at parameters specification
#No data
if ( (length(x) == 0) | (length(y) == 0) | (length(cc) == 0)) stop("All parameters must be provided")
#Validating if exists NA's
if (sum(cc%in%c(0,1)) < length(cc)) stop("The elements of the vector cc must be 0 or 1")
if (sum(is.na(x)) > 0) stop("There are some NA values in x")
if (sum(is.na(cc)) > 0) stop("There are some NA values in cc")
miss = which(is.na(y))
if (length(miss)>0) { if (sum(cc[miss]) != length(miss)) stop ("NA values in y must be specified through arguments cc, lcl, and ucl")
} else { miss = NULL }
#Validating dims data set
if (ncol(as.matrix(y)) > 1) stop("y must have just one column")
if (ncol(as.matrix(cc)) > 1) stop("cc must have just one column")
  if (nrow(as.matrix(x)) != m) stop("x does not have the same number of rows as y")
  if (length(cc) != m) stop("cc does not have the same length as y")
if (sum(cc) > 0){
if (is.null(lcl) | is.null(ucl)) stop("lcl and ucl must be provided for censored data")
if (!is.numeric(lcl) | !is.numeric(ucl)) stop("lcl and ucl must be numeric vectors")
if (length(miss)>0){
censor = (cc==1 & !is.na(y))
if (any(is.infinite(lcl[censor]) & is.infinite(ucl[censor]))) stop("lcl or ucl must be finite for censored data")
} else {
if (any(is.infinite(lcl[cc==1]) & is.infinite(ucl[cc==1]))) stop("lcl or ucl must be finite for censored data")
}
    if (length(lcl) != m) stop("lcl does not have the same length as y")
    if (length(ucl) != m) stop("ucl does not have the same length as y")
if (ncol(as.matrix(lcl)) > 1) stop("lcl must have just one column")
if (ncol(as.matrix(ucl)) > 1) stop("ucl must have just one column")
if (sum(is.na(lcl))>0 | sum(is.na(ucl))>0) stop("There are some NA values in lcl or ucl")
if (!all(lcl[cc==1]<ucl[cc==1])) stop ("lcl must be smaller than ucl")
}
#Validating supports
if (length(p) != 1) stop("p must be a positive integer value")
if (!is.numeric(p)) stop("p must be a positive integer value")
if (p!=round(p) | p<=0) stop("p must be a positive integer value")
if (tol <= 0) stop("tolerance must be a positive value (suggested to be small)")
if (!is.numeric(MaxIter)) stop("MaxIter must be a positive integer value")
if (MaxIter<=0 | MaxIter%%1!=0) stop("MaxIter must be a positive integer value")
if (!is.numeric(M)) stop("M must be a positive integer value")
if (M<=1 | M%%1!=0) stop("M must be a positive integer value (greater than 1)")
if (!is.numeric(pc)) stop("pc must be a real number in [0,1]")
if (pc>1 | pc<0) stop("pc must be a real number in [0,1]")
if (!is.numeric(perc)) stop("perc must be a real number in [0,1)")
if (perc>=1 | perc<0) stop("perc must be a real number in [0,1)")
if (!is.logical(show_se)) stop("show_se must be TRUE or FALSE")
if (!is.logical(quiet)) stop("quiet must be TRUE or FALSE")
#Running the algorithm
if (!quiet) {
cat('\n')
call <- match.call()
cat("Call:\n")
print(call)
cat('\n')
}
out = suppressWarnings(SAEM(cc, lcl, ucl, y, x, p, M, perc, MaxIter, pc, miss, tol, show_se, quiet))
l = ncol(x)
lab = numeric(p + l + 1)
if (sum(abs(x[,1])) == nrow(x)){ for (i in 1:ncol(x)) lab[i] = paste('beta',i-1,sep='')
} else { for (i in 1:ncol(x)) lab[i] = paste('beta',i,sep='') }
lab[l+1] = 'sigma2'
for (i in ((l+2):length(lab))) lab[i] = paste('phi',i-l-1,sep='')
if (show_se) {
tab = round(rbind(out$res$theta, out$res$SE), 4)
colnames(tab) = lab
rownames(tab) = c("","s.e.")
} else {
tab = round(rbind(out$res$theta), 4)
colnames(tab) = lab
rownames(tab) = c("")
}
critFin = c(out$res$loglik, out$res$AIC, out$res$BIC, out$res$AICcorr)
critFin = round(t(as.matrix(critFin)), digits=3)
dimnames(critFin) = list(c("Value"),c("Loglik", "AIC", "BIC","AICcorr"))
obj.out = out$res
obj.out$call = match.call()
obj.out$tab = tab
obj.out$critFin = critFin
if (sum(cc) == 0){ cens = "no censoring"
} else {
if (sum(cc) == length(miss)){ cens = "missing"
} else {
if (all(is.infinite(lcl)) & any(is.finite(ucl))){ cens = "left" }
if (all(is.infinite(ucl)) & any(is.finite(lcl))){ cens = "right" }
if (any(is.finite(ucl-lcl))){ cens = "interval" }
}
}
obj.out$cens = cens
obj.out$nmiss = ifelse(is.null(miss),0,length(miss))
obj.out$ncens = sum(cc)
if (sum(cc)>0){
obj.out$converge = (out$iter < MaxIter)
obj.out$MaxIter = MaxIter
obj.out$M = M
obj.out$pc = pc
}
obj.out$time = out$time
#plot
if (sum(cc) > 0){
obj.out$plot$cpl = pc*MaxIter
obj.out$plot$npar = l+1+p
obj.out$plot$labels = list()
if (sum(abs(x[,1]))==nrow(x)) { for(i in 1:l){obj.out$plot$labels[[i]] = bquote(beta[.(i-1)])}
} else { for(i in 1:l){obj.out$plot$labels[[i]] = bquote(beta[.(i)])} }
obj.out$plot$labels[[l+1]] = bquote(sigma^2)
for(i in 1:p){obj.out$plot$labels[[i+l+1]] = bquote(phi[.(i)])}
obj.out$plot$Theta = out$Theta
}
#class
class(obj.out) = 'ARpCRM'
invisible(obj.out)
}
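# Illustrative usage (a minimal sketch; the data are simulated with rARCens(),
# defined elsewhere in this package, and the tuning values are arbitrary).
# Guarded with if (FALSE) so the file can be sourced without side effects.
if (FALSE) {
  set.seed(1)
  x = cbind(1, runif(100))
  sim = rARCens(n = 100, beta = c(1, 2), phi = 0.5, sig2 = 1, x = x,
                cens = 'left', pcens = 0.1)
  fit = ARCensReg(cc = sim$data$cc, lcl = sim$data$lcl, ucl = sim$data$ucl,
                  y = sim$data$y, x = x, p = 1, M = 10, MaxIter = 100)
  summary(fit)
}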
#' @export
print.ARpCRM = function(x, ...){
cat('---------------------------------------------------\n')
cat(' Censored Linear Regression Model with AR Errors \n')
cat('---------------------------------------------------\n')
cat("Call:\n")
print(x$call)
cat('\n')
cat('Estimated parameters:\n')
print(x$tab)
cat('\n')
cat('Model selection criteria:\n')
print(x$critFin)
cat('\n')
cat('Details:\n')
cat('Type of censoring:', x$cens, '\n')
if (x$ncens > 0) {
cat('Number of missing values:', x$nmiss, '\n')
cat("Convergence reached?:", x$converge, '\n')
cat('Iterations:', x$iter,"/",x$MaxIter, '\n')
cat('MC sample:', x$M, '\n')
cat('Cut point:', x$pc, '\n')
}
cat("Processing time:", x$time, units(x$time), '\n')
}
#' @export
summary.ARpCRM = function(object, ...){
cat('---------------------------------------------------\n')
cat(' Censored Linear Regression Model with AR Errors \n')
cat('---------------------------------------------------\n')
cat("Call:\n")
print(object$call)
cat('\n')
cat('Estimated parameters:\n')
print(object$tab)
cat('\n')
cat('Model selection criteria:\n')
print(object$critFin)
cat('\n')
cat('Details:\n')
cat('Type of censoring:', object$cens, '\n')
if (object$ncens>0) {
cat('Number of missing values:', object$nmiss, '\n')
cat("Convergence reached?:", object$converge, '\n')
cat('Iterations:', object$iter,"/",object$MaxIter, '\n')
cat('MC sample:', object$M, '\n')
cat('Cut point:', object$pc, '\n')
}
cat("Processing time:", object$time, units(object$time), '\n')
}
#' @export
plot.ARpCRM = function(x, ...) {
if (x$ncens == 0) stop("plot only defined for cases with censoring")
count = x$iter
npar = x$plot$npar
label = x$plot$labels
myplot = vector("list", npar)
for (i in 1:npar){
data1 = data.frame(z=x$plot$Theta[,i])
myplot[[i]] = ggplot(data1, aes(x=seq(1,count), y=z)) + geom_line() +
geom_vline(xintercept=x$plot$cpl, color="red", linetype="twodash") +
labs(x="Iteration", y=label[[i]]) + theme_bw()
}
nrows = ifelse(npar%%3==0, npar%/%3, (npar%/%3)+1)
grid.arrange(grobs=myplot, nrow=nrows, ncol=3)
}
#' @export
predict.ARpCRM = function(object, x_pred, ...){
# validation
x_pred = as.matrix(x_pred)
  if (ncol(x_pred)!=ncol(as.matrix(object$x))) stop("x_pred must have the same number of columns as x")
if (sum(is.na(x_pred))>0) stop("There are some NA values in x_pred")
if (!is.numeric(x_pred)) stop("x_pred must be a numeric matrix")
y = object$yest
x = object$x
beta1 = object$beta
sigmae = object$sigma2
phi1 = object$phi
m = length(c(y))
h = nrow(x_pred)
sig_pred = MatArp(phi1, m+h)*sigmae
pred = x_pred%*%beta1 + sig_pred[m+1:h, 1:m]%*%solve(sig_pred[1:m, 1:m])%*%(y - x%*%beta1)
return (pred)
}
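# Illustrative usage (a minimal sketch; 'fit' is a hypothetical ARpCRM object
# returned by ARCensReg, and x_new must have the same number of columns as fit$x).
if (FALSE) {
  x_new = cbind(1, runif(5))
  y_new = predict(fit, x_pred = x_new)  # conditional-mean forecasts
}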
#' @export
residuals.ARpCRM = function(object, ...) {
x = object$x
p = length(object$phi)
m = nrow(x)
residuals = numeric(m)
residuals[1:p] = 0
res = object$yest - x%*%object$beta
for (i in (p+1):m) residuals[i] = res[i] - sum(object$phi*res[(i-1):(i-p)])
resid = list(residuals=residuals[-(1:p)], quantile.resid=(residuals[-(1:p)])/sqrt(object$sigma2))
class(resid) = "residARpCRM"
return(resid)
}
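# Illustrative usage (a minimal sketch; 'fit' is a hypothetical ARpCRM object
# returned by ARCensReg).
if (FALSE) {
  res = residuals(fit)
  plot(res)  # time plot, QQ plot, histogram, and ACF of the quantile residuals
}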
#' @export
plot.residARpCRM = function(x, ...) {
resid = data.frame(resid=x$quantile.resid)
replot = list(4)
m = nrow(resid)
#
replot[[1]] = ggplot(resid, aes(x=seq(1,m),y=resid)) + geom_line() + labs(x="Time", y="Quantile Residual") +
geom_hline(yintercept=c(-2,0,2), color="red", linetype="twodash") + theme_bw()
#
replot[[2]] = ggplot(resid, aes(sample=resid)) + stat_qq_band(distribution="norm", identity=TRUE) +
stat_qq_line(distribution="norm", color="red", linetype="twodash", identity=TRUE) +
stat_qq_point(distribution="norm", identity=TRUE, size=1, alpha=0.5) +
labs(x="Theoretical Quantiles", y="Sample Quantiles") + theme_bw()
#
replot[[3]] = ggplot(resid, aes(x=resid)) + geom_histogram(aes(y=..density..), fill="grey", color="black", bins=15) +
stat_function(fun=dnorm, col="red", linetype="twodash") + labs(x="Quantile Residual",y="Density") + theme_bw()
#
bacfdf = with(acf(resid, plot=FALSE), data.frame(lag, acf))
replot[[4]] = ggplot(data=bacfdf, aes(x=lag, y=acf)) + geom_hline(aes(yintercept=0)) + theme_bw() +
geom_segment(aes(xend=lag, yend=0)) + labs(x="Lag", y="ACF") +
geom_hline(yintercept=c(qnorm(0.975)/sqrt(m),-qnorm(0.975)/sqrt(m)), colour="red", linetype="twodash")
#
grid.arrange(grobs=replot, widths=c(1, 1, 1), layout_matrix = rbind(c(1, 1, 1), c(2, 3, 4)))
}
| /scratch/gouwar.j/cran-all/cranData/ARCensReg/R/finalfunction.R |
InfDiag = function(object, k=3, indpar=rep(1,length(object$theta)), indcolx=rep(1,ncol(object$x)),
perturbation='y')
{
## Verify error at parameters specification
if (!is(object, "ARpCRM")) stop("object must be of class 'ARpCRM'")
if (!is.numeric(k)) stop("k must be a number")
theta = object$theta
yest = object$yest
yyest = object$yyest
x = object$x
#Validating if exists NA's
if(perturbation=='x') if(sum(indcolx)==0) stop("indcolx must have at least one element equal to 1")
if(sum(indpar)==0) stop("indpar must have at least one element equal to 1")
#Validating dims data set
if (length(indpar)!= length(theta)) stop("indpar does not have the same length than theta")
  if(perturbation=='x') if (length(indcolx)!= ncol(x)) stop("the length of indcolx must be equal to the number of columns of x")
#Validating supports
if(length(k)!=1) stop("k must be an integer value")
if(!any(perturbation==c('y','Sigma','x'))) stop("perturbation must be 'y', 'Sigma' or 'x'")
if(perturbation=='x') if(any(!(indcolx %in% c(0,1)))) stop("indcolx must a vector with elements 0 or 1")
if(any(!(indpar %in% c(0,1)))) stop("indpar must a vector with elements 0 or 1")
#Running the algorithm
if (perturbation=='y') M0 = InfDiagys(theta=theta, yest=yest, yyest=yyest, x=x,
k=k, indpar=indpar)
if (perturbation=='Sigma') M0 = InfDiagSigs(theta=theta, yest=yest, yyest=yyest,
x=x, k=k, indpar=indpar)
if (perturbation=='x') M0 = InfDiagxps(theta=theta, yest=yest, yyest=yyest, x=x,
k=k, indp=indcolx, indpar=indpar)
bench = mean(M0) + k*sd(M0)
cat("Perturbation scheme:", perturbation, '\n')
cat('Benchmark:', round(bench,3), '\n')
detecpoints = which(M0>bench)
if (length(detecpoints)>0) cat('Detected points:', detecpoints, '\n')
else cat('Detected points:', 0, '\n')
M1 = list(M0=M0, perturbation=perturbation, benchmark=bench)
class(M1) = "DiagARpCRM"
return(M1)
}
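# Illustrative usage (a minimal sketch; 'fit' is a hypothetical ARpCRM object
# returned by ARCensReg).
if (FALSE) {
  diag.y = InfDiag(fit, k = 3, perturbation = 'y')
  plot(diag.y)  # index plot of M(0) with the benchmark line
}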
#' @export
plot.DiagARpCRM = function(x, ...) {
M0 = x$M0
n = nrow(matrix(M0))
M0y2 = data.frame(M0 = M0)
rownames(M0y2) = 1:n
bm = x$benchmark
if (x$perturbation == "y") text = 'Response perturbation'
if (x$perturbation == "Sigma") text = 'Scale matrix perturbation'
if (x$perturbation == "x") text = 'x perturbation'
ggplot(M0y2, aes(x=1:n, y=M0)) + geom_point(shape=ifelse(M0>bm,16,21), size=1.9) +
geom_hline(yintercept=bm, linetype="dashed") +
geom_text(aes(label=ifelse(M0>bm, rownames(M0y2), '')), vjust=1.5) +
labs(x="Index", y=text) + theme_bw()
}
| /scratch/gouwar.j/cran-all/cranData/ARCensReg/R/finalfunctionDiag.R |
rARCens = function(n, beta, phi, sig2=1, x=rep(1,n), cens='left', pcens=0.1, innov="norm", nu=NULL)
{
if ((!is.numeric(n)) | (length(n)!=1) | n<=0) stop("n must be a positive integer number")
if (!is.numeric(x)) stop("x must be a numeric matrix")
if (!is.matrix(x)) x = as.matrix(x)
if (det(t(x)%*%x) == 0) stop("The columns of x must be linearly independent")
if (!is.numeric(beta)) stop("beta must be a numeric vector")
if (!is.numeric(phi)) stop("phi must be a numeric vector")
if (!is.numeric(sig2)) stop("sig2 must be a number")
if (!is.numeric(pcens)) stop("pcens must be a number")
## Verify error at parameters specification
#No data
if ((length(x) == 0) | (length(beta) == 0) | (length(phi) == 0)) stop("All parameters must be provided.")
#Validating if exists NA's
if (sum(is.na(x)) > 0) stop("There are some NA values in x")
#Validating dims data set
if (length(beta) != ncol(as.matrix(x))) stop("The length of beta must be equal to the number of columns of x")
if (nrow(x) != n) stop("The number of rows of x must be equal to n")
#Validating supports
if (length(sig2)!=1) stop("sig2 must be a positive value")
if (sig2 <= 0) stop("sig2 must be a positive value")
if (!is.numeric(pcens)) stop("pcens must be a real number in [0,1]")
if (pcens > 1 | pcens < 0) stop("pcens must be a real number in [0,1]")
  if (cens!='left' & cens!='right') stop("cens must be 'left' or 'right'")
  pit = tphitopi(phi)
  if (any(pit>=1) | any(pit<=-1)) stop('AR(p) is non-stationary; choose another phi')
  if (innov!='norm' & innov!='t') stop("innov must be 'norm' or 't'")
if (innov == 't'){
if (is.null(nu)) stop('nu must be provided for Student-t innovations')
if (!is.numeric(nu)) stop('nu must be a positive number (greater than 2)')
if (nu <= 2) stop('nu must be a positive number (greater than 2)')
}
#Running the algorithm
out = list()
out$data = gerarARCens(n=n, beta=beta, phi=phi, sig2=sig2, x=x, cens=cens, pcens=pcens,
innov=innov, nu=nu)
if (innov == "norm") out$param = c(beta, sig2, phi)
if (innov == "t") out$param = c(beta, sig2, phi, nu)
class(out) = 'rARCens'
return(out)
}
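# Illustrative usage (a minimal sketch; the values are arbitrary): simulate 100
# observations from an AR(1) censored regression with 10% left censoring.
if (FALSE) {
  set.seed(1)
  x = cbind(1, runif(100))
  sim = rARCens(n = 100, beta = c(1, 2), phi = 0.4, sig2 = 1, x = x,
                cens = 'left', pcens = 0.1)
  head(sim$data)  # columns y, cc, lcl, ucl
  sim$param       # c(beta, sig2, phi)
}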
## Simulate a dataset ##
gerarARCens = function(n, beta, phi, sig2, x, cens, pcens, innov, nu) {
p = length(phi)
beta = as.matrix(beta)
x = as.matrix(x)
sigma = sqrt(sig2)
if (innov == 'norm'){
erro = as.matrix(arima.sim(n=n, model=list(ar=phi), sd=sigma))
} else {
erro = as.matrix(arima.sim(model=list(ar=phi), n=n,
rand.gen=function(n,...){sigma*rt(n,nu)}))
}
resp = x%*%beta + erro
if (pcens == 0){
return (data.frame(y=resp, cc=rep(0, n)))
} else {
if (innov == 't'){ prob2 = ifelse(pcens*n/(n-p)>1, 1, pcens*n/(n-p)) }
if (cens=='left') {
if (innov == 'norm'){
cte = as.numeric(quantile(resp, probs=pcens))
cc = (resp<cte) + 0
} else {
cte = as.numeric(quantile(resp[-(1:p)], probs=prob2))
cc = rep(0, n); cc[-(1:p)] = (resp[-(1:p)]<cte) + 0
}
y = resp*(1-cc) + cte*cc
LI = rep(-Inf, n)
LS = rep(cte, n)
if (innov == 't'){ LS[1:p] = rep(min(y[1:p], cte), p) }
} else {
if (innov == 'norm'){
cte = as.numeric(quantile(resp, probs=1-pcens))
cc = (resp>cte) + 0
} else {
cte = as.numeric(quantile(resp[-(1:p)], probs=1-prob2))
cc = rep(0, n); cc[-(1:p)] = (resp[-(1:p)]>cte) + 0
}
y = resp*(1-cc) + cte*cc
LI = rep(cte, n)
LS = rep(Inf, n)
if (innov == 't'){ LI[1:p] = rep(max(y[1:p], cte), p) }
}
return(data.frame(y=y, cc=cc, lcl=LI, ucl=LS))
} # End else
}
| /scratch/gouwar.j/cran-all/cranData/ARCensReg/R/finalfunctionGerar.R |
##############################################################
# Parameter estimation - Normal innovations #
##############################################################
# cc must be 1 for missing observations
SAEM = function(cc, LI, LS, y, x, p, M, perc, MaxIter, pc, miss, tol, show_ep, quiet){
if (!quiet) pb = txtProgressBar(min = 0, max = MaxIter, style = 3)
# Initial values
m = length(y)
if (length(miss)==0) {
beta1 = solve(t(x)%*%x)%*%t(x)%*%y
l = length(beta1)
pi1 = as.numeric(pacf((y - x%*%beta1), lag.max = p, plot=F)$acf)
pi1 = optim(pi1, lcc, y=y, x=x, lower=rep(-.999,p), upper=rep(0.999,p), method='L-BFGS-B')$par
phi1 = estphit(pi1)
lambda = matrix(c(-1,phi1))
beta1 = solve(t(x)%*%solve(MatArp(phi1,m))%*%x)%*%t(x)%*%solve(MatArp(phi1,m))%*%y
spi = t(lambda)%*%Dbeta(beta1,y,x,p)%*%lambda
sigmae = as.numeric(spi/m)
} else {
beta1 = solve(t(x[-miss,])%*%x[-miss,])%*%t(x[-miss,])%*%y[-miss]
l = length(beta1)
pi1 = as.numeric(pacf((y - x%*%beta1)[-miss],lag.max=p,plot=F)$acf)
pi1 = optim(pi1,lcc,y=y[-miss],x=as.matrix(x[-miss,]),lower = rep(-.999,p), upper = rep(0.999,p),method='L-BFGS-B')$par
phi1 = estphit(pi1)
lambda = matrix(c(-1,phi1))
beta1 = solve(t(as.matrix(x[-miss,]))%*%solve(MatArp(phi1,m-length(miss)))%*%as.matrix(x[-miss,]))%*%
t(as.matrix(x[-miss,]))%*%solve(MatArp(phi1,m-length(miss)))%*%as.matrix(y[-miss])
spi = t(lambda)%*%Dbeta(beta1,y[-miss],as.matrix(x[-miss,]),p)%*%lambda
sigmae = as.numeric(spi/(m-length(miss)))
}
teta = c(beta1, sigmae, phi1)
r = length(teta)
tempoi = Sys.time()
if ((sum(cc)==0) & (length(miss)==0)) {
teta1 = teta
media = x%*%beta1
V = MatArp(phi1, m)
Psi = sigmae*V
#
H = -Ht(teta1, y, x)
ep_par = sqrt(diag(solve(H)))
logver = (LogVerosCens(cc, y, media, Psi, LI, LS)$ver)
npar = length(c(teta1))
loglik = logver
AICc = -2*loglik +2*npar
AICcorr = AICc + ((2*npar*(npar+1))/(m-npar-1))
BICc = -2*loglik +log(m)*npar
tempof = Sys.time()
dift = tempof - tempoi
if (!quiet) setTxtProgressBar(pb, MaxIter)
if (show_ep) resultados = list(beta=c(beta1), sigma2=sigmae, phi=phi1, pi1=pi1, theta=teta1, SE=ep_par)
else resultados = list(beta=c(beta1), sigma2=sigmae, phi=phi1, pi1=pi1, theta=teta1)
resultados$loglik=loglik
resultados$AIC=AICc
resultados$BIC=BICc
resultados$AICcorr=AICcorr
resultados$yest=y
resultados$yyest=y%*%t(y)
resultados$x=x
obj.out = list(res=resultados, time=dift)
} else {
## SAEM algorithm ################################
MG = round(M/(1-perc),0)
M0 = MG - M #burn in
Theta = matrix(NA,MaxIter,length(teta))
#criterio <- 10000
critval = critval2 = 1
delta1 = 0.001
delta2 = tol
count = 0
media = x%*%beta1
V = MatArp(phi1,m)
Psi = sigmae*V
# Sequence of decreasing positive numbers: smoothing parameter
if (pc==1){
seqq = rep(1,pc*MaxIter)
} else {
seqq = c(rep(1,pc*MaxIter),(1/((((pc*MaxIter)+1):MaxIter)-(pc*MaxIter))))
seqq = c(rep(1,MaxIter-length(seqq)),seqq)
}
SAEM_ss = array(data=0,dim=c(MaxIter+1))
SAEM_xx = array(data=0,dim=c(MaxIter+1,l,l))
SAEM_xy = array(data=0,dim=c(MaxIter+1,l))
SAEM_y = array(data=0,dim=c(MaxIter+1,sum(cc)))
SAEM_yy = array(data=0,dim=c(MaxIter+1,m,m))
SAEM_D = array(data=0,dim=c(MaxIter+1,p+1,p+1))
if (show_ep & (sum(cc)!=0)) {
SAEM_delta = array(data=0,dim=c(MaxIter+1,r))
SAEM_G = array(data=0,dim=c(MaxIter+1,r,r))
SAEM_H = array(data=0,dim=c(MaxIter+1,r,r))
}
tyi = y
while(critval2 < 3) {
count = count + 1
if (!quiet) setTxtProgressBar(pb, count)
# E-1: Sampling step
t1 = tyi
gibbs = amostradordegibbs(MG,M0,m,t1,cc,y,media,Psi,LI,LS)
amostragibbs = gibbs$amostragibbs
uyi = matrix(amostragibbs[,1:m],nrow=M,ncol=m)
# E-2: Stochastic approximation
somaD = matrix(0, p+1, p+1)
somass = 0
somaxx = matrix(0, l, l)
somaxy = matrix(0, l, 1)
somay = matrix(0, sum(cc), 1)
somayy = matrix(0, m, m)
somadelta = matrix(0, r, 1)
somaG = matrix(0, r, r)
invV = solve(V)
for (k in 1:M) {
yi = matrix(uyi[k,], nrow=m, ncol=1)
somass = somass + (t(yi-media)%*%invV%*%(yi-media))
somaxx = somaxx + (t(x)%*%invV%*%x)
somaxy = somaxy + t(x)%*%invV%*%(yi)
somay = somay + as.matrix(yi[cc==1,])
somayy = somayy + yi%*%t(yi)
if (show_ep & (sum(cc)!=0)) {
J = Jt(teta,yi,x)
H = Ht(teta,yi,x)
somadelta = somadelta + J
somaG = somaG + (-H - (J)%*%t(J))
}
somaD = somaD + Dbeta(beta1,yi,x,p)
}
E_D = 1/M*somaD
E_ss = (1/M)*somass
E_xx = (1/M)*somaxx
E_xy = (1/M)*somaxy
E_y = (1/M)*somay
E_yy = (1/M)*somayy
if (show_ep & (sum(cc)!=0)) {
E_delta = 1/M * somadelta
E_G = 1/M*somaG
}
# E-2: Stochastic approximation
SAEM_D[count+1,,] = SAEM_D[count,,] + seqq[count]*(E_D - SAEM_D[count,,])
SAEM_ss[count+1] = SAEM_ss[count] + seqq[count]*(E_ss - SAEM_ss[count])
SAEM_xx[count+1,,] = SAEM_xx[count,,] + seqq[count]*(E_xx - SAEM_xx[count,,])
SAEM_xy[count+1,] = SAEM_xy[count,] + seqq[count]*(E_xy - SAEM_xy[count,])
SAEM_y[count+1,] = SAEM_y[count,] + seqq[count]*(E_y - SAEM_y[count,])
SAEM_yy[count+1,,] = SAEM_yy[count,,] + seqq[count]*(E_yy - SAEM_yy[count,,])
if (show_ep & (sum(cc)!=0)) {
SAEM_delta[count+1,] = SAEM_delta[count,] + seqq[count]*(E_delta - SAEM_delta[count,])
SAEM_G[count+1,,] = SAEM_G[count,,] + seqq[count]*(E_G - SAEM_G[count,,])
SAEM_H[count+1,,] = SAEM_G[count+1,,] - (SAEM_delta[count+1,])%*%t(SAEM_delta[count+1,])
}
tyi = y; tyi[cc==1] = SAEM_y[count+1,]
# CM: Conditional maximization
beta1 = solve(SAEM_xx[count+1,,])%*%SAEM_xy[count+1,]
media = x%*%beta1
sigmae = (1/m)*(SAEM_ss[count+1])
sigmae = as.numeric(sigmae)
pi1 = optim(pi1,lc,lower = rep(-.999,p), upper = rep(0.999,p), n=m, D = SAEM_D[count+1,,],method='L-BFGS-B')$par
phi1 = estphit(pi1)
teta1 = c(beta1, sigmae, phi1)
V = MatArp(phi1,m)
Psi = sigmae*V
criterio2 = sqrt((teta1/teta-1)%*%(teta1/teta-1))
if(max(criterio2) < delta2){critval2 = critval2+1}else{critval2 = 0}
if(count == MaxIter){critval2 = 10}
Theta[count,] = teta1
teta = teta1
} # End while
if (!quiet) setTxtProgressBar(pb, MaxIter)
Theta = Theta[1:count,]
logver = LogVerosCens(cc, y, media, Psi, LI, LS)$ver
tempof = Sys.time()
dift = tempof - tempoi
npar = length(c(teta1))
loglik = logver
AICc = -2*loglik +2*npar
AICcorr = AICc + ((2*npar*(npar+1))/(m-npar-1))
BICc = -2*loglik +log(m)*npar
if (show_ep & (sum(cc)!=0)) {
vartheta = solve(SAEM_H[count+1,,])
ep_par = sqrt(diag(vartheta))
}
yest = numeric(m)
yest[cc==0] = y[cc==0]
yest[cc==1] = SAEM_y[count+1,]
if (show_ep) resultados = list(beta=c(beta1), sigma2=sigmae, phi=phi1, pi1=pi1, theta=teta1, SE=ep_par)
else resultados = list(beta=c(beta1), sigma2=sigmae, phi=phi1, pi1=pi1, theta=teta1)
resultados$loglik = loglik
resultados$AIC = AICc
resultados$BIC = BICc
resultados$AICcorr = AICcorr
resultados$yest = yest
resultados$yyest = SAEM_yy[count+1,,]
resultados$x = x
resultados$iter = count
resultados$criteria=criterio2
obj.out = list(res=resultados, time=dift, Theta=Theta)
} # End else
return(obj.out)
}
| /scratch/gouwar.j/cran-all/cranData/ARCensReg/R/princfunction.R |
#source('princfunction.R')
InfDiagys = function(theta, yest, yyest, x, k=3, indpar=rep(1,length(theta))){
n = length(yest)
hes = Qt(theta,yest,yyest,x)
#if (plots) par(mfrow=c(1,1),mar=c(4, 4, 3, 2) + 0.1)
############################### y(w) = y+w
delta = deltaw(theta,yest,x)
npar = length(theta)
Q = solve(-hes)
if (sum(indpar)<npar) {
indt = (1:npar)[indpar==0]
b22 = matrix(0,ncol=npar,nrow=npar)
b22[indt,indt] = solve(hes[indt,indt])
Q = Q + b22
}
matF = 2*t(delta)%*%Q%*%delta
ci = diag(matF)
auto = eigen(matF)
autoval = auto$values[abs(auto$values)>.0001]
autovalp = autoval/sum(autoval)
autovec = auto$vectors[,abs(auto$values)>.0001]
Mautovalp = matrix(autovalp,n,length(autovalp),byrow=T)
M0y = apply(Mautovalp*autovec^2,1,sum)
# bm = mean(M0y)+k*sd(M0y)
# if (plots) {
# plot(M0y,pch=ifelse(M0y>bm,16,1),ylab='Response perturbation')
# abline(h=bm,lty='dashed')
# if (length(which(M0y>bm))>0) text(which(M0y>bm)+length(M0y)/10,M0y[M0y>bm],labels=which(M0y>bm))
# }
return(M0y)
}
InfDiagSigs = function(theta, yest, yyest, x, k=3, indpar=rep(1,length(theta))){
n = length(yest)
hes = Qt(theta,yest,yyest,x)
############################### Sigmatil=Sigma D(w)
delta = NULL
for (i in 1:n) delta = cbind(delta,deltaSigi(theta,yest,yyest,x,i))
npar = length(theta)
Q = solve(-hes)
if (sum(indpar)<npar) {
indt = (1:npar)[indpar==0]
b22 = matrix(0,ncol=npar,nrow=npar)
b22[indt,indt] = solve(hes[indt,indt])
Q = Q + b22
}
matF = 2*t(delta)%*%Q%*%delta
auto = eigen(matF)
autoval = auto$values[abs(auto$values)>.0001]
autovalp = autoval/sum(autoval)
autovec = auto$vectors[,abs(auto$values)>.0001]
Mautovalp = matrix(autovalp,n,length(autovalp),byrow=T)
M0sig = apply(Mautovalp*autovec^2,1,sum)
# bm = mean(M0sig)+k*sd(M0sig)
# if (plots) {
# plot(M0sig,pch=ifelse(M0sig>bm,16,1),ylab='Scale matrix perturbation')#,type='h')
# abline(h=bm,lty='dashed')
# if (length(which(M0sig>bm))>0) text(which(M0sig>bm)+length(M0sig)/10,M0sig[M0sig>bm],labels=which(M0sig>bm))
# }
###
return(M0sig)
}
InfDiagxps = function(theta,yest,yyest,x,k=3,indp=rep(1,ncol(x)),indpar=rep(1,length(theta))){
n = length(yest)
hes = Qt(theta,yest,yyest,x)
###############################
delta = deltaxp(theta,yest,x,indp)
npar = length(theta)
Q = solve(-hes)
if (sum(indpar)<npar) {
indt = (1:npar)[indpar==0]
b22 = matrix(0,ncol=npar,nrow=npar)
b22[indt,indt] = solve(hes[indt,indt])
Q = Q + b22
}
matF = 2*t(delta)%*%Q%*%delta
auto = eigen(matF)
autoval = auto$values[abs(auto$values)>.0001]
autovalp = autoval/sum(autoval)
autovec = auto$vectors[,abs(auto$values)>.0001]
Mautovalp = matrix(autovalp,n,length(autovalp),byrow=T)
M0xp = apply(Mautovalp*autovec^2,1,sum)
# bm = mean(M0xp)+k*sd(M0xp)
# if (plots) {
# plot(M0xp,pch=ifelse(M0xp>bm,16,1),ylab='x perturbation')
# abline(h=bm,lty='dashed')
# if (length(which(M0xp>bm))>0) text(which(M0xp>bm)+length(M0xp)/20,M0xp[M0xp>bm],labels=which(M0xp>bm))
# }
return(M0xp)
}
| /scratch/gouwar.j/cran-all/cranData/ARCensReg/R/princfunctionDiag.R |
#' @title Fit the AR-Cokriging model and make predictions
#' @description This is a simple and high-level funciton to fit autoregressive
#' cokriging models to multifidelity computer model outputs.
#'
#' @param formula a list of \eqn{s} elements, each of which contains the formula to specify fixed basis functions or regressors.
#' @param output a list of \eqn{s} elements, each of which contains a matrix of computer model outputs.
#' @param input a list of \eqn{s} elements, each of which contains a matrix of inputs.
#'
#' @param cov.model a string indicating the type of covariance
#' function in AR-cokriging models. Current covariance functions include
#' \describe{
#' \item{exp}{product form of exponential covariance functions.}
#' \item{matern_3_2}{product form of Matern covariance functions with
#' smoothness parameter 3/2.}
#' \item{matern_5_2}{product form of Matern covariance functions with
#' smoothness parameter 5/2.}
#' \item{Gaussian}{product form of Gaussian covariance functions.}
#' \item{powexp}{product form of power-exponential covariance functions with roughness parameter fixed at 1.9.}
#' }
#'
#' @param nugget.est a logical value indicating whether nugget parameter is included or not. Default value is \code{FALSE}.
#' @param input.new a matrix including new inputs for making predictions
#' @param prior a list of arguments to set up the prior distributions
#' \describe{
#' \item{name}{the name of the prior. Current implementation includes
#' \code{JR}, \code{Reference}, \code{Jeffreys}, \code{Ind_Jeffreys}}
#' \item{hyperparam}{hyperparameters in the priors.
#' For jointly robust (JR) prior, three parameters are included:
#' \eqn{a} refers to the polynomial penalty to avoid singular correlation
#' matrix with a default value 0.2; \eqn{b} refers to the exponential penalty to avoid
#' diagonal correlation matrix with a default value 1; nugget.UB is the upper
#' bound of the nugget variance with default value 1, which indicates that the
#' nugget variance has support (0, 1).}
#'
#'}
#'
#' @param opt a list of arguments to set up the \code{\link{optim}} routine.
#' @param NestDesign a logical value indicating whether the
#' experimental design is hierarchically nested within each level
#' of the code.
#'
#' @param tuning a list of arguments to control the MCEM algorithm for non-nested
#' design. It includes the arguments
#' \describe{
#' \item{maxit}{the maximum number of MCEM iterations.}
#' \item{tol}{a tolerance to stop the MCEM algorithm. If the parameter
#' difference between any two consecutive MCEM iterations is less than
#' this tolerance, the MCEM algorithm is stopped.}
#' \item{n.sample}{the number of Monte Carlo samples in the
#' MCEM algorithm.}
#' \item{verbose}{a logical value to show the MCEM iterations if it is true.}
#'}
#'
#'
#' @param info a list that contains
#' \describe{
#' \item{iter}{number of iterations used in the MCEM algorithm}
#' \item{eps}{parameter difference after the MCEM algorithm stops}
#'}
#' @return The main call inside \code{\link{ARCokrig}} consists of
#' \code{\link{cokm}}, \code{\link{cokm.fit}}, and \code{\link{cokm.predict}}.
#' Thus, the function returns the \code{\link{cokm}} object and predictions
#' over new inputs.
#'
#' @author Pulong Ma <[email protected]>
#' @export
#'
#'
#' @seealso \code{\link{cokm}}, \code{\link{cokm.param}}, \code{\link{cokm.fit}}, \code{\link{cokm.predict}}
#'
#'
#' @references {
#' \itemize{
#' \item{Ma, P. (2019). ``Objective Bayesian Analysis of a Cokriging Model for Hierarchical Multifidelity Codes." arXiv:1910.10225. \url{https://arxiv.org/abs/1910.10225}. }
#' \item{Ma, P., Karagiannis, G., Konomi, B., Asher, T., Toro, G., and Cox, A. (2019) ``Multifidelity Computer Model Emulation with High-Dimensional Output: An Application to Storm Surge."
#' arXiv:1909.01836. \url{https://arxiv.org/abs/1909.01836}.}
#' }
#' }
#'
#' @examples
#'
#' ##############################################################
#' ##############################################################
#' ############ Example
#' Funcc = function(x){
#' return(0.5*(6*x-2)^2*sin(12*x-4)+10*(x-0.5)-5)
#' }
#'
#' Funcf = function(x){
#' z1 = Funcc(x)
#' z2 = 2*z1-20*x+20 + sin(10*cos(5*x))
#' return(z2)
#' }
#'
#' #####################################################################
#' ###### Nested design
#' #####################################################################
#' Dc <- seq(-1,1,0.1)
#' indDf <- c(1, 3, 6, 8, 10, 13, 17, 21)
#' zc <- Funcc(Dc)
#' Df <- Dc[indDf]
#' zf <- Funcf(Df)
#'
#' input.new = as.matrix(seq(-1,1,length.out=200))
#'
#'
#' ## fit and predict with the AR-Cokriging model
#'
#' out = ARCokrig(formula=list(~1,~1+x1), output=list(c(zc), c(zf)),
#' input=list(as.matrix(Dc), as.matrix(Df)),
#' cov.model="matern_5_2",
#' input.new=input.new)
#'
#' ## plot results
#' \donttest{
#' library(ggplot2)
#' cokrig = out$cokrig
#' df.l1 = data.frame(x=c(Dc), y=c(zc))
#' df.l2 = data.frame(x=c(Df), y=c(zf))
#' CI.lower = cokrig$lower95[[2]]
#' CI.upper = cokrig$upper95[[2]]
#' df.CI = data.frame(x=c(input.new),lower=CI.lower, upper=CI.upper)
#' df.pred = data.frame(x=c(input.new), y=cokrig$mu[[2]])
#'
#' g = ggplot(data.frame(x=c(-1,1)), aes(x)) +
#' stat_function(fun=Funcc, geom="line", aes(colour="level 1"), n=500) +
#' stat_function(fun=Funcf, geom="line", aes(colour="level 2"), n=500)
#'
#' g = g + geom_point(data=df.l1, mapping=aes(x=x, y=y), shape=16, size=2, color="black") +
#' geom_point(data=df.l2, mapping=aes(x=x, y=y), shape=17, size=2, color="black")
#'
#' g = g + geom_line(data=df.pred, aes(x=x, y=y, colour="cokriging"), inherit.aes=FALSE) +
#' geom_ribbon(data=df.CI, mapping=aes(x=x,ymin=lower, ymax=upper), fill="gray40",
#' alpha=0.3, inherit.aes=FALSE)
#' g = g + scale_colour_manual(name=NULL, values=c("red","blue", "turquoise3"),
#' breaks=c("cokriging","level 1", "level 2"))
#'
#' g = g + ggtitle("A Two-Level Example") +
#' theme(plot.title=element_text(size=14),
#' axis.title.x=element_text(size=14),
#' axis.text.x=element_text(size=14),
#' axis.title.y=element_text(size=14),
#' axis.text.y=element_text(size=14),
#' legend.text = element_text(size=12),
#' legend.direction = "horizontal",
#' legend.position = c(0.6, 0.1)) + xlab("") + ylab("")
#' print(g)
#'
#' }
#'
#'
#'
ARCokrig <- function(formula=list(~1,~1), output, input, cov.model="matern_5_2", nugget.est=FALSE,
input.new, prior=list(), opt=list(), NestDesign=TRUE, tuning=list(), info=list()){
## check the arguments
  input = .check.arg.ARCokrig(formula=formula, output=output, input=input,
                              input.new=input.new, prior=prior, opt=opt,
                              NestDesign=NestDesign, tuning=tuning)
#cat("\n Constructing the AR-Cokriging model object.\n\n")
obj = cokm(formula=formula, output=output, input=input, cov.model=cov.model,
nugget.est=nugget.est, prior=prior, opt=opt, NestDesign=NestDesign,
tuning=tuning, info=info)
## fit the AR-Cokriging model
#cat("\n Fit the AR-Cokriging model.\n\n")
obj = cokm.fit(obj)
## predict with the AR-Cokriging model
#cat("\n Predict with the AR-Cokriging model\n\n")
pred = cokm.predict(obj=obj, input.new=input.new)
return(list(obj=obj, cokrig=pred))
}
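
## A minimal usage sketch for a non-nested design, wrapped in `if (FALSE)` so
## nothing executes when this file is sourced. The JR prior hyperparameters
## (a = 0.2, b = 1, nugget.UB = 1) are the documented defaults; the exact shape
## of the hyperparam element and the tuning values below are illustrative
## assumptions based on the documentation above, not package defaults.
if (FALSE) {
  out.nonnested <- ARCokrig(formula = list(~1, ~1 + x1),
                            output = list(c(zc), c(zf)),
                            input = list(as.matrix(Dc), as.matrix(Df)),
                            cov.model = "matern_5_2",
                            input.new = input.new,
                            prior = list(name = "JR",
                                         hyperparam = list(a = 0.2, b = 1,
                                                           nugget.UB = 1)),
                            NestDesign = FALSE,
                            tuning = list(maxit = 30, tol = 1e-3,
                                          n.sample = 30, verbose = TRUE))
}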
#####################################################################
#####################################################################
.check.arg.ARCokrig <- function(formula, output, input, input.new,
prior, opt, NestDesign, tuning){
if(!is(formula, "list")){
stop("\n\n formula should be a list contaning the regressors at each code level.\n\n")
}
if(!is(output, "list")){
stop("\n\noutput should be a list of responses. Each element in a list should
contain output from a code level. The first level should contain
output from the code with the lowest fidelity.\n\n")
}
s = length(output)
if(!is(input, "list")){
stop("\n\ninput should be a list of inputs in computer models.\n\n")
}
for(t in 1:s){
if(!is(input[[t]], "matrix")){
message("\n\n coerce input to a matrix format.\n\n")
input[[t]] = as.matrix(input[[t]])
}
}
# if(!is(param, "list")){
# stop("\n\nparam should be a list with each element containing initial values for
# correlation parameters and nugget variance parameter (if needed).\n\n")
# }
if(!is(input.new, "matrix")){
stop("\n\ninput.new should be a matrix.\n\n")
}
if(!is(prior, "list")){
stop("\n\nhyperparam should be a list containing parameters
to setup the prior distributions.\n\n")
}
if(!is(opt, "list")){
stop("\n\nopt should be a list with each element containing optimization arguments
at each code level.\n\n")
}
if(!is(NestDesign, "logical")){
stop("NestDesign should be a logical value indicating whether the design is
hierarchically nested.")
}
if(!is(tuning, "list")){
stop("\n\n tuning should be a list containing tuning parameters to setup the
MCEM algorithm in non-nested design.\n")
}
  # Return the (possibly coerced) input list so the coercion above takes
  # effect in the caller.
  return(input)
}
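
## An illustrative check (wrapped in `if (FALSE)` so it never runs on source):
## passing a bare formula instead of a list trips the first stop() above.
## All argument values here are placeholders for demonstration only.
if (FALSE) {
  tryCatch(
    .check.arg.ARCokrig(formula = ~1,                  # not a list: error
                        output = list(rnorm(5)),
                        input = list(matrix(runif(5))),
                        input.new = matrix(runif(3)),
                        prior = list(), opt = list(),
                        NestDesign = TRUE, tuning = list()),
    error = function(e) message(conditionMessage(e)))
}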
| /scratch/gouwar.j/cran-all/cranData/ARCokrig/R/ARCokrig.R |
##### CLASS DEFINITIONS ########
#' @docType class
#' @title cokm Class
#' @description This is an S4 class definition for \code{\link{cokm}} in the \code{\link{ARCokrig}} package
#'
#' @slot formula a list of \eqn{s} elements, each of which contains the formula to specify fixed basis functions or regressors.
#' @slot output a list of \eqn{s} elements, each of which contains a matrix of computer model outputs.
#' @slot input a list of \eqn{s} elements, each of which contains a matrix of inputs.
#' @slot param a list of \eqn{s} elements, each of which contains a vector of initial values for
#' correlation parameters (and nugget variance parameters if
#' nugget terms are included in AR-cokriging models).
#' @slot cov.model a string indicating the type of covariance
#' function in AR-cokriging models. Current covariance functions include
#' \describe{
#' \item{exp}{product form of exponential covariance functions.}
#' \item{matern_3_2}{product form of Matern covariance functions with
#' smoothness parameter 3/2.}
#' \item{matern_5_2}{product form of Matern covariance functions with
#' smoothness parameter 5/2.}
#' \item{Gaussian}{product form of Gaussian covariance functions.}
#' \item{powexp}{product form of power-exponential covariance functions with roughness parameter fixed at 1.9.}
#' }
#'
#' @slot nugget.est a logical value indicating whether a nugget parameter is included or not. Default value is \code{FALSE}.
#' @slot prior a list of arguments to set up the prior distributions with the reference prior as default
#' \describe{
#' \item{name}{the name of the prior. Current implementation includes
#' \code{JR}, \code{Reference}, \code{Jeffreys}, \code{Ind_Jeffreys}}
#' \item{hyperparam}{hyperparameters in the priors.
#' For jointly robust (JR) prior, three parameters are included:
#' \eqn{a} refers to the polynomial penalty to avoid singular correlation
#' matrix with a default value 0.2; \eqn{b} refers to the exponential penalty to avoid
#' diagonal correlation matrix with a default value 1; nugget.UB is the upper
#' bound of the nugget variance with default value 1, which indicates that the
#' nugget variance has support (0, 1).}
#'
#'}
#'
#' @slot opt a list of arguments to set up the \code{\link{optim}} routine.
#' @slot NestDesign a logical value indicating whether the
#' experimental design is hierarchically nested within each level
#' of the code.
#'
#' @slot tuning a list of arguments to control the MCEM algorithm for non-nested
#' design. It includes the arguments
#' \describe{
#' \item{maxit}{the maximum number of MCEM iterations.}
#' \item{tol}{a tolerance to stop the MCEM algorithm. If the parameter
#' difference between any two consecutive MCEM iterations is less than
#' this tolerance, the MCEM algorithm is stopped.}
#' \item{n.sample}{the number of Monte Carlo samples in the
#' MCEM algorithm.}
#' \item{verbose}{a logical value to show the MCEM iterations if it is true.}
#'}
#'
#'
#' @slot info a list that contains
#' \describe{
#' \item{iter}{number of iterations used in the MCEM algorithm}
#' \item{eps}{parameter difference after the MCEM algorithm stops.}
#'}
#' @keywords AR-Cokriging Objective-Bayes Computer-Experiments Uncertainty-Quantification
#'
#' @author Pulong Ma <[email protected]>
#'
setClass("cokm", representation(
formula = "list",
output = "list",
input = "list",
param = "list",
cov.model = "character",
nugget.est = "logical",
prior = "list",
opt = "list",
NestDesign = "logical",
tuning = "list",
info = "list")
)
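
## A minimal sketch of the three-step workflow that ARCokrig() wraps around
## the "cokm" class: construct the object, fit it, then predict at new inputs.
## The data objects (Dc, Df, zc, zf, input.new) are assumed to be as in the
## ARCokrig() example; the call mirrors the one made inside ARCokrig().
if (FALSE) {
  obj <- cokm(formula = list(~1, ~1 + x1),
              output = list(c(zc), c(zf)),
              input = list(as.matrix(Dc), as.matrix(Df)),
              cov.model = "matern_5_2", nugget.est = FALSE,
              prior = list(), opt = list(),
              NestDesign = TRUE, tuning = list(), info = list())
  obj <- cokm.fit(obj)
  pred <- cokm.predict(obj = obj, input.new = input.new)
}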
##### CLASS DEFINITIONS ########
#' @docType class
#' @title mvcokm Class
#' @description This is an S4 class definition for \code{\link{mvcokm}} in the \code{\link{ARCokrig}} package
#'
#' @slot formula a list of \eqn{s} elements, each of which contains the formula to specify fixed basis functions or regressors.
#' @slot output a list of \eqn{s} elements, each of which contains a matrix of computer model outputs.
#' @slot input a list of \eqn{s} elements, each of which contains a matrix of inputs.
#' @slot param a list of \eqn{s} elements, each of which contains a vector of initial values for
#' correlation parameters (and nugget variance parameters if
#' nugget terms are included in AR-cokriging models).
#' @slot cov.model a string indicating the type of covariance
#' function in AR-cokriging models. Current covariance functions include
#' \describe{
#' \item{exp}{product form of exponential covariance functions.}
#' \item{matern_3_2}{product form of Matern covariance functions with
#' smoothness parameter 3/2.}
#' \item{matern_5_2}{product form of Matern covariance functions with
#' smoothness parameter 5/2.}
#' \item{Gaussian}{product form of Gaussian covariance functions.}
#' \item{powexp}{product form of power-exponential covariance functions with roughness parameter fixed at 1.9.}
#' \item{aniso_exp}{anisotropic form of exponential covariance function.}
#' \item{aniso_matern_3_2}{anisotropic form of Matern covariance functions with
#' smoothness parameter 3/2.}
#' \item{aniso_matern_5_2}{anisotropic form of Matern covariance functions with
#' smoothness parameter 5/2.}
#' }
#'
#' @slot nugget.est a logical value indicating whether the nugget is included or not. Default value is \code{FALSE}.
#'
#' @slot prior a list of arguments to set up the prior distributions with the jointly robust prior as default
#' \describe{
#' \item{name}{the name of the prior. Current implementation includes
#' \code{JR}, \code{Reference}, \code{Jeffreys}, \code{Ind_Jeffreys}}
#' \item{hyperparam}{hyperparameters in the priors.
#' For jointly robust (JR) prior, three parameters are included:
#' \eqn{a} refers to the polynomial penalty to avoid singular correlation
#' matrix with a default value 0.2; \eqn{b} refers to the exponential penalty to avoid
#' diagonal correlation matrix with a default value 1; nugget.UB is the upper
#' bound of the nugget variance with default value 1, which indicates that the
#' nugget variance has support (0, 1).}
#'}
#'
#'
#' @slot opt a list of arguments to set up the \code{\link{optim}} routine.
#' @slot NestDesign a logical value indicating whether the
#' experimental design is hierarchically nested within each level
#' of the code.
#'
#' @slot tuning a list of arguments to control the MCEM algorithm for non-nested
#' design. It includes the arguments
#' \describe{
#' \item{maxit}{the maximum number of MCEM iterations.}
#' \item{tol}{a tolerance to stop the MCEM algorithm. If the parameter
#' difference between any two consecutive MCEM iterations is less than
#' this tolerance, the MCEM algorithm is stopped.}
#' \item{n.sample}{the number of Monte Carlo samples in the
#' MCEM algorithm.}
#'}
#'
#' @slot info a list that contains
#' \describe{
#' \item{iter}{number of iterations used in the MCEM algorithm}
#' \item{eps}{parameter difference after the MCEM algorithm stops}
#'}
#'
#' @keywords AR-Cokriging Objective-Bayes Computer-Experiments Uncertainty-Quantification
#'
#' @author Pulong Ma <[email protected]>
#'
setClass("mvcokm", representation(
formula = "list",
output = "list",
input = "list",
param = "list",
cov.model = "character",
nugget.est = "logical",
prior = "list",
opt = "list",
NestDesign = "logical",
tuning="list",
info="list")
)
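
## The slots declared above can be listed directly with base R's methods
## package, which is a quick way to verify the class definitions:
if (FALSE) {
  slotNames("cokm")   # formula, output, input, param, cov.model, nugget.est, ...
  slotNames("mvcokm") # same slot layout as "cokm"
}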
| /scratch/gouwar.j/cran-all/cranData/ARCokrig/R/AllClass.R |