#' Genotypic data of 655 genotypes for loblolly pine dataset
#'
#' Genotypic data for a total of 4,853 SNPs (coded as 0, 1, 2 and -9 for missing)
#' on 655 genotypes of Loblolly Pine (\emph{Pinus taeda} L.).
#' Dataset modified from supplementary material from Resende \emph{et al.} (2012).
#' This dataset differs from the original in that some genotypes were artificially
#' set to missing on a full-sib family basis.
#'
#' @docType data
#'
#' @usage geno.pine655
#'
#' @format matrix
#'
#' @keywords datasets
#'
#' @references
#' Resende, M.F.R., Munoz, P., Resende, M.D.V., Garrick, D.J., Fernando, R.L., Davis, J.M.,
#' Jokela, E.J., Martin, T.A., Peter, G.F., and Kirst, M. 2012. Accuracy of genomic
#' selection methods in a standard data set of loblolly pine (\emph{Pinus taeda} L.).
#' Genetics 190:1503-1510.
#'
#' @examples
#' geno.pine655[1:5, 1:5]
#'
#' @name geno.pine655
NULL
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/geno_pine655.R
#' Genotypic data of 926 genotypes for loblolly pine dataset
#'
#' Genotypic data for a total of 4,853 SNPs (coded as 0, 1, 2 and -9 for missing)
#' on 926 genotypes of Loblolly Pine (\emph{Pinus taeda} L.).
#' Dataset obtained from supplementary material in Resende \emph{et al.} (2012).
#'
#' @docType data
#'
#' @usage geno.pine926
#'
#' @format matrix
#'
#' @keywords datasets
#'
#' @references
#' Resende, M.F.R., Munoz, P., Resende, M.D.V., Garrick, D.J., Fernando, R.L., Davis, J.M.,
#' Jokela, E.J., Martin, T.A., Peter, G.F., and Kirst, M. 2012. Accuracy of genomic
#' selection methods in a standard data set of loblolly pine (\emph{Pinus taeda} L.).
#' Genetics 190:1503-1510.
#'
#' @examples
#' geno.pine926[1:5, 1:5]
#'
#' @name geno.pine926
NULL
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/geno_pine926.R
#' Genotypic data for Atlantic salmon dataset
#'
#' Genotypic data on 1,481 Atlantic salmon samples. A total of 17,156
#' SNP markers (coded as 0, 1, 2 and \code{NA} for missing) are included in this dataset.
#' Dataset obtained from supplementary material in Robledo \emph{et al.} (2018).
#'
#' @docType data
#'
#' @usage geno.salmon
#'
#' @format matrix
#'
#' @keywords datasets
#'
#' @references
#' Robledo D., Matika O., Hamilton A., and Houston R.D. 2018.
#' Genome-wide association and genomic selection for resistance
#' to amoebic gill disease in Atlantic salmon.
#' G3 Genes, Genomes, Genetics 8:1195-1203.
#'
#' @examples
#' geno.salmon[1:5, 1:5]
#'
#' @name geno.salmon
NULL
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/geno_salmon.R
#' Estimates minor allele frequency (MAF)
#'
#' @param M The additive \eqn{n \times p} matrix coded with 0, 1, 2, or \code{NA}
#' (default = \code{NULL}).
#'
#' @return A vector with the MAF of each molecular marker
#'
#' @keywords internal
maf <- function(M = NULL){
# Get the frequency of the markers.
maf <- colMeans(M, na.rm = TRUE)/2
# Identify the MAF.
maf <- apply(cbind(maf, 1 - maf), 1, min)
return(maf)
}
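# A minimal usage sketch (not part of the package), with a made-up toy matrix:
# a column of c(0, 2) has an allele frequency of 0.5, so its MAF is 0.5, while a
# column of c(2, 2) has frequency 1 and MAF 0.
# Mtoy <- matrix(c(0, 2, 2, 2), nrow = 2,
#                dimnames = list(c("id1", "id2"), c("m1", "m2")))
# maf(Mtoy)  # m1 = 0.5, m2 = 0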
#' Estimates observed and expected heterozygosity
#'
#' @param M The additive \eqn{n \times p} matrix coded as 0, 1, 2 (default = \code{NULL}).
#'
#' @return A data frame with the observed (\code{ho}) and expected (\code{he}) heterozygosity of each marker.
#'
#' @keywords internal
heterozygosity <- function(M = NULL){
# Get q.
q <- maf(M)
# Get p.
p <- 1 - q
# Get the expected heterozygosity.
he <- 2 * p * q
  # Get the observed heterozygosity (proportion of heterozygous calls per marker).
ho <- colMeans(M == 1, na.rm = TRUE)
return(data.frame(ho = ho, he = he))
}
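# A minimal usage sketch (not part of the package), with a made-up toy matrix:
# a marker with genotypes c(0, 2) has q = 0.5, so he = 2 * 0.5 * 0.5 = 0.5 and
# ho = 0 (no heterozygous calls); a marker with genotypes c(1, 1) has ho = 1.
# Mtoy <- matrix(c(0, 2, 1, 1), nrow = 2,
#                dimnames = list(c("id1", "id2"), c("m1", "m2")))
# heterozygosity(Mtoy)  # m1: ho = 0, he = 0.5; m2: ho = 1, he = 0.5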
#' Estimates call rate
#'
#' @param M The additive \eqn{n \times p} matrix with any coding (default = \code{NULL}).
#' @param margin A character indicating the margin for call rate calculations.
#' Options are: \code{row} and \code{col} (default = \code{row}).
#'
#' @return A vector containing the call rate.
#'
#' @keywords internal
callrate <- function(M = NULL, margin = c("row", "col")){
# Collect input.
margin <- match.arg(margin)
# CR by row.
if (margin == "row") cr <- 100 - rowSums(is.na(M))/ncol(M) * 100
# CR by col.
if (margin == "col") cr <- 100 - colSums(is.na(M))/nrow(M) * 100
return(cr)
}
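# A minimal usage sketch (not part of the package): call rate is the percentage
# of non-missing values. With one NA in a hypothetical 2 x 2 toy matrix, the
# affected row and column each have a call rate of 50.
# Mna <- matrix(c(0, NA, 1, 2), nrow = 2,
#               dimnames = list(c("id1", "id2"), c("m1", "m2")))
# callrate(Mna, margin = "row")  # id1 = 100, id2 = 50
# callrate(Mna, margin = "col")  # m1 = 50, m2 = 100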
#' Estimates the population level inbreeding (Fis) by marker
#'
#' @param M The additive \eqn{n \times p} matrix with any coding (default = \code{NULL}).
#' @param margin A character indicating the margin for the Fis calculations.
#' Options are: \code{col} and \code{row} (default = \code{col}).
#'
#' @return A vector containing the Fis for the markers.
#'
#' @keywords internal
Fis <- function(M = NULL, margin = c("col", "row")){
# Collect input.
margin <- match.arg(margin)
  # H by column (markers).
  if (margin == "col") H <- heterozygosity(M = M)
  # H by row (transpose so rows are treated as markers).
  if (margin == "row") H <- heterozygosity(M = t(M))
# Calculate Fis.
Fis <- ifelse(test = H[, "he"] == 0,
yes = 0,
no = 1 - (H[,"ho"] / H[,"he"]))
return(Fis)
}
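# A minimal usage sketch (not part of the package): Fis = 1 - ho/he per marker,
# with he = 0 mapped to Fis = 0. A marker with genotypes c(0, 2) has he = 0.5
# and ho = 0, giving Fis = 1 (a complete deficit of heterozygotes).
# Fis(matrix(c(0, 2), ncol = 1, dimnames = list(c("id1", "id2"), "m1")))  # 1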
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/genomic_summaries.R
#' Reports summary statistics, plots and filter options for a given kinship matrix K
#'
#' It reports summary statistics, plots and allows for some filter options
#' for diagonal and off-diagonal elements for a given kinship matrix.
#' The input matrix can be a pedigree-based
#' relationship matrix \eqn{\boldsymbol{A}}, a genomic relationship matrix \eqn{\boldsymbol{G}} or a
#' hybrid relationship matrix \eqn{\boldsymbol{H}}.
#' Individual names should be assigned to \code{rownames} and \code{colnames}.
#'
#' @param K Input of a kinship matrix in full format (\eqn{n \times n}) (default = \code{NULL}).
#' @param diagonal.thr.large A threshold value to flag large diagonal values (default = \code{1.2}).
#' @param diagonal.thr.small A threshold value to flag small diagonal values (default = \code{0.8}).
#' @param duplicate.thr A threshold value to flag possible duplicates. Any pair of individuals
#' whose value of
#' \eqn{\boldsymbol{k}_{i,j}\mathbin{/}\sqrt{\boldsymbol{k}_{i,i} \times \boldsymbol{k}_{j,j}}}
#' is larger than this threshold is identified as a duplicate (default = \code{0.95}).
#' @param clean.diagonal If \code{TRUE} returns a kinship matrix filtered by values smaller than
#' \code{diagonal.thr.large} and larger than \code{diagonal.thr.small} (default = \code{FALSE}).
#' @param clean.duplicate If \code{TRUE} returns a kinship matrix without the flagged duplicate individuals.
#' All individuals involved are removed (default = \code{FALSE}).
#' @param plots If \code{TRUE} generates graphical output of the diagonal and off-diagonal
#' values of the kinship matrix (default = \code{TRUE}).
#' @param sample.plot A numeric value between 0 and 1 indicating the proportion
#' of the data points to be sampled for fast plotting of off-diagonal values.
#' Note that for proportions other than 1, the method is not exact and low
#' proportions are only recommended for large kinship matrices (default = \code{1}).
#' @param message If \code{TRUE} diagnostic messages are printed on screen (default = \code{TRUE}).
#'
#' @return A list with the following elements:
#' \itemize{
#' \item{\code{list.diagonal}: a data frame with the list of flagged large or small diagonal values.}
#' \item{\code{list.duplicate}: a data frame with the list of possible duplicates.}
#' \item{\code{clean.kinship}: output of kinship matrix filtered without the flagged diagonal
#' and/or duplicate individuals.}
#' \item{\code{plot.diag}: histogram with the distribution of diagonal values from the kinship matrix.}
#' \item{\code{plot.offdiag}: histogram with the distribution of off-diagonal values from the kinship matrix.}
#' }
#'
#' @export
#'
#' @examples
#' # Get G matrix.
#' G <- G.matrix(M = geno.apple, method = "VanRaden")$G
#'
#' # Diagnose G.
#' G_summary <- kinship.diagnostics(
#' K = G,
#' diagonal.thr.large = 1.3, diagonal.thr.small = 0.7, clean.diagonal = TRUE,
#' duplicate.thr = 0.8, clean.duplicate = TRUE,
#' sample.plot = 0.50)
#' ls(G_summary)
#' dim(G_summary$clean.kinship)
#' G_summary$clean.kinship[1:5, 1:5]
#' G_summary$list.duplicate
#' G_summary$list.diagonal
#' G_summary$plot.diag
#' G_summary$plot.offdiag
#'
kinship.diagnostics <- function(K = NULL,
diagonal.thr.large = 1.2, diagonal.thr.small = 0.8,
duplicate.thr = 0.95, clean.diagonal = FALSE,
clean.duplicate = FALSE,
plots = TRUE, sample.plot = 1, message = TRUE){
# Check if the class of K is matrix.
if (is.null(K) || !inherits(K, "matrix")) {
stop("K should be a valid object of class matrix")
}
# Check the attributes of K
if (is.null(rownames(K))){
stop('Individual names not assigned to rows of matrix K.')
}
if (is.null(colnames(K))){
stop('Individual names not assigned to columns of matrix K.')
}
if ((identical(rownames(K), colnames(K))) == FALSE){
stop("Rownames and colnames of matrix K do not match.")
}
# Check on other input
if (duplicate.thr < 0 | duplicate.thr > 1) {
stop("Specification of duplicate.thr must be between 0 and 1.")
}
  if (diagonal.thr.large < diagonal.thr.small) {
    stop("Value of diagonal.thr.large must be equal to or larger than diagonal.thr.small.")
  }
  if (diagonal.thr.large < 0 | diagonal.thr.small < 0) {
    stop("Values of diagonal.thr.large and diagonal.thr.small must both be positive.")
  }
if (sample.plot <= 0 | sample.plot > 1) {
stop("Values of sample.plot must be between 0 and 1.")
}
# Preparing submatrices
n <- nrow(K)
#indNames <- rownames(K)
diagK <- diag(K) # Vector of diagonal
#offdiag <- K[lower.tri(K, diag=FALSE)]
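  # cov2cor() rescales K to k(i,j)/sqrt[k(i,i)*k(j,j)], the statistic compared
  # against duplicate.thr to flag possible duplicates.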
Kcorr <- cov2cor(K)
K.sparse <- full2sparse(K)
corrS <- full2sparse(Kcorr)
K.sparse <- data.frame(K.sparse, Corr=corrS[,3])
offK <- K.sparse[K.sparse$Row != K.sparse$Col,]
rm(K.sparse, Kcorr, corrS)
# SOME STATISTICS
# Some general statistics and report in 'rp'
if (message){
message("Matrix dimension is: ", n, "x", n)
}
rank <- try(qr(K)$rank, silent=TRUE)
# Commented out
#if (message){
# if(class(rank) == "try-error"){
# message("Rank cannot be obtained due to missing values!")
# } else {
# message("Rank of matrix is: ",rank)
# }
#}
range.diagonal <- c(min=min(diagK, na.rm=TRUE), max=max(diagK, na.rm=TRUE))
if (message){
message("Range diagonal values: ", round(range.diagonal[1], 5),
" to ",round(range.diagonal[2], 5))
}
mean.diag <- mean(diagK, na.rm=TRUE)
if (message){
message("Mean diagonal values: ", round(mean.diag, 5))
}
range.off.diagonal <- c(min=min(offK$Value, na.rm=TRUE), max=max(offK$Value, na.rm=TRUE))
if (message){
message("Range off-diagonal values: ", round(range.off.diagonal[1], 5),
" to ",round(range.off.diagonal[2], 5))
}
mean.off.diag <- mean(offK$Value, na.rm=TRUE)
if (message){
message("Mean off-diagonal values: ", round(mean.off.diag, 5))
}
##########################
# DEALING with diagonal
df.list.diag <- data.frame(
value = sort(diagK[diagK > diagonal.thr.large
| diagK < diagonal.thr.small], decreasing=TRUE))
# Generating list of flagged potential duplicates
df.list.duplicate <- offK[offK$Corr > duplicate.thr,]
df.list.duplicate <- data.frame(df.list.duplicate, Indiv.A=rownames(K)[df.list.duplicate$Row],
Indiv.B=colnames(K)[df.list.duplicate$Col])
rownames(df.list.duplicate) <- NULL
df.list.duplicate <- df.list.duplicate[,c(5,6,3,4)]
df.list.duplicate <- df.list.duplicate[order(df.list.duplicate$Corr, decreasing=TRUE),]
rownames(df.list.duplicate) <- NULL
# Generating new K if requested with clean.diagonal.
if (clean.diagonal){
if (nrow(df.list.diag) > 0){
Kclean <- K[-which(rownames(K) %in% row.names(df.list.diag)),
-which(rownames(K) %in% row.names(df.list.diag))]
} else {
if (message){
message("No individuals filtered out by the diagonal thresholds, as none were found.")
}
      # Kclean is set to NULL here because nothing was filtered.
Kclean <- NULL
}
} else {
Kclean <- NULL
}
# Generating new K if requested with clean.duplicate.
if (clean.duplicate){
if (nrow(df.list.duplicate) > 0){
idx.offdiag <- unique(c(df.list.duplicate$Indiv.A, df.list.duplicate$Indiv.B))
# idx.offdiag <- unique(df.list.duplicate$Indiv.A)
if (is.null(Kclean)) {
Kclean <- K[-which(rownames(K) %in% idx.offdiag),
-which(rownames(K) %in% idx.offdiag)]
} else {
Kclean <- Kclean[-which(rownames(Kclean) %in% idx.offdiag),
-which(rownames(Kclean) %in% idx.offdiag)]
}
} else {
if (message){
message("No individuals filtered by duplicate.thr = ", duplicate.thr,", as none were found.")
}
}
}
# Removing K
rm(K)
# SOME STATISTICS
# Report extreme cases
count <- length(diagK[diagK > diagonal.thr.large | diagK < diagonal.thr.small])
if (message){
message("There are ", count, " extreme diagonal values, outside < ", diagonal.thr.small,
" and > ", diagonal.thr.large)
}
count <- nrow(df.list.duplicate)
if (message){
message("There are ", count, " records of possible duplicates, based on: k(i,j)/sqrt[k(i,i)*k(j,j)] > ", duplicate.thr)
}
# Obtain histogram of diagonal ----------------------------------------------------------------
if (plots){
# Get data.frame for plotting.
diagK <- as.data.frame(diagK)
names(diagK) <- "value"
# Generate main plot.
p1 <- ggplot(diagK, aes(x=value)) +
geom_histogram(aes(y = after_stat(density)), fill='#0072B2', bins=40) +
geom_density(alpha=0.3, fill="grey", position='identity') +
theme_classic() +
theme(axis.title.x=element_blank(),
plot.title = element_text(face = "bold")) +
ggtitle("Diagonal Values")
# Generate boxplot.
p2 <- ggplot(aes(value), data = diagK) +
geom_boxplot() +
theme_void()
# Combine plots.
plot.diag <- plot_grid(p1, p2, rel_heights = c(1, 0.2), ncol = 1, nrow = 2, align = "hv")
# Obtain histogram of off-diagonal ------------------------------------------------------------
# Sampling for plot if requested.
if (sample.plot != 1){
offK <- offK[sample(
x = 1:nrow(offK),
size = floor(sample.plot * nrow(offK)),
replace = FALSE), , drop = FALSE]
}
p1 <- ggplot(offK, aes(x = Value)) +
geom_histogram(aes(y = after_stat(density)), fill = '#0072B2', bins = 40) +
geom_density(alpha = 0.3, fill = "grey", position = 'identity') +
theme_classic() +
theme(axis.title.x=element_blank(),
plot.title = element_text(face = "bold")) +
ggtitle("Off-diagonal Values")
# Boxplot
p2 <- ggplot(aes(Value), data = offK) +
geom_boxplot() +
theme_void()
# Combine plots.
plot.offdiag <- plot_grid(p1, p2, rel_heights = c(1, 0.2), ncol = 1, nrow = 2, align = "hv")
} else {
    # Nullify plots if not requested.
plot.diag <- plot.offdiag <- NULL
}
# Finalize ------------------------------------------------------------------------------------
if (nrow(df.list.duplicate) == 0) {df.list.duplicate <- NULL}
if (nrow(df.list.diag) == 0) {df.list.diag <- NULL}
return(list(list.diagonal=df.list.diag, list.duplicate=df.list.duplicate,
clean.kinship=Kclean, plot.diag=plot.diag, plot.offdiag=plot.offdiag))
}
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/kinship_diagnostics.R
#' Check the genomic relationship matrix G against
#' the pedigree relationship matrix A or vice versa
#'
#' Assesses a given genomic relationship matrix \eqn{\boldsymbol{G}} against the
#' pedigree relationship matrix \eqn{\boldsymbol{A}}, or vice versa,
#' to determine the matched and mismatched individuals.
#' If requested, it provides the cleaned versions containing only the matched individuals
#' between both matrices. The user should provide the matrices \eqn{\boldsymbol{G}} and
#' \eqn{\boldsymbol{A}} in full form (\eqn{ng \times ng} and \eqn{na \times na}, respectively).
#' Individual names should be assigned to \code{rownames} and \code{colnames} for both matrices.
#'
#' @param G Input of the genomic relationship matrix \eqn{\boldsymbol{G}} in full form (\eqn{ng \times ng}) (default = \code{NULL}).
#' @param A Input of the pedigree relationship matrix \eqn{\boldsymbol{A}} in full form (\eqn{na \times na}) (default = \code{NULL}).
#' @param clean If \code{TRUE} generates new clean \eqn{\boldsymbol{G}} and \eqn{\boldsymbol{A}}
#' matrices in full form containing only matched individuals (default = \code{TRUE}).
#' @param ord If \code{TRUE} it will order by ascending order of individual names
#' both of the clean \eqn{\boldsymbol{A}} and \eqn{\boldsymbol{G}} matrices (default = \code{TRUE}).
#' @param mism If \code{TRUE} generates two data frames with mismatched individual names
#' from the \eqn{\boldsymbol{G}} and \eqn{\boldsymbol{A}} matrices (default = \code{FALSE}).
#' @param RMdiff If \code{TRUE} it generates the matrix (in lower diagonal row-wise sparse form) of matched
#' observations from both the \eqn{\boldsymbol{G}} and \eqn{\boldsymbol{A}} matrices.
#' This matrix can be used to identify inconsistent values between matched matrices, but it can be very large
#' (default = \code{FALSE}).
#' @param message If \code{TRUE} diagnostic messages are printed on screen (default = \code{TRUE}).
#'
#' @return A list with the following elements:
#' \itemize{
#' \item{\code{Gclean}: a matrix with the portion of \eqn{\boldsymbol{G}} containing only matched individuals.}
#' \item{\code{Aclean}: a matrix with the portion of \eqn{\boldsymbol{A}} containing only matched individuals.}
#' \item{\code{mismG}: a vector containing the names of the individuals from matrix \eqn{\boldsymbol{G}} that are
#' missing in matrix \eqn{\boldsymbol{A}}.}
#' \item{\code{mismA}: a vector containing the names of the individuals from matrix \eqn{\boldsymbol{A}} that are
#' missing in matrix \eqn{\boldsymbol{G}}.}
#' \item{\code{RM}: a data frame with the observations from both the \eqn{\boldsymbol{G}} and \eqn{\boldsymbol{A}}
#' matched matrices, together with their absolute relationship difference.}
#' \item{\code{plotG2A}: scatterplot with the pairing of matched pedigree- against genomic-based
#' relationship values. This graph might take a long time to plot with large datasets.}
#' }
#'
#' @export
#'
#' @examples
#' \donttest{
#' # Get A matrix.
#' A <- AGHmatrix::Amatrix(data = ped.pine)
#' A[1:5,1:5]
#' dim(A)
#'
#' # Read and filter genotypic data.
#' M.clean <- qc.filtering(
#' M = geno.pine655,
#' maf = 0.05,
#' marker.callrate = 0.2, ind.callrate = 0.20,
#' na.string = "-9",
#' plots = FALSE)$M.clean
#'
#' # Get G matrix.
#' G <- G.matrix(M = M.clean, method = "VanRaden", na.string = "-9")$G
#' G[1:5, 1:5]
#' dim(G)
#'
#' # Match G2A.
#' check <- match.G2A(
#' A = A, G = G,
#' clean = TRUE, ord = TRUE, mism = TRUE, RMdiff = TRUE)
#' ls(check)
#' dim(check$Aclean)
#' dim(check$Gclean)
#' check$Aclean[1:5, 1:5]
#' check$Gclean[1:5, 1:5]
#' head(check$mismG)
#' head(check$mismA)
#' check$plotG2A
#' head(check$RM)
#' }
#'
match.G2A <- function(A = NULL, G = NULL, clean = TRUE, ord = TRUE,
mism = FALSE, RMdiff = FALSE, message = TRUE){
if (is.null(A) || !inherits(A, "matrix")) {
stop("A should be a valid object of class matrix.")
}
if (is.null(G) || !inherits(G, "matrix")) {
stop("G should be a valid object of class matrix.")
}
# Check the rownames/colnames
if (is.null(rownames(A))){
stop("Individual names not assigned to rows of matrix A.")
}
if (is.null(colnames(A))){
stop('Individual names not assigned to columns of matrix A.')
}
if ((identical(rownames(A), colnames(A))) == FALSE){
stop("Rownames and colnames of matrix A do not match.")
}
if (is.null(rownames(G))){
stop("Individual names not assigned to rows of matrix G.")
}
if (is.null(colnames(G))){
stop("Individual names not assigned to columns of matrix G.")
}
if ((identical(rownames(G), colnames(G))) == FALSE){
stop("Rownames and colnames of matrix G do not match.")
}
# Check for consistency between A and G
Aind <- row.names(A)
Gind <- row.names(G)
if (all(Aind %in% Gind) & message){
message("All ", ncol(A), " individuals from matrix A match those individuals from matrix G.")
}
if (all(Gind %in% Aind) & message){
message("All ", ncol(G), " individuals from matrix G match those individuals from matrix A.")
}
notGenotyped <- which((Aind %in% Gind) == FALSE)
notPedigree <- which((Gind %in% Aind) == FALSE)
# If G has different individuals than A, remove these individuals from G
if (length(notPedigree) > 0) {
if (message){
message("Matrix G has ", length(notPedigree), " individuals (out of ", ncol(G), ") NOT present on matrix A.")
}
if (clean) {
Gclean <- G[-notPedigree,-notPedigree]
} else {
Gclean <- NULL
}
    if (mism) {
      # Index the names vector directly so a single mismatch does not drop dimensions.
      rpG <- rownames(G)[notPedigree]
    } else {
      rpG <- NULL
    }
} else {
Gclean <- G
rpG <- NULL
}
# If A has different ind than G, remove these ind from A
if (length(notGenotyped) > 0) {
if (message){
message("Matrix A has ", length(notGenotyped), " individuals (out of ", ncol(A), ") NOT present on matrix G.")
}
if (clean) {
Aclean <- A[-notGenotyped, -notGenotyped]
} else {
Aclean <- NULL
}
    if (mism) {
      # Index the names vector directly so a single mismatch does not drop dimensions.
      rpP <- rownames(A)[notGenotyped]
    } else {
      rpP <- NULL
    }
} else {
Aclean <- A
rpP <- NULL
}
# Check order of A and G
if (all(row.names(Aclean) == row.names(Gclean)) == FALSE){
if (!ord) {
if (message){
message("Order of individual names from matched matrices A and G DO NOT agree.")
}
}
if (ord){
Gclean <- Gclean[order(rownames(Gclean), decreasing=FALSE),
order(colnames(Gclean), decreasing=FALSE)]
Aclean <- Aclean[order(rownames(Aclean), decreasing=FALSE),
order(colnames(Aclean), decreasing=FALSE)]
}
}
# TODO this section has to be improved since A.sparse can also be NULL!
# Sparse form matrices (faster for plots useful for RM matrix)
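  # Note: when clean = TRUE, Aclean and Gclean share the same dimension and individual
  # order after matching (and optional ordering), so corresponding rows of A.sparse
  # and G.sparse refer to the same pair of individuals.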
G.sparse <- full2sparse(K=Gclean, drop.zero=FALSE)
A.sparse <- full2sparse(K=Aclean, drop.zero=FALSE)
# Generating plot of Aclean vs Gclean
LL <- min(A.sparse[,3], G.sparse[,3])
UL <- max(A.sparse[,3], G.sparse[,3])
# Improved version of plot (faster). GG
p <- ggplot(data.frame(AValue = A.sparse[,3], GValue = G.sparse[,3]),
aes(x = AValue, y = GValue, color = 'black')) +
geom_scattermost(xy = cbind(A.sparse[,3], G.sparse[,3]), color = "#0072B2", pointsize = 2) +
geom_abline(linetype = "dashed") +
xlim(LL, UL) + ylim(LL, UL)+
labs(x="Pedigree Relationship (A matrix)",
y = "Genomic Relationship (G matrix)") +
theme_classic()
# RM matrix for diagnostics
if (isTRUE(RMdiff)) {
if (!isTRUE(clean)) {
stop("Option clean must be TRUE to produce RM data frame.")
}
RM <- data.frame(A.sparse[,1:2], AValue=round(A.sparse[,3],6), GValue=round(G.sparse[,3],6))
RM$absdiff <- round(abs(RM$AValue - RM$GValue),6)
RM$IDRow <- rownames(Aclean)[RM$Row]
RM$IDCol <- rownames(Aclean)[RM$Col]
RM$Diag <- 0
RM$Diag[RM$Row == RM$Col] <- 1
RM <- RM[c(1,2,6,7,3,4,5,8)]
} else {
RM <- NULL
}
AValue <- GValue <- NULL
return(list(Aclean=Aclean, Gclean=Gclean, mismG=rpG, mismA=rpP, RM=RM, plotG2A=p))
}
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/match_G2A.R
#' Check any kinship matrix K against phenotypic data
#'
#' Assesses a given kinship matrix against the provided phenotypic data to determine
#' if all genotypes are in the kinship matrix or not. It also reports which individuals
#' match or are missing from one set or another.
#' If requested, a reduced kinship matrix is generated that has only the matched individuals.
#' The input kinship matrix can be a pedigree-based relationship matrix \eqn{\boldsymbol{A}},
#' a genomic-based relationship matrix \eqn{\boldsymbol{G}}, or a hybrid
#' relationship matrix \eqn{\boldsymbol{H}}.
#' Individual names should be assigned to \code{rownames} and \code{colnames} of input matrix.
#'
#' @param K Input of a kinship matrix in full form (\eqn{n \times n}) (default = \code{NULL}).
#' @param pheno.data A data frame with the phenotypic data to assess (for \eqn{n} individuals)
#' (default = \code{NULL}).
#' @param indiv The string for the column name for genotypes/individuals in the phenotypic data (default = \code{NULL}).
#' @param clean If \code{TRUE}, generates a new clean kinship matrix containing only the matched
#' phenotyped individuals (default = \code{FALSE}).
#' @param ord If \code{TRUE}, it will order the kinship matrix as in the phenotypic data, which is
#' recommended for some downstream genomic analyses (default = \code{TRUE}).
#' @param mism If \code{TRUE}, it generates data frames with matched and mismatched individual's names
#' from the kinship matrix and the phenotypic data (default = \code{FALSE}).
#' @param message If \code{TRUE} diagnostic messages are printed on screen (default = \code{TRUE}).
#'
#' @return A list with the following elements:
#' \itemize{
#' \item{\code{mismatchesK}: a vector containing the names of the individuals from the provided kinship matrix
#' that \emph{mismatch} with the phenotypic data.}
#' \item{\code{matchesK}: a vector containing the names of the individuals from the provided kinship matrix
#' that \emph{match} with the phenotypic data.}
#' \item{\code{mismatchesP}: a vector containing the names of phenotyped individuals
#' that \emph{mismatch} with those from the kinship matrix.}
#' \item{\code{matchesP}: a vector containing the names of phenotyped individuals
#' that \emph{match} with those from the kinship matrix.}
#' \item{\code{Kclean}: a clean kinship matrix containing only the matched phenotyped individuals.}
#' }
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Get G matrix.
#' G <- G.matrix(M = geno.pine655, method = "VanRaden", na.string = "-9", sparseform = FALSE)$G
#' dim(G)
#'
#' # Match G and the phenotype.
#' check <-
#' match.kinship2pheno(
#' K = G, pheno.data = pheno.pine,
#' indiv = "Genotype",
#' clean = TRUE, mism = TRUE)
#' ls(check)
#' length(check$matchesK)
#' length(check$mismatchesK)
#' length(check$matchesP)
#' length(check$mismatchesP)
#' dim(check$Kclean)
#'}
match.kinship2pheno <- function(K = NULL,
pheno.data = NULL, indiv = NULL,
clean = FALSE, ord = TRUE, mism = FALSE,
message = TRUE){
# Check if the class of K is matrix
if (is.null(K) || !inherits(K, "matrix")) {
stop("K should be a valid object of class matrix")
}
if (is.null(rownames(K))){
stop("Individual names not assigned to rows of matrix K")
}
if (is.null(colnames(K))){
stop("Individual names not assigned to columns of matrix K")
}
if ((identical(rownames(K), colnames(K))) == FALSE){
stop("Rownames and colnames of matrix K do not match.")
}
# Checks
kinship_names <- rownames(K)
pheno_names <- pheno.data[[indiv]]
if( all(kinship_names %in% pheno_names) == TRUE){
if (message){
message("All individuals within the kinship matrix match the phenotyped individuals.")
}
mismatchesK <- NULL
matchesK <- which(kinship_names %in% pheno_names == TRUE)
} else {
mismatchesK <- which(kinship_names %in% pheno_names != TRUE)
if (message){
message("Kinship matrix contains ", length(mismatchesK),
" individuals that DO NOT match the phenotyped individuals.")
}
matchesK <- which(kinship_names %in% pheno_names == TRUE)
if (message){
message("Kinship matrix contains ", length(matchesK),
" individuals that match the phenotyped individuals.")
}
}
if( all(pheno_names %in% kinship_names) == TRUE){
if (message){
message("All phenotyped individuals match the individuals within the kinship matrix.")
}
mismatchesP <- NULL
matchesP <- which(pheno_names %in% kinship_names == TRUE)
} else {
mismatchesP <- which(pheno_names %in% kinship_names != TRUE)
if (message){
message("Phenotypic data contains ", length(mismatchesP),
" individuals that DO NOT match the kinship matrix individuals.")
}
matchesP <- which(pheno_names %in% kinship_names == TRUE)
if (message){
message("Phenotypic data contains ", length(matchesP),
" individuals that match the kinship matrix individuals.")
}
}
if(length(mismatchesK) != 0 & clean){
if (message){
message("Individuals within the kinship matrix that do not match those in the phenotypic data will be removed from this matrix.")
}
#Kclean <- K[matchesP, matchesP]
Kclean <- K[matchesK, matchesK]
} else {
Kclean <- NULL
}
  if (ord && !is.null(Kclean)) {
    #list.order <- match(rownames(Kclean), pheno_names)
    list.order <- match(pheno_names, rownames(Kclean))
    Kclean <- Kclean[list.order, list.order]
  }
if(!mism) {
mismatchesK <- NULL; matchesK <- NULL
mismatchesP <- NULL; matchesP <- NULL
}
return(list(mismatchesK=mismatchesK, matchesK=matchesK,
mismatchesP=mismatchesP, matchesP=matchesP,
Kclean=Kclean))
}
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/match_kinship2pheno.R
#' Pedigree data for loblolly pine dataset
#'
#' Individual pedigree data for a total of 2,034 records of loblolly pine
#' (\emph{Pinus taeda} L.).
#' Missing parental information coded as 0.
#' Dataset obtained from supplementary material in Resende \emph{et al.} (2012).
#'
#' @docType data
#'
#' @usage ped.pine
#'
#' @format data.frame
#'
#' @keywords datasets
#'
#' @references
#' Resende, M.F.R., Munoz, P., Resende, M.D.V., Garrick, D.J., Fernando, R.L., Davis, J.M.,
#' Jokela, E.J., Martin, T.A., Peter, G.F., and Kirst, M. 2012. Accuracy of genomic
#' selection methods in a standard data set of loblolly pine (\emph{Pinus taeda} L.).
#' Genetics 190:1503-1510.
#'
#' @examples
#' ped.pine |> head()
#'
#' @name ped.pine
NULL
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/ped_pine.R
#' Pedigree data for Atlantic salmon dataset
#'
#' Pedigree data of 1,481 Atlantic salmon samples.
#' Missing parental information coded as 0.
#' Dataset obtained from supplementary material in Robledo \emph{et al.} (2018).
#'
#' @docType data
#'
#' @usage ped.salmon
#'
#' @format data.frame
#'
#' @keywords datasets
#'
#' @references
#' Robledo D., Matika O., Hamilton A., and Houston R.D. 2018.
#' Genome-wide association and genomic selection for resistance
#' to amoebic gill disease in Atlantic salmon.
#' G3 Genes, Genomes, Genetics 8:1195-1203.
#'
#' @examples
#' ped.salmon |> head()
#'
#' @name ped.salmon
NULL
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/ped_salmon.R
#' Phenotypic data for apple dataset
#'
#' Phenotypic data on 247 apple clones (\emph{i.e.}, genotypes) evaluated for
#' several fruit quality traits at two New Zealand sites, Motueka (MOT) and Hawkes Bay (HB).
#' Dataset obtained from supplementary material in Kumar \emph{et al.} (2015).
#'
#' @docType data
#'
#' @usage pheno.apple
#'
#' @format data.frame
#'
#' @keywords datasets
#'
#' @references
#' Kumar S., Molloy C., Muñoz P., Daetwyler H., Chagné D., and Volz R. 2015.
#' Genome-enabled estimates of additive and nonadditive genetic variances and prediction
#' of apple phenotypes across environments. G3 Genes, Genomes, Genetics 5:2711–2718.
#'
#' @examples
#' pheno.apple |> head()
#'
#' @name pheno.apple
NULL
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/pheno_apple.R
#' Phenotypic data for loblolly pine dataset
#'
#' Deregressed estimated breeding values (DEBV) for the trait diameter at breast height (DBH)
#' at 6 years of age from trees grown at site Nassau.
#' The dataset contains a total of 861 genotypes of loblolly pine
#' (\emph{Pinus taeda} L.).
#' Dataset obtained from supplementary material in Resende \emph{et al.} (2012).
#'
#' @docType data
#'
#' @usage pheno.pine
#'
#' @format data.frame
#'
#' @keywords datasets
#'
#' @references
#' Resende, M.F.R., Munoz, P., Resende, M.D.V., Garrick, D.J., Fernando, R.L., Davis, J.M.,
#' Jokela, E.J., Martin, T.A., Peter, G.F., and Kirst, M. 2012. Accuracy of genomic
#' selection methods in a standard data set of loblolly pine (\emph{Pinus taeda} L.).
#' Genetics 190:1503-1510.
#'
#' @examples
#' pheno.pine |> head()
#'
#' @name pheno.pine
NULL
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/pheno_pine.R
#' Phenotypic data for Atlantic salmon dataset
#'
#' Phenotypic data on 1,481 Atlantic salmon individuals.
#' All fish were phenotyped for mean gill score (mean of the left
#' gill and right gill scores) and amoebic load (qPCR values using
#' \emph{Neoparamoeba perurans} specific primers, amplified from one of the gills).
#' Dataset obtained from supplementary material in Robledo \emph{et al.} (2018).
#'
#' @docType data
#'
#' @usage pheno.salmon
#'
#' @format data.frame
#'
#' @keywords datasets
#'
#' @references
#' Robledo D., Matika O., Hamilton A., and Houston R.D. 2018.
#' Genome-wide association and genomic selection for resistance
#' to amoebic gill disease in Atlantic salmon.
#' G3 Genes, Genomes, Genetics 8:1195-1203.
#'
#' @examples
#' pheno.salmon |> head()
#'
#' @name pheno.salmon
NULL
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/pheno_salmon.R
#' Quality control filtering of molecular matrix M for downstream analyses
#'
#' Reads molecular data in the format 0, 1, 2 and performs some basic quality control
#' filters and simple imputation.
#' Matrix provided is of the full form (\eqn{n \times p}), with \eqn{n} individuals and \eqn{p} markers.
#' Individual and marker names are assigned to \code{rownames} and \code{colnames},
#' respectively. Filtering can be done with some of the following options by
#' specifying thresholds for:
#' missing values on individuals, missing values on markers, minor allele frequency,
#' inbreeding Fis value (of markers), and observed heterozygosity (of markers).
#' String used for identifying missing values can be specified.
#' If requested, missing values will be imputed based on the mean of each SNP.
#'
#' @param M A matrix with SNP data of full form (\eqn{n \times p}), with \eqn{n} individuals and \eqn{p} markers
#' Individual and marker names are assigned to \code{rownames} and \code{colnames}, respectively.
#' Data in matrix is coded as 0, 1, 2 (integer or numeric) (default = \code{NULL}).
#' @param base If \code{TRUE} matrix \eqn{\boldsymbol{M}} is considered as bi-allele SNP data format (character)
#' and the SNPs are recoded to numerical values before performing the quality control filters
#' (default = \code{FALSE}) (currently deprecated).
#' @param na.string A character that will be interpreted as \code{NA} values (default = \code{"NA"}).
#' @param map (Optional) A data frame with the map information with \eqn{p} rows (default = \code{NULL}).
#' @param marker A character indicating the name of the column in data frame \code{map} with the identification
#' of markers. This is mandatory if \code{map} is provided (default = \code{NULL}).
#' @param chrom A character indicating the name of the column in data frame \code{map} with the identification
#' of chromosomes (default = \code{NULL}).
#' @param pos A character indicating the name of the column in data frame \code{map} with the identification
#' of marker positions (default = \code{NULL}).
#' @param ref A character indicating the name of the column in the map containing the reference allele for
#' recoding. If absent, then conversion will be based on the major allele (most frequent).
#' The marker information of a given individual with two copies of the allele specified
#' in \code{ref} will be coded as 2 (default = \code{NULL}).
#' @param marker.callrate A numerical value between 0 and 1 used to remove SNPs with a rate
#' of missing values equal or larger than this value (default = 1, \emph{i.e.} no removing).
#' @param ind.callrate A numerical value between 0 and 1 used to remove individuals with a
#' rate of missing values equal or larger than this value (default = 1, \emph{i.e.} no removing).
#' @param maf A numerical value between 0 and 1 used to remove SNPs with a Minor Allele Frequency
#' (MAF) below this value (default = 0, \emph{i.e.} no removing).
#' @param heterozygosity A numeric value indicating the maximum value of accepted observed heterozygosity (Ho)
#' (default = 1, \emph{i.e.} no removing).
#' @param Fis A numeric value indicating the maximum value of accepted inbreeding (Fis) following
#' the equation \eqn{|1 - (Ho/He)|} (default = 1, \emph{i.e.} no removing).
#' @param impute If \code{TRUE} imputation of missing values is done using the mean of each SNP
#' (default = \code{FALSE}).
#' @param Mrecode If \code{TRUE} it provides the recoded \eqn{\boldsymbol{M}} matrix from the bi-allelic to numeric SNP
#' (default = \code{FALSE}) (currently deprecated).
#' @param plots If \code{TRUE} generates graphical output of the quality control based on the
#' original input matrix (default = \code{TRUE}).
#' @param digits Set up the number of digits used to round the output matrix (default = 2).
#' @param message If \code{TRUE} diagnostic messages are printed on screen (default = \code{TRUE}).
#'
#' @return A list with the following elements:
#' \itemize{
#' \item{\code{M.clean}: the cleaned \eqn{\boldsymbol{M}} matrix after the quality control filters have been applied.}
#' \item{\code{map}: if provided, a cleaned \code{map} data frame after the quality control filters have been applied.}
#' \item{\code{plot.missing.ind}: a plot of missing data per individual (original marker matrix).}
#' \item{\code{plot.missing.SNP}: a plot of missing data per SNP (original marker matrix).}
#' \item{\code{plot.heteroz}: a plot of observed heterozygosity per SNP (original marker matrix).}
#' \item{\code{plot.Fis}: a plot of Fis per SNP (original marker matrix).}
#' \item{\code{plot.maf}: a plot of the minor allele frequency (original marker matrix).}
#' }
#'
#' @md
#' @details
#' \strong{Warning}: The arguments \code{base}, \code{ref}, and \code{Mrecode}
#' currently are deprecated and will
#' be removed on the next version of \code{ASRgenomics}.
#' Use function \link{snp.recode} to recode the matrix prior to using \code{qc.filtering}.
#'
#' The filtering process is carried out as expressed in the following simplified pseudo-code,
#' which consists of a loop repeated twice:
#'
#' \strong{for i in 1 to 2}
#'
#' Filter markers based on call rate.
#'
#' Filter individuals based on call rate.
#'
#' Filter markers based on minor allele frequency.
#'
#' Filter markers based on observed heterozygosity.
#'
#' Filter markers based on inbreeding.
#'
#' \strong{end for}
#'
#' @export
#'
#' @examples
#' # Example: Pine dataset from ASRgenomics (coded as 0,1,2 with missing as -9).
#'
#' M.clean <- qc.filtering(
#' M = geno.pine926,
#' maf = 0.05,
#' marker.callrate = 0.9, ind.callrate = 0.9,
#' heterozygosity = 0.9, Fis = 0.6,
#' na.string = "-9")
#' ls(M.clean)
#' M.clean$M.clean[1:5, 1:5]
#' dim(M.clean$M.clean)
#' head(M.clean$map)
#' M.clean$plot.maf
#' M.clean$plot.missing.ind
#' M.clean$plot.missing.SNP
#' M.clean$plot.heteroz
#' M.clean$plot.Fis
#'
#' \donttest{
#' # Example: Salmon dataset (coded as 0,1,2 with missing as NA).
#'
#' M.clean <- qc.filtering(
#' M = geno.salmon,
#' maf = 0.02,
#' marker.callrate = 0.10, ind.callrate = 0.20,
#' heterozygosity = 0.9, Fis = 0.4)
#' M.clean$M.clean[1:5, 1:5]
#' dim(M.clean$M.clean)
#' head(M.clean$map)
#' M.clean$plot.maf
#' M.clean$plot.missing.ind
#' M.clean$plot.missing.SNP
#' M.clean$plot.heteroz
#' M.clean$plot.Fis
#' }
#'
qc.filtering <- function(M = NULL, base = FALSE, na.string = NA,
map = NULL, marker = NULL, chrom = NULL, pos = NULL, ref = NULL,
marker.callrate = 1, ind.callrate = 1, maf = 0,
heterozygosity = 1, Fis = 1,
impute = FALSE, Mrecode = FALSE,
plots = TRUE, digits = 2, message = TRUE) {
# Deprecation traps ---------------------------------------------------------------------------
if (!is.null(ref) | base | Mrecode){
stop("The recoding has been deprecated in \'qc.filtering()', please use \'snp.recode()' to perform this task.")
}
# Traps ---------------------------------------------------------------------------------------
# Check if the class of M is matrix.
if (is.null(M) || !inherits(M, "matrix"))
stop("M should be a valid object of class matrix.")
if (is.null(colnames(M)))
stop("Marker names not assigned to columns of matrix M.")
if (is.null(rownames(M)))
stop("Individuals names not assigned to rows of matrix M.")
# Other input checks.
if (marker.callrate < 0 | marker.callrate > 1)
stop("Specification of marker.callrate must be be between 0 and 1.")
if (ind.callrate < 0 | ind.callrate > 1)
stop("Specification of ind.callrate must be between 0 and 1.")
if (maf < 0 | maf > 1)
stop("Specification of maf must be between 0 and 1.")
if (Fis < 0 | Fis > 1)
stop("Specification of Fis must be between 0 and 1.")
if (heterozygosity < 0 | heterozygosity > 1)
stop("Specification of heterozygosity must be between 0 and 1.")
# Check map if provided.
if (!is.null(map)) {
# Check map class.
check.data_(data_ = "map", class_ = "data.frame")
# Check map names.
# Check mandatory variables in map.
if(is.null(marker)){stop("\'marker' must be provided if \'map' is provided.")}
# Check if they are present in the map.
map.name.hit <- c(marker, chrom, pos) %in% names(map)
if (!all(map.name.hit)){
stop("Value provided to argument \'", c("marker", "chrom", "pos")[!map.name.hit],
"' does not correspond to a variable in \'map'.")
}
# Match map and M.
if (!identical(as.character(map[[marker]]), colnames(M))){
stop("map[[marker]] and colnames(M) must be identical.")
}
}
# # This is a slow test. Maybe not worth it. It is not necessary here.
# if(!all(unique(c(M)) %in% c(0, 1, 2, na.string)) & message){
# message("Some of the values in M are not one of the following: ",
# paste0(c(0, 1, 2, na.string), collapse = ", "), ".")
# }
# Body ----------------------------------------------------------------------------------------
# Initial info about the matrix.
if (message) {
message("Initial marker matrix M contains ", nrow(M),
" individuals and ", ncol(M), " markers.")
}
# Replace na.string by NA.
if (!is.na(na.string)) {
if (na.string == "NA") { na.string <- NA }
}
if (!is.na(na.string)) {
if (message){
message('A total of ', sum(M %in% na.string),
" values were identified as missing with the string ",
na.string, " and were replaced by NA.")
}
M[M %in% na.string] <- NA
}
# Check if all are compliant.
if (!all(M %in% c(0, 1, 2, NA))) {
stop("Data must be in numeric format: 0, 1, 2 and NA.")
}
# Remove markers with no valid information.
miss.all <- colSums(is.na(M)) == nrow(M)
if (any(miss.all)) {
# Apply the removal.
M <- M[, !miss.all]
# Report.
if (message){
message("A total of ", sum(miss.all), " markers were removed for only having missing data.")
}
} ; rm(miss.all)
# Generating some plots ---------------------------------------------------------------------------------
if (plots){
# Missing of individuals.
# missingInd_DF <- data.frame(Ind =rowMeans(is.na(M)))
missingInd_DF <- data.frame(Ind = (100 - callrate(M = M, margin = "row"))/100)
missingInd_plot <- ggplot(missingInd_DF, aes(x=Ind)) +
geom_histogram(fill='#0072B2', bins=40) +
theme_classic() +
xlab("Missing data per Individual")+
ylab("Count")
rm(missingInd_DF)
# Missing of markers.
# missingSNP_DF <- data.frame(SNP=colMeans(is.na(M)))
missingSNP_DF <- data.frame(SNP = (100 - callrate(M = M, margin = "col"))/100)
missingSNP_plot <- ggplot(missingSNP_DF, aes(x=SNP)) +
geom_histogram(fill='#0072B2', bins=40) +
theme_classic() +
xlab("Missing data per SNP") +
ylab("Count")
# Histogram of MAF.
qDF <- data.frame(MAF = maf(M = M))
maf_plot <- ggplot(qDF, aes(x=MAF)) +
geom_histogram(fill = '#0072B2', bins = 40) +
theme_classic() +
xlab("Minor Allele Frequency (MAF)")+
ylab("Count")
# Histogram of heterozygotes.
het_DF <- data.frame(het = heterozygosity(M = M)[, "ho"])
het_plot <- ggplot(het_DF, aes(x = het)) +
geom_histogram(fill = '#0072B2', bins = 40) +
theme_classic() +
xlab("Heterozygotes")+
ylab("Count")
# Histogram of Fis.
fis_DF <- data.frame(fis = abs(Fis(M = M)))
fis_plot <- ggplot(fis_DF, aes(x = fis)) +
geom_histogram(fill = '#0072B2', bins = 40) +
theme_classic() +
xlab("Fis")+
ylab("Count")
} else {
missingInd_plot <- NULL
missingSNP_plot <- NULL
maf_plot <- NULL
het_plot <- NULL
fis_plot <- NULL
}
# Filtering markers -------------------------------------------------------------------------------------
# Filtering process - 2 rounds (initializing objects).
cr_mk_out <- 0 ; cr_id_out <- 0
maf_out <- 0 ; fis_out <- 0
h_out <- 0
cr_mk_filter <- TRUE ; cr_id_filter <- TRUE
maf_filter <- TRUE ; fis_filter <- TRUE
h_filter <- TRUE
for (blank_ in 1:2) {
# Filtering markers by CR.
if (marker.callrate < 1){
cr_mk_filter <- 1 - callrate(M = M, margin = "col")/100 <= marker.callrate
cr_mk_out <- cr_mk_out + sum(!cr_mk_filter)
M <- M[, cr_mk_filter, drop = FALSE]
rm(cr_mk_filter)
}
    # Filtering individuals by call rate.
    if (ind.callrate < 1){
      cr_id_filter <- 1 - callrate(M = M, margin = "row")/100 <= ind.callrate
      cr_id_out <- cr_id_out + sum(!(cr_id_filter))
      M <- M[cr_id_filter, , drop = FALSE]
      rm(cr_id_filter)
}
# Filtering markers by MAF.
if (maf > 0){
q <- maf(M = M)
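      # Subtracting machine epsilon keeps markers whose MAF equals the threshold
      # despite floating-point representation error.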
maf_filter <- q > maf - .Machine$double.eps
maf_out <- maf_out + sum(!maf_filter, na.rm = TRUE)
M <- M[, maf_filter, drop = FALSE]
rm(maf_filter, q)
}
# Filtering markers by heterozygosity.
if (heterozygosity < 1){
# Get observed heterozygosity.
h <- heterozygosity(M = M)[, "ho"]
# Get incidence vector.
h_filter <- h <= heterozygosity
# Add current run to the sum.
h_out <- h_out + sum(!h_filter, na.rm = TRUE)
# Apply filter.
M <- M[, h_filter, drop = FALSE]
# Remove objects.
rm(h_filter, h)
}
if (Fis < 1){
# Get Fis.
fis <- Fis(M = M, margin = "col")
# Get incidence vector.
fis_filter <- abs(fis) <= Fis
# Add current run to the sum.
fis_out <- fis_out + sum(!fis_filter, na.rm = TRUE)
# Apply filter.
M <- M[, fis_filter, drop = FALSE]
# Remove objects.
rm(fis_filter, fis)
}
}
# Some intermediate reporting.
if (message){
message("A total of ", cr_mk_out, " markers were removed because ",
"their proportion of missing values was equal or larger than ",
marker.callrate, ".")
message("A total of ", cr_id_out, " individuals were removed because ",
"their proportion of missing values was equal or larger than ",
ind.callrate, ".")
message("A total of ", maf_out, " markers were removed because ",
"their MAF was smaller than ", maf, ".")
message("A total of ", h_out, " markers were removed because ",
"their heterozygosity was larger than ", heterozygosity, ".")
message("A total of ", fis_out, " markers were removed because ",
"their |F| was larger than ", Fis, ".")
missing.SNP <- sum(is.na(M))
prop.miss <- 100*missing.SNP/(ncol(M)*nrow(M))
message("Final cleaned marker matrix M contains ", round(prop.miss,2),
"% of missing SNPs.")
message("Final cleaned marker matrix M contains ", nrow(M),
" individuals and ", ncol(M), " markers.")
}
# Simple mean imputation.
if (impute){
missing.SNP <- sum(is.na(M))
if (missing.SNP == 0 & isTRUE(message)) {
message('No imputation was performed as there are no missing marker data.')
} else {
# Loop through markers and impute with mean.
for(i in 1:ncol(M)){
M[is.na(M[,i]), i] <- mean(M[,i], na.rm=TRUE)
}
# Polishing the dataset.
M <- round(M, digits)
if (isTRUE(message)){
prop.miss <- 100*missing.SNP/(ncol(M)*nrow(M))
message("A total of ", missing.SNP, " missing values were imputed, ",
"corresponding to ", round(prop.miss,2), "% of the total number of SNPs.")
}
}
}
# Finalize ------------------------------------------------------------------------------------
# Remove eventual cleaned markers from M in the map.
if (!is.null(map)){
# Get match index.
matches <- na.omit(match(colnames(M), as.character(map[[marker]])))
# Applies to map; This separation is done because of data.table.
map <- map[matches,]
}
return(list(M.clean = M, map = map, plot.missing.ind = missingInd_plot,
plot.missing.SNP = missingSNP_plot, plot.heteroz = het_plot, plot.Fis = fis_plot,
plot.maf = maf_plot))
}
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/qc_filtering.R
#' Performs a Principal Component Analysis (PCA) based on a molecular matrix M
#'
#' Generates a PCA and summary statistics from a given molecular matrix
#' for population structure. Matrix
#' provided is of full form (\eqn{n \times p}), with n individuals and p markers. Individual and
#' marker names are assigned to \code{rownames} and \code{colnames}, respectively.
#' SNP data is coded as 0, 1, 2 (integers or decimal numbers). Missing values are
#' not accepted and these need to be imputed (see function \code{qc.filtering()}
#' for implementing mean imputation). There is additional output such as plots and
#' other data frames
#' to be used on other downstream analyses (such as GWAS).
#'
#' It calls function \code{prcomp()} to generate the PCA and the
#' \code{factoextra} R package to extract and visualize results.
#' Methodology uses normalized allele frequencies as proposed by Patterson \emph{et al.} (2006).
#'
#' @param M A matrix with SNP data of full form (\eqn{n \times p}), with \eqn{n}
#' individuals and \eqn{p} markers (default = \code{NULL}).
#' @param label If \code{TRUE} then individual names are included in the PCA plot (default = \code{FALSE}).
#' @param ncp The number of PC dimensions to be shown in the screeplot, and to provide
#' in the output data frame (default = \code{10}).
#' @param groups Specifies a vector of class factor that will be used to define different
#' colors for individuals in the PCA plot. It must be presented in the same order as the individuals
#' in the molecular \eqn{\boldsymbol{M}} matrix (default = \code{NULL}).
#' @param ellipses If \code{TRUE}, ellipses will be drawn around each of the defined levels in
#' \code{groups} (default = \code{FALSE}).
#'
#' @return A list with the following four elements:
#' \itemize{
#' \item{\code{eigenvalues}: a data frame with the eigenvalues and its variances associated with each dimension
#' including only the first \code{ncp} dimensions.}
#' \item{\code{pca.scores}: a data frame with scores (rotated observations on the new components) including
#' only the first \code{ncp} dimensions.}
#' \item{\code{plot.pca}: a scatterplot with the first two-dimensions (PC1 and PC2) and their scores.}
#' \item{\code{plot.scree}: a barchart with the percentage of variances explained by the \code{ncp} dimensions.}
#' }
#'
#' @references
#' Patterson N., Price A.L., and Reich, D. 2006. Population structure and eigenanalysis.
#' PLoS Genet 2(12):e190. doi:10.1371/journal.pgen.0020190
#'
#' @export
#'
#' @examples
#' # Perform the PCA.
#' SNP_pca <- snp.pca(M = geno.apple, ncp = 10)
#' ls(SNP_pca)
#' SNP_pca$eigenvalues
#' head(SNP_pca$pca.scores)
#' SNP_pca$plot.pca
#' SNP_pca$plot.scree
#'
#' # PCA plot by family (17 groups).
#' grp <- as.factor(pheno.apple$Family)
#' SNP_pca_grp <- snp.pca(M = geno.apple, groups = grp, label = FALSE)
#' SNP_pca_grp$plot.pca
#'
snp.pca <- function(M = NULL, label = FALSE, ncp = 10,
groups = NULL, ellipses = FALSE){
# Check if the class of M is matrix
if (is.null(M) || !inherits(M, "matrix")) {
stop("M should be a valid object of class matrix.")
}
if (is.null(colnames(M))){
stop("Marker names not assigned to columns of matrix M.")
}
if (is.null(rownames(M))){
stop("Individuals names not assigned to rows of matrix M.")
}
# Check if the are missing values
if (any(is.na(M))){
stop("M matrix contains some missing data, consider performing some imputation.")
}
if (ncp < 0 | ncp > nrow(M)) {
stop("Value ncp must be positive and smaller than the number of rows in matrix M.")
}
  ## PCA by Patterson et al. (2006)
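  # Each marker column j is centered by its allele frequency p_j = colMeans(M)[j]/2
  # and scaled by sqrt(p_j * (1 - p_j)); that is,
  # M.norm[i, j] = (M[i, j]/2 - p_j) / sqrt(p_j * (1 - p_j)).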
M.mean <- colMeans(M)/2
M.scale <- sqrt(M.mean * (1 - M.mean))
M.norm <- matrix(NA, nrow = nrow(M), ncol = ncol(M))
for (i in 1:ncol(M)) { # Done by SNP column
M.norm[,i] <- (M[,i]/2 - M.mean[i]) / M.scale[i]
}
# Pass name of individuals along.
rownames(M.norm) <- rownames(M)
colnames(M.norm) <- colnames(M)
# Generating the pca
  pca <- prcomp(var(t(M.norm)), scale.=FALSE) # Faster than running prcomp() on M.norm directly, which takes a long time.
# Percentage of variances explained by each principal component
scree_plot <- fviz_eig(pca, addlabels=TRUE, ncp=ncp,
barfill = "#0072B2",
barcolor = "#0072B2",
ggtheme = theme_classic())
# Extract the eigenvalues/variances of the principal dimensions
eig_var <- get_eig(pca)
# Plot PCA
if (isTRUE(label)) {
if(is.null(groups)) {
pca_plot <- fviz_pca_ind(pca, geom=c("point","text"),
repel=TRUE,
col.ind = "#0072B2",
ggtheme = theme_classic())
} else {
pca_plot <- fviz_pca_ind(pca,
geom = c("point","text"),
repel = TRUE,
col.ind = groups, # color by groups
mean.point = FALSE,
legend.title = "Groups",
ggtheme = theme_classic())
}
}
if (isFALSE(label)) {
if(is.null(groups)) {
pca_plot <- fviz_pca_ind(pca, geom="point",
col.ind = "#0072B2",
ggtheme = theme_classic())
} else {
pca_plot <- fviz_pca_ind(pca,
geom = "point",
col.ind = groups, # color by groups,
mean.point = FALSE,
legend.title = "Groups",
ggtheme = theme_classic())
}
}
# Process ellipses if requested.
if (!is.null(groups) & ellipses){
# TODO this can be more memory efficient (the data frame is being recreated).
group.comps <- cbind.data.frame(pca$x[, 1:2], groups)
# Get centroids.
centroids <- aggregate(cbind(PC1, PC2) ~ groups, data = group.comps, FUN = mean)
# Get ellipses.
ellipses.data <- do.call(
rbind,
lapply(unique(group.comps$groups), function(t) {
data.frame(
groups = as.character(t),
ellipse(
cov(group.comps[group.comps$groups == t, 1:2]),
centre = as.matrix(centroids[t, 2:3]), level = 0.95),
stringsAsFactors=FALSE)
}
)
)
# Add ellipses to plot.
pca_plot <- pca_plot +
geom_path(data = ellipses.data, linewidth = .5, inherit.aes = F,
aes(x = PC1, y = PC2, color = groups))
}
# Scores (rotated X observations on the new components) for ncp components.
scores <- pca$x[,c(1:ncp)]
eigenvalues <- eig_var[c(1:ncp),]
return(list(pca.scores=scores, eigenvalues=eigenvalues, plot.scree=scree_plot, plot.pca=pca_plot))
}
# File: /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/snp_pca.R
#' Reduces the number of redundant markers on a molecular matrix M by pruning
#'
#' For a given molecular dataset \eqn{\boldsymbol{M}} (in the format 0, 1 and 2)
#' it produces a reduced molecular matrix by eliminating "redundant"
#' markers using pruning techniques. This function finds and drops some of the
#' SNPs in high linkage disequilibrium (LD).
#'
#' Pruning is recommended as redundancies can affect
#' the quality of matrices used for downstream analyses.
#' The algorithm used is based on the Pearson's correlation between markers
#' as a \emph{proxy} for LD. In the event of a pairwise correlation higher
#' than the selected threshold, one marker of the pair will be eliminated according to the
#' specified criteria: call rate or minor allele frequency. In case of a tie, one marker will be dropped at random.
#'
#' @param M A matrix with marker data of full form (\eqn{n \times p}), with \eqn{n} individuals
#' and \eqn{p} markers. Individual and marker names are assigned to \code{rownames} and \code{colnames}, respectively.
#' Data in matrix is coded as 0, 1, 2 (integer or numeric) (default = \code{NULL}).
#' @param map (Optional) A data frame with the map information with \eqn{p} rows.
#' If \code{NULL} a dummy map is generated considering a single chromosome and sequential positions
#' for markers. A \code{map} is mandatory if \code{by.chrom = TRUE}, in which case option
#' \code{chrom} must also be non-null.
#' @param marker A character indicating the name of the column in data frame \code{map}
#' with the identification
#' of markers. This is mandatory if \code{map} is provided (default = \code{NULL}).
#' @param chrom A character indicating the name of the column in data frame \code{map} with the identification
#' of chromosomes. This is mandatory if \code{map} is provided (default = \code{NULL}).
#' @param pos A character indicating the name of the column in data frame \code{map} with the identification
#' of marker positions (default = \code{NULL}).
#' @param method A character indicating the method (or algorithm) to be used as reference for
#' identifying redundant markers.
#' The only method currently available is based on correlations (default = \code{"correlation"}).
#' @param criteria A character indicating the criteria to choose which marker to drop
#' from a detected redundant pair.
#' Options are: \code{"callrate"} (the marker with fewer missing values will be kept) and
#' \code{"maf"} (the marker with higher minor allele frequency will be kept) (default = \code{"callrate"}).
#' @param pruning.thr A threshold value to identify redundant markers with Pearson's correlation larger than the
#' value provided (default = \code{0.95}).
#' @param by.chrom If \code{TRUE} the pruning is performed independently by chromosome (default = \code{FALSE}).
#' @param window.n A numeric value with number of markers to consider in each
#' window to perform pruning (default = \code{50}).
#' @param overlap.n A numeric value with number of markers to overlap between consecutive windows
#' (default = \code{5}).
#' @param iterations An integer indicating the number of sequential times the pruning procedure
#' should be executed on remaining markers.
#' If no markers are dropped in a given iteration/run, the algorithm will stop (default = \code{10}).
#' @param seed An integer to be used as seed for reproducibility. In case the criteria has the
#' same values for a given pair of markers, one will be dropped at random (default = \code{NULL}).
#' @param message If \code{TRUE} diagnostic messages are printed on screen (default = \code{TRUE}).
#'
#' @details Filtering markers (\link{qc.filtering}) is of high relevance before pruning.
#' Poor quality markers (\emph{e.g.}, monomorphic markers) may prevent correlations from being
#' calculated and may affect eliminations.
#'
#' @return
#' \itemize{
#' \item{\code{Mpruned}: a matrix containing the pruned marker \emph{M} matrix.}
#' \item{\code{map}: a data frame containing the pruned map.}
#' }
#'
#' @export
#'
#' @examples
#' # Read and filter genotypic data.
#' M.clean <- qc.filtering(
#' M = geno.pine655,
#' maf = 0.05,
#' marker.callrate = 0.20, ind.callrate = 0.20,
#' Fis = 1, heterozygosity = 0.98,
#' na.string = "-9",
#' plots = FALSE)$M.clean
#'
#' # Prune correlations > 0.9.
#' Mpr <- snp.pruning(
#' M = M.clean, pruning.thr = 0.90,
#' by.chrom = FALSE, window.n = 40, overlap.n = 10)
#' head(Mpr$map)
#' Mpr$Mpruned[1:5, 1:5]
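#'
#' # A sketch of pruning by chromosome, using a toy map built on the fly
#' # (the chromosome assignment below is artificial and for illustration only).
#' toy.map <- data.frame(
#'   marker = colnames(M.clean),
#'   chrom = rep(1:2, each = ceiling(ncol(M.clean) / 2),
#'               length.out = ncol(M.clean)),
#'   pos = seq_len(ncol(M.clean)))
#' Mpr.chr <- snp.pruning(
#'   M = M.clean, map = toy.map, marker = "marker", chrom = "chrom", pos = "pos",
#'   pruning.thr = 0.90, by.chrom = TRUE)
#' head(Mpr.chr$map)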
#'
snp.pruning <- function(
M = NULL, map = NULL, marker = NULL, chrom = NULL, pos = NULL,
method = c('correlation'), criteria = c("callrate", "maf"),
pruning.thr = 0.95, by.chrom = FALSE, window.n = 50, overlap.n = 5,
iterations = 10,
# n.cores = 1,
seed = NULL, message = TRUE) {
# Traps ---------------------------------------------------------------------------------------
# Check M class.
check.data_(data_ = "M", class_ = "matrix")
# Get maf and callrate.
# Logic: as we are using correlations, viable maf and callrate are essential.
maf <- maf(M = M)
callrate <- callrate(M = M, margin = "col")
# Check callrate.
if (any(callrate == 0)){
stop("There are markers will all samples missing. Please use qc.filtering() before pruning.")
}
# Check maf.
if (any(maf == 0)){
stop("There are markers with minor allele frequency equal to 0. Please use qc.filtering() before pruning.")
}
# Create map if not provided.
if (is.null(map)) {
map <- dummy.map_(colnames(M))
marker <- "marker" ; chrom <- "chrom" ; pos <- "pos"
} else {
# Check map class.
check.data_(data_ = "map", class_ = "data.frame")
# Check map names.
# Check mandatory variables in map.
if(is.null(marker)){stop("The \'marker' option must be specified if \'map' is provided.")}
if(is.null(chrom)){stop("The \'chrom' option must be specified if \'map' is provided.")}
# Check if they are present in the map.
map.name.hit <- c(marker, chrom, pos) %in% names(map)
if (!all(map.name.hit)){
stop("Value provided to argument \'", c("marker", "chrom", "pos")[!map.name.hit], "' does not correspond to a variable in
data frame \'map'.")
}
# Match map and M.
if (!identical(as.character(map[[marker]]), colnames(M))){
stop("map[[marker]] and colnames(M) must be identical. Please check input.")
}
}
# Check method.
method <- match.arg(method)
# Check criteria.
criteria <- match.arg(criteria)
# Check threshold.
if (pruning.thr <= 0 | pruning.thr > 1){
stop("The condition for pruning.thr is between 0 and 1.")
}
# Check by.chrom.
check.logical_(arg_ = "by.chrom")
# Check window.
if(window.n <= 1){
stop("The \'window.n' argument should have an integer larger than 1.")
}
# Check overlap.
if(overlap.n <= 0){
stop("The \'overlap.n' argument should have an integer larger or eqaul than 0.")
}
if(overlap.n >= window.n){
stop("The \'overlap.n' argument should be lower than the \'window.n' argument.")
}
# Check iterations.
if(iterations <= 0){
stop("The \'iterations' argument should have an positive integer.")
}
# if(n.cores <= 0){
# stop("The \'n.cores' argument should have an integer > 0.")
# }
# Check message.
check.logical_(arg_ = "message")
# Body ----------------------------------------------------------------------------------------
# Setting seed.
if (!is.null(seed)) { set.seed(seed = seed) }
  # Identify tiebreaking criterion.
# Logic: the criteria to select markers are maf or callrate.
if (criteria == "maf"){
map$criteria <- maf
}
if (criteria == "callrate"){
map$criteria <- callrate
}
# Selection dummy.
map$sel <- 1
# Ordering by maf if requested.
# TODO check how this affects the code below.
# if (maf.order) {
# map <- map[order(map$maf, decreasing = FALSE),]
# M <- M[, map[[marker]]]
# }
# Collect garbage.
rm(maf, callrate)
# Marker drop function ------------------------------------------------------------------------
# Call function that drops markers based on the correlation.
marker.drop <- function(curr.set.index = NULL){
init.set.pos <- sets[curr.set.index] # Position on map and M.
# Selecting section of M based on set and overlap.
if (n.sets == 0){
window.M <- cur.M
} else if (curr.set.index == n.sets) {
window.M <- cur.M[, sets[curr.set.index]:ncol(cur.M)]
} else {
window.M <- cur.M[, sets[curr.set.index]:(sets[curr.set.index + 1] + overlap.n - 1)]
}
# Generating Corr matrix in sparse (no diagonals).
C.sparse <- suppressWarnings(cor(window.M, use = 'pairwise.complete.obs'))
# Replace NAs with 0 to avoid problems with full2sparse.
# Cause: if a correlation cannot be calculated for some reason,
# we set it up as 0 and do not remove the marker.
# This is a conservative approach as we do not have enough info about the marker.
    C.sparse[is.na(C.sparse)] <- 0
# Transform to sparse.
C.sparse <- as.data.table(full2sparse(C.sparse))
C.sparse[, Value := abs(Value)]
C.sparse <- C.sparse[Row != Col,]
# Order so we check from largest to smaller correlation.
setorder(C.sparse, -Value)
# Initiate indices to remove.
rm.pos <- c()
    while (nrow(C.sparse) > 0 && C.sparse$Value[1] >= pruning.thr) {
# Identify current Row and Col corrected for set position.
row.pos <- C.sparse[1, Row] + init.set.pos - 1
col.pos <- C.sparse[1, Col] + init.set.pos - 1
# Selecting which marker to keep based on the number of missing.
# Row and Col equal then random.
if (cur.map$criteria[row.pos] == cur.map$criteria[col.pos]) {
# Get a random TRUE or FALSE to select the marker.
if (sample(x = c(TRUE, FALSE), size = 1)) {
# Drop marker on col.
# Update C.sparse.
C.sparse <- C.sparse[Col != Col[1] & Row != Col[1], ]
# Append position to remove.
rm.pos <- append(rm.pos, col.pos)
} else {
# Drop marker on row.
# Update C.sparse.
C.sparse <- C.sparse[Col != Row[1] & Row != Row[1], ]
# Append position to remove.
rm.pos <- append(rm.pos, row.pos)
}
}
# Row better than Col.
else if (cur.map$criteria[row.pos] > cur.map$criteria[col.pos]) {
# Update C.sparse.
C.sparse <- C.sparse[Col != Col[1] & Row != Col[1], ]
# Append position to remove.
rm.pos <- append(rm.pos, col.pos)
}
# Col better than Row.
else {
# Update C.sparse.
C.sparse <- C.sparse[Col != Row[1] & Row != Row[1], ]
# Append position to remove.
rm.pos <- append(rm.pos, row.pos)
}
}
return(rm.pos)
}
# Collect info for summary --------------------------------------------------------------------
if (message){
# Total number of markers.
original.n.markers <- ncol(M)
# Number of markers by chromosome.
if (by.chrom){
      original.n.markers.chrom <- table(map[[chrom]])
}
}
# Iterate through data ------------------------------------------------------------------------
if (message){
message(blue("\nInitiating pruning procedure."))
message("Initial marker matrix M contains ", nrow(M),
" individuals and ", ncol(M), " markers.")
}
# If by.chrom is requested, the range is obtained.
if (by.chrom){
# Get chromosome range.
chrom.range <- unique(map[[chrom]])
if (message){
message("Requesting pruning by chromosome.")
}
} else {
# Create dummy chromosome range.
chrom.range <- 1
if (message){
message("Requesting pruning without chromosome indexing.")
}
}
# Number of times to iterate in each chromosome.
iter.range <- 1:iterations
# Loop across chromosomes.
for (cur.chrom in chrom.range){
if (length(chrom.range) > 1 & message){
message(paste0("Chromosome: ", cur.chrom))
}
# Split datasets if by.chrom was requested.
# Get split index.
if(by.chrom){
split.index <- map[, chrom] == cur.chrom
# Get data to for current chromosome.
cur.map <- map[split.index, ]
      cur.M <- M[, split.index, drop = FALSE]
# Save other chromosomes.
map <- map[!split.index, ]
M <- M[, !split.index, drop = FALSE]
} else {
# Collect map and M for calculations.
# Logic: this is required if map is passed but by.chrom is FALSE.
# Original map and M have to be NULL because they are bound later.
cur.map <- map
map <- NULL
cur.M <- M
M <- NULL
}
# Pre-set objects needed in loop.
drop.pos <- NULL
# This must be a loop because it is conditional.
# Logic: this section is conditional to the previous one, so, no parallelization in R.
for (iter in iter.range) {
if (message){
message(" Iteration: ", iter)
}
# Tag markers to eliminate.
cur.map$sel[unlist(drop.pos)] <- 0
# Stop criteria. If there was no drop on the last run. Break.
if (iter > 1 & all(cur.map$sel == 1)) { break }
# Eliminate markers.
      cur.M <- cur.M[, cur.map$sel == 1, drop = FALSE]
cur.map <- cur.map[cur.map$sel == 1, ]
# Defining step size (based on current data).
step <- window.n - overlap.n
# Get sets based on step size.
sets <- seq(1, ncol(cur.M), step)
      n.sets <- length(sets) - 1
      # Get range of sets to loop across (at least one window).
      sets.range <- seq_len(max(n.sets, 1))
# Looping across all sets.
# if (n.cores > 1){
# drop.pos <- mclapply(X = sets.range, mc.cores = n.cores,
# FUN = marker.drop, mc.set.seed = seed)
# } else {
drop.pos <- lapply(X = sets.range, FUN = marker.drop)
# }
}
# Bind chromosomes.
map <- rbind(map, cur.map)
M <- cbind(M, cur.M)
}
# Summary -------------------------------------------------------------------------------------
if (message){
message("\nFinal pruned marker matrix M contains ", nrow(M),
" individuals and ", ncol(M), " markers.")
#message("A total of ", ncol(M), " markers were kept after pruning.")
message("A total of ", original.n.markers - ncol(M), " markers were pruned.")
if (by.chrom){
# Number of markers by chromosome.
message(paste0("A total of ", table(map$chrom), " markers were kept in chromosome ",
names(table(map$chrom)), ".", collapse = "\n"))
message(paste0("A total of ", original.n.markers.chrom - table(map$chrom),
" markers were pruned from chromosome ", names(table(map$chrom)),
".", collapse = "\n"))
}
# maf and call rate report.
message("Range of minor allele frequency after pruning: ",
paste0(round(range(maf(M = M)), 2), collapse = " ~ "))
message("Range of marker call rate after pruning: ",
paste0(round(range(callrate(M = M, margin = "col")), 2), collapse = " ~ "))
message("Range of individual call rate after pruning: ",
paste0(round(range(callrate(M = M, margin = "row")), 2), collapse = " ~ "))
}
  # Finalize ------------------------------------------------------------------------------------
map <- map[, !names(map) %in% c("criteria", "sel")]
# Return pruned map and molecular matrix.
return(list(map = map, Mpruned = M))
}
| /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/snp_pruning.R |
#' Recodes the molecular matrix M for downstream analyses
#'
#' Reads molecular data in format of bi-allelic nucleotide bases (AA,
#' AG, GG, CC, etc.) and recodes them as 0, 1, 2 and \code{NA} to be used in other
#' downstream analyses.
#'
#' @param M A character matrix with SNP data of full form (\eqn{n \times p}),
#' with \eqn{n} individuals and \eqn{p} markers.
#' Individual and marker names are assigned to \code{rownames} and \code{colnames}, respectively.
#' Data in matrix is coded as AA, AG, GG, CC, etc. (default = \code{NULL}).
#' @param recoding A character indicating the recoding option to be performed.
#' Currently, only the nucleotide bases (AA, AG, ...) to allele count is available (\code{"ATGCto012"})
#' (default = \code{"ATGCto012"}).
#' @param map (Optional) A data frame with the map information with \eqn{p} rows.
#' If \code{NULL} a dummy map is generated considering a single chromosome and sequential
#' positions for markers and includes reference allele and alternative allele (default = \code{NULL}).
#' @param marker A character indicating the name of the column in data frame \code{map} with the identification
#' of markers. This is mandatory if \code{map} is provided (default = \code{NULL}).
#' @param ref A character indicating the name of the column in the map containing the reference allele for
#' recoding. If absent, then conversion will be based on the major allele (most frequent).
#' The marker information of a given individual with two copies of the allele specified
#' in \code{ref} will be coded as 2. This is mandatory if \code{map} is provided (default = \code{NULL}).
#' @param alt A character indicating the name of the column in the map containing the alternative allele for
#' recoding. If absent, then it will be inferred from the data. The marker information of a given individual
#' with two copies of the allele specified in \code{alt} will be coded as 0 (default = \code{NULL}).
#' @param na.string A character that is interpreted as missing values (default = \code{"NA"}).
#' @param rename.markers If \code{TRUE} marker names (as provided in \strong{M}) will be expanded
#' to store the reference and alternative alleles. For example, from AX-88234566 to AX-88234566_C_A.
#' In the event of unidentified alleles, 0 will be used (default = \code{TRUE}).
#' @param message If \code{TRUE} diagnostic messages are printed on screen (default = \code{TRUE}).
#'
#' @return A list with the following two elements:
#' \itemize{
#' \item{\code{Mrecode}: the molecular matrix \eqn{\boldsymbol{M}} recoded to 0, 1, 2 and \code{NA}.}
#' \item{\code{map}: the data frame with the map information including reference allele and alternative allele.}
#' }
#'
#' @export
#'
#' @examples
#' # Create bi-allelic base data set.
#' Mnb <- matrix(c(
#' "A-", NA, "GG", "CC", "AT", "CC", "AA", "AA",
#' "AAA", NA, "GG", "AC", "AT", "CG", "AA", "AT",
#' "AA", NA, "GG", "CC", "AA", "CG", "AA", "AA",
#' "AA", NA, "GG", "AA", "AA", NA, "AA", "AA",
#' "AT", NA, "GG", "AA", "TT", "CC", "AT", "TT",
#' "AA", NA, NA, "CC", NA, "GG", "AA", "AA",
#' "AA", NA, NA, "CC", "TT", "CC", "AA", "AT",
#' "TT", NA, "GG", "AA", "AA", "CC", "AA", "AA"),
#' ncol = 8, byrow = TRUE, dimnames = list(paste0("ind", 1:8),
#' paste0("m", 1:8)))
#' Mnb
#'
#' # Recode without map (but map is created).
#' Mr <- snp.recode(M = Mnb, na.string = NA)
#' Mr$Mrecode
#' Mr$map
#'
#' # Create map.
#' mapnb <- data.frame(
#' marker = paste0("m", 1:8),
#' reference = c("A", "T", "G", "C", "T", "C", "A", "T"),
#' alternative = c("T", "G", "T", "A", "A", "G", "T", "A")
#' )
#' mapnb
#'
#' # Recode with map without alternative allele.
#' Mr <- snp.recode(M = Mnb, map = mapnb, marker = "marker", ref = "reference",
#' na.string = NA, rename.markers = TRUE)
#' Mr$Mrecode
#' Mr$map
#'
#' # Notice that the alternative allele is in the map as a regular variable,
#' # but in the names it is inferred from data (which might be 0 (missing)).
#'
#' # Recode with map with alternative allele.
#' Mr <- snp.recode(M = Mnb, map = mapnb, marker = "marker",
#' ref = "reference", alt = "alternative",
#' na.string = NA, rename.markers = TRUE)
#' Mr$Mrecode
#' Mr$map # Now the alternative is also on the names.
#'
#' # We can also recode without renaming the markers.
#' Mr <- snp.recode(M = Mnb, map = mapnb, marker = "marker", ref = "reference",
#' na.string = NA, rename.markers = FALSE)
#' Mr$Mrecode
#' Mr$map # The map is unchanged since markers were not renamed.
#'
snp.recode <- function(M = NULL, map = NULL, marker = NULL, ref = NULL, alt = NULL,
recoding = c("ATGCto012"),
na.string = NA, rename.markers = TRUE,
message = TRUE){
# Traps ---------------------------------------------------------------------------------------
# Check recoding.
# recoding is just a placeholder for now.
recoding <- match.arg(recoding)
# Check class of M.
check.data_(data_ = "M", class_ = "matrix")
# Check if class of values is character.
check.data.mode_(data_ = "M", mode_ = "character")
# Create map if not provided (same as in pruning).
if (!is.null(map)) {
# Check map class.
check.data_(data_ = "map", class_ = "data.frame")
# Check map names. Check mandatory variables in map.
if(is.null(marker)){stop("The \'marker' option must be specified if \'map' is provided.")}
if(is.null(ref)){stop("The \'ref' option must be specified if \'map' is provided.")}
# Check if they are present in the map.
map.name.hit <- c(marker, ref, alt) %in% names(map)
if (!all(map.name.hit)){
stop("Value provided to argument \'", c("marker", "ref", "alt")[!map.name.hit],
"' does not correspond to a variable in \'map'.")
}
# Match map and M.
if (!identical(as.character(map[[marker]]), colnames(M))){
stop("map[[marker]] and colnames(M) must be identical.")
}
}
# Check if all values are composed of 2 letters.
if (any(nchar(M) != 2, na.rm = TRUE)) {
warning("Marker(s) not compliant with bi-allelic coding: ",
paste0(colnames(M)[ceiling(which(nchar(M) != 2) / nrow(M))], collapse = ", "),
".\n The respective datapoints have been replaced with NA.")
M[nchar(M) != 2] <- NA
}
special.char <- apply(X = M, MARGIN = 2, FUN = function(col){
any(grepl(pattern = "[[:punct:]]", x = col))
})
# Removing eventual special characters (e.g. A-).
if (any(special.char)){
warning("Special characters identified in marker(s): ",
paste0(names(special.char)[special.char], collapse = ", "),
".\n The respective datapoints have been replaced with NA.")
M[grepl(pattern = "[[:punct:]]", x = M)] <- NA
}
# Body ----------------------------------------------------------------------------------------
# Replace na.string by NA.
if (!is.na(na.string)) {
if (na.string == "NA") { na.string <- NA }
}
if (!is.na(na.string)) {
if (message){
message('A total of ', sum(M %in% na.string),
" values were identified as missing with the string ",
na.string, " and were replaced by NA.")
}
M[M %in% na.string] <- NA
}
# Function to get sorted states of a marker.
get.states_ <- function(m = NULL){
sort( # TODO sorting is not really required now.
unique(
unlist(
strsplit(x = m, split = ""))))
}
# Identify the states.
states <- apply(X = M, MARGIN = 2, FUN = get.states_, simplify = FALSE)
# Initiate main frame.
reference.frame <- data.table()
# Get number of states.
reference.frame[, n.states := sapply(states, length) ]
# Get markers names.
reference.frame[, marker := colnames(M)]
# Check for more than 2 states.
if (any(reference.frame$n.states > 2)) {
stop("Markers with more than two allelic states: ",
paste0(colnames(M)[which(reference.frame$n.states > 2)], collapse = ", "),".")
}
# Add states to the frame.
reference.frame[, c("state1", "state2"):=
list(sapply(states, function(m) m[1]),
sapply(states, function(m) m[2]))]
# Add reference state to reference frame.
if(is.null(ref)) {
# Logic: the following code calculates the MAF based on the bases.
# It pastes all together, and the separate all and counts the composing letters.
# It this is too slow we might have to change a bit.
# The ifelse trick is required if a marker only has missing data.
reference.frame[, ref :=
apply(X = M, MARGIN = 2, FUN = function(m){
cur.ref <- names(
which.max(
table(
strsplit(
paste0(na.omit(m), collapse = ""),
"")
)
)
)
return(
ifelse(test = is.null(cur.ref),
yes = NA,
no = cur.ref))
})
]
} else {
reference.frame[, ref := ..map[[ref]]]
if (!is.null(alt)){
reference.frame[, alt := ..map[[alt]]]
}
}
# Identify the alternative state (based on the reference state).
if (is.null(alt)){
reference.frame[, alt := ifelse(test = {state2 == ref}, yes = state1, no = state2)]
}
# Find wrong coding in reference allele.
# Find wrong references.
wrong.code <- reference.frame[state1 != ref & state2 != ref, marker]
if (length(wrong.code) > 0) {
stop("The provided reference (\'ref') missmatches the allele codings in: ", wrong.code, ".")
}
# Find wrong coding in alternative allele.
if (!is.null(alt)){
# Find wrong references.
wrong.code <- reference.frame[state1 != alt & state2 != alt, marker]
if (length(wrong.code) > 0) {
stop("The provided reference (\'alt') missmatches the allele codings in: ", wrong.code, ".")
}
} ; rm(wrong.code)
# TODO try replacing on a new matrix.
# Create combinations for comparisons.
  # NAs are checked in the alternative state for the first three codes, and in the reference for the fourth.
reference.frame[!is.na(alt), code0 := paste0(alt, alt)]
reference.frame[!is.na(alt), code1A := paste0(ref, alt)]
reference.frame[!is.na(alt), code1B := paste0(alt, ref)]
reference.frame[!is.na(ref), code2 := paste0(ref, ref)]
# Rename markers if requested.
if (rename.markers){
# reference.frame[, marker := paste0(marker, "_", ref, "_", alt)]
reference.frame[, marker :=
paste0(marker, "_",
replace(x = ref, list = is.na(ref), values = "0"), "_",
replace(x = alt, list = is.na(alt), values = "0"))]
}
# Replace letters with numbers.
M <- sapply(1:ncol(M), FUN = function(index){
m <- M[, index]
tmp.ref <- reference.frame[index,]
m[m %in% na.omit(tmp.ref[["code0"]])] <- 0
m[m %in% na.omit(tmp.ref[["code1A"]])] <- 1
m[m %in% na.omit(tmp.ref[["code1B"]])] <- 1
m[m %in% na.omit(tmp.ref[["code2"]])] <- 2
return(m)
})
# Reassign names to M.
colnames(M) <- reference.frame[["marker"]]
# Transform to numeric.
mode(M) <- "numeric"
# Finalize ------------------------------------------------------------------------------------
# Report.
if (message) {
message("Matrix M was recoded from bi-allelic nucleotide bases to numeric.")
}
# Prepare ref to export.
if (is.null(map)){
map <- dummy.map_(marker.id = reference.frame[["marker"]], message = FALSE)
# Add reference and alternative alleles to map.
map$ref <- reference.frame$ref
map$alt <- reference.frame$alt
} else {
# If map is not NULL and rename is requested. Collect names from reference frame.
if(rename.markers){
map[[marker]] <- reference.frame[["marker"]]
}
}
# Return the output list.
return(list(Mrecode = M, map = map))
}
| /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/snp_recode.R |
#' Generates a full matrix form from a sparse form matrix
#'
#' Modifies the input sparse form matrix into its full form.
#' The sparse form has three columns per line, corresponding to the set:
#' \code{Row, Col, Value}, and is defined by a lower triangle row-wise
#' of the full matrix and is sorted as columns within row.
#' Individual names should be assigned as attributes: \code{attr(K, "rowNames")}
#' and \code{attr(K, "colNames")}. If these are not provided they are considered
#' as 1 to \eqn{n}.
#'
#' Based on a function from ASReml-R 3 library by Butler \emph{et al.} (2009).
#'
#' @param K A square matrix in sparse form (default = \code{NULL}).
#'
#' @return A full square matrix where individual names are assigned to
#' \code{rownames} and \code{colnames}.
#' If attribute \code{INVERSE} is found this is also passed to the full matrix.
#'
#' @references
#' Butler, D.G., Cullis, B.R., Gilmour, A.R., and Gogel, B.J. 2009.
#' ASReml-R reference manual. Version 3. The Department of Primary
#' Industries and Fisheries (DPI&F).
#'
#' @export
#'
#' @examples
#' # Get G matrix.
#' Gsp <- G.matrix(M = geno.apple, method = "VanRaden", sparseform = TRUE)$G.sparse
#' head(Gsp)
#' head(attr(Gsp, "rowNames"))
#'
#' # Transform into full matrix.
#' G <- sparse2full(K = Gsp)
#' G[1:5, 1:5]
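#'
#' # A minimal sketch building a small sparse (lower-triangle, row-wise) form
#' # by hand; values here are arbitrary and for illustration only.
#' Ksp <- matrix(c(1, 1, 1.0,
#'                 2, 1, 0.5,
#'                 2, 2, 1.0,
#'                 3, 1, 0.2,
#'                 3, 2, 0.1,
#'                 3, 3, 1.0),
#'               ncol = 3, byrow = TRUE)
#' attr(Ksp, "rowNames") <- c("A", "B", "C")
#' sparse2full(K = Ksp)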
#'
sparse2full <- function(K = NULL) {
if (is.null(K) || !inherits(K, "matrix")) {
stop('K should be a valid object of class matrix.')
}
# Collect inverse attribute if any.
INVERSE <- attr(K, "INVERSE")
# Collect dimnames to apply on new matrix.
rownames <- attr(K, "rowNames")
# Collect number of rows.
nrow <- max(K[, 1])
# Collect number of columns.
ncol <- max(K[, 2])
# Get dummy rownames if not provided in attributes.
if (is.null(rownames)) {
rownames <- as.character(1:nrow)
}
# Assign relationships to relative positions.
K.full <- rep(0, nrow * ncol)
K.full[(K[, 2] - 1) * nrow + K[, 1]] <- K[, 3]
K.full[(K[, 1] - 1) * nrow + K[, 2]] <- K[, 3]
# Reshape to matrix.
K.full <- matrix(K.full, nrow = nrow, ncol = ncol, byrow = FALSE)
# Assign row and colnames.
attr(K.full, "colNames") <-
attr(K.full, "rowNames") <-
colnames(K.full) <-
rownames(K.full) <-
rownames
if (!is.null(INVERSE)) {attr(K.full, "INVERSE") <- INVERSE}
return(K.full)
}
| /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/sparse2full.R |
#' Generates a molecular matrix M for hypothetical crosses based on the
#' genomic information of the parents
#'
#' This function generates (or imputes) a molecular matrix for offspring
#' from hypothetical crosses based on the genomic information from the parents.
#' This is a common procedure in species such as maize, where only the parents
#' (inbred lines) are genotyped, and this information is used to generate/impute
#' the genotypic data of each of the hybrid offspring.
#' This function can also be used for bulked DNA analyses, in order to obtain a
#' bulked molecular matrix for full-sib individuals where only parents are genotyped.
#'
#'
#' @param M A matrix with marker data of full form (\eqn{n \times p}), with \eqn{n} individuals
#' (mothers and fathers) and \eqn{p} markers.
#' Individual and marker names are assigned to \code{rownames} and \code{colnames}, respectively.
#' Data in matrix is coded as 0, 1, 2 (integer or numeric) (default = \code{NULL}).
#' @param ped A data frame with three columns containing only the pedigree of the hypothetical
#' offspring (not the pedigree of the parents).
#' It should include the three columns for individual, mother and father (default = \code{NULL}).
#' @param indiv A character indicating the column in \code{ped} data frame containing the identification
#' of the offspring (default = \code{NULL}).
#' @param mother A character indicating the column in \code{ped} data frame containing the identification
#' of the mother (default = \code{NULL}).
#' @param father A character indicating the column in \code{ped} data frame containing the identification
#' of the father (default = \code{NULL}).
#' @param heterozygote.action Indicates the action to take when heterozygotes are found in a marker.
#' Options are: \code{"useNA"}, \code{"exact"}, \code{"fail"}, and \code{"expected"}.
#' See details for more information (default = \code{"useNA"})
#' @param na.action Indicates the action to take when missing values are found in a marker.
#' Options are: \code{"useNA"} and \code{"expected"}.
#' See details for more information (default = \code{"useNA"}).
#' @param message If \code{TRUE} diagnostic messages are printed on screen (default = \code{TRUE}).
#'
#' @details
#' For double-haploids, almost all markers (except for genotyping errors)
#' will be homozygotic reads. But in many other cases (including recombinant inbred lines)
#' there will be a proportion of heterozygotic reads. In these cases, it is
#' very difficult to infer (impute) the exact genotype of a given offspring individual.
#' For example, if parents are 0 (AA) and 1 (AC) then offspring will differ given this
#' Mendelian sampling. However, different strategies exist to determine the
#' expected value for that specific cross (if required), which are detailed below
#' using the option \code{heterozygote.action}.
#'
#' \itemize{
#'
#' \item{If \code{heterozygote.action = "useNA"},
#' the generated offspring will have, for the heterozygote read, an \code{NA},
#' and no markers are removed.
#' Hence, no attempt will be done to impute/estimate this value.
#' }
#'
#' \item{If \code{heterozygote.action = "exact"},
#' any marker containing one or more heterozygote reads will be removed.
#' Hence, inconsistent markers are fully removed from the \eqn{\boldsymbol{M}} matrix.
#' }
#'
#' \item{If \code{heterozygote.action = "fail"},
#' function stops and informs of the presence of heterozygote reads.
#' }
#'
#' \item{If \code{heterozygote.action = "expected"},
#' then an algorithm is implemented, on the heterozygote read to determine its
#' expected value. For example, if parents are 0 and 1, then the expected value
#' (with equal probability) is 0.5. For a cross between two heterozygotes,
#' the expected value is: \eqn{0(1/4) + 1(1/2) + 2(1/4) = 1}. And for a cross
#' between 1 and 2, the expected value is: \eqn{1(1/2) + 2(1/2) = 1.5}}
#' }
#'
#' Missing value require special treatment, and an imputation strategy is detailed
#' below as indicated using the option \code{na.action}.
#'
#' \itemize{
#'
#' \item{If \code{na.action = "useNA"}, if at least one of the parental reads
#' is missing values for a given marker then it will be assigned as missing for
#' the hypothetical cross. Hence, no attempt will be done to impute/estimate
#' this value.}
#'
#' \item{If \code{na.action = "expected"}, then an algorithm is implemented that
#' will impute the expected read of the cross if the genotype of \strong{one of
#' the parents is missing} (\emph{e.g.}, cross between 0 and NA). Calculations
#' are based on parental allelic frequencies \eqn{p} and \eqn{q} for the given
#' marker. The expressions for expected values are detailed below.}
#'
#' \itemize{
#'
#' \item{If the genotype of the non-missing parent read is 0.
#'
#' \eqn{q^2} (probability that the missing parent is 0) x 0 (expected value of the offspring from a 0 x 0 cross: \eqn{0(1/1)}) +
#'
#' \eqn{2pq} (probability that the missing parent is 1) x 0.5 (expected offspring from a 0 x 1 cross: \eqn{0(1/2) + 1(1/2)}) +
#'
#' \eqn{p^2} (probability that the missing parent is 2) x 1 (expected offspring from a 0 x 2 cross: \eqn{1(1/1)})}
#'
#' \item{If the genotype of the non-missing parent read is 1.
#'
#' \eqn{q^2} (probability that the missing parent is 0) x 0.5 (offspring: \eqn{0(1/2) + 1(1/2)}) +
#'
#' \eqn{2pq} (probability that the missing parent is 1) x 1 (offspring: \eqn{0(1/4) + 1(1/2) + 2(1/4)}) +
#'
#' \eqn{p^2} (probability that the missing parent is 2) x 1.5 (offspring: \eqn{1(1/2) + 2(1/2)})}
#'
#' \item{If the genotype of the non-missing parent read is 2.
#'
#' \eqn{q^2} (probability that the missing parent is 0) x 1 (offspring: \eqn{1(1/1)}) +
#'
#' \eqn{2pq} (probability that the missing parent is 1) x 1.5 (offspring: \eqn{1(1/2) + 2(1/2)}) +
#'
#' \eqn{p^2} (probability that the missing parent is 2) x 2 (offspring: \eqn{2(1/1)})}
#' }
#'
#'
#' Similarly, the calculation of the expected read of a cross when \strong{both parents are missing} is
#' also based on population allelic frequencies for the given marker.
#' The expressions for expected values are detailed below.
#'
#' \eqn{q^2 \times q^2} (probability that both parents are 0) x 0 (expected value of the offspring from a 0 x 0 cross: 0(1/1)) +
#'
#' \eqn{2 \times (q^2 \times 2pq)} (probability that the first parent is 0 and the second is 1; this requires
#' the multiplication by 2 because it is also possible that the first parent is 1 and the second is 0)
#' x 0.5 (offspring: \eqn{0(1/2) + 1(1/2)}) +
#'
#' \eqn{2 \times (q^2 \times p^2)} (this could be 0 x 2 or 2 x 0) x 1 (offspring: \eqn{1(1/1)}) +
#'
#' \eqn{2pq \times 2pq} (both parents are 1) x 1 (offspring: \eqn{0(1/4) + 1(1/2) + 2(1/4)}) +
#'
#' \eqn{2 \times (2pq \times p^2)} (this could be 1 x 2 or 2 x 1) x 1.5 (offspring: \eqn{1(1/2) + 2(1/2)}) +
#'
#' \eqn{p^2 \times p^2} (both parents are 2) x 2 (offspring: \eqn{2(1/1)})
#'
#' Note that the use of \code{na.action = "expected"} is recommended when
#' a large number of offspring will conform the hybrid cross (such as
#' with bulked DNA analyses) for family groups with reasonable number of individuals.
#'
#' \strong{Warning}. If \code{"expected"} is used for \code{heterozygote.action} or \code{na.action},
#' direct transformation of the molecular data to other codings (\emph{e.g.},
#' dominance matrix coded as \code{c(0,1,0)}) is not recommended.
#' }
#'
#' @return
#' A molecular matrix \eqn{\boldsymbol{M}} containing the genotypes generated/imputed for the
#' hypothetical cross.
#'
#' @export
#' @md
#'
#' @examples
#' # Create dummy pedigree (using first 10 as parents).
#' ped <- data.frame(
#' male = rownames(geno.apple)[1:5],
#' female = rownames(geno.apple)[6:10])
#' ped$offs <- paste(ped$male, ped$female, sep = "_")
#' ped
#'
#' # Select portion of M for parents.
#' Mp <- geno.apple[c(ped$male, ped$female), 1:15]
#'
#' # Get genotype of crosses removing markers with heterozygotes.
#' synthetic.cross(
#' M = Mp, ped = ped,
#' indiv = "offs", mother = "female", father = "male",
#' heterozygote.action = "exact",
#' na.action = "useNA")
#'
#' # Request the synthetic cross to be NA in the respective samples.
#' synthetic.cross(
#' M = Mp, ped = ped,
#' indiv = "offs", mother = "female", father = "male",
#' heterozygote.action = "useNA",
#' na.action = "useNA")
#'
#' # Get genotype of crosses and use expected values.
#' synthetic.cross(
#' M = Mp, ped = ped,
#' indiv = "offs", mother = "female", father = "male",
#' heterozygote.action = "expected", na.action = "expected")
#'
synthetic.cross <- function(M = NULL, ped = NULL, indiv = NULL, mother = NULL, father = NULL,
heterozygote.action = c("useNA", "exact", "fail", "expected"),
na.action = c("useNA", "expected"),
# n.cores = 1,
message = TRUE){
# Traps ---------------------------------------------------------------------------------------
# Check na.action.
na.action <- match.arg(na.action)
# Check heterozygote.action.
heterozygote.action <- match.arg(heterozygote.action)
# Check M class.
check.data_(data_ = "M", class_ = "matrix")
# Check ped class.
check.data_(data_ = "ped", class_ = "data.frame")
# Check indiv.
check.args_(data_ = ped, mandatory_ = TRUE, arg_ = indiv,
class_ = "character", mutate_ = TRUE,
class.action_ = "message", message_ = TRUE)
  # Check mother.
check.args_(data_ = ped, mandatory_ = TRUE, arg_ = mother,
class_ = "character", mutate_ = TRUE,
class.action_ = "message", message_ = TRUE)
  # Check father.
check.args_(data_ = ped, mandatory_ = TRUE, arg_ = father,
class_ = "character", mutate_ = TRUE,
class.action_ = "message", message_ = TRUE)
# # Check ped names.
# ped.name.hit <- c(indiv, mother, father) %in% names(ped)
# if (!all(ped.name.hit)){
# stop("Value provided to argument \'", c("indiv", "mother", "father")[!ped.name.hit],
# "' does not correspond to a variable in \'ped'.")
# }
# Check heterozygote action.
heterozygote.action <- match.arg(heterozygote.action)
# Check na action.
na.action <- match.arg(na.action)
# Check message.
check.logical_(arg_ = "message")
# Stop if heterozygous action is fail.
if (heterozygote.action == "fail"){
# Identify markers with heterozygotes and remove.
het.markers <- apply(X = M, MARGIN = 2, FUN = function(m) {any(m == 1, na.rm = TRUE)})
if (any(het.markers)){
stop("Stop requested as some of the markers have have heterozygous states (1), e.g., ",
paste0(names(head(het.markers[het.markers], 5)), collapse = ", "), "...")
}
}
# Check if all parents have genotype in M.
if(!all(c(ped[[mother]], ped[[father]]) %in% rownames(M))){
stop("Some parents do not have genotypic information in matrix M.")
}
# Data manipulation traps ---------------------------------------------------------------------
# Get unique combinations.
unique.comb <- paste(ped[[mother]], ped[[father]], sep = "_")
# Get reciprocals.
unique.comb.rec <- paste(ped[[father]], ped[[mother]], sep = "_")
recs <- unique.comb %in% unique.comb.rec
# Identify duplicates.
dups <- duplicated(unique.comb)
if(any(dups)){
# Report duplicates.
message("A total of ", sum(dups), " duplicated rows were found in the supplied pedigree.")
message("Removing \'ped' lines: ", which(dups), ".")
# Remove duplicates.
ped <- ped[!dups, ]
}
if(any(recs)){
# Report reciprocals.
warning("Reciprocals found in the supplied pedigree. These were not removed.")
# TODO try to remove reciprocals?
}
rm(dups, recs)
# Prepare data --------------------------------------------------------------------------------
# Collect initial number of markers.
initial.n.markers <- ncol(M)
# Check and remove eventual heterozygotes in data if exact method is chosen.
if (heterozygote.action == "exact"){
if (message){
message(blue("\nExact method selected. Eliminating markers containing one or more heterozygotic read."))
}
# Identify markers with heterozygotes and remove.
het.markers <- apply(X = M, MARGIN = 2, FUN = function(m) {any(m == 1, na.rm = TRUE)})
M <- M[, !het.markers, drop = FALSE]
if (message){
message("Total number of dropped markers: ", initial.n.markers - ncol(M))
message("Total number of remaining markers: ", ncol(M))
}
# Stop if no marker left.
if (ncol(M) == 0){
stop("No marker were left after removal of heterozygote reads.")
}
}
# Generate hybrid space -----------------------------------------------------------------------
# Collect marker names (this is necessary in some cases, e.g., 1 marker left).
marker.ids <- colnames(M)
# Get range to loop through.
range.hybrids <- 1:nrow(ped)
if (message){
message(blue("\nGenerating in hypothetical crosses genotypic information."))
}
# Call function to get offspring genotype.
get.off.gen <- function(cur.offspring = NULL){
# Collect genotype of combination.
M.mother <- M[ped[cur.offspring, mother],]
M.father <- M[ped[cur.offspring, father],]
M.offspring <- rbind(M.mother, M.father)
if (heterozygote.action == "useNA"){
M.offspring[M.offspring %in% 1] <- NaN
}
# Get genotypes with NA.
if (na.action == "useNA"){
return(colMeans(M.offspring))
}
# Get genotypes with expected values.
if (na.action == "expected"){
# Initiate evo object.
evo <- NULL
# Get expected means.
kid <- colMeans(M.offspring)
# Identify genotypes missing in the current kid
missing.markers <- is.na(kid) & !is.nan(kid)
if (any(missing.markers)){
# Get the frequency of pseudo-major allele 2.
freq2 <- colMeans(M, na.rm = T)/2
# Loop across all missing markers from the current kid.
evo <- sapply(X = which(missing.markers), function(m) {
# Get current marker.
marker <- M.offspring[, m]
# Get pseudo-p.
f.pseudo.major <- freq2[m]
# Get pseudo-q.
f.pseudo.minor <- (1 - f.pseudo.major)
          # Logic: pseudo-q and pseudo-p are required so we know which frequency to use in
          # the multiplications below. The MAF is not good here because
          # the major allele might not be represented by a 2 in the molecular matrix as
          # we don't know where this data comes from. Using pseudo-q and pseudo-p, there is no
          # need to know the reference allele! We have to match the genotype with
          # the possible offspring.
# If there is one missing parent.
if(sum(is.na(marker)) == 1){
par.gen.no.miss <- sum(marker, na.rm = TRUE)
# If the genotype of the non-missing parent is 0.
if (par.gen.no.miss == 0){
evo <-
# q2
f.pseudo.minor^2 * 0 +
# 2pq
2 * f.pseudo.minor * f.pseudo.major * 0.5 +
                # p2
f.pseudo.major^2 * 1
}
# If the genotype of the non-missing parent is 1.
if (par.gen.no.miss == 1){
evo <-
# q2
f.pseudo.minor^2 * 0.5 +
# 2pq
2 * f.pseudo.minor * f.pseudo.major * 1 +
                # p2
f.pseudo.major^2 * 1.5
}
# If the genotype of the non-missing parent is 2.
if (par.gen.no.miss == 2){
evo <-
# q2
f.pseudo.minor^2 * 1 +
# 2pq
2 * f.pseudo.minor * f.pseudo.major * 1.5 +
                # p2
f.pseudo.major^2 * 2
}
}
# If there are two missing parents.
if(sum(is.na(marker)) == 2){
f.pseudo.major <- freq2[m]
            # All possible combinations of unknown parents.
evo <-
# q2 x q2 (0 x 0)
f.pseudo.minor^2 * f.pseudo.minor^2 * 0 +
# 2 * q2 x 2pq (this could be 0 x 1 or 1 x 0)
              2 * (f.pseudo.minor^2 * 2 * f.pseudo.minor * f.pseudo.major) * 0.5 +
# 2 * q2 x p2 (this could be 0 x 2 or 2 x 0)
2 * (f.pseudo.minor^2 * f.pseudo.major^2) * 1 +
# 2pq x 2pq
2 * f.pseudo.minor * f.pseudo.major * 2 * f.pseudo.minor * f.pseudo.major * 1 +
              # 2 * 2pq x p2 (this could be 1 x 2 or 2 x 1)
2 * (2 * f.pseudo.minor * f.pseudo.major * f.pseudo.major^2) * 1.5 +
# p2 x p2
f.pseudo.major^2 * f.pseudo.major^2 * 2
# Simplification (not sure if works on all cases - probably yes).
# evo <- f.pseudo.minor^2 * 0 +
# 2 * f.pseudo.minor * f.pseudo.major * 1 +
# f.pseudo.major^2 * 2
}
# Return the expected value of the offspring.
return(evo)
})
}
# Replace na in kid.
kid[which(missing.markers)] <- evo
# Return the imputed kid.
return(kid)
}
}
# Run function.
# if (n.cores > 1){
# M <- mclapply(X = range.hybrids, mc.cores = n.cores, FUN = get.off.gen)
# } else {
M <- lapply(X = range.hybrids, FUN = get.off.gen)
# }
# Get hybrid matrix.
M <- do.call(rbind, M)
# Replace NaN with NA because of heterozygotes.
M[is.nan(M)] <- NA
# Add names of hybrids to new matrix.
rownames(M) <- ped[[indiv]]
# Add maker ids.
colnames(M) <- marker.ids
# Return.
return(M)
}
| /scratch/gouwar.j/cran-all/cranData/ASRgenomics/R/synthetic_cross.R |
## ======================================================================== ##
## Miguel de Carvalho ##
## Copyright (C) 2018 ##
## ------------------------------------------------------------------------ ##
## This program is free software; you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation; either version 2 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program; if not, a copy is available at ##
## http://www.r-project.org/Licenses/ ##
## ======================================================================== ##
bmssa <- function(y, l = 32)
UseMethod("bmssa")
bmssa.default <- function(y, l = 32) {
## Run a basic input validation
if (is.ts(y) == FALSE | dim(y)[2] < 2)
stop('y must be a multivariate time series (mts) object')
if (l%%1 != 0 | l <= 0 | l > dim(y)[1])
        stop('l must be a positive integer smaller than the number of observations per series')
n <- dim(y)[1]
M <- dim(y)[2]
k <- n - l + 1
# Embedding
Y <- matrix(NA, nrow = M * l, ncol = k)
for(i in 1:M)
Y[((i - 1) * l + 1): (i * l), ] <- trajectory(y[, i], l, k)
# Singular value decomposition
SVD <- svd(Y)
U <- SVD$u
PC <- matrix(0, nrow = n, ncol = M * l)
sfisher <- numeric()
pval <- numeric()
# Targeted grouping
for(i in 1:(M * l)) {
C <- U[, i]%*%t(U[, i])%*%Y
PC[, i] <- dbar(C[1:l, ], l, k)
sp <- spec.pgram(ts(PC[, i], frequency = frequency(y),
start = start(y)), plot = FALSE, taper = 0.5)
w <- sp$freq
spec <- sp$spec
wstar <- w[which(spec == max(spec))]
J <- length(w)
g <- max(spec) / sum(spec)
aux <- rep(0, J)
for(j in 1:J)
aux[j] <- (-1)^(j - 1) * choose(J, j) *
max((1 - j * g), 0)^(J - 1)
pval <- sum(aux)
if (pval < 0.05 & wstar > 4 / 32 & wstar < 4 / 6)
sfisher <- c(sfisher, i)
}
erc <- PC[, sfisher]
cycle <- as.numeric(rowSums(erc))
cycle <- ts(cycle, frequency = frequency(y), start = start(y))
erc <- ts(erc, frequency = frequency(y), start = start(y))
## Organize and return outputs
outputs <- list(cycle = cycle, sfisher = sfisher, erc = erc, l = l,
call = match.call())
class(outputs) <- "bmssa"
return(outputs)
}
print.bmssa <- function(x, ...) {
cat("\n Multivariate Singular Spectrum Business Cycle Analysis:\n =======================================================\n ")
print(x$call)
cat("\n Business cycle indicator:\n")
cat("\n")
print(x$cycle)
cat("\n Principal components selected by the Fisher g statistic \n")
cat("\n")
print(x$sfisher)
}
plot.bmssa <- function(x, ylab = "Multivariate Singular Spectrum Indicator",
lwd = 3, ...) {
    ## Pass the user-supplied graphical arguments through to plot().
    plot(x$cycle, ylab = ylab, lwd = lwd, ...)
}
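## Minimal usage sketch (simulated quarterly data, illustrative only;
## kept as comments so nothing runs at package load):
## set.seed(1)
## y <- ts(matrix(cumsum(rnorm(240)), ncol = 2),
##         start = c(1990, 1), frequency = 4)
## fit <- bmssa(y, l = 32)
## plot(fit)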
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/bmssa.R |
## ======================================================================== ##
## Miguel de Carvalho ##
## Copyright (C) 2018 ##
## ------------------------------------------------------------------------ ##
## This program is free software; you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation; either version 2 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program; if not, a copy is available at ##
## http://www.r-project.org/Licenses/ ##
## ======================================================================== ##
bssa <- function(y, l = 32)
UseMethod("bssa")
bssa.default <- function(y, l = 32) {
n <- length(y)
## Run a basic input validation
if (is.ts(y) == FALSE | is.null(dim(y)) == FALSE)
stop('y must be a univariate time series (ts) object')
if (l%%1 != 0 | l <= 0 | l > n)
        stop('l must be a positive integer smaller than the number of observations')
k <- n - l + 1
## Embedding
Y <- trajectory(y, l, k)
## Singular value decomposition
SVD <- svd(Y)
U <- SVD$u
PC <- matrix(0, nrow = n, ncol = l)
sfisher <- numeric()
## Targeted grouping
for(i in 1:l) {
PC[, i] <- dbar(U[, i]%*%t(U[, i])%*%Y, l, k)
sp <- spec.pgram(ts(PC[, i], frequency = frequency(y),
start = start(y)), plot = FALSE, taper = 0.5)
w <- sp$freq
spec <- sp$spec
wstar <- w[which(spec == max(spec))]
J <- length(w)
g <- max(spec) / sum(spec)
aux <- rep(0, J)
for(j in 1:J)
aux[j] <- (-1)^(j - 1) * choose(J, j) *
max((1 - j * g), 0)^(J - 1)
pval <- sum(aux)
if (pval < 0.05 & wstar > 4 / 32 & wstar < 4 / 6)
sfisher <- c(sfisher, i)
}
erc <- PC[, sfisher]
cycle <- as.numeric(rowSums(erc))
cycle <- ts(cycle, frequency = frequency(y), start = start(y))
erc <- ts(erc, frequency = frequency(y), start = start(y))
## Organize and return outputs
outputs <- list(cycle = cycle, sfisher = sfisher, erc = erc, l = l,
call = match.call())
class(outputs) <- "bssa"
return(outputs)
}
print.bssa <- function(x, ...) {
cat("\n Singular Spectrum Business Cycle Analysis:\n ==========================================\n ")
print(x$call)
cat("\n Business cycle indicator:\n")
cat("\n")
print(x$cycle)
cat("\n Principal components selected by the Fisher g statistic \n")
cat("\n")
print(x$sfisher)
}
plot.bssa <- function(x, ylab = "Singular Spectrum Indicator",
lwd = 3, ...) {
    ## Pass the user-supplied graphical arguments through to plot().
    plot(x$cycle, ylab = ylab, lwd = lwd, ...)
}
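## Minimal usage sketch (simulated quarterly data, illustrative only;
## kept as comments so nothing runs at package load):
## set.seed(1)
## y <- ts(cumsum(rnorm(120)), start = c(1990, 1), frequency = 4)
## fit <- bssa(y, l = 32)
## plot(fit)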
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/bssa.R |
## ======================================================================== ##
## Miguel de Carvalho ##
## Copyright (C) 2018 ##
## ------------------------------------------------------------------------ ##
## This program is free software; you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation; either version 2 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program; if not, a copy is available at ##
## http://www.r-project.org/Licenses/ ##
## ======================================================================== ##
combplot <- function(fit) {
selection <- rep(0, fit$l)
selection[fit$sfisher] <- 1
s <- length(fit$sfisher)
par(mfrow = c(1, 2))
plot(1, selection[1], type = "h", xlab = "Principal Component",
ylab = expression(paste("Fisher ", italic(g), " Indicator")),
lwd = 3, col = "blue", xlim = c(0, fit$l), ylim = c(0, 1))
for(i in 2:fit$l)
lines(i, selection[i], type = "h",
lwd = 3, col = colorRampPalette(c("blue", "violet",
"red", "orange"))(fit$l)[i])
plot(fit$erc[, 1], type = "l", lwd = 1, col =
colorRampPalette(c("blue", "violet", "red",
"orange"))(fit$l)[fit$sfisher[1]], ylab = "")
for(i in 2:s)
lines(fit$erc[, i], type = "l", lwd = 1, col =
colorRampPalette(c("blue", "violet", "red",
"orange"))(fit$l)[fit$sfisher[i]])
}
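## Usage sketch (hypothetical 'fit'): combplot(fit) takes an object returned
## by bssa() or bmssa() and displays the Fisher g selection indicator next to
## the selected elementary reconstructed components.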
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/combplot.R |
.onAttach <- function(libname, pkgname) {
packageStartupMessage("## ============================================== ##")
RFver <- read.dcf(file = system.file("DESCRIPTION", package = pkgname),
fields = "Version")
packageStartupMessage(paste
("##", pkgname, RFver, " ##"))
packageStartupMessage("## ---------------------------------------------- ##")
packageStartupMessage("## Copyright (C) 2018 ##")
packageStartupMessage("## M. de Carvalho and G. Martos ##")
packageStartupMessage("## ============================================== ##")
packageStartupMessage("")
}
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/hidden.R |
isst <-function(y, l = 'automatic', m = 'automatic')
UseMethod("isst")
isst.default <- function(y, l = 'automatic' , m = 'automatic') {
n <- y$n; D <- 1
## Run a basic input validation
if (class(y) %notin% c('itsframe') ){
stop('y must be a univariate interval time series object.')}
  if(is.numeric(m) & length(m) != 1) { stop('Stop: Parameter m in the model must be a positive integer.') }
if(!is.numeric(l)){ l <- ceiling( (n + 1) / 2) }
k <- n - l + 1
## Step 1: Trajectory matrix *array*
Y <- array(NA, c(l, k, 2) ) # l rows, k columns, 2 dimensions
Y[ , , 1] <- trajectory(y$a, l, k)
Y[ , , 2] <- trajectory(y$b, l, k)
## Step 2:svd
S = Var.Est(Y,Y)$outer.product #*array input*
SVD <- svd(S)
rk <- qr(SVD$u)$rank # rank
## Step 3 and 4:
y.tilde = residuals = matrix(0, ncol = 2, nrow = n)
if(is.numeric(m)) {
if(m > rk) { m = rk
warning( "Number of components (see 'm') required to construct trendlines
was reduced to be compatible with the rank of the trajectory matrix." ) }
Y_i = YYI_i(Y, SVD, i=m, l=l, k=k) ;
y.tilde[,1] <- dbar(Y_i$Y_iA, l = l, k = k);
y.tilde[,2] <- dbar( Y_i$Y_iB, l = l, k = k)
residuals[,1] <- pmin(y$a-y.tilde[,1],y$b-y.tilde[,2]);
residuals[,2] <- pmax(y$a-y.tilde[,1],y$b-y.tilde[,2]);
} else { m = 0; stop.flag = 1
while(stop.flag != 0 & m < rk) {
m = m + 1
Y_i = YYI_i(Y, SVD, i=m, l=l, k=k) ;
y.tilde[,1] <- dbar(Y_i$Y_iA, l = l, k = k);
y.tilde[,2] <- dbar(Y_i$Y_iB, l = l, k = k);
residuals[,1] <- pmin(y$a-y.tilde[,1],y$b-y.tilde[,2]);
residuals[,2] <- pmax(y$a-y.tilde[,1],y$b-y.tilde[,2]);
stop.flag <- ecip(residuals);
} } # end 'else'
y.tilde <- itsframe(dates = y$date, a = pmin(y.tilde[ , 1],y.tilde[ , 2]), b = pmax(y.tilde[, 1], y.tilde[, 2]))
residuals <- itsframe(dates = y$date, a = residuals[ , 1], b = residuals[, 2])
#### ERC:
erc <- array(NA, c(n, m, 2) ) # n rows, m columns, 2 dimensions
for(i in 1:m){
Y_i = YYI_i.erc(Y, SVD, i=i, l=l, k=k) ;
ll = dbar(Y_i$Y_iA, l = l, k = k); uu = dbar( Y_i$Y_iB, l = l, k = k)
erc[,i,1] <- pmin(ll,uu); erc[,i,2] <- pmax(ll,uu)
}
outputs <- list(trendline = y.tilde,
l = l,
m = m,
residuals = residuals,
svd = SVD,
erc = erc,
observations = y,
call = match.call()) ;
class(outputs) <- "isst"
return(outputs)
}
print.isst <- function(x, ...) {
cat("\n Singular Spectrum Trendlines for Interval Data:\n ========================================= \n")
print(x$call)
cat("\n Interval Trendlines:\n"); cat("\n")
print(x$trendline)
cat("\n Elementary components included in the estimation \n"); cat("\n")
print(x$m)
}
plot.isst <- function(x, time.format = "%m-%y", col = NULL, lty = NULL, main = NULL, type = NULL, pch = NULL, lwd = NULL,
ylab = NULL,xlab = NULL, ylim = NULL, xlim = NULL, cex.lab = NULL, cex.axis = NULL, cex.main = NULL,
options = list(type = 'trendline',
ncomp = NULL), ...) {
if(options$type %!in% c('trendline', 'screeplot', 'components', 'cpgram')) {
stop('options type must be one of the strings: "trendline", "components", "cpgram", or "screeplot".')}
if(options$type == 'trendline') {
plot(x$trendline$date, x$trendline$a,
main = if(is.null(main)){''} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type},
xlab = if(is.null(xlab)){'Time'} else {xlab},
ylab = if(is.null(ylab)){'Interval Singular Spectrum Trendline'} else {ylab},
ylim = if(is.null(ylim)){c(min(x$trendline$a,x$trendline$b),max(x$trendline$a,x$trendline$b))} else {ylim},
xlim = if(is.null(xlim)){c(min(x$trendline$dates),max(x$trendline$dates))} else {xlim},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main},
xaxt = "n")
if(inherits(x$trendline$dates, "Date")==T){ timelabels<-format(x$trendline$dates,time.format) ; axis(1,at=x$trendline$dates,labels=timelabels,cex.axis = if(is.null(cex.axis)){1} else {cex.axis})} else { axis(1,at=x$trendline$dates,cex.axis = if(is.null(cex.axis)){1} else {cex.axis}) }
lines(x$trendline$date, x$trendline$b,
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
type = if(is.null(type)){'l'} else {type})
polygon(c(rev(x$trendline$date), x$trendline$date), c(rev(x$trendline$a), x$trendline$b),
border = NA, col = if(is.null(col)){'lightgray'} else {col})
}
if(options$type == 'screeplot') {
if(is.null(options$ncomp)){options$ncomp = c(1:3)}
plot(log(x$svd$d[options$ncomp]),
main = if(is.null(main)){'Scree--plot'} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'b'} else {type},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main},
xlab = if(is.null(xlab)){'Index'} else {xlab},
ylab = if(is.null(ylab)){'Eigenvalues (log-scale)'} else {ylab})
}
if(options$type == 'components') {
if(is.null(options$ncomp)){ options$ncomp = c(1:round(dim(x$erc)[2])) }
if( max(options$ncomp) > dim(x$erc)[2] ) {
print( paste('Please choose the number of ERC in the range from 1 :', dim(x$erc)[2]) )
} else {
coldim <- options$ncomp;
par(mfrow = c(1, length(coldim)))
for(j in coldim) {
plot(x$trendline$dates, x$erc[, j, 1],
main = if(is.null(main)){paste("ERC -", j)} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type},
xlab = if(is.null(xlab)){''} else {xlab},
ylab = if(is.null(ylab)){''} else {ylab},
ylim = if(is.null(ylim)){ c(min(c(x$erc[, j, 1],x$erc[, j, 2])),max(c(x$erc[, j, 1],x$erc[, j, 2]))) } else {ylim},
cex.lab = if(is.null(cex.lab)){0.8} else {cex.lab},
cex.axis = if(is.null(cex.axis)){0.8} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main} )
lines(x$trendline$dates, x$erc[, j, 2],
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type})
polygon(c(rev(x$trendline$dates), x$trendline$dates),
c(rev(x$erc[, j, 1]), x$erc[, j, 2]),
border = NA, col = if(is.null(col)){'lightgray'} else {col})
}
}
}
if(options$type == "cpgram") {
ecip(residuos = cbind(x$residuals$a,x$residuals$b), plot.flag = TRUE)
}
}
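## Minimal usage sketch (simulated interval-valued series, illustrative only;
## kept as comments so nothing runs at package load):
## d <- seq(as.Date("2000-01-01"), by = "month", length.out = 48)
## lo <- cumsum(rnorm(48)); hi <- lo + abs(rnorm(48))
## y <- itsframe(dates = d, a = lo, b = hi)
## fit <- isst(y)
## plot(fit, options = list(type = "trendline"))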
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/isst.R |
## ======================================================================== ##
## Miguel de Carvalho ##
## Copyright (C) 2018 ##
## ------------------------------------------------------------------------ ##
## This program is free software; you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation; either version 2 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program; if not, a copy is available at ##
## http://www.r-project.org/Licenses/ ##
## ======================================================================== ##
itsframe <- function(dates, a, b)
UseMethod("itsframe")
itsframe.default <- function(dates, a, b) {
n <- length(a)
## Run basic input validation
if (length(a) != length(b))
stop('a and b must be of the same length')
if(inherits(dates, "Date")==T){dates = as.Date(dates)} else {dates = dates}
## Organize and return outputs
outputs <- list(dates = dates, a = a, b = b, n=n, D=1, call = match.call())
class(outputs) <- "itsframe"
return(outputs)
}
plot.itsframe <- function(x, time.format="%m-%y", col = NULL, lty = NULL, main = NULL, type = NULL, pch = NULL, lwd = NULL,
tick = TRUE, ylab = NULL,xlab = NULL, ylim = NULL, xlim = NULL,cex.lab=NULL, cex.axis=NULL,cex.main=NULL, ...) {
plot(x$dates, x$a,
main = if(is.null(main)){''} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
type = if(is.null(type)){'l'} else {type},
xlab = if(is.null(xlab)){'Time'} else {xlab},
lwd = if(is.null(lwd)){1} else {lwd},
ylab = if(is.null(ylab)){''} else {ylab},
ylim = if(is.null(ylim)){c(min(x$a, x$b),max(x$a, x$b))} else {ylim},
xlim = if(is.null(xlim)){c(min(x$dates),max(x$dates))} else {xlim},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main},
xaxt="n")
if(inherits(x$dates, "Date")==T){ timelabels<-format(x$dates,time.format) ;
axis(1,at=x$dates, tick =tick, labels=timelabels,cex.axis = if(is.null(cex.axis)){1} else {cex.axis})} else { axis(1,at=x$dates, tick =tick, cex.axis = if(is.null(cex.axis)){1} else {cex.axis}) }
lines(x$dates, x$b,
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
type = if(is.null(type)){'l'} else {type},
lwd = if(is.null(lwd)){1} else {lwd})
polygon(c(rev(x$dates), x$dates), c(rev(x$a), x$b),
border = NA, col = if(is.null(col)){'lightgray'} else {col})
}
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/itsframe.R |
misst <- function(y, l = 'automatic', m = 'automatic', vertical = TRUE)
UseMethod("misst")
misst.default <- function(y, l = 'automatic', m = 'automatic',
vertical = TRUE) {
n <- y$n
D <- y$D
## H. Hassani & R. Mahmoudvand (2013; pp. 68, eq. 25).
if((l == 'automatic') & (vertical == T | vertical == TRUE)) {l <- ceiling((n + 1) / (D + 1))}
if((l == 'automatic') & (vertical == F | vertical == FALSE)){l <- ceiling(D * (n + 1) / (D + 1))}
k <- n - l + 1
## Run a basic input validation
if (class(y) != "mitsframe")
stop('Stop: the input y must be a multivariate interval valued object (see ?mitsframe).')
if (is.numeric(l) & (l%%1 != 0 | l <= 0 | l > y$n))
stop('Stop: l must be a positive integer smaller than number of observations per series.')
if(is.numeric(m) & length(m) != D)
stop('Stop: The length of vector m must be equal to the number of time series to analyze.')
## Step 1:
Y_list = list();
for(d in 1:D){
Y <- array(NA, c(l, k, 2) ) # l rows, k columns, 2 dimensions
Y[ , , 1] <- trajectory(y$A[, d], l, k)
Y[ , , 2] <- trajectory(y$B[, d], l, k)
Y_list[[d]] <- Y
}
    if(isTRUE(vertical)) {
Y <- array(NA, c(D*l, k,2) )
for( i in 1:D){ Y[((i-1)*l+1):(i*l) , ,] <- Y_list[[i]] } } else {
Y <- array(NA, c(l, D*k,2) )
for( i in 1:D){ Y[ ,((i-1)*k+1):(i*k), ] <- Y_list[[i]] } }
## Step 2: SVD
    if(isTRUE(vertical)) {
S <- matrix(NA, nrow = D*l, ncol = D*l)
for( i in 1:D){
for(j in 1:D){
S[((i-1)*l+1):(i*l),((j-1)*l+1):(j*l)] <- Var.Est(Y_list[[i]],Y_list[[j]])$outer.product;
}
} } else {
S <- matrix(0, nrow = l, ncol = l)
for( i in 1:D){
S = S + Var.Est(Y_list[[i]],Y_list[[i]])$outer.product
}
} # isSymmetric(S)
SVD <- svd(S)
rk <- qr(SVD$u)$rank # rank.
## Step 3 and 4:
A.tilde = B.tilde = A.Residuals = B.Residuals = matrix(0, ncol = D, nrow = n)
    if(is.numeric(m)) {
if(sum(m > rk) > 0) {
m = replace(m, m > rk, rk)
warning( "Number of components (see 'm') required to construct trendlines in at least
^ one dimension was reduced to be compatible with the rank of the trajectory matrix." ) }
if(vertical == TRUE){
for(d in 1:D){
Y_i = YYI_i(Y, SVD, i=m[d], l=D*l, k=k) ;
tempA <- dbar(Y_i$Y_iA[((d - 1) * l + 1): (d * l), ], l = l, k = k)
tempB <- dbar(Y_i$Y_iB[((d - 1) * l + 1): (d * l), ], l = l, k = k)
A.tilde[,d] <- pmin(tempA, tempB)
B.tilde[,d] <- pmax(tempA, tempB)
A.Residuals[,d] <- pmin(y$A[,d]-A.tilde[,d],y$B[,d]-B.tilde[,d])
B.Residuals[,d] <- pmax(y$A[,d]-A.tilde[,d],y$B[,d]-B.tilde[,d])
} } else {
for(d in 1:D){
Y_i = YYI_i(Y, SVD, i=m[d], l=l, k=D*k) ;
tempA <- dbar(Y_i$Y_iA[ , ((d - 1) * k + 1) : (d * k) ], l = l, k = k)
tempB <- dbar(Y_i$Y_iB[ , ((d - 1) * k + 1) : (d * k) ], l = l, k = k)
A.tilde[,d] <- pmin(tempA, tempB)
B.tilde[,d] <- pmax(tempA, tempB)
A.Residuals[,d] <- pmin(y$A[,d]-A.tilde[,d],y$B[,d]-B.tilde[,d])
B.Residuals[,d] <- pmax(y$A[,d]-A.tilde[,d],y$B[,d]-B.tilde[,d])
} } } else{
m = rep(0,D); #* Storing here the number of ERC on each dimension.
for(d in 1:D){
stop.flag = 1
while(stop.flag != 0 & m[d] < rk) {
m[d] = m[d] + 1
if(vertical == TRUE){
Y_i = YYI_i(Y, SVD, i=m[d], l=D*l, k=k) ;
tempA <- dbar(Y_i$Y_iA[((d - 1) * l + 1): (d * l), ], l = l, k = k)
tempB <- dbar(Y_i$Y_iB[((d - 1) * l + 1): (d * l), ], l = l, k = k)
A.tilde[,d] <- pmin(tempA, tempB)
B.tilde[,d] <- pmax(tempA, tempB)
A.Residuals[,d] <- pmin(y$A[,d]-A.tilde[,d],y$B[,d]-B.tilde[,d])
B.Residuals[,d] <- pmax(y$A[,d]-A.tilde[,d],y$B[,d]-B.tilde[,d])
} else {
Y_i = YYI_i(Y, SVD, i=m[d], l=l, k=D*k) ;
tempA <- dbar(Y_i$Y_iA[ , ((d - 1) * k + 1) : (d * k) ], l = l, k = k)
tempB <- dbar(Y_i$Y_iB[ , ((d - 1) * k + 1) : (d * k) ], l = l, k = k)
A.tilde[,d] <- pmin(tempA, tempB)
B.tilde[,d] <- pmax(tempA, tempB)
A.Residuals[,d] <- pmin(y$A[,d]-A.tilde[,d],y$B[,d]-B.tilde[,d])
B.Residuals[,d] <- pmax(y$A[,d]-A.tilde[,d],y$B[,d]-B.tilde[,d])
}
stop.flag <- ecip(as.matrix(cbind(A.Residuals[,d],B.Residuals[,d])));
} } } # end 'else'
#### ERC:
erc = list()
for(j in 1:D){
erc.d <- array(NA, c(n, m[j], 2) ) # n rows, m columns, 2 dimensions
for(i in 1:m[j]){
      if(vertical == TRUE){
        Y_i = YYI_i(Y, SVD, i=i, l=D*l, k=k) ;
        ## select the block of rows corresponding to series j
        ll <- dbar(Y_i$Y_iA[((j - 1) * l + 1): (j * l), ], l = l, k = k)
        uu <- dbar(Y_i$Y_iB[((j - 1) * l + 1): (j * l), ], l = l, k = k)
        erc.d[,i,1] <- pmin(ll,uu); erc.d[,i,2] <- pmax(ll,uu)} else {
          Y_i = YYI_i(Y, SVD, i=i, l=l, k=D*k) ;
          ## select the block of columns corresponding to series j
          ll <- dbar(Y_i$Y_iA[ , ((j - 1) * k + 1) : (j * k) ], l = l, k = k)
          uu <- dbar(Y_i$Y_iB[ , ((j - 1) * k + 1) : (j * k) ], l = l, k = k)
          erc.d[,i,1] <- pmin(ll,uu); erc.d[,i,2] <- pmax(ll,uu) } }
erc[[j]] <- erc.d
}
# Grouping the results into mtsframe objects to deliver in the output
Y.tilde <- mitsframe(dates = y$dates, A = A.tilde, B = B.tilde)
residuals <- mitsframe(dates = y$dates, A = A.Residuals, B = B.Residuals)
outputs <- list(trendlines = Y.tilde,
l = l,
m = m,
vertical = vertical,
residuals = residuals,
svd = SVD,
erc = erc,
observations = y,
call = match.call()) ;
class(outputs) <- "misst"
return(outputs)
}
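## Usage sketch (illustrative only; toy data made up for the example): fit
## multivariate interval trendlines to a `mitsframe`; with the defaults, the
## window length l and the number of components m are chosen automatically.
## A <- matrix(rnorm(100), ncol = 2); B <- A + abs(matrix(rnorm(100), ncol = 2))
## y <- mitsframe(dates = 1:50, A = A, B = B)
## fit <- misst(y)
## fit$trendlines   # fitted interval trendlines (a `mitsframe`)
## fit$m            # number of elementary components used per series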
print.misst <- function(x, ...) {
cat("\n Multivariate Singular Spectrum Trendlines for Interval Data:\n ========================================= \n")
print(x$call)
cat("\n Interval Trendlines:\n"); cat("\n")
print(x$trendlines)
cat("\n Elementary components included in the estimation \n"); cat("\n")
print(x$m)
}
plot.misst <- function(x, time.format = "%m-%y", col = NULL, lty = NULL, main = NULL, type = NULL, pch = NULL, lwd = NULL,
ylab = NULL,xlab = NULL, ylim = NULL, xlim = NULL, cex.lab = NULL, cex.axis = NULL, cex.main = NULL,
tick = FALSE, options = list(type = 'trendlines' , ncomp = 1:5), ...) {
if(options$type %!in% c('trendlines', 'screeplots', 'components', 'cpgrams')) {
print('options must be one of the strings: "trendlines", "components", "cpgrams", or "screeplots".')}
if(options$type == 'trendlines') {
matplot(x$trendlines$dates, x$trendlines$A,
main = if(is.null(main)){''} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type},
xlab = if(is.null(xlab)){'Time'} else {xlab},
ylab = if(is.null(ylab)){'Interval Singular Spectrum Trendline'} else {ylab},
ylim = if(is.null(ylim)){c(min(x$trendlines$A,x$trendlines$B),max(x$trendlines$A,x$trendlines$B))} else {ylim},
xlim = if(is.null(xlim)){c(min(x$trendlines$dates),max(x$trendlines$dates))} else {xlim},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main},
xaxt = "n")
if(inherits(x$trendlines$dates, "Date")==T){ timelabels<-format(x$trendlines$dates,time.format) ;
axis(1,at=x$trendlines$dates,labels=timelabels, tick = tick, cex.axis = if(is.null(cex.axis)){1} else {cex.axis})} else { axis(1,at=x$trendlines$dates, tick = tick, cex.axis = if(is.null(cex.axis)){1} else {cex.axis}) }
for(i in 1:x$trendlines$D){
lines(x$trendlines$dates, x$trendlines$A[,i],
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type})
lines(x$trendlines$dates, x$trendlines$B[,i],
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type})
polygon(c(rev(x$trendlines$dates), x$trendlines$dates),
c(rev(x$trendlines$A[,i]), x$trendlines$B[,i]),
col = if(is.null(col)){'lightgray'} else {col}, border = NA)}
}
if(options$type == 'screeplots') {
if(is.null(options$ncomp)){options$ncomp = c(1:3)}
D <- x$trendlines$D
for(i in 1:D) {
plot(log(x$svd$d[options$ncomp]),
main = if(is.null(main)){'Scree--plot'} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'b'} else {type},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main},
xlab = if(is.null(xlab)){'Index'} else {xlab},
ylab = if(is.null(ylab)){'Eigenvalues (log-scale)'} else {ylab})
}
}
if(options$type == 'components') {
    if(is.null(options$ncomp)){options$ncomp = 1 ; print('ncomp is missing; only the first ERC is plotted')}
coldim <- options$ncomp; rowdim <- x$trendlines$D
par(mfrow = c(rowdim, length(coldim)))
for(i in 1:rowdim) {
for(j in coldim) {
        plot(x$trendlines$dates, x$erc[[i]][, j, 1],
             main = if(is.null(main)){paste("Series:", i, "- ERC:", j)} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type},
xlab = if(is.null(xlab)){''} else {xlab},
ylab = if(is.null(ylab)){''} else {ylab},
             ylim = if(is.null(ylim)){ c(min(c(x$erc[[i]][, j, 1],x$erc[[i]][, j, 2])),max(c(x$erc[[i]][, j, 1],x$erc[[i]][, j, 2]))) } else {ylim},
cex.lab = if(is.null(cex.lab)){0.8} else {cex.lab},
cex.axis = if(is.null(cex.axis)){0.8} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main} )
        lines(x$trendlines$dates, x$erc[[i]][, j, 2],
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type})
        polygon(c(rev(x$trendlines$dates), x$trendlines$dates),
c(rev(x$erc[[i]][, j, 1]), x$erc[[i]][, j, 2]),
border = NA, col = if(is.null(col)){'lightgray'} else {col})
}
}
}
if(options$type == "cpgrams") {
D <- x$trendlines$D
for(i in 1:D){
ecip(residuos = cbind(x$residuals$A[,i],x$residuals$B[,i]), plot.flag = TRUE)
}
}
} | /scratch/gouwar.j/cran-all/cranData/ASSA/R/misst.R |
mitsframe <- function(dates, A,B)
UseMethod("mitsframe")
mitsframe.default <- function(dates, A,B) {
    A = as.matrix(A) # coerce to a matrix in case of a univariate interval time series.
    B = as.matrix(B) # coerce to a matrix in case of a univariate interval time series.
n <- dim(A)[1]; D <- dim(A)[2]
if(inherits(dates, "Date")==T){dates = as.Date(dates)} else {dates = dates}
## Organize and return outputs
outputs <- list(dates = dates, A = A, B=B, n=n, D=D, call = match.call())
class(outputs) <- "mitsframe"
return(outputs)
}
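## Usage sketch (illustrative only; toy data made up): a `mitsframe` stores D
## interval-valued series via matrices of lower (`A`) and upper (`B`) bounds,
## one column per series; entries of B should be at least the matching entries of A.
## A <- matrix(rnorm(60), ncol = 2); B <- A + abs(matrix(rnorm(60), ncol = 2))
## y <- mitsframe(dates = 1:30, A = A, B = B)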
plot.mitsframe <- function(x,time.format="%m-%y", col = NULL, lty = NULL, main = NULL, type = NULL, pch = NULL, lwd = NULL,
tick = TRUE, ylab = NULL,xlab = NULL, ylim = NULL, xlim = NULL,cex.lab=NULL, cex.axis=NULL,cex.main=NULL, ...) {
matplot(x$dates, x$A,
main = if(is.null(main)){''} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type},
xlab = if(is.null(xlab)){'Time'} else {xlab},
ylab = if(is.null(ylab)){''} else {ylab},
ylim = if(is.null(ylim)){ c(min(x$A,x$B), max(x$A,x$B)) } else {ylim},
xlim = if(is.null(xlim)){c(min(x$dates),max(x$dates))} else {xlim},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main},
xaxt="n")
if(inherits(x$dates, "Date")==T){ timelabels<-format(x$dates,time.format) ;
axis(1,at=x$dates,tick =tick, labels=timelabels,cex.axis = if(is.null(cex.axis)){1} else {cex.axis})} else { axis(1,at=x$dates,tick =tick, cex.axis = if(is.null(cex.axis)){1} else {cex.axis}) }
for(i in 1:x$D){
lines(x$dates, x$B[,i],
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
type = if(is.null(type)){'l'} else {type},
lwd = if(is.null(lwd)){1} else {lwd})
polygon(c(rev(x$dates), x$dates), c(rev(x$A[,i]), x$B[,i]),
border = NA, col = if(is.null(col)){'lightgray'} else {col})
}
}
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/mitsframe.R |
msst <- function(y, l = 'automatic', m = 'automatic', vertical = TRUE)
UseMethod("msst")
msst.default <- function(y, l = 'automatic', m = 'automatic', vertical = TRUE) {
n <- y$n; D <- y$D
## Cf H. Hassani & R. Mahmoudvand (2013; pp. 68, eq. 25).
if((l == 'automatic') & (vertical == TRUE))
l <- ceiling((n + 1) / (D + 1))
if((l == 'automatic') & (vertical != TRUE))
l <- ceiling(D * (n + 1) / (D + 1))
k <- n - l + 1
## Run a basic input validation
if (class(y) != "mtsframe")
stop('The input y must be an mtsframe object.')
if (length(l) >1 | l%%1 != 0 | l <= 0 | l > y$n)
stop('l is a positive integer smaller than the observations on each series.')
if(is.numeric(m) & length(m) != D)
stop('Vector m must be equal to the number of time series.')
## Step 1: Embedding
if(vertical == TRUE) {
## Vertical embedding
Y <- matrix(NA, nrow = D * l, ncol = k)
for(d in 1:D)
Y[((d - 1) * l + 1): (d * l), ] <- trajectory(y$Y[, d], l, k)
} else {
## Horizontal embedding
Y <- matrix(NA, nrow = l, ncol = D * k)
for(d in 1:D)
Y[, ((d - 1) * k + 1) : (d * k) ] <- trajectory(y$Y[, d], l, k)
}
rk <- qr(Y)$rank # rank number (to make a consistency check later)
## Step 2: SVD
SVD <- svd(Y%*%t(Y))
## Step 3 and 4:
erc = list(); Y.tilde = Residuals = matrix(NA,nrow = n, ncol = D)
if(is.numeric(m) == T) {
    if(sum(m > rk) > 0) { m <- replace(m, m > rk, rk) # rank consistency check first.
      warning("Some entries in m were automatically reduced (rank-deficient trajectory matrix).") }
for(d in 1:D) {
erc[[d]] <- matrix(NA, nrow = n, ncol = m[d])
for(i in 1:m[d]){ y_i <- Y_i(Y = Y, SVD = SVD, i = i);
if(vertical == TRUE) {erc[[d]][,i] <- dbar(y_i[((d - 1) * l + 1): (d * l), ], l = l, k = k)
} else {erc[[d]][,i] <- dbar(y_i[ , ((d - 1) * k + 1) : (d * k) ], l = l, k = k)} }
Y.tilde[,d] <- rowSums(erc[[d]])
Residuals[,d] <- Y.tilde[,d] - y$Y[,d] # Residuals matrix
}
# end if:numeric.
} else {
m = c() # *store here the total number of ERC on each dimension*
for(d in 1:D) {
stop.flag <- 0; mm <- 1; erc[[d]] <- matrix(NA, nrow = n, ncol = 0)
while(stop.flag==0){ y_i <- Y_i(Y = Y, SVD = SVD, i = mm);
if(vertical == TRUE) {erc[[d]] <- cbind(erc[[d]], dbar(y_i[((d - 1) * l + 1): (d * l), ], l = l, k = k))
} else {erc[[d]] <- cbind(erc[[d]],dbar(y_i[ , ((d - 1) * k + 1) : (d * k) ], l = l, k = k))}
Y.tilde[,d] <- rowSums(erc[[d]])
Residuals[,d] <- Y.tilde[,d] - y$Y[,d] # Residuals matrix
if(cpgram2(Residuals[,d]) == 1) { mm <- mm + 1 } else { stop.flag <- 1 }
} # end 'while'
m[d] = mm } # end 'for:d'
} #end steps 3 & 4.
# Grouping the results into mtsframe objects to deliver in the output
  Y.tilde <- mtsframe(dates = y$dates, Y = Y.tilde)
  Residuals <- mtsframe(dates = y$dates, Y = Residuals)
outputs <- list(trendlines = Y.tilde,
l = l,
m = m,
vertical = vertical,
residuals = Residuals,
svd = SVD,
erc = erc,
observations = y,
call = match.call()) ;
class(outputs) <- "msst"
return(outputs)
}
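## Usage sketch (illustrative only; toy data made up): fit multivariate
## singular spectrum trendlines to an `mtsframe`; forecasts can then be
## produced with `predict` (see predict.R).
## y <- mtsframe(dates = 1:60, Y = matrix(rnorm(120), ncol = 2))
## fit <- msst(y, m = c(2, 2))
## predict(fit, p = 4)$forecasts   # 4-steps-ahead forecasts, one column per series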
print.msst <- function(x, ...) {
cat("\n Multivariate Singular Spectrum Trendlines:\n ========================================= \n")
print(x$call)
cat("\n Trendlines:\n"); cat("\n")
print(x$trendlines)
cat("\n Elementary components included in the estimation \n"); cat("\n")
print(x$m)
}
plot.msst <- function(x, time.format="%m-%Y", col = NULL, lty = NULL, main = NULL, type = NULL, pch = NULL, lwd = NULL,
ylab = NULL,xlab = NULL, ylim = NULL, xlim = NULL, cex.lab = NULL, cex.axis = NULL, cex.main = NULL,
options = list(type = 'trendlines',
ncomp.scree = NULL,
ncomp.erc = rep(1,x$trendlines$D)), ...) {
if(options$type %!in% c('trendlines', 'screeplot', 'components', 'cpgrams')) {
print('options must be one of the strings: "trendlines", "components", "cpgrams", or "screeplot".')}
if(options$type == 'trendlines') {
matplot(x$trendlines$date, x$trendlines$Y,
main = if(is.null(main)){''} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
type = if(is.null(type)){'b'} else {type},
lwd = if(is.null(lwd)){1} else {lwd},
xlab = if(is.null(xlab)){'Time'} else {xlab},
ylab = if(is.null(ylab)){'Singular Spectrum Trendlines'} else {ylab},
ylim = if(is.null(ylim)){c(min(x$trendlines$Y),max(x$trendlines$Y))} else {ylim},
xlim = if(is.null(xlim)){c(min(x$trendlines$dates),max(x$trendlines$dates))} else {xlim},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main},
xaxt = "n")
    if(inherits(x$trendlines$dates, "Date")){ timelabels<-format(x$trendlines$dates,time.format) ; axis(1,at=x$trendlines$dates,labels=timelabels)} else { axis(1,at=x$trendlines$dates) }
}
if(options$type == 'screeplot') {
if(is.null(options$ncomp.scree)){options$ncomp.scree = (1:3)}
plot(options$ncomp.scree, log(x$svd$d[options$ncomp.scree]),
ylab = if(is.null(ylab)){'Eigenvalues (log-scale)'} else {ylab},
xlab = if(is.null(xlab)){'Index'} else {xlab},
lty = if(is.null(lty)){1} else {lty},
type = if(is.null(type)){'b'} else {type},
main = if(is.null(main)){''} else {main},
pch = if(is.null(pch)){1} else {pch},
col = if(is.null(col)){'black'} else {col}, #,
xlim = if(is.null(xlim)){c(0,max(options$ncomp.scree))} else {xlim},
ylim = if(is.null(ylim)){c(min(log(x$svd$d[options$ncomp.scree])),
max(log(x$svd$d[options$ncomp.scree])))} else {ylim},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main} )
}
if(options$type == 'components') {
    if(is.null(options$ncomp)){options$ncomp = 1 ; print('ncomp is missing; only the first ERC is plotted')}
if(max(options$ncomp) > max(x$m) ){stop('Incompatible number of elementary reconstructed components')}
coldim <- options$ncomp; rowdim <- x$trendlines$D
par(mfrow = c(rowdim, length(coldim)))
for(i in 1:rowdim) {
for(j in coldim) {
        plot(x$trendlines$dates, x$erc[[i]][, j],
             main = if(is.null(main)){paste("Series:", i, "- ERC:", j)} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type},
xlab = if(is.null(xlab)){''} else {xlab},
ylab = if(is.null(ylab)){''} else {ylab},
ylim = if(is.null(ylim)){ c( min(x$erc[[i]][, j]),max(x$erc[[i]][, j]) ) } else {ylim},
xlim = if(is.null(xlim)){c(min(x$trendlines$dates),max(x$trendlines$dates))} else {xlim},
cex.lab = if(is.null(cex.lab)){0.8} else {cex.lab},
cex.axis = if(is.null(cex.axis)){0.8} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main} )
}
}
}
if(options$type == "cpgrams") {
D <- x$trendlines$D
for(i in 1:D){ cpgram(x$residuals$Y[,i],
main = if(is.null(main)){""} else {paste(options$series.names[i])}) }
}
}
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/msst.R |
msstc <- function(y, l = 'automatic', m = 'automatic', vertical = TRUE)
UseMethod("msstc")
msstc.default <- function(y, l = 'automatic' , m = 'automatic',
vertical = TRUE) {
msst.output <- msst(y = y, l = l, m = m, vertical = vertical)
n <- msst.output$trendlines$n
D <- msst.output$trendlines$D
proj.trendline <- matrix(NA, nrow = n, ncol = D)
for(i in 1:n) { proj.trendline[i, ] <- c(simp.proj(msst.output$trendlines$Y[i,])) }
# Grouping the results into mts objects to deliver in the output.
    Y.tilde <- mtsframe(dates = y$dates, Y = proj.trendline)
    Residuals <- mtsframe(dates = y$dates, Y = proj.trendline - y$Y)
outputs <- list(trendlines = Y.tilde,
l = msst.output$l,
m = msst.output$m,
vertical = msst.output$vertical,
residuals = Residuals,
svd = msst.output$svd,
erc = msst.output$erc,
                    observations = msst.output$observations,
call = match.call()) ;
class(outputs) <- "msstc"
return(outputs)
}
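## Note: msstc post-processes the msst trendlines through simp.proj (see
## workhorse.R), projecting the fitted values at each time point onto the
## probability simplex, so every row of trendlines$Y is nonnegative and sums
## to one, which is appropriate for compositional series such as shares or proportions.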
print.msstc <- function(x, ...) {
cat("\n Multivariate Singular Spectrum Trendlines:\n ========================================= \n")
print(x$call)
cat("\n Trendlines:\n"); cat("\n")
print(x$trendlines)
cat("\n Elementary components included in the estimation \n"); cat("\n")
print(x$m)
}
plot.msstc <- function(x, time.format="%m-%Y", col = NULL, lty = NULL, main = NULL, type = NULL, pch = NULL, lwd = NULL,
ylab = NULL,xlab = NULL, ylim = NULL, xlim = NULL, cex.lab = NULL, cex.axis = NULL, cex.main = NULL,
options = list(type = 'trendlines',
ncomp.scree = NULL,
ncomp.erc = rep(1,x$trendlines$D)), ...) {
if(options$type %!in% c('trendlines', 'screeplot', 'components', 'cpgrams')) {
print('options must be one of the strings: "trendlines", "components", "cpgrams", or "screeplot".')}
if(options$type == 'trendlines') {
matplot(x$trendlines$date, x$trendlines$Y,
main = if(is.null(main)){''} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
type = if(is.null(type)){'b'} else {type},
lwd = if(is.null(lwd)){1} else {lwd},
xlab = if(is.null(xlab)){'Time'} else {xlab},
ylab = if(is.null(ylab)){'Singular Spectrum Trendlines'} else {ylab},
ylim = if(is.null(ylim)){c(min(x$trendlines$Y),max(x$trendlines$Y))} else {ylim},
xlim = if(is.null(xlim)){c(min(x$trendlines$dates),max(x$trendlines$dates))} else {xlim},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main},
xaxt = "n")
    if(inherits(x$trendlines$dates, "Date")){ timelabels<-format(x$trendlines$dates,time.format) ; axis(1,at=x$trendlines$dates,labels=timelabels)} else { axis(1,at=x$trendlines$dates) }
}
if(options$type == 'screeplot') {
if(is.null(options$ncomp.scree)){options$ncomp.scree = (1:3)}
plot(options$ncomp.scree, log(x$svd$d[options$ncomp.scree]),
ylab = if(is.null(ylab)){'Eigenvalues (log-scale)'} else {ylab},
xlab = if(is.null(xlab)){'Index'} else {xlab},
lty = if(is.null(lty)){1} else {lty},
type = if(is.null(type)){'b'} else {type},
main = if(is.null(main)){''} else {main},
pch = if(is.null(pch)){1} else {pch},
col = if(is.null(col)){'black'} else {col}, #,
xlim = if(is.null(xlim)){c(0,max(options$ncomp.scree))} else {xlim},
ylim = if(is.null(ylim)){c(min(log(x$svd$d[options$ncomp.scree])),
max(log(x$svd$d[options$ncomp.scree])))} else {ylim},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main} )
}
if(options$type == 'components') {
    if(is.null(options$ncomp)){options$ncomp = 1 ; print('ncomp is missing; only the first ERC is plotted')}
if(max(options$ncomp) > max(x$m) ){stop('Incompatible number of elementary reconstructed components')}
coldim <- options$ncomp; rowdim <- x$trendlines$D
par(mfrow = c(rowdim, length(coldim)))
for(i in 1:rowdim) {
for(j in coldim) {
        plot(x$trendlines$dates, x$erc[[i]][, j],
             main = if(is.null(main)){paste("Series:", i, "- ERC:", j)} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type},
xlab = if(is.null(xlab)){''} else {xlab},
ylab = if(is.null(ylab)){''} else {ylab},
ylim = if(is.null(ylim)){ c( min(x$erc[[i]][, j]),max(x$erc[[i]][, j]) ) } else {ylim},
xlim = if(is.null(xlim)){c(min(x$trendlines$dates),max(x$trendlines$dates))} else {xlim},
cex.lab = if(is.null(cex.lab)){0.8} else {cex.lab},
cex.axis = if(is.null(cex.axis)){0.8} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main} )
}
}
}
if(options$type == "cpgrams") {
D <- x$trendlines$D
for(i in 1:D){ cpgram(x$residuals$Y[,i],
main = if(is.null(main)){""} else {paste(options$series.names[i])}) }
}
}
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/msstc.R |
mtsframe <- function(dates, Y)
UseMethod("mtsframe")
mtsframe.default <- function(dates, Y) {
Y = as.matrix(Y) # coherece to a matrix in case of univariate time series.
n <- dim(Y)[1]; D <- dim(Y)[2]
if(inherits(dates, "Date")==T){dates = as.Date(dates)} else {dates = dates}
## Organize and return outputs
outputs <- list(dates = dates, Y = Y, n=n, D=D, call = match.call())
class(outputs) <- "mtsframe"
return(outputs)
}
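## Usage sketch (illustrative only; toy data made up): an `mtsframe` stores D
## univariate series as the columns of Y, observed at common dates (a Date
## vector or a numeric index).
## y <- mtsframe(dates = seq(as.Date("2010-01-01"), by = "month", length.out = 36),
##               Y = matrix(rnorm(72), ncol = 2))
## plot(y)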
plot.mtsframe <- function(x, time.format="%m-%y", col = NULL, lty = NULL, main = NULL, type = NULL, pch = NULL, lwd = NULL,
ylab = NULL,xlab = NULL, ylim = NULL, xlim = NULL,cex.lab=NULL, cex.axis=NULL,cex.main=NULL, ...) {
matplot(x$dates, x$Y,
main = if(is.null(main)){''} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type},
xlab = if(is.null(xlab)){'Time'} else {xlab},
ylab = if(is.null(ylab)){''} else {ylab},
ylim = if(is.null(ylim)){ c(min(x$Y), max(x$Y)) } else {ylim},
xlim = if(is.null(xlim)){c(min(x$dates),max(x$dates))} else {xlim},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main},
xaxt="n")
if(inherits(x$dates, "Date")==T){ timelabels<-format(x$dates,time.format) ; axis(1,at=x$dates,labels=timelabels,cex.axis = if(is.null(cex.axis)){1} else {cex.axis})} else { axis(1,at=x$dates,cex.axis = if(is.null(cex.axis)){1} else {cex.axis}) }
}
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/mtsframe.R |
predict <- function(fitted.model, p = 1 )
UseMethod("predict")
predict.default <- function(fitted.model, p = 1) {
    if (missing(fitted.model))
        stop('Stop: Please include a fitted model as an input to produce the forecasts')
    if ( p%%1 != 0 | p <= 0 )
        stop('Stop: The number of periods ahead to forecast must be a positive integer.')
# Parameters:
if(class(fitted.model)%in% c('sst')) {
        # univariate case: a single svd is stored in the fitted object
n <- fitted.model$trendline$n
D <- 1
m <- fitted.model$m
        l <- fitted.model$l # window length (a single number)
y.tilde <- fitted.model$trendline$y
U <- as.matrix(fitted.model$svd$u[,1:m])
        pi<-U[l,] ; Pi<-as.matrix(U[-l,])
        f <- rep(NA,p); a <- rep(NA,l-1)
v2<-sum(pi^2); aux<-matrix(0,l-1,m)
for(i in 1:m){ aux[,i] <- pi[i] * Pi[,i] }
a[(l-1):1] <- rowSums(aux) / (1-v2)
for(i in 1:p){
nn <- length(y.tilde)
f[i] <- a %*% y.tilde[nn:(nn-l+2)]
y.tilde <- c(y.tilde,f[i]) }
Forecast <- f; R <- a
outputs <- list(forecasts = Forecast, coefficients = R)
class(outputs) <- "predict"
return(outputs)
}
if(class(fitted.model)%in% c('msst','msstc')) {
n <- fitted.model$trendlines$n; D <- fitted.model$trendlines$D
        m <- fitted.model$m; l <- fitted.model$l # window length (a single number for msst/msstc)
r <- max(m)
Forecast <- matrix(NA, nrow = p, ncol=D)
        ## Creating R matrix according to stacking strategy
        if(fitted.model$vertical==TRUE){
            W <- matrix(0, nrow = D , ncol = r) # W matrix
            Q <- matrix(0, nrow = (l-1)*D, ncol = r) # Q matrix (U^\nabla, M^T in Hassani).
            Zh <- matrix(0, nrow = (l-1)*D, ncol = 1) # Zh must be a row vector (dim: (l-1)D); notation inconsistent in Hassani.
for(j in 1:D){
U <- as.matrix(fitted.model$svd$u[ ((j-1)*l + 1):(j*l) , (1:r)])
W[j,(1:r)] <- U[l,(1:r)]
Q[((j-1)*(l-1)+1):(j*(l-1)) ,1:r] <- U[-l,(1:r)]
Zh[((j-1)*(l-1)+1):(j*(l-1)) , 1 ] <- fitted.model$trendlines$Y[(n-l+2):n, j]
}
R <- solve(diag(D) - W%*%t(W))%*%W%*%t(Q)
# Recursive forecasting:
h <- 1;
while(h<=p){
Forecast[h , ] <- t(R%*%Zh)
# Updates:
for(j in 1:D){ Zh[((j-1)*(l-1)+1):(j*(l-1)),] <- c(fitted.model$trendlines$Y[,j],Forecast[1:h ,j ])[(n+h-l+2):(n+h)] }
h <- h + 1
} } else {
            # Horizontal stacking:
R <- matrix(0, nrow = l-1, ncol = 1)
Zh <- matrix(0, nrow = D, ncol = (l-1))
U <- as.matrix(fitted.model$svd$u[ , 1:r])
v2<-sum(U[l, ]^2);
for(j in 1:r){R = R + U[l,j]*U[-l,j] }
R = R/(1-v2)
for(j in 1:D){ Zh[ j , ] <- fitted.model$trendlines$Y[(n-l+2):n, j] }
# Recursive forecasting:
h <- 1;
while(h<=p){
Forecast[h , ] <- Zh%*%R
# Updates:
for(j in 1:D){ Zh[ j , ] <- c(fitted.model$trendlines$Y[,j],Forecast[1:h , j ])[(n+h-l+2):(n+h)] }
h <- h + 1 } # end 'while'
} # end 'else'
if(class(fitted.model)%in% c('msstc')) { for(i in 1:p) { Forecast[i, ] <- c(simp.proj(Forecast[i,])) } }
outputs <- list(forecasts = Forecast, coefficients = R)
class(outputs) <- "predict"
return(outputs)
} # End msst and msstc.
if(class(fitted.model)%in%c('isst') ) {
y_ja <- fitted.model$trendline$a
y_jb <- fitted.model$trendline$b
m <- fitted.model$m;
l <- fitted.model$l;
U <- as.matrix(fitted.model$svd$u[,1:m])
Forecast = matrix(NA, ncol = 2, nrow = p)
pi<-U[l,] ; Pi<-as.matrix(U[-l,]);
fa = fb <- rep(NA,p); a<-rep(NA,l-1)
v2<-sum(pi^2); aux<-matrix(0,l-1,m)
for(i in 1:m){ aux[,i] <- pi[i] * Pi[,i] }
a[(l-1):1] <- rowSums(aux) / (1-v2)
for(i in 1:p){
nn <- length(y_ja)
fa[i] <- a %*% y_ja[nn:(nn-l+2)]
y_ja <- c(y_ja,fa[i])
fb[i] <- a %*% y_jb[nn:(nn-l+2)]
y_jb <- c(y_jb,fb[i])
}
Forecast[ , 1] <- pmin(fa,fb); Forecast[ , 2] <- pmax(fa,fb); R <- a
outputs <- list(forecasts = Forecast, coefficients = R)
class(outputs) <- "predict"
return(outputs)
}
if(class(fitted.model)%in% c('misst') ) {
n <- fitted.model$trendlines$n
D <- fitted.model$trendlines$D
m <- fitted.model$m
l <- fitted.model$l # this is a single number in case of mssa mssac
r <- max(m)
Forecast <- array(NA, c(p, D, 2))
if(fitted.model$vertical==TRUE){
W <- matrix(0, nrow = D , ncol = r)
Q <- matrix(0, nrow = (l-1)*D, ncol = r)
ZhA <- ZhB <- matrix(0, nrow = (l-1)*D, ncol = 1)
for(j in 1:D){
U <- fitted.model$svd$u[((j-1)*l+1):(j*l) , (1:r)]
W[j,1:r] <- U[l,1:r]
Q[((j-1)*(l-1)+1):(j*(l-1)) ,1:r] <- U[-l,(1:r)]
ZhA[((j-1)*(l-1)+1):(j*(l-1)) , 1 ] <- fitted.model$trendlines$A[(n-l+2):n, j]
ZhB[((j-1)*(l-1)+1):(j*(l-1)) , 1 ] <- fitted.model$trendlines$B[(n-l+2):n, j]
}
R <- solve(diag(D) - W%*%t(W))%*%W%*%t(Q)
# Recursive forecasting:
h <- 1;
while(h<=p){
Forecast[h , ,1] <- t(R%*%ZhA) ; Forecast[h , ,2] <- t(R%*%ZhB)
# Updates:
for(j in 1:D){ ZhA[((j-1)*(l-1)+1):(j*(l-1)),] <- c(fitted.model$trendlines$A[,j],Forecast[1:h , j, 1])[(n+h-l+2):(n+h)];
ZhB[((j-1)*(l-1)+1):(j*(l-1)),] <- c(fitted.model$trendlines$B[,j],Forecast[1:h , j, 2])[(n+h-l+2):(n+h)]}
h <- h + 1
} } else {
            # Horizontal stacking:
R <- matrix(0, nrow = (l-1), ncol = 1)
ZhA = ZhB <- matrix(0, nrow = D, ncol = (l-1))
U <- as.matrix(fitted.model$svd$u[ , 1:r])
v2<-sum(U[l, ]^2);
for(j in 1:r){R = R + U[l,j]*U[-l,j] }
R = R/(1-v2)
for(j in 1:D){ ZhA[ j , ] <- fitted.model$trendlines$A[(n-l+2):n, j]; ZhB[ j , ] <- fitted.model$trendlines$B[(n-l+2):n, j] }
# Recursive forecasting:
h <- 1;
while(h<=p){
Forecast[h , , 1] <- ZhA%*%R; Forecast[h , , 2] <- ZhB%*%R;
# Updates:
for(j in 1:D){ ZhA[ j , ] <- c(fitted.model$trendlines$A[,j],Forecast[1:h ,j ,1 ])[(n+h-l+2):(n+h)];
ZhB[ j , ] <- c(fitted.model$trendlines$B[,j],Forecast[1:h ,j ,2 ])[(n+h-l+2):(n+h)]}
h <- h + 1 } # end 'while'
} # end 'else'
outputs <- list(forecasts = Forecast, coefficients = R)
class(outputs) <- "predict"
return(outputs)
}
}
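## Usage sketch (illustrative only; toy data made up): recursive p-step-ahead
## forecasting from a fitted trendline object, here an `sst` fit (see sst.R).
## y <- tsframe(dates = 1:100, y = cumsum(rnorm(100)))
## fit <- sst(y, m = 2)
## fc <- predict(fit, p = 5)
## fc$forecasts      # the 5 forecast values
## fc$coefficients   # coefficients of the linear recurrence used in the recursion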
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/predict.R |
sst <- function(y, l = 'automatic', m = 'automatic')
UseMethod("sst")
sst.default <- function(y, l = 'automatic', m = 'automatic') {
n <- y$n
if (class(y) != "tsframe") { stop('Input data must be an tsframe object.') }
if(!is.numeric(l)){ l <- ceiling( (n + 1) / 2) }
k <- n - l + 1
if(!is.numeric(m) & m !='automatic'){ stop('Plase choose the automatic criterion or an integer value for m.')}
if(is.numeric(m) & (length(m) != 1 | m<0 ) ) { stop('m must be positive integer.') }
## Step 1:
Y <- trajectory(y$y, l, k)
## Step 2:
SVD <- svd(Y%*%t(Y))
## Step 3 and 4:
  erc = list(); # *will contain one element (a matrix with the ERCs as columns)*
if(is.numeric(m)) {
erc[[1]] <- matrix(NA, nrow = n, ncol = m)
for(i in 1:m){ y_i <- Y_i(Y = Y, SVD = SVD, i = i); erc[[1]][,i] <- dbar(y_i, l = l, k = k) }
y.tilde <- rowSums(erc[[1]])
residuals <- y.tilde - y$y # residuals.
} else {
stop.flag <- 0; mm <- 1; erc[[1]] <- matrix(NA, nrow = n, ncol = 0)
while(stop.flag == 0) {
y_i <- Y_i(Y = Y, SVD = SVD, i = mm); erc[[1]] <-cbind(erc[[1]], dbar(y_i, l = l, k = k))
y.tilde <- rowSums(erc[[1]])
residuals <- y.tilde - y$y # residuals.
if(cpgram2(residuals) == 1) { mm <- mm + 1 } else { stop.flag <- 1 }
} # end 'while'
m = mm } # end 'else'
y.tilde <- tsframe(dates = y$date, y = y.tilde)
residuals <- tsframe(dates = y$date, y = residuals)
outputs <- list(trendline = y.tilde,
l = l,
m = m,
residuals = residuals,
svd = SVD,
erc = erc,
observations = y,
call = match.call()) ;
class(outputs) <- "sst"
return(outputs)
}
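## Usage sketch (illustrative only; toy data made up): extract a singular
## spectrum trendline from a univariate `tsframe`; with m = 'automatic',
## components are added until the cumulative-periodogram test (cpgram2) no
## longer rejects the white-noise hypothesis for the residuals.
## y <- tsframe(dates = 1:100, y = sin((1:100) / 5) + rnorm(100, sd = 0.3))
## fit <- sst(y)
## fit$m   # number of elementary components selected
## plot(fit, options = list(type = "trendline"))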
print.sst <- function(x, ...) {
cat("\n Singular Spectrum Trendline:\n ============================ \n")
print(x$call)
cat("\n Trendlines:\n"); cat("\n")
print(x$trendline)
cat("\n Elementary components included in the estimation \n"); cat("\n")
print(x$m)
}
plot.sst <- function(x, time.format="%m-%y", col = NULL, lty = NULL, main = NULL, type = NULL, pch = NULL, lwd = NULL,
ylab = NULL,xlab = NULL, ylim = NULL, xlim = NULL, cex.lab = NULL, cex.axis = NULL, cex.main = NULL,
options = list(type = "trendline", ncomp = NULL), ...) {
# Input check:
if(options$type %!in% c("trendline","components","screeplot","cpgram")){
stop("options type must be one of the strings: 'trendline', 'components', 'cpgram', or 'screeplot'.") }
if(options$type=="trendline") {
plot(x$trendline$dates, x$trendline$y,
main = if(is.null(main)){''} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'b'} else {type},
xlab = if(is.null(xlab)){'Time'} else {xlab},
ylab = if(is.null(ylab)){'Singular Spectrum Trendline'} else {ylab},
ylim = if(is.null(ylim)){c(min(x$trendline$y),max(x$trendline$y))} else {ylim},
xlim = if(is.null(xlim)){c(min(x$trendline$dates),max(x$trendline$dates))} else {xlim},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main},
xaxt = "n")
if(inherits(x$trendline$dates, "Date")==T){ timelabels<-format(x$trendline$dates,time.format) ; axis(1,at=x$trendline$dates,labels=timelabels)} else { axis(1,at=x$trendline$dates) }
}
if(options$type == 'screeplot') {
if(is.null(options$ncomp)){options$ncomp = (1:3)}
plot(log(x$svd$d[options$ncomp]),
main = if(is.null(main)){'Scree--plot'} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'b'} else {type},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main},
xlab = if(is.null(xlab)){'Index'} else {xlab},
ylab = if(is.null(ylab)){'Eigenvalues (log-scale)'} else {ylab})
}
if(options$type == 'components') {
if(is.null(options$ncomp)){options$ncomp = c(1:round(dim(x$erc[[1]])[2]))}
if( max(options$ncomp) > dim(x$erc[[1]])[2] ) {
      print( paste('Please choose ERC indices in the range 1 to', dim(x$erc[[1]])[2]) )
} else {
coldim <- options$ncomp
par(mfrow = c(1, length(coldim)))
for(j in coldim) {
plot(x$erc[[1]][,j],
main = if(is.null(main)){paste("ERC -", j)} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type},
xlab = if(is.null(xlab)){''} else {xlab},
ylab = if(is.null(ylab)){''} else {ylab},
cex.lab = if(is.null(cex.lab)){0.8} else {cex.lab},
cex.axis = if(is.null(cex.axis)){0.8} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main} )
}
}
}
if(options$type == "cpgram") { cpgram(x$residuals$y,main = "") }
}
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/sst.R |
tsframe <- function(dates, y)
UseMethod("tsframe")
tsframe.default <- function(dates, y) {
y = as.matrix(y); n <- dim(y)[1]; D <- dim(y)[2]
    if(D != 1) { stop('Please provide a univariate time series as input data.') }
    if (inherits(dates, "Date")) dates <- as.Date(dates)
outputs <- list(dates = dates, y = y, n=n, D=1, call = match.call())
class(outputs) <- "tsframe"
return(outputs)
}
plot.tsframe <- function(x, time.format="%m-%y", col = NULL, lty = NULL, main = NULL, type = NULL, pch = NULL, lwd = NULL,
ylab = NULL,xlab = NULL, ylim = NULL, xlim = NULL,cex.lab=NULL, cex.axis=NULL,cex.main=NULL, ...) {
plot(x$dates, x$y,
main = if(is.null(main)){''} else {main},
col = if(is.null(col)){'black'} else {col},
lty = if(is.null(lty)){1} else {lty},
pch = if(is.null(pch)){1} else {pch},
lwd = if(is.null(lwd)){1} else {lwd},
type = if(is.null(type)){'l'} else {type},
xlab = if(is.null(xlab)){'Time'} else {xlab},
ylab = if(is.null(ylab)){''} else {ylab},
ylim = if(is.null(ylim)){ c(min(x$y), max(x$y)) } else {ylim},
xlim = if(is.null(xlim)){c(min(x$dates),max(x$dates))} else {xlim},
cex.lab = if(is.null(cex.lab)){1} else {cex.lab},
cex.axis = if(is.null(cex.axis)){1} else {cex.axis},
cex.main = if(is.null(cex.main)){1} else {cex.main},
xaxt="n")
if(inherits(x$dates, "Date")==T){ timelabels<-format(x$dates,time.format) ; axis(1,at=x$dates,labels=timelabels,cex.axis = if(is.null(cex.axis)){1} else {cex.axis})} else { axis(1,at=x$dates,cex.axis = if(is.null(cex.axis)){1} else {cex.axis}) }
}
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/tsframe.R |
###############################################################
#### Core functions in ASSA package ###########################
###############################################################
trajectory <- function(x, l = length(x)%/%2, k = length(x)%/%2){
# Trajectory matrix
Z <- matrix(x[1:k], nrow = l, ncol = k, byrow = T)
for (i in 1:(l - 1))
Z[i + 1, ] <- x[-(1:i)][1:k]
return(Z)
}
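## Illustration: trajectory() builds the l x k trajectory (Hankel) matrix whose
## columns are the lagged windows of x; for example,
## trajectory(1:5, l = 3, k = 3)
## #      [,1] [,2] [,3]
## # [1,]    1    2    3
## # [2,]    2    3    4
## # [3,]    3    4    5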
Y_i <- function(Y, SVD, i) {
v_i <- t(Y)%*%SVD$u[,i] / sqrt(SVD$d[i])
y_i <- sqrt(SVD$d[i])*SVD$u[,i]%*%t(v_i)
return(y_i) } # Output: i-th matrix corresponding to the trajectory matrix (Y) decomposition.
YYI_i <- function(Y, SVD, i, l, k) {
U = matrix(0,l,l)
for(j in 1:i){U = U + SVD$u[,j]%*%t(SVD$u[,j])}
ret <- .Fortran('yyi_', YA = as.matrix(Y[ , , 1]), YB = as.matrix(Y[ , , 2]),
U = as.matrix(U), l = as.integer(l), k = as.integer(k),
ansA = matrix(0,nrow = l, ncol = k) ,
ansB = matrix(0,nrow = l, ncol = k), PACKAGE = "ASSA")
res <- .Fortran('yyi_', YA = as.matrix(Y[ , , 1]), YB = as.matrix(Y[ , , 2]),
U = as.matrix(diag(l)-U), l = as.integer(l), k = as.integer(k),
ansA = matrix(0,nrow = l, ncol = k) ,
ansB = matrix(0,nrow = l, ncol = k), PACKAGE = "ASSA")
Y_i = list(Y_iA = ret$ansA, Y_iB = ret$ansB, E_A =res$ansA, E_B =res$ansB)
return(Y_i) } # Output: list with matrices Yi.
YYI_i.erc <- function(Y, SVD, i, l, k) {
U = SVD$u[,i]%*%t(SVD$u[,i])
ret <- .Fortran('yyi_', YA = as.matrix(Y[ , , 1]), YB = as.matrix(Y[ , , 2]),
U = as.matrix(U), l = as.integer(l), k = as.integer(k),
ansA = matrix(0,nrow = l, ncol = k) ,
ansB = matrix(0,nrow = l, ncol = k), PACKAGE = "ASSA")
Y_i = list(Y_iA = ret$ansA, Y_iB = ret$ansB)
return(Y_i) } #
dbar <- function(Y, l, k) {
    ## R wrapper for the FORTRAN routine DBAR: performs the diagonal averaging
    ## (Hankelization) of the l x k matrix Y into a series of length n = k + l - 1.
n <- k + l - 1
run <- .Fortran('dbar_', Y = as.matrix(Y, nrow = l, ncol = k),
l = as.integer(l), k = as.integer(k),
answer = numeric(n), PACKAGE = "ASSA")
return(run$answer)
}
autocov <- function(residuos) {
## Defines R wrapper function to call the FORTRAN function AUTOCOV
n = dim(residuos)[1]
residuos = scale(residuos, center = TRUE, scale = FALSE)
run <- .Fortran('autocov_', e = as.matrix(residuos, nrow = n , ncol = 2),
n = as.integer(n), answer = numeric(n), PACKAGE = "ASSA")
return(run$answer)
}
## Simplex projections:
simp.proj <- function(p) {
p <- as.vector(p)
p.sort <- sort(p, decreasing = TRUE);
n <- length(p.sort)
j <- 1
while(p.sort[j] + (1 - sum(p.sort[1:j])) / j > 0 & j <= n)
j <- j + 1
j <- j - 1
lambda <- (1 - sum(p.sort[1:j])) / j
p.simp <- rep(0, n)
for(i in 1:n)
p.simp[i] <- max(p[i] + lambda, 0)
return(p.simp)
}
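## Illustration: simp.proj() computes the Euclidean projection of p onto the
## probability simplex (entries nonnegative and summing to one); for example,
## simp.proj(c(0.5, 0.7, 0.1))
## # [1] 0.4 0.6 0.0    (here lambda = -0.1 and the third entry is truncated at 0)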
Var.Est <-function(A,B){
try(if(is.array(A)!=T) stop('Y must be an array'))
try(if(is.array(B)!=T) stop('X must be an array'))
try(if(dim(A)[3]!=2) stop('Not suitable dimensions for the array Y, please check.'))
try(if(dim(B)[3]!=2) stop('Not suitable dimensions for the array X, please check.'))
l=dim(A)[1];k=dim(A)[2]
outer <- 2*A[,,1]%*%t(B[,,1]) + A[,,1]%*%t(B[,,2]) + A[,,2]%*%t(B[,,1]) + 2*A[,,2]%*%t(B[,,2])
# var <- (2*scale(A[,,1],scale=FALSE)%*%t(scale(B[,,1],scale=FALSE)) + scale(A[,,1],scale=FALSE)%*%t(scale(B[,,2],scale=FALSE)) +
# scale(A[,,2],scale=FALSE)%*%t(scale(B[,,1],scale=FALSE)) + 2*scale(A[,,2],scale=FALSE)%*%t(scale(B[,,2],scale=FALSE)))/(6*k)
output <- list(outer.product = outer) #
return(output)
}
## crit <- 1.628/(sqrt(mp) + 0.12 + 0.11/sqrt(mp)) # p-val = 0.01
cpgram2 <- function(ts, taper = 0.1, plot = FALSE,
main = paste("Series: ", deparse(substitute(ts))),
ci.col = "blue") {
x <- as.vector(ts)
x <- x[!is.na(x)]
x <- spec.taper(scale(x, TRUE, FALSE), p = taper)
y <- Mod(fft(x))^2 / length(x)
y[1L] <- 0
n <- length(x)
x <- (0:(n/2)) * frequency(ts)/n
if (length(x)%%2 == 0) {
n <- length(x) - 1
y <- y[1L:n]
x <- x[1L:n]
} else {
y <- y[seq_along(x)]
}
xm <- frequency(ts) / 2
mp <- length(x) - 1
crit <- 1.358/(sqrt(mp) + 0.12 + 0.11 / sqrt(mp)) # p-val = 0.05
## Our modifications to cpgram starts here:
## D_mp is our KS statistic:
D_mp <- max(abs(cumsum(y) / sum(y) - seq(0, 1, length.out = mp + 1)))
reject <- as.numeric(D_mp >= crit) # Rejection flag.
## Compute area under the cpgram:
area.under <- sum(diff(x)[1]*cumsum(y)/sum(y))
area.crit <- as.numeric(area.under >= xm/2)
  ## out = 1: reject the null hypothesis that the errors are white noise.
if(plot == TRUE) {
oldpty <- par(pty = "s")
on.exit(par(oldpty))
plot(x, cumsum(y)/sum(y), type = "s", xlim = c(0, xm),
ylim = c(0,1), xaxs = "i",
yaxs = "i", xlab = "frequency", ylab = "")
lines(c(0, xm * (1 - crit)), c(crit, 1), col = ci.col, lty = 2)
lines(c(xm * crit, xm), c(0, 1 - crit), col = ci.col, lty = 2)
title(main = main)
}
if(reject == 1 & area.crit > xm/2) { out <- 1 } else { out <- 0 }
return(out = out)
}
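## Illustration (assuming plain numeric input, so frequency(ts) = 1): cpgram2()
## returns 1 when the Kolmogorov-Smirnov band around the cumulative periodogram
## rejects the white-noise null, and 0 otherwise.
## set.seed(1)
## cpgram2(rnorm(200))                      # typically 0: compatible with white noise
## cpgram2(sin(seq(0, 30, length = 200)))   # typically 1: systematic signal remains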
#### ECIP estimation:
ecip <- function(residuos, plot.flag = FALSE){
mp = dim(residuos)[1]
omega = unique( (2*pi/mp)*( ceiling( (1:(mp-1))/2 ) ) )
hat.gamma <- c(autocov(residuos))
M = mp - 1
I = c() # Periodogram estimation.
for(i in 1:length(omega)){
I[i] = hat.gamma[1] + 2*(sum( cos(omega[i]*(1:M))*hat.gamma[2:(M+1)] ))} # BD Eq in Spectral Analysis of Signals (P. Stoica)
I = c(mp*(mean(colMeans(residuos)))^2 ,I)
c = sum(abs(I)) #
empirical.cdf <- cumsum(abs(I))/c
unif.ref <- cumsum(rep(1/length(I),length(I)))
crit <- 1.36*(ceiling((mp-1)/2) - 1)^{-1/2} # p-val = 0.05
# crit <- 1.63*(ceiling((mp-1)/2) - 1)^{-1/2} # p-val = 0.01
  flag1 = sum(empirical.cdf > unif.ref + crit) ### ">0" when the null (white noise) is rejected.
  flag2 = sum(empirical.cdf < unif.ref - crit) ### ">0" when the null (white noise) is rejected.
if(plot.flag == TRUE){
plot(empirical.cdf, type = 'l', xlab = '', ylab = '',ylim = c(0,1))
points(unif.ref + crit, type = 'l', col = 'blue', lty = 2)
points(unif.ref - crit, type = 'l', col = 'blue', lty = 2)
}
return(max(flag1,flag2))
}
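## Illustration: ecip() runs the analogous cumulative-periodogram band check on
## bivariate interval residuals (lower and upper bounds in the two columns);
## the returned value is 0 when the residuals look like white noise and
## positive when the band is crossed.
## set.seed(1)
## ecip(cbind(rnorm(120), rnorm(120)))                   # typically 0
## ecip(cbind(rnorm(120), rnorm(120)), plot.flag = TRUE) # also draws the diagnostic plot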
###########################################
### Other internal functions ##
###########################################
###########################################
'%!in%' <- function(x,y) {!('%in%'(x,y))}
'%notin%' <- Negate('%in%')
########################################### END.
| /scratch/gouwar.j/cran-all/cranData/ASSA/R/workhorse.R |
#' A class to encapsulate the adaptive clinical trial design of Lai, Lavori and Liao
#'
#' @description `ASSISTDesign` objects are used to design, simulate and analyze
#' adaptive group sequential clinical trial with three stages. For details refer to the paper
#' _Adaptive Choice of Patient Subgroup for Comparing Two Treatments_
#' by Tze Leung Lai and Philip W. Lavori and Olivia Yueh-Wen Liao. Contemporary Clinical Trials,
#' Vol. 39, No. 2, pp 191-200 (2014).
#'
#' @seealso `LLL.SETTINGS` for an explanation of trial parameters
#' @importFrom R6 R6Class
#' @importFrom dplyr mutate summarize filter group_by ungroup n select
#' @importFrom magrittr %>%
#' @importFrom knitr kable
#' @importFrom mvtnorm pmvnorm Miwa
#' @importFrom stats uniroot rnorm pnorm qnorm
#' @export
#' @examples
#' \dontrun{
#' data(LLL.SETTINGS)
#' prevalence <- LLL.SETTINGS$prevalences$table1
#' scenario <- LLL.SETTINGS$scenarios$S0
#' designParameters <- list(prevalence = prevalence,
#' mean = scenario$mean,
#' sd = scenario$sd)
#' designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
#' designParameters = designParameters)
#' print(designA)
#' ## A realistic design uses 5000 simulations or more!
#' result <- designA$explore(showProgress = interactive())
#' analysis <- designA$analyze(result)
#' designA$summary(analysis)
#' }
## For full examples, try:
## browseURL(system.file("full_doc/ASSISTant.html", package="ASSISTant"))
ASSISTDesign <-
R6Class(classname = "ASSISTDesign",
private = list(
designParameters = NA,
trialParameters = NA,
boundaries = NA,
discreteData = FALSE,
checkParameters = function(designParameters, trialParameters, discreteData) {
if (length(trialParameters$N) != NUM_STAGES) {
stop(sprintf("Improper sample size vector; this design assumes %d stages", NUM_STAGES))
}
if (!integerInRange(trialParameters$N, low = 1)) {
stop("Improper values for sample sizes")
}
if (!identical(order(trialParameters$N), seq_along(trialParameters$N))) {
stop("Improper values for sample sizes; need increasing sequence")
}
prevalence <- designParameters$prevalence
J <- length(prevalence)
if (!scalarInRange(J, low = 2, high = 10)) {
stop("Improper number of subgroups; need at least 2; max 10")
}
if (!scalarInRange(trialParameters$type1Error, low=0.0001, 0.2)) {
stop("Improper type 1 error")
}
if (!scalarInRange(trialParameters$type2Error, low=0.0001, 0.3)) {
stop("Improper type 2 error")
}
if (!scalarInRange(trialParameters$eps, low=1e-5, high = 1 - 1e-5)) {
stop("Improper epsilon specified")
}
if (any(prevalence <= 0)) {
stop("Improper prevalence specified")
}
if (discreteData) {
support <- designParameters$distSupport
## Assume Rankin is 0:6 unless specified in designParameters
if (is.null(support)) {
support <- 0L:6L
}
K <- length(support)
ctlDist <- designParameters$ctlDist
if (!is.matrix(ctlDist)) {
if ((length(ctlDist) != K) || any(ctlDist < 0)) {
stop(sprintf("Improper ctlDist; need a %d-length probability vector for Rankin scores", K))
}
} else {
if ((nrow(ctlDist) != K) || (ncol(ctlDist) != J) || any(ctlDist < 0)) {
stop(sprintf("Improper ctlDist; need a %d x %d matrix, with each column a probability vector", K, J))
}
}
trtDist <- designParameters$trtDist
if ((nrow(trtDist) != K) || (ncol(trtDist) != J) || any(trtDist < 0)) {
stop(sprintf("Improper trtDist; need a %d x %d matrix, with each column a probability vector", K, J))
}
} else {
                           if (!isTRUE(all.equal(dim(designParameters$mean), c(2, J)))) {
                               stop("Mean dimension does not match number of groups")
                           }
                           if (!isTRUE(all.equal(dim(designParameters$sd), c(2, J)))) {
                               stop("SD dimension does not match number of groups")
                           }
}
if (!all(designParameters$sd > 0)) {
stop("SDs are not all positive")
}
}
TRUE
},
selectSubgroup = function (data) {
data <- data[order(data$subGroup), ]
## ASSUME EACH GROUP is represented!!
## FIX needed if you use names for groups instead of 0, 1, 2, .., J
counts <- cumsum(table(data$subGroup))
wcx <- sapply(counts[-length(counts)],
function(n) {
d <- data[seq_len(n), ]
score <- split(d$score, d$trt)
wilcoxon(score$`1`, score$`0`)
})
which.max(wcx)
},
doInterimLook = function (data, stage, recordStats = FALSE) {
d <- split(data$score, data$trt)
nc <- length(d$`0`) ## control
nt <- length(d$`1`) ## treatment
Nl <- sqrt(nt * nc / (nt + nc))
wcx <- wilcoxon(d$`1`, d$`0`)
wcx.fut <- NA
bdy <- private$boundaries
if (stage < NUM_STAGES) { ## all but last stage
if (wcx >= bdy["b"]) { ## Reject
decision <- 1
} else {
effectSize <- private$trialParameters$effectSize
wcx.fut <- wilcoxon(d$`1`, d$`0`, theta = effectSize)
if (wcx.fut < bdy["btilde"]) { ## Futility, so accept
decision <- -1
} else {
decision <- 0 ## continue
}
}
} else { ## the last stage
if (wcx >= bdy["c"]) { ## Final boundary
decision <- 1 ## reject
} else {
decision <- -1 ## accept
}
}
if (recordStats) {
stats <- private$getStat(data, stage)
} else {
stats <- NA
}
list(decision = decision, wcx = wcx, wcx.fut = wcx.fut,
Nl = Nl, stats = stats)
},
getStat = function(d, stage, sigma = 1) {
## THIS HAS TO BE ARTICULATED WITH the trial History columns
## IN explore()!!
## number of columns for each stage is
## decision, wilcoxon, wilcoxon.futility, Nl (= 4L)
## means, sds and N for control and treatment, wilcoxon (= 7L) for each group
                 ## decision, wilcoxon, wilcoxon.futility, Nl, subgroup, stage,
                 ## lost patients (= 7L more) for selected subgroup IHat
                 ## trialHistory <- matrix(NA, nrow = numberOfSimulations,
                 ##                        ncol = NUM_STAGES * (4L + 7L * J) + 7L + 2L)
##
## d is the data so far in the trial
## stage is the stage at which the trial stopped (1, 2, 3)
N <- private$trialParameters$N
J <- private$designParameters$J
subGroup <- max(d$subGroup) ## Restrict to the groups we see
result <- unlist(lapply(seq_len(J),
function(j) {
if (j <= subGroup) {
data <- subset(d, subGroup <= j)
splitData <- split(data$score, data$trt)
c(wilcoxon(splitData$`1`, splitData$`0`),
length(splitData$`0`),
length(splitData$`1`),
mean(splitData$`0`),
mean(splitData$`1`),
sd(splitData$`0`),
sd(splitData$`1`))
} else {
rep(NA, 7L)
}
}))
## Drop the first three columns because of above
names(result) <- colNamesForStage(stage, J)[-(1:4)]
result
}
),
public = list(
#' @description
#' Create a new `ASSISTDesign` instance using the parameters specified.
                 #' @param designParameters parameters of the experimental design. Must contain appropriate distributions to sample from, if `discreteData = TRUE`
#' @param trialParameters the trial parameters, such as sample size etc.
#' @param discreteData a flag indicating that a discrete distribution is to be used for the Rankin scores
#' @param boundaries decision boundaries to use for interim looks, a named vector of `btilde`, `b` and `c` values
#' @return a new `AssistDesign` object
initialize = function(designParameters, trialParameters, discreteData = FALSE, boundaries) {
## Check parameters
private$checkParameters(designParameters, trialParameters, discreteData)
private$discreteData <- discreteData
## Conform parameters
private$designParameters <- conformParameters(designParameters, discreteData)
trialParameters$effectSize <- (qnorm(1 - trialParameters$type1Error) +
qnorm(1 - trialParameters$type2Error)) /
sqrt(3 * trialParameters$N[3])
private$trialParameters <- trialParameters
if (missing(boundaries)) {
private$boundaries <- self$computeCriticalValues()
} else {
self$setBoundaries(boundaries)
}
},
#' @description
#' return the designParameters field
getDesignParameters = function() private$designParameters,
#' @description
#' return the trialParameters field
getTrialParameters = function() private$trialParameters,
#' @description
#' return the boundaries field
getBoundaries = function() private$boundaries,
#' @description
#' Set the boundaries field
#' @param value a named vector of `btilde`, `b` and `c` values
setBoundaries = function(value) {
                     if (!identical(names(value), c("btilde", "b", "c"))) {
stop("setBoundaries: Need names 'btilde', 'b', and 'c' in order")
}
private$boundaries <- value
},
#' @description
#' Print details of the design to console
print = function() {
designParameters <- private$designParameters
cat("Design Parameters:\n")
cat(sprintf(" Number of Groups: %d\n", designParameters$J))
cat(" Prevalence:")
prevalence <- matrix(designParameters$prevalence, nrow = 1)
colnames(prevalence) <- names(designParameters$prevalence)
print(knitr::kable(prevalence))
cat(sprintf("\n Using Discrete Rankin scores? %s\n\n", private$discreteData))
if (private$discreteData) {
support <- designParameters$distSupport
cat(" Null Rankin Distribution:")
print(knitr::kable(designParameters$ctlDist))
cat(" Null Mean and SD")
print(knitr::kable(apply(designParameters$ctlDist, 2,
computeMeanAndSD, support = support)))
cat(" Alternative Rankin Distribution:\n")
print(knitr::kable(designParameters$trtDist))
cat(" Alternative Mean and SD")
print(knitr::kable(apply(designParameters$trtDist, 2,
computeMeanAndSD, support = support)))
} else {
cat(" Normal Rankin Distribution means (null row, alt. row):\n")
print(knitr::kable(designParameters$mean))
cat("\n Normal Rankin Distribution SDs (null row, alt. row):\n")
print(knitr::kable(designParameters$sd))
}
cat("\nTrial Parameters:\n")
str(private$trialParameters)
cat("\nBoundaries:\n")
boundaries <- matrix(private$boundaries, nrow = 1)
colnames(boundaries) <- names(private$boundaries)
print(knitr::kable(boundaries))
},
#' @description
#' Compute the critical boundary values \eqn{\tilde{b}}, \eqn{b} and \eqn{c} for futility, efficacy and final efficacy decisions. This is time consuming so cache where possible.
#' @return a named vector of critical values with names `btilde`, `b`, and `c` as in the paper
computeCriticalValues = function() {
trialParameters <- private$trialParameters
computeMHPBoundaries(prevalence = private$designParameters$prevalence,
N = trialParameters$N,
alpha = trialParameters$type1Error,
beta = trialParameters$type2Error,
eps = trialParameters$eps)
},
#' @description
#' Explore the design using the specified number of simulations and random number seed and other parameters.
#' @param numberOfSimulations default number of simulations is 5000
#' @param rngSeed default seed is 12345
#' @param trueParameters the state of nature, by default the value of `self$getDesignParameters()` as would be the case for a Type I error calculation. If changed, would yield power.
#' @param recordStats a boolean flag (default `TRUE`) to record statistics
#' @param showProgress a boolean flag to show progress, default `TRUE`
                 #' @param fixedSampleSize a boolean flag indicating that patients lost after a futile overall look are not made up, default `FALSE`.
#' @param saveRawData a flag (default `FALSE`) to indicate if raw data has to be saved
#' @return a list of results
explore = function (numberOfSimulations = 5000, rngSeed = 12345,
trueParameters = self$getDesignParameters(),
recordStats = TRUE,
showProgress = TRUE,
fixedSampleSize = FALSE,
saveRawData = FALSE) {
##browser()
## Save rng state
oldRngState <- if (exists(".Random.seed", envir = .GlobalEnv)) {
get(x = ".Random.seed", envir=.GlobalEnv)
} else {
NULL
}
## set our seed
set.seed(seed = rngSeed, normal.kind = NULL)
## SOME CHECKS needed here when trueParameters is provided
## for conformity
trialParameters <- private$trialParameters
trueParameters <- conformParameters(trueParameters, private$discreteData)
J <- trueParameters$J
support <- trueParameters$distSupport
glrBoundary <- private$boundaries
prevalence <- trueParameters$prevalence
## We record the entire trial history
## number of columns for each stage is
              ## decision, wilcoxon, wilcoxon.futility, Nl (= 4L)
              ## wilcoxon, N, mean and sd for control and treatment (= 7L) for each group
              ## decision, wilcoxon, wilcoxon.futility, Nl, subgroup, stage,
              ## lost patients (= 7L) for selected subgroup IHat
              ## + 1 for the confidence interval bound and + 1 for the stopping stage
##
trialHistoryColumnNames <- c(unlist(lapply(seq_len(NUM_STAGES),
colNamesForStage, J)),
IHAT_COL_NAMES,
CI_COL_NAME,
STAGE_COL_NAME)
trialHistory <- matrix(NA, nrow = numberOfSimulations,
ncol = length(trialHistoryColumnNames))
colnames(trialHistory) <- trialHistoryColumnNames
if (showProgress) {
pb <- txtProgressBar(min = 0, max = numberOfSimulations, style = 3)
}
if (saveRawData) {
rawData <- vector(mode = "list", length = numberOfSimulations)
}
discreteData <- private$discreteData
for (i in seq_len(numberOfSimulations)) {
## Generate Empty dataset
if (discreteData) {
thisTrialData <-
trialData <- generateDiscreteData(prevalence = prevalence,
N = 0,
support = support,
ctlDist = trueParameters$ctlDist,
trtDist = trueParameters$trtDist)
} else {
thisTrialData <-
trialData <- generateNormalData(prevalence = prevalence,
N = 0,
mean = trueParameters$mean,
sd = trueParameters$sd)
}
## H_J is tested first
subGroup <- J
N <- c(0, trialParameters$N)
## decisions always follow: 0 = continue, 1 = reject, -1 = accept
for (stage in seq_len(NUM_STAGES)) {
## Generate data for this stage
groupIndices <- seq_len(subGroup)
if (fixedSampleSize) {
sampleSizeForThisStage <- N[stage + 1] - N[stage]
} else {
sampleSizeForThisStage <- N[stage + 1] - nrow(trialData)
}
if (discreteData) {
thisStageData <- generateDiscreteData(prevalence = prevalence[groupIndices],
N = sampleSizeForThisStage,
support = support,
ctlDist = trueParameters$ctlDist,
trtDist = trueParameters$trtDist[, groupIndices,
drop = FALSE])
} else {
thisStageData <- generateNormalData(prevalence = prevalence[groupIndices],
N = sampleSizeForThisStage,
mean = trueParameters$mean[, groupIndices,
drop = FALSE],
sd = trueParameters$sd[, groupIndices,
drop = FALSE])
}
## Combine it with previous data
trialData <- rbind(trialData, thisStageData)
if (saveRawData) {
thisTrialData <- rbind(thisTrialData, thisStageData)
}
## doInterimLook is guaranteed to return a decision 1 or -1 at stage 3
interimResult <- private$doInterimLook(data = trialData,
stage = stage,
recordStats = recordStats)
resultNames <- colNamesForStage(stage, J)
if (recordStats) {
trialHistory[i, resultNames[-(1:4)]] <- interimResult$stats
}
trialHistory[i, resultNames[1:4] ] <- unlist(interimResult[1:4], use.names = FALSE)
if (interimResult$decision == 1L) {
## H_{subGroup} was rejected
## so trial stops
break
} else if (interimResult$decision == -1L) {
if (subGroup == J) {
## Select a subgroup and perform an interim look using that subgroup
subGroup <- private$selectSubgroup(trialData)
## Restrict the data from now on to those in the subGroup
## So we lose some patients when we restrict to subGroup!
prevN <- nrow(trialData)
trialData <- trialData[trialData$subGroup <= subGroup, ]
interimResult <- private$doInterimLook(data = trialData,
stage = stage,
recordStats = FALSE)
## Append this sub-result to the interim result already obtained
## so that both the overall and the subgroup results are retained
trialHistory[i, IHAT_COL_NAMES] <- c(unlist(interimResult[1:4], use.names = FALSE),
subGroup, stage, prevN - nrow(trialData))
if (interimResult$decision != 0L) {
## H_{\hat{I}} was accepted or rejected
## so trial stops
break
}
} else {
## Trial was futile for H_{\hat{I}}
## So stop
break
}
} else {
## Trial continues to next stage
##
}
}
## Record stage at which trial stopped
trialHistory[i, STAGE_COL_NAME] <- stage
##
## sigmahat is fixed at 1 for now
##
sigmahat <- 1
if (stage == 3) {
cut <- glrBoundary["c"] # c
} else {
cut <- glrBoundary["b"]
}
trialHistory[i, CI_COL_NAME] <- (interimResult$wcx - cut) * sigmahat / interimResult$Nl
if (showProgress) {
setTxtProgressBar(pb, i)
}
if (saveRawData) {
rawData[[i]] <- thisTrialData
}
}
if (showProgress) {
close(pb)
}
## Restore rng state
if (is.null(oldRngState)) {
rm(".Random.seed", envir = .GlobalEnv)
} else {
assign(x = ".Random.seed", value = oldRngState, envir = .GlobalEnv)
}
if (saveRawData) {
list(trialHistory = trialHistory, trueParameters = trueParameters,
rawData = rawData)
} else {
list(trialHistory = trialHistory, trueParameters = trueParameters)
}
},
#' @description
#' Perform an interim look on trial data
#' @param trialData trial data frame
#' @param stage the trial stage
#' @param recordStats a boolean flag to record all statistics
                 #' @param fixedSampleSize a boolean flag indicating that patients lost after a futile overall look are not made up, default `FALSE`
#' @return the trial history
performInterimLook = function (trialData, stage, recordStats = FALSE, fixedSampleSize = FALSE) {
## Functionally equivalent to private$doInterimLook, but
## more error checking is done on data and stage
expectedDFNames <- c("subGroup", "trt", "score")
if (any(is.na(match(expectedDFNames, names(trialData))))) {
stop("Data is missing one or more of columns 'subGroup', 'trt', 'score'")
}
if (!(scalarIntegerInRange(stage, low = 1, high = 3))) {
stop("Stage has to be between 1 and 3 (inclusive)")
}
n <- nrow(trialData)
N <- c(0, private$trialParameters$N)
## if (nrow(trialData) != N[stage]) {
## stop("Data size does not match design sample size!")
## }
## We have to perform interim looks at all previous stages as well..
## Remember that the data is cumulative, includes _ALL_ subjects
subGroup <- J <- private$designParameters$J
trialHistoryColumnNames <- c(unlist(lapply(seq_len(stage),
colNamesForStage, J)),
IHAT_COL_NAMES,
CI_COL_NAME,
STAGE_COL_NAME)
trialHistory <- matrix(NA, nrow = 1,
ncol = length(trialHistoryColumnNames))
colnames(trialHistory) <- trialHistoryColumnNames
##
## sigmahat is fixed at 1 for now
##
                     sigmahat <- 1
                     glrBoundary <- private$boundaries
                     ## Initialize
                     stageData <- trialData[0, ]
                     dataPtr <- 0
                     for (st in seq_len(stage)) {
                         if (fixedSampleSize) {
                             sampleSizeForThisStage <- N[st + 1] - N[st]
                         } else {
                             sampleSizeForThisStage <- N[st + 1] - nrow(stageData)
                         }
                         stageData <- rbind(stageData,
                                            trialData[(dataPtr + 1):(dataPtr + sampleSizeForThisStage), ])
                         dataPtr <- dataPtr + sampleSizeForThisStage
                         interimResult <- private$doInterimLook(stageData, st, recordStats)
resultNames <- colNamesForStage(st, J)
if (recordStats) {
trialHistory[1, resultNames[-(1:4)]] <- interimResult$stats
}
trialHistory[1, resultNames[1:4] ] <- unlist(interimResult[1:4], use.names = FALSE)
if (interimResult$decision == 1L) {
## H_{subGroup} was rejected
## so trial stops
trialHistory[1, STAGE_COL_NAME] <- st
if (st == 3) {
cut <- glrBoundary["c"] # c
} else {
cut <- glrBoundary["b"]
}
trialHistory[1, CI_COL_NAME] <- (interimResult$wcx - cut) * sigmahat / interimResult$Nl
break
} else if (interimResult$decision == -1L) {
if (subGroup == J) {
## Select a subgroup and perform an interim look using that subgroup
subGroup <- private$selectSubgroup(stageData)
## Restrict the data from now on to those in the subGroup
## So we lose some patients when we restrict to subGroup!
prevN <- nrow(stageData)
stageData <- stageData[stageData$subGroup <= subGroup, ]
interimResult <- private$doInterimLook(data = stageData,
stage = st,
recordStats = FALSE)
## Append this sub-result to the interim result already obtained
## so that both the overall and the subgroup results are retained
trialHistory[1, IHAT_COL_NAMES] <- c(unlist(interimResult[1:4], use.names = FALSE),
subGroup, st, prevN - nrow(stageData))
if (interimResult$decision != 0L) {
## H_{\hat{I}} was accepted or rejected
## so trial stops
trialHistory[1, STAGE_COL_NAME] <- st
if (st == 3) {
cut <- glrBoundary["c"] # c
} else {
cut <- glrBoundary["b"]
}
trialHistory[1, CI_COL_NAME] <- (interimResult$wcx - cut) * sigmahat / interimResult$Nl
break
}
} else {
## Trial was futile for H_{\hat{I}}
## So stop
trialHistory[1, STAGE_COL_NAME] <- st
if (st == 3) {
cut <- glrBoundary["c"] # c
} else {
cut <- glrBoundary["b"]
}
trialHistory[1, CI_COL_NAME] <- (interimResult$wcx - cut) * sigmahat / interimResult$Nl
break
}
}
}
trialHistory
},
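                 ## Illustrative sketch (not run): given a design object `d`
                 ## built as in the class examples and a cumulative data frame
                 ## `df` with columns subGroup, trt and score, an interim look
                 ## at the second stage would be
                 ##   d$performInterimLook(trialData = df, stage = 2)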
#' @description
#' Analyze the exploration data from trial
#' @param trialExploration the result of a call to `explore()` to simulate the design
#' @return Return a list of summary quantities
analyze = function (trialExploration) {
trialHistory <- as.data.frame(trialExploration$trialHistory)
trueParameters <- trialExploration$trueParameters
numberOfSimulations <- nrow(trialHistory)
trialParameters <- private$trialParameters
designParameters <- private$designParameters
J <- designParameters$J
if (private$discreteData) {
mu <- computeMeanAndSD(probVec = designParameters$ctlDist,
support = designParameters$distSupport)["mean"]
trueTheta <- cumsum(designParameters$prevalence * mu)
} else {
## These two lines were in versions prior to 1.3-15. They seem wrong!
## trueTheta <- cumsum(designParameters$mean[2, ]) / seq_len(J)
## trueDelta <- designParameters$mean[2, ]
trueTheta <- cumsum(designParameters$prevalence * designParameters$mean[2, ])
}
trialHistory %>%
dplyr::mutate(
## Compute reject.ITT
## i.e. no subgroup is chosen and a decision is only made on H_J!
reject.ITT = (is.na(Ihat) &
(decision_1 == 1 | decision_2 == 1 | decision_3 == 1)),
## Compute reject.subgp
## i.e. A subgroup is chosen and a rejection is made on the subgroup
                         reject.subgp = !is.na(Ihat) &
                             (((stage_Ihat == 1) &
                               (decision_Ihat == 1 | decision_2 == 1 | decision_3 == 1)) |
                              ((stage_Ihat == 2) & (decision_Ihat == 1 | decision_3 == 1)) |
                              ((stage_Ihat == 3) & (decision_Ihat == 1))),
## Fix up the NAs
reject.subgp = ifelse(is.na(reject.subgp), FALSE, reject.subgp),
## Did the trial stop before the last stage?
earlyStop = (exitStage < 3),
## Fix up lost, which is 0 if NA
lost = ifelse(is.na(lost), 0, lost),
## Did the trial reject the overall or subgroup null?
reject = (reject.ITT | reject.subgp),
## Did the trial stop before the last stage for efficacy?
earlyStopEff = (reject & earlyStop),
## Did the trial stop before the last stage for futility?
earlyStopFut = (!reject & earlyStop),
## If H_J is tested, Ihat is NA, so create a group variable
group = ifelse(is.na(Ihat), J, Ihat),
## for each group, we have the relevant true theta
theta = trueTheta[group]
) -> result
result %>%
dplyr::summarize(Rej_H0_ITT = mean(reject.ITT),
Rej_H0_subgp = mean(reject.subgp),
Rej_H0 = mean(reject)) ->
rejectStats
result %>%
dplyr::summarize(earlyStopEff = mean(earlyStopEff),
earlyStopFut = mean(earlyStopFut)) ->
earlyStopStats
## Proportion of rejections by subgroup
result %>%
dplyr::filter(reject) %>%
dplyr::group_by(group) %>%
dplyr::summarize(count = n()) %>%
dplyr::mutate(proportion = count / numberOfSimulations) ->
popReject
## Sample size at trial exit
exitRandSS <- trialParameters$N[result$exitStage]
## Sample size at exit taking loss into account
exitAnalyzeSS <- exitRandSS - result$lost
## Table of exit Stage and proportion of occurrence
result %>%
dplyr::group_by(exitStage) %>%
dplyr::summarize(count = n()) %>%
dplyr::mutate(proportion = count / numberOfSimulations) %>%
dplyr::select(exitStage, proportion) ->
stageAtExitProportion
## Table of futility by stage
result %>%
dplyr::group_by(stage_Ihat, Ihat) %>%
dplyr::summarize(count = n()) %>%
dplyr::filter(!is.na(stage_Ihat)) %>%
dplyr::select(stage_Ihat, Ihat, count) %>%
as.matrix -> temp
futilityTable <- matrix(0L, nrow = 3, ncol = J - 1)
for (i in seq_len(nrow(temp))) {
futilityTable[temp[i, 1], temp[i, 2]] <- temp[i, 3]
}
futilityTable <- cbind(seq_len(3), futilityTable)
colnames(futilityTable) <- c("FutilityStage", paste0("G", seq_len(J-1)))
## Table of loss statistics by stage (mean and sd)
result %>%
dplyr::group_by(stage_Ihat, Ihat) %>%
dplyr::filter(!is.na(stage_Ihat)) %>%
dplyr::summarize(mean = mean(lost), sd = sd(lost)) %>%
dplyr::rename(FutilityStage = stage_Ihat, selectedGroup = Ihat) ->
lossTable
## CI Report
result %>%
dplyr::group_by(group) %>%
dplyr::summarize(coverage = mean(bounds <= theta),
selectedCount = n(),
rejectedCount = sum(reject)) %>%
dplyr::select(coverage, selectedCount, rejectedCount) ->
coverage
result %>%
                         dplyr::summarize(overall = mean(bounds <= theta),
                                          rejection = mean(bounds[reject] <= theta[reject], na.rm = TRUE)) ->
overallAndRejectionCoverage
list(numberOfSimulations = numberOfSimulations,
reject = rejectStats,
earlyStopStats = earlyStopStats,
popReject = popReject,
                     lost_Stats = list(mean = mean(result$lost), sd = sd(result$lost)),
exitRandSS_Stats = list(mean = mean(exitRandSS), sd = sd(exitRandSS)),
exitAnalyzeSS_Stats = list(mean = mean(exitAnalyzeSS), sd = sd(exitAnalyzeSS)),
futilityTable = futilityTable,
lossTable = lossTable,
stageAtExitProportion = stageAtExitProportion,
coverage = coverage,
overallAndRejectionCoverage = overallAndRejectionCoverage)
},
#' @description
#' Print the operating characteristics of the design using the analysis data
#' @param analysis the analysis result from the `analyze()` call
summary = function(analysis) {
with(analysis, {
cat(sprintf("P(Reject H0_ITT) = %f; P(Reject H0_subgp) = %f; P(Reject H0) = %f\n",
reject$Rej_H0_ITT, reject$Rej_H0_subgp, reject$Rej_H0))
cat(sprintf("P(Early stop for efficacy [futility]) = %f [%f]\n",
earlyStopStats$earlyStopEff, earlyStopStats$earlyStopFut))
cat(sprintf("Mean [SD] Randomized N = %f [%f]\n",
exitRandSS_Stats$mean, exitRandSS_Stats$sd))
cat("\nStage at exit (proportion)\n")
print(knitr::kable(stageAtExitProportion))
cat(sprintf("\nMean [SD] Lost N = %f [%f]\n",
lost_Stats$mean, lost_Stats$sd))
cat(sprintf("Mean [SD] Analyzed N = %f [%f]\n",
exitAnalyzeSS_Stats$mean, exitAnalyzeSS_Stats$sd))
cat("\nMean loss by futility stage and subgroup\n")
print(knitr::kable(lossTable))
cat("\nChance of each subpopulation rejected\n")
print(knitr::kable(popReject))
cat("\nCounts by futility stage and subgroup choice\n")
print(knitr::kable(futilityTable))
cat("\nCI Statistics:")
cat('\nOverall coverage and coverage for rejections:')
print(knitr::kable(overallAndRejectionCoverage))
cat('\nP(theta_test is in the confidence interval)\n')
print(knitr::kable(coverage))
invisible()
})
}
))
#' A fixed sample design to compare against the adaptive clinical
#' trial design
#'
#' @description `ASSISTDesignB` objects are used to design a trial
#' with certain characteristics provided in the object instantiation
#' method. This design differs from `ASSISTDesign` only in how it
#' computes the critical boundaries, how it performs the interim
#' look, and what quantities are computed in a trial run.
#'
#' @seealso `ASSISTDesign` which is a superclass of this object
#' @importFrom R6 R6Class
#' @importFrom mvtnorm pmvnorm Miwa
#' @importFrom stats uniroot rnorm pnorm qnorm
#' @export
#' @examples
#' \dontrun{
#' data(LLL.SETTINGS)
#' prevalence <- LLL.SETTINGS$prevalences$table1
#' scenario <- LLL.SETTINGS$scenarios$S0
#' designParameters <- list(prevalence = prevalence,
#' mean = scenario$mean,
#' sd = scenario$sd)
#' designB <- ASSISTDesignB$new(trialParameters = LLL.SETTINGS$trialParameters,
#' designParameters = designParameters)
#' print(designB)
#' ## A realistic design uses 5000 simulations or more!
#' result <- designB$explore(showProgress = interactive())
#' analysis <- designB$analyze(result)
#' designB$summary(analysis)
#' }
#' ## For full examples, try:
#' ## browseURL(system.file("full_doc/ASSISTant.html", package="ASSISTant"))
#'
ASSISTDesignB <-
R6Class("ASSISTDesignB",
inherit = ASSISTDesign,
private = list(
doInterimLook = function (data) {
d <- split(data$score, data$trt)
wcx <- wilcoxon(d$`1`, d$`0`)
bdy <- private$boundaries
if (wcx >= bdy["cAlpha"]) { ## Final boundary
decision <- 1 ## reject
} else {
decision <- -1 ## accept
}
list(decision = decision, wcx = wcx)
},
                 ## Function for computing the critical boundary for the ITT analysis
mHP.ITT = function (mu.prime, Sigma.prime, alpha) {
J <- private$designParameters$J
## Derive interim eff boundary b.I for subgp
crossingProb <- function(c) {
f <- function(i) { #i=sub-population selected
integrate(function(x) {
sapply(x, function(x)
private$den.vs(x, i, mu.prime, Sigma.prime, c))}, c, Inf)$value
}
sum(sapply(seq_len(J - 1), function(i) f(i))) +
pnorm(c, lower.tail = FALSE) - alpha
}
uniroot(f = crossingProb, lower = 1, upper = 4, maxiter = 20)$root
}
),
public = list(
#' @description
#' Compute the critical boundary value \eqn{c_\alpha}
                 #' @return a named vector of a single value containing the value for `cAlpha`
                 computeCriticalValues = function() {
                     computeMHPBoundaryITT(prevalence = private$designParameters$prevalence,
                                           alpha = private$trialParameters$type1Error)
                 },
#' @description
#' Explore the design using the specified number of simulations, random number seed, and
#' further parameters.
#' @param numberOfSimulations default number of simulations is 100
#' @param rngSeed default seed is 12345
#' @param trueParameters the state of nature, by default the value of `self$getDesignParameters()` as would be the case for a Type I error calculation. If changed, would yield power.
#' @param showProgress a boolean flag to show progress, default `TRUE`
#' @param saveRawData a flag (default `FALSE`) to indicate if raw data has to be saved
#' @return a list of results
explore = function (numberOfSimulations = 100, rngSeed = 12345,
trueParameters = self$getDesignParameters(),
showProgress = TRUE,
saveRawData = FALSE) {
## Save rng state
oldRngState <- if (exists(".Random.seed", envir = .GlobalEnv)) {
get(x = ".Random.seed", envir=.GlobalEnv)
} else {
NULL
}
## set our seed
set.seed(seed = rngSeed, normal.kind = NULL)
trialParameters <- private$trialParameters
## SOME CHECKS needed here when trueParameters is provided
## for conformity
trueParameters <- conformParameters(trueParameters, private$discreteData)
                     support <- trueParameters$distSupport
                     J <- trueParameters$J
                     glrBoundary <- private$boundaries
                     naVec <- rep(NA, numberOfSimulations)
trialHistory <- data.frame(decision = naVec, select = naVec,
statistic = naVec,
matrix(0, numberOfSimulations, J))
if (showProgress) {
pb <- txtProgressBar(min = 0, max = numberOfSimulations, style = 3)
}
if (saveRawData) {
rawData <- data.frame(simId = integer(0),
subGroup = integer(0),
trt = integer(0),
score = numeric(0))
}
for (i in seq_len(numberOfSimulations)) {
if (private$discreteData) {
dataSoFar <- generateDiscreteData(prevalence = trueParameters$prevalence,
N = trialParameters$N[3],
support = support,
ctlDist = trueParameters$ctlDist,
trtDist = trueParameters$trtDist)
} else {
dataSoFar <- generateNormalData(prevalence = trueParameters$prevalence,
N = trialParameters$N[3],
mean = trueParameters$mean,
sd = trueParameters$sd)
}
if (saveRawData) {
                             rawData <- rbind(rawData,
                                              data.frame(simId = i, dataSoFar))
}
interim <- private$doInterimLook(dataSoFar)
subGroup <- J ## Last group
if (interim$decision == -1) { ## continue
subGroup <- private$selectSubgroup(dataSoFar)
interim <- private$doInterimLook(dataSoFar[dataSoFar$subGroup <= subGroup, ])
}
trialHistory[i, ] <- c(decision = interim$decision,
select = subGroup,
statistic = interim$wcx,
table(dataSoFar$subGroup))
if (showProgress) {
setTxtProgressBar(pb, i)
}
}
if (showProgress) {
close(pb)
}
## Restore rng state
if (is.null(oldRngState)) {
rm(".Random.seed", envir = .GlobalEnv)
} else {
assign(x = ".Random.seed", value = oldRngState, envir = .GlobalEnv)
}
names(trialHistory) <- c("decision", "select", "statistic",
sapply(seq_len(J), function(i) paste0("G", i)))
if (saveRawData) {
list(trialHistory = trialHistory, trueParameters = trueParameters,
rawData = rawData)
} else {
list(trialHistory = trialHistory, trueParameters = trueParameters)
}
},
#' @description
#' Analyze the exploration data from trial
#' @param trialExploration the result of a call to `explore()` to simulate the design
#' @return Return a list of summary quantities
analyze = function (trialExploration) {
J <- private$designParameters$J
trialHistory <- trialExploration$trialHistory
numberOfSimulations <- nrow(trialHistory)
reject <- (trialHistory$decision == 1)
rejectGroupTable <- table(trialHistory$select[reject])
list(reject = reject, rejectGroupTable = rejectGroupTable,
rejectSubgroup = sum(rejectGroupTable[-J]) / numberOfSimulations)
},
#' @description
#' Print the operating characteristics of the design using the analysis data
#' @param analysis the analysis result from the `analyze()` call
summary = function(analysis) {
numberOfSimulations <- length(analysis$reject)
cat(sprintf("P(Reject H0) = %f\n",
mean(analysis$reject)))
cat(sprintf("P(Reject H0_ITT) = %f\n",
mean(analysis$reject) - analysis$rejectSubgroup))
cat(sprintf("P(Reject H0_subgp) = %f\n",
analysis$rejectSubgroup))
cat("\nChance of each subpopulation rejected\n")
print(analysis$rejectGroupTable / numberOfSimulations)
}
))
#' A fixed sample RCT design to compare against the adaptive clinical
#' trial design of Lai, Lavori and Liao.
#'
#' @description `ASSISTDesignC` objects are used to design a trial
#' with certain characteristics provided in the object instantiation
#' method. This design differs from `ASSISTDesign` only in how it
#' computes the critical boundaries, how it performs the interim
#' look, and what quantities are computed in a trial run.
#'
#' @seealso `ASSISTDesignB` which is a superclass of this object
#' @importFrom R6 R6Class
#' @importFrom mvtnorm pmvnorm Miwa
#' @importFrom stats uniroot rnorm pnorm qnorm
#' @export
#' @examples
#' data(LLL.SETTINGS)
#' prevalence <- LLL.SETTINGS$prevalences$table1
#' scenario <- LLL.SETTINGS$scenarios$S0
#' designParameters <- list(prevalence = prevalence,
#' mean = scenario$mean,
#' sd = scenario$sd)
#' ## A realistic design uses 5000 simulations or more!
#' designC <- ASSISTDesignC$new(trialParameters = LLL.SETTINGS$trialParameters,
#' designParameters = designParameters)
#' print(designC)
#' result <- designC$explore(numberOfSimulations = 100, showProgress = interactive())
#' analysis <- designC$analyze(result)
#' designC$summary(analysis)
#' ## For full examples, try:
#' ## browseURL(system.file("full_doc/ASSISTant.html", package="ASSISTant"))
#'
ASSISTDesignC <-
R6Class("ASSISTDesignC",
inherit = ASSISTDesignB,
public = list(
#' @description
                 #' Compute the critical boundary value \eqn{c_\alpha} for the fixed sample design.
#' @return a named list containing the critical value `cAlpha`
computeCriticalValues = function() {
list(cAlpha = qnorm(1 - private$trialParameters$type1Error))
},
#' @description
                 #' Explore the design using the specified number of simulations, random number seed, and other parameters.
#' @param numberOfSimulations default number of simulations is 5000
#' @param rngSeed default seed is 12345
#' @param trueParameters the state of nature, by default the value of `self$getDesignParameters()` as would be the case for a Type I error calculation. If changed, would yield power.
#' @param showProgress a boolean flag to show progress, default `TRUE`
#' @param saveRawData a flag (default `FALSE`) to indicate if raw data has to be saved
#' @return a list of results
explore = function (numberOfSimulations = 5000, rngSeed = 12345,
trueParameters = self$getDesignParameters(),
showProgress = TRUE,
saveRawData = FALSE) {
## Save rng state
oldRngState <- if (exists(".Random.seed", envir = .GlobalEnv)) {
get(x = ".Random.seed", envir=.GlobalEnv)
} else {
NULL
}
## set our seed
set.seed(seed = rngSeed, normal.kind = NULL)
trialParameters <- private$trialParameters
trueParameters <- conformParameters(trueParameters, private$discreteData)
J <- trueParameters$J
glrBoundary <- private$boundaries
support <- trueParameters$distSupport
                     naVec <- rep(NA, numberOfSimulations)
trialHistory <- data.frame(decision = naVec,
statistic = naVec,
matrix(0, numberOfSimulations, J))
if (showProgress) {
pb <- txtProgressBar(min = 0, max = numberOfSimulations, style = 3)
}
if (saveRawData) {
rawData <- data.frame(simId = integer(0),
subGroup = integer(0),
trt = integer(0),
score = numeric(0))
}
for (i in seq_len(numberOfSimulations)) {
if (private$discreteData) {
dataSoFar <- generateDiscreteData(prevalence = trueParameters$prevalence,
N = trialParameters$N[3],
support = support,
ctlDist = trueParameters$ctlDist,
trtDist = trueParameters$trtDist)
} else {
dataSoFar <- generateNormalData(prevalence = trueParameters$prevalence,
N = trialParameters$N[3],
mean = trueParameters$mean,
sd = trueParameters$sd)
}
if (saveRawData) {
                             rawData <- rbind(rawData,
                                              data.frame(simId = i, dataSoFar))
}
interim <- private$doInterimLook(dataSoFar)
trialHistory[i, ] <- c(decision = interim$decision,
statistic = interim$wcx,
table(dataSoFar$subGroup))
if (showProgress) {
setTxtProgressBar(pb, i)
}
}
if (showProgress) {
close(pb)
}
## Restore rng state
if (is.null(oldRngState)) {
rm(".Random.seed", envir = .GlobalEnv)
} else {
assign(x = ".Random.seed", value = oldRngState, envir = .GlobalEnv)
}
names(trialHistory) <- c("decision", "statistic",
sapply(seq_len(J), function(i) paste0("G", i)))
if (saveRawData) {
list(trialHistory = trialHistory, trueParameters = trueParameters,
rawData = rawData)
} else {
list(trialHistory = trialHistory, trueParameters = trueParameters)
}
},
#' @description
#' Analyze the design given the `trialExploration` data
#' @param trialExploration the results from a call to `explore()` to simulate the design
#' @return a named list of rejections
analyze = function (trialExploration) {
J <- private$designParameters$J
trialHistory = trialExploration$trialHistory
numberOfSimulations <- nrow(trialHistory)
reject <- (trialHistory$decision == 1)
list(reject = reject)
},
#' @description
#' Print the operating characteristics of the design using the analysis data
#' @param analysis the analysis result from the `analyze()` call
#' @return no value, just print
summary = function(analysis) {
numberOfSimulations <- length(analysis$reject)
cat(sprintf("P(Reject H0) = %f\n",
mean(analysis$reject)))
}
))
#' The DEFUSE3 design
#'
#'
#' @description `DEFUSE3Design` is a slight variant of the adaptive
#' clinical trial design of Lai, Lavori and Liao. Simulation is used to
#' compute the expected maximum sample size, and the boundaries are
#' recomputed using that expected sample size to account for patients
#' lost after a futile overall look.
#'
#' @seealso `ASSISTDesign` which is a superclass of this object
#' @importFrom R6 R6Class
#' @importFrom mvtnorm pmvnorm Miwa
#' @importFrom stats uniroot rnorm pnorm qnorm
#'
#' @export
#' @examples
#' trialParameters <- list(N = c(200, 340, 476), type1Error = 0.025,
#' eps = 1/2, type2Error = 0.1)
#' designParameters <- list(
#' nul0 = list(prevalence = rep(1/6, 6), mean = matrix(0, 2, 6),
#' sd = matrix(1, 2, 6)),
#' alt1 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
#' c(0.5, 0.4, 0.3, 0, 0, 0)),
#' sd = matrix(1, 2, 6)),
#' alt2 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
#' c(0.5, 0.5, 0, 0, 0, 0)),
#' sd = matrix(1,2, 6)),
#' alt3 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.36, 6)),
#' sd = matrix(1,2, 6)),
#' alt4 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.30, 6)),
#' sd = matrix(1,2, 6)),
#' alt5 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
#' c(0.4, 0.3, 0.2, 0, 0, 0)),
#' sd = matrix(1,2, 6)),
#' alt6 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
#' c(0.5, 0.5, 0.3, 0.3, 0.1, 0.1)),
#' sd = matrix(1,2, 6)))
#'
#'\dontrun{
#' ## A realistic design uses 5000 simulations or more!
#' defuse3 <- DEFUSE3Design$new(trialParameters = trialParameters,
#' numberOfSimulations = 25,
#' designParameters = designParameters$nul0,
#' showProgress = FALSE)
#' print(defuse3)
#' result <- defuse3$explore(showProgress = interactive())
#' analysis <- defuse3$analyze(result)
#' print(defuse3$summary(analysis))
#' }
#' ## For full examples, try:
#' ## browseURL(system.file("full_doc/defuse3.html", package="ASSISTant"))
#'
DEFUSE3Design <-
R6Class("DEFUSE3Design",
inherit = ASSISTDesign,
private = list(
originalBoundaries = NA
),
public = list(
#' @description
#' Return the original boundaries for the design
#' @return a named vector of values for `b`, `btilde` and `c`
getOriginalBoundaries = function() private$originalBoundaries,
#' @description
#' Create a `DEFUSE3Design` object
                 #' @param designParameters parameters of the experimental design. Must contain appropriate distributions to sample from, if `discreteData = TRUE`
#' @param trialParameters the trial parameters, such as sample size etc.
#' @param discreteData a flag indicating that a discrete distribution is to be used for the Rankin scores
#' @param numberOfSimulations the number of simulations to use, default 5000
#' @param rngSeed the random number generator seed
#' @param showProgress a boolean flag to show progress (default `TRUE`)
#' @param trueParameters a list of true parameter values reflecting the state of nature
#' @param boundaries decision boundaries to use for interim looks, a named vector of `btilde`, `b` and `c` values
                 #' @return a new `DEFUSE3Design` object
initialize = function(designParameters, trialParameters, discreteData = FALSE,
numberOfSimulations = 5000, rngSeed = 54321,
showProgress = TRUE,
trueParameters = NULL,
boundaries) {
super$initialize(designParameters, trialParameters, discreteData, boundaries)
## Save original Effect sizes
##browser()
private$originalBoundaries <- private$boundaries
private$trialParameters$originalEffectSize <- private$trialParameters$effectSize
if (missing(boundaries)) {
self$adjustCriticalValues(numberOfSimulations, rngSeed, showProgress)
}
},
#' @description
#' Adjust critical values to account for sample size loss due to futility
#' @param numberOfSimulations the number of simulations to use
#' @param rngSeed the random number generator seed
#' @param showProgress a boolean flag for showing progress
#' @return the adjusted boundaries
adjustCriticalValues = function(numberOfSimulations, rngSeed, showProgress) {
designParameters <- private$designParameters
trialParameters <- private$trialParameters
                     ## Run simulations to estimate expected maximum sample sizes
result <- as.data.frame(
self$explore(numberOfSimulations = numberOfSimulations,
rngSeed = rngSeed,
recordStats = FALSE,
showProgress = showProgress)$trialHistory
)
q <- cumsum(designParameters$prevalence)
N <- trialParameters$N
simDN <- matrix(NA, nrow = numberOfSimulations, ncol = 3L)
for (i in seq_len(numberOfSimulations)) {
j <- result$exitStage[i] ## the stage at which the trial ended
seq_j <- seq_len(j)
                         ## incremental recruitment per stage as per the design
simDN[i, seq_j] <- diff(c(0, N[seq_j]))
jfut <- result$stage_Ihat[i] ## Stage at which we had futility
if (!is.na(jfut)) {
seq_jfut <- seq_len(jfut)
## sample size adjustment for loss
simDN[i, seq_jfut] <- simDN[i, seq_jfut] * q[result$Ihat[i]]
}
}
## End Simulation
DEM <- apply(simDN, 2, mean, na.rm = TRUE)
EM <- floor(cumsum(DEM)) ## Expected N actually
J <- designParameters$J
expectedEffectSize <- (qnorm(1 - trialParameters$type1Error) +
qnorm(1 - trialParameters$type2Error)) /
sqrt(3 * EM[3])
## Update the effect size
private$trialParameters$effectSize <- expectedEffectSize
private$boundaries <- computeMHPBoundaries(prevalence = designParameters$prevalence,
N = EM,
alpha = trialParameters$type1Error,
beta = trialParameters$type2Error,
eps = trialParameters$eps)
},
#' @description
                 #' Explore the design using the specified number of simulations, random number seed, and other parameters.
#' @param numberOfSimulations default number of simulations is 5000
#' @param rngSeed default seed is 12345
#' @param trueParameters the state of nature, by default the value of `self$getDesignParameters()` as would be the case for a Type I error calculation. If changed, would yield power.
#' @param showProgress a boolean flag to show progress, default `TRUE`
#' @param recordStats a boolean flag (default `TRUE`) to record statistics
#' @param saveRawData a flag (default `FALSE`) to indicate if raw data has to be saved
#' @return a list of results
explore = function (numberOfSimulations = 5000, rngSeed = 12345,
trueParameters = self$getDesignParameters(),
recordStats = TRUE,
showProgress = TRUE,
saveRawData = FALSE) {
super$explore(numberOfSimulations, rngSeed, trueParameters,
recordStats, showProgress, fixedSampleSize = TRUE,
saveRawData = saveRawData)
},
#' @description
#' Perform an interim look for futility
#' @param trialData trial data frame
#' @param stage the trial stage
#' @param recordStats a boolean flag to record all statistics
#' @return the trial history
performInterimLook = function (trialData, stage, recordStats = FALSE) {
super$performInterimLook(trialData, stage, recordStats, fixedSampleSize = TRUE)
}
))
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/R/ASSISTDesigns.R |
#' Three stage group sequential adaptive design with subgroup selection
#'
#' \code{ASSISTant} is a package that implements a three-stage
#' adaptive clinical trial design with provision for subgroup
#' selection where the treatment may be effective; see Lai, Lavori and
#' Liao (\doi{10.1016/j.cct.2014.09.001}). The main design object is
#' an \code{R6} class that can be instantiated and manipulated to
#' obtain the operating characteristics. A vignette is provided
#' showing the use of this package for designing the DEFUSE-3 trial,
#' described in the paper by Lai, Lavori and Liao. The package
#' contains everything necessary to reproduce the results of the
#' paper.
#' @docType package
#' @name ASSISTant
#' @references Adaptive Choice of Patient Subgroup for Comparing Two
#' Treatments by Tze Leung Lai and Philip W. Lavori and Olivia
#' Yueh-Wen Liao. Contemporary Clinical Trials, Vol. 39, No. 2, pp
#' 191-200 (2014, \doi{10.1016/j.cct.2014.09.001}).
#' @references Adaptive design of confirmatory trials: Advances and
#' challenges by Tze Leung Lai and Philip W. Lavori and Ka Wai
#' Tsang. Contemporary Clinical Trials, Vol. 45, Part A, pp 93-102
#' (2015, \doi{10.1016/j.cct.2015.06.007}).
#'
NULL
#' Is a scalar quantity in a specified range?
#' @description Check if the argument is a scalar within a specified range
#'
#' @param low the lower bound, default \code{-Inf}
#' @param high the upper bound, default \code{Inf}
#' @return \code{TRUE} or \code{FALSE}
#'
#' @rdname ASSISTant-internal
#'
#' @keywords internal
scalarInRange <- function(x, low = -Inf, high = Inf) {
## #'
## #' @examples
## #' ASSISTant:::scalarInRange(x = 10, low = 2) ## TRUE
## #' ASSISTant:::scalarInRange(x = 10, high = 2) ## FALSE
(length(x) == 1) && (x >= low) && (x <= high)
}
#' Is the numeric quantity in a specified range?
#' @description Check if the argument is within a specified range
#'
#' @param low the lower bound, default \code{-Inf}
#' @param high the upper bound, default \code{Inf}
#' @return \code{TRUE} or \code{FALSE}
#'
#' @rdname ASSISTant-internal
#'
#' @keywords internal
numberInRange <- function(x, low = -Inf, high = Inf) {
## #'
## #' @examples
## #' ASSISTant:::numberInRange(x = 2:10, low = 2) ## TRUE
## #' ASSISTant:::numberInRange(x = 10:15, high = 2) ## FALSE
all((x >= low) & (x <= high))
}
#' Is a scalar quantity an integer in a specified range?
#' @description Check if the argument is a scalar integer within a specified range
#'
#' @param low the lower bound, default \code{-Inf}
#' @param high the upper bound, default \code{Inf}
#' @return \code{TRUE} or \code{FALSE}
#'
#' @rdname ASSISTant-internal
#'
#' @keywords internal
scalarIntegerInRange <- function(x, low = -Inf, high = Inf) {
## #'
## #' @examples
## #' ASSISTant:::integerInRange(x = 10, low = 2) ## TRUE
## #' ASSISTant:::integerInRange(x = 10.5) ## FALSE
(length(x) == 1) && (x == trunc(x)) && (x >= low) && (x <= high)
}
#' Is the numeric quantity an integer vector in a specified range?
#' @description Check if the argument is an integer vector within a specified range
#'
#' @param low the lower bound, default \code{-Inf}
#' @param high the upper bound, default \code{Inf}
#' @return \code{TRUE} or \code{FALSE}
#'
#' @rdname ASSISTant-internal
#'
#' @keywords internal
integerInRange <- function(x, low = -Inf, high = Inf) {
## #' @examples
## #' ASSISTant:::integerInRange(x = 2:10, low = 2) ## TRUE
## #' ASSISTant:::integerInRange(x = 10:15, high = 2) ## FALSE
## #' ASSISTant:::integerInRange(x = c(0.5, 1.5), high = 2) ## FALSE
## #'
all((x == trunc(x)) & (x >= low) & (x <= high))
}
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/R/ASSISTant.R |
#' Design and trial settings used in the Lai, Lavori, Liao paper simulations
#'
#' A list of design and trial design settings used for analysis and simulations in
#' the Lai, Lavori, Liao paper displayed in Tables 1 and 2. The
#' elements of the list are the following
#' \describe{
#' \item{trialParameters}{
#' \describe{
#' \item{N}{the sample size at each of three interim looks, the last being the final one;
#' The length of this also determines the number of interim looks}
#' \item{type1Error}{the overall type I error}
#' \item{eps}{the fraction of type I error spent at each interim look}
#' \item{type2Error}{the type II error desired}
#' }
#' }
#' \item{scenarios}{
#' A list of the 10 settings used in the simulations named \code{S0}, \code{S1}, ...,
#' \code{S10} as in the paper, each with three elements
#' \describe{
#' \item{mean}{a \eqn{2\times J} matrix of means, the first row for the null setting,
#' the second for the alternative}
#' \item{sd}{a \eqn{2\times J} matrix of standard deviations, the first row for the
#' null setting, the second for the alternative}
#' }
#' }
#' \item{prevalences}{
#' A list of two elements with prevalence vectors used in the paper; the lengths of these
#' vectors implicitly define the number of groups.
#' \describe{
#' \item{table1}{a vector of equal prevalences for six groups used in table 1}
#' \item{table2}{a vector of prevalences used in table 2 of the paper}
#' }
#' }
#' }
#' @name LLL.SETTINGS
#' @docType data
#' @references Adaptive Choice of Patient Subgroup for Comparing Two
#' Treatments by Tze Leung Lai and Philip W. Lavori and Olivia
#' Yueh-Wen Liao. Contemporary Clinical Trials, Vol. 39, No. 2, pp
#' 191-200 (2014, \doi{10.1016/j.cct.2014.09.001}).
#' @keywords data
#'
NULL
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/R/LLL.SETTINGS.R |
## Global constant, the number of stages for this design
NUM_STAGES <- 3L
## Global constant, the names of columns in trial history relating to Ihat, the subgroup chosen
IHAT_COL_NAMES <- c("decision_Ihat", "wcx_Ihat", "wcx.fut_Ihat", "Nl_Ihat", "Ihat", "stage_Ihat", "lost")
## Global constant, the name of the CI column
CI_COL_NAME <- c("bounds")
## Global constant, the name of the column for stage at which the trial exits
STAGE_COL_NAME <- "exitStage"
#' Compute the standardized Wilcoxon test statistic for two samples
#'
#' We compute the standardized Wilcoxon test statistic with mean 0 and
#' and standard deviation 1 for samples \eqn{x} and \eqn{y}. The R function
#' [stats::wilcox.test()] returns the statistic
#'
#' \deqn{
#' U = \sum_i R_i - \frac{m(m + 1)}{2}
#' }{
#' U = (sum over i) R_i - m(m + 1) / 2
#' }
#'
#' where \eqn{R_i} are the ranks of the first sample \eqn{x} of size
#' \eqn{m}. We compute
#'
#' \deqn{
#' \frac{(U - mn(1/2 + \theta))}{\sqrt{mn(m + n + 1) / 12}}
#' }{
#' (U - mn(1/2 + theta)) / (mn(m + n + 1) / 12)^(1/2)
#' }
#'
#' where \eqn{\theta} is the alternative hypothesis shift on the
#' probability scale, i.e. \eqn{P(X > Y) = 1/2 + \theta}.
#'
#' @param x a sample numeric vector
#' @param y a sample numeric vector
#' @param theta a value > 0 but < 1/2.
#' @return the standardized Wilcoxon statistic
#'
#' @importFrom stats wilcox.test
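#' @examples
#' ## Illustration: under no shift between x and y the standardized
#' ## statistic is approximately standard normal; a positive shift in x
#' ## moves it upward.
#' set.seed(729)
#' wilcoxon(x = rnorm(100), y = rnorm(100))
#' wilcoxon(x = rnorm(100, mean = 0.5), y = rnorm(100))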
#' @export
wilcoxon <- function(x, y, theta = 0) {
r <- rank(c(x, y))
n.x <- as.double(length(x))
n.y <- as.double(length(y))
STATISTIC <- c(W = sum(r[seq_along(x)]) - n.x * (n.x +
1)/2)
TIES <- (length(r) != length(unique(r)))
NTIES <- table(r)
z <- STATISTIC - n.x * n.y * (1/2 + theta)
SIGMA <- sqrt((n.x * n.y/12) * ((n.x + n.y + 1) -
sum(NTIES^3 - NTIES)/((n.x + n.y) * (n.x + n.y -
1))))
##CORRECTION <- sign(z) * 0.5
z / SIGMA
}
#' Compute the sample size for any group at a stage assuming a nested
#' structure as in the paper.
#'
#' In the three stage design under consideration, the groups are
#' nested with assumed prevalences and fixed total sample size at each
#' stage. This function returns the sample size for a specified group
#' at a given stage, where the futility stage for the overall group
#' test may be specified along with the chosen subgroup.
#'
#' @param prevalence the vector of prevalence, will be normalized if
#' not already so. The length of this vector implicitly indicates
#' the number of groups J.
#' @param N an integer vector of length 3 indicating total sample size
#' at each of the three stages
#' @param stage the stage of the trial
#' @param group the group whose sample size is desired
#' @param HJFutileAtStage the stage at which overall futility
#'     occurred. Default `NA` indicating it did not occur. Also
#'     ignored if stage is 1.
#' @param chosenGroup the selected group if HJFutileAtStage is not
#'     `NA`. Ignored if stage is 1.
#' @return the sample size for group
#'
#' @export
groupSampleSize <- function(prevalence, N, stage, group, HJFutileAtStage = NA, chosenGroup = NA) {
if (!integerInRange(N, low = 1) || length(N) != NUM_STAGES) {
stop("Improper values for sample size N")
}
if (!identical(order(N), seq_along(N))) {
stop("Sample size vector N is not monotone increasing sequence")
}
    J <- length(prevalence)
    if (!scalarInRange(J, low = 2, high = 10)) {
        stop("Improper number of subgroups; need at least 2; max 10")
    }
    if (any(prevalence <= 0)) {
        stop("Improper prevalence specified")
    }
    prevalence <- prevalence / sum(prevalence)
    q <- cumsum(prevalence)
if (stage == 1 || is.na(HJFutileAtStage) || stage == HJFutileAtStage) {
## catches stage = 1 and all cases where stage == HJFutileAtStage
N[stage] * q[group]
} else {
## stage > 1 && stage > HJFutileAtStage
stopifnot(stage <= 3 && 1 <= HJFutileAtStage && HJFutileAtStage < stage)
if (stage == 2) {
## HJFutileAtStage = 1 for sure
if (group <= chosenGroup) {
qq <- prevalence[seq_len(chosenGroup)]
qq <- cumsum(qq / sum(qq))
s1 <- N[1] * q[chosenGroup]
(N[2] - s1) * qq[group] + N[1] * q[group]
} else {
N[1] * q[group] + (N[2] - N[1] * q[chosenGroup])
}
} else {
## stage == 3 here
if (HJFutileAtStage == 2) {
if (group <= chosenGroup) {
qq <- prevalence[seq_len(chosenGroup)]
qq <- cumsum(qq / sum(qq))
s2 <- N[2] * q[chosenGroup]
(N[3] - s2) * qq[group] + N[2] * q[group]
} else {
N[2] * q[group] + (N[3] - N[2]* q[chosenGroup])
}
} else {
## HJFutileAtStage = 1
if (group <= chosenGroup) {
qq <- prevalence[seq_len(chosenGroup)]
qq <- cumsum(qq / sum(qq))
s1 <- N[1] * q[chosenGroup]
(N[3] - s1) * qq[group] + N[1] * q[group]
} else {
N[1] * q[group] + (N[3] - N[1] * q[chosenGroup])
}
}
}
}
}
#' Conditional probability of \eqn{i}-th subgroup statistic being
#' chosen given the appropriate mean, covariance matrix and futility
#' boundary \eqn{\tilde{b}}{btilde} at \eqn{v}.
#'
#' The computation involves a \eqn{J-1} multivariate normal integral
#' of the conditional density of the \eqn{i}-th subgroup statistic
#' given that it was maximal among all subgroups:
#'
#'\deqn{
#' \phi_i(v)\int_{-\infty}^v\int_{-\infty}^v\ldots
#' \int_{-\infty}^{\tilde{b}} \phi_v(z_{-i})\,dz_{-i}
#' }
#'
#' where \eqn{z_{-i}} denotes all subgroups other than \eqn{i}.
#'
#' @param v the value of the statistic
#' @param i the subgroup
#' @param mu.prime the conditional mean vector of the distribution of
#' length \eqn{J - 1}; needs to be multipled by the conditional
#' value, the parameter `v`.
#' @param Sigma.prime the conditional covariance matrix of dimension
#' \eqn{J-1} by \eqn{J-1}
#' @param fut the futility boundary, which is \eqn{\tilde{b}}{btilde}
#' for stages 1 and 2, but \eqn{c} for stage 3
#' @return the conditional probability
#'
#' @rdname ASSISTant-internal
#'
#' @importFrom mvtnorm pmvnorm Miwa
#' @importFrom stats dnorm
den.vs <- function(v, i, mu.prime, Sigma.prime, fut ) {
## Density function used in integration
mu.prime <- mu.prime * v
mvtnorm::pmvnorm(upper = c(rep(v, nrow(mu.prime) - 1), fut), mean = mu.prime[, i],
sigma = Sigma.prime[[i]], algorithm = mvtnorm::Miwa()) * stats::dnorm(v)
}
#' Compute the futility boundary (modified Haybittle-Peto) for the
#' first two stages
#'
#' The futility boundary \eqn{\tilde{b}}{btilde} is computed by
#' solving (under the alternative)
#'
#' \deqn{
#' P(\tilde{Z}_J^1 \le \tilde{b} \mbox{ or } \tilde{Z}_J^2 \le \tilde{b}) = \epsilon\beta }
#'
#' where the superscripts denote the stage, \eqn{\epsilon} is the
#' fraction of the error spent at the interim stages, and \eqn{\beta}
#' is the type II error.
#' \eqn{Z_{J}} (the overall group) at each of the three stages and the
#' fact that the \eqn{\tilde{Z_J}} is merely a translation of
#' \eqn{Z_J}. So here the calculation is based on a mean of zero and
#' has to be translated during use!
#'
#' @param beta the type II error
#' @param cov.J the 3 x 3 covariance matrix
#'
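#' @examples
#' ## Illustrative sketch: build the covariance of Z_J across the three
#' ## stages as computeMHPBoundaries() does, then solve for btilde using
#' ## beta * eps (here 0.1 * 1/2) as the error spent at the interim looks.
#' N <- c(200, 340, 476)
#' cov.J <- diag(3)
#' for (i in 1:2) {
#'     for (j in (i + 1):3) {
#'         cov.J[i, j] <- cov.J[j, i] <-
#'             sqrt((N[i]^2 * (N[j] + 1)) / (N[j]^2 * (N[i] + 1)))
#'     }
#' }
#' mHP.btilde(beta = 0.1 * 0.5, cov.J = cov.J)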
#' @importFrom stats uniroot qnorm
#' @importFrom mvtnorm pmvnorm Miwa
#' @export
mHP.btilde <- function (beta, cov.J) {
sigma <- cov.J[-NUM_STAGES, -NUM_STAGES]
btilde <- stats::uniroot(f = function(btilde) {
1 - mvtnorm::pmvnorm(lower = rep(btilde, NUM_STAGES - 1),
upper = rep(Inf, NUM_STAGES - 1),
sigma = sigma,
algorithm = Miwa()) -
beta },
lower = stats::qnorm(beta) - 1,
upper = stats::qnorm(beta^(1 / (NUM_STAGES - 1))) + 1)
btilde$root
}
#' Compute the efficacy boundary (modified Haybittle-Peto) for the
#' first two stages
#'
#' @param prevalence the vector of prevalences between 0 and 1 summing
#' to 1. \eqn{J}, the number of groups, is implicitly the length
#' of this vector and should be at least 2.
#' @param N a three-vector of total sample size at each stage
#' @param cov.J the 3 x 3 covariance matrix for Z_J at each of the
#' three stages
#' @param mu.prime a list of \eqn{J} mean vectors, each of length
#' \eqn{J-1} representing the conditional means of all the other
#' \eqn{Z_j} given \eqn{Z_i}. This mean does not account for the
#' conditioned value of \eqn{Z_i} and so has to be multiplied by
#' that during use!
#' @param Sigma.prime a list of \eqn{J} covariance matrices, each
#' \eqn{J-1} by \eqn{J-1} representing the conditional covariances
#' all the other \eqn{Z_j} given \eqn{Z_i}
#' @param alpha the amount of type I error to spend
#' @param btilde the futility boundary
#' @param theta the effect size on the probability scale
#' @importFrom stats integrate pnorm uniroot
#' @importFrom mvtnorm pmvnorm Miwa
#' @export
mHP.b <- function (prevalence, N, cov.J, mu.prime, Sigma.prime, alpha, btilde, theta) {
J <- length(prevalence)
q <- cumsum(prevalence / sum(prevalence))
crossingProb <- function(b) {
##
## Function to compute conditional probability of rejecting
## the subgroup hypothesis for group i at stage (should be 1
## or 2), given that the trial was futile at stage.accept.J.
##
f <- function(stage, stage.accept.J, i) {
## Translate btilde appropriately from the theta
## (probability) scale to the standard scale; see writeup.
btilde <- btilde + theta * sqrt(3 * N[stage.accept.J])
## Adjust the sample size to account for the loss, once
## HJ is accepted
ssi <- replace(N, stage.accept.J, N[stage.accept.J] * q[i])
if (stage == stage.accept.J) {
## Rejection of subgroup at same stage as futility stage
## So this is just an integral of the conditional joint
## distribution
##
stats::integrate(
function(x) {
sapply(x, function(x) den.vs(x, i, mu.prime,
Sigma.prime, fut = btilde))
},
lower = b, upper = Inf)$value
} else {
                    ## Rejection of subgroup at the subsequent stage,
                    ## that is, stage == stage.accept.J + 1.
                    ## For efficiency, we don't do explicit checking of such
                    ## conditions, but the invocation below is implicitly expected
                    ## to respect this fact.
                    ##
                    ## So here we have to integrate the product of the
                    ## conditional joint distribution and the probability
                    ## of the i-th group statistic exceeding the efficacy
                    ## boundary at the next stage.
##
sigma <- sqrt(ssi[stage - 1] / ssi[stage])
integrand <- function(u) {
den.vs(u, i, mu.prime, Sigma.prime, fut = btilde) *
stats::pnorm(b, mean = u * sigma, sd = sqrt(1 - sigma^2),
lower.tail = FALSE)
}
stats::integrate(
function(u) sapply(u, integrand),
lower = -Inf, upper = b)$value
}
}
## Type I error at interim stages 1 and 2 =
## P(accept H_J at stage 1, reject H_I at stage 1) +
## P(accept H_J at stage 1, reject H_I at stage 2) +
## P(accept H_J at stage 2, reject H_I at stage 2)
## for I in 1:(J-1) +
## P(reject H_J at stage 1 or 2)
##
sum(sapply(
seq_len(J - 1), function(i) f(1, 1, i) + f(2, 1, i) + f(2, 2, i))) +
##
1 - mvtnorm::pmvnorm(lower = -Inf, upper = b, mean = rep(0, NUM_STAGES - 1),
sigma = cov.J[-NUM_STAGES, -NUM_STAGES],
algorithm = mvtnorm::Miwa()) -
##
alpha
}
stats::uniroot(f = crossingProb, lower = 1.0, upper = 4.0)$root
}
#' Compute the efficacy boundary (modified Haybittle-Peto) for the
#' final (third) stage
#'
#' @param prevalence the vector of prevalences between 0 and 1 summing
#' to 1. \eqn{J}, the number of groups, is implicitly the length
#' of this vector and should be at least 2.
#' @param N a three-vector of total sample size at each stage
#' @param cov.J the 3 x 3 covariance matrix for Z_J at each of the
#' three stages
#' @param mu.prime a list of \eqn{J} mean vectors, each of length
#' \eqn{J-1} representing the conditional means of all the other
#' \eqn{Z_j} given \eqn{Z_i}. This mean does not account for the
#' conditioned value of \eqn{Z_i} and so has to be multiplied by
#' that during use!
#' @param Sigma.prime a list of \eqn{J} covariance matrices, each
#' \eqn{J-1} by \eqn{J-1} representing the conditional covariances
#' all the other \eqn{Z_j} given \eqn{Z_i}
#' @param alpha the amount of type I error to spend
#' @param btilde the futility boundary
#' @param b the efficacy boundary for the first two stages
#' @param theta the effect size on the probability scale
#'
#' @importFrom stats uniroot qnorm integrate
#' @importFrom mvtnorm pmvnorm Miwa
#' @export
mHP.c <- function (prevalence, N, cov.J, mu.prime, Sigma.prime, alpha, btilde, b, theta) {
## Function for computing final boundary c
J <- length(prevalence)
q <- cumsum(prevalence / sum(prevalence))
crossingProb <- function(c) {
##
## Function for bounding the probability of rejecting either
## H_J or H_I at the third stage.
##
f <- function(stage.accept.J, i) {
## i is sub-population selected
##
## Translate btilde appropriately from the theta
## (probability) scale to the standard scale; see writeup.
##
btilde <- btilde + theta * sqrt(3 * N[stage.accept.J])
## Adjust the sample size to account for the loss, once
## HJ is accepted
ssi <- replace(N, stage.accept.J, N[stage.accept.J] * q[i])
if (stage.accept.J == 3 ) {
## Rejection of subgroup at same stage as futility
## stage, that is, stage = 3. So this is just an
## integral of the conditional joint distribution,
## except that at the third stage, the critical
## boundary is c.
##
stats::integrate(
function(x) {
sapply(x, function(x)
den.vs(x, i, mu.prime, Sigma.prime, fut = c))
},
lower = c,
upper = Inf)$value
} else if (stage.accept.J == 2 ) {
## Rejection of subgroup at the subsequent stage,
## that is, H_J was futile at stage 2, and H_I is rejected
## at stage 3.
##
## So here we have to integrate the product of the
## conditional joint distribution at stage 2 and the
## probability of the i-th group statistic exceeding
                    ## the critical value at stage 3. The latter is a
## one-dimensional integral in this case, with
## appropriate standard deviation. Note that the
## critical boundary is c in the last stage!
##
sigma <- sqrt(ssi[2] / ssi[3])
integrand <- function(u) {
den.vs(u, i, mu.prime, Sigma.prime, btilde) *
stats::pnorm(c, mean = u * sigma, sd = sqrt(1 - sigma^2), lower.tail = FALSE)
}
stats::integrate(function(u) sapply(u, integrand),
lower = -Inf,
upper = b)$value
} else {
## Rejection of subgroup at the third stage, while
## H_J was futile at stage 1, and H_I is rejected
## at stage 3.
##
## So here we have to integrate the product of the
## conditional joint distribution at stage 1 and the
## probability of the i-th group statistic exceeding
## the critical value at stage 3, but not stage 2. The
## latter probability is a 2-dimensional integral with
## an appropriate covariance structure. Once again,
## note that the critical boundary at stage 3 is c.
##
v23 <- sqrt(c(ssi[1] / ssi[2], ssi[1] / ssi[3]))
sigma23 <- matrix(sqrt(ssi[2] / ssi[3]), 2, 2)
diag(sigma23) <- 1
sigma <- sigma23 - v23 %*% t(v23)
integrand <- function(u) {
den.vs(u, i, mu.prime, Sigma.prime, btilde) *
mvtnorm::pmvnorm(lower = c(-Inf, c),
upper = c(b, Inf),
mean = u * v23,
sigma = sigma,
algorithm = mvtnorm::Miwa())
}
stats::integrate(function(u) sapply(u, integrand),
lower = -Inf,
upper = b)$value
}
}
##
## Type I error at final stage =
## P(accept H_J at stage 1, reject H_I at stage 3) +
## P(accept H_J at stage 2, reject H_I at stage 3) +
        ## P(accept H_J at stage 3, reject H_I at stage 3)
## for I in 1:(J-1) +
## P(reject H_J at stage 3)
##
sum(sapply(seq_len(J - 1), function(i) f(1, i) + f(2, i) + f(3, i))) +
##
mvtnorm::pmvnorm(lower = c(rep(-Inf, NUM_STAGES - 1), c),
upper = c(rep(b, NUM_STAGES - 1), Inf), sigma = cov.J,
mean = rep(0, NUM_STAGES),
algorithm = mvtnorm::Miwa()) -
##
alpha
}
stats::uniroot(f = crossingProb,
lower = min(0.0, b - 2.0),
upper = max(b + 2.0, 4.0))$root
}
#' Compute the three modified Haybittle-Peto boundaries
#'
#' @param prevalence the vector of prevalences between 0 and 1 summing
#' to 1. \eqn{J}, the number of groups, is implicitly the length
#' of this vector and should be at least 2.
#' @param N a three-vector of total sample size at each stage
#' @param alpha the type I error
#' @param beta the type II error
#' @param eps the fraction (between 0 and 1) of the type 1 error to
#' spend in the interim stages 1 and 2
#' @param futilityOnly a logical value indicating only the futility
#' boundary is to be computed; default `FALSE`
#' @return a named vector of three values containing
#' \eqn{\tilde{b}}{btilde}, b, c
#'
#' @importFrom stats integrate pnorm uniroot
#' @export
computeMHPBoundaries <- function(prevalence, N, alpha, beta, eps, futilityOnly = FALSE) {
J <- length(prevalence)
q <- cumsum(prevalence / sum(prevalence))
theta <- (qnorm(1 - alpha) + qnorm(1 - beta)) / sqrt(3 * N[3])
## Sigma = covariance matrix between subgroup,
## which is roughly stage independent
Sigma <- matrix(0, J, J)
for (i in seq_len(J - 1)) {
for (j in (i + 1):J) {
Sigma[i, j] <- sqrt(q[i] / q[j])
}
}
Sigma <- Sigma + t(Sigma)
diag(Sigma) <- 1
mu.prime <- matrix(0, (J - 1), J)
Sigma.prime <- vector("list", J)
for (i in seq_len(J)) {
mu.prime[, i] <- Sigma[-i, i]
Sigma.prime[[i]] <- Sigma[-i, -i] - Sigma[-i, i] %*% t(Sigma[i, -i])
}
cov.J <- matrix(0, NUM_STAGES, NUM_STAGES)
for (i in seq_len(NUM_STAGES - 1)) {
for (j in (i + 1):NUM_STAGES) {
cov.J[i, j] <- sqrt((N[i]^2 * (N[j] + 1)) / (N[j]^2 * (N[i] + 1)))
}
}
cov.J <- cov.J + t(cov.J)
diag(cov.J) <- 1
btilde <- mHP.btilde(beta = beta * eps, cov.J = cov.J)
if (futilityOnly) {
b <- c <- NA
} else {
b <- mHP.b(prevalence = prevalence,
N = N,
cov.J = cov.J,
mu.prime = mu.prime,
Sigma.prime = Sigma.prime,
alpha = alpha * eps,
btilde = btilde,
theta = theta)
c <- mHP.c(prevalence = prevalence,
N = N,
cov.J = cov.J,
mu.prime = mu.prime,
Sigma.prime = Sigma.prime,
alpha = alpha * (1 - eps),
btilde = btilde,
b = b,
theta = theta)
}
c(btilde = btilde, b = b, c = c)
}
#' Compute the three modified Haybittle-Peto boundaries and effect size
#'
#' @param prevalence the vector of prevalences between 0 and 1 summing
#' to 1. \eqn{J}, the number of groups, is implicitly the length
#' of this vector and should be at least 2.
#' @param alpha the type I error
#' @return a named vector of a single value containing the value for `cAlpha`
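#' @examples
#' \dontrun{
#' ## Critical value cAlpha for six equally prevalent groups; this
#' ## involves J - 1 multivariate normal integrals and takes some time
#' computeMHPBoundaryITT(prevalence = rep(1/6, 6), alpha = 0.025)
#' }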
#' @export
computeMHPBoundaryITT <- function(prevalence, alpha) {
J <- length(prevalence)
q <- cumsum(prevalence / sum(prevalence))
## Sigma = covariance matrix between subgroup,
## which is roughly stage independent
Sigma <- matrix(0, J, J)
for (i in seq_len(J - 1)) {
for (j in (i + 1):J) {
Sigma[i, j] <- sqrt(q[i] / q[j])
}
}
Sigma <- Sigma + t(Sigma)
diag(Sigma) <- 1
mu.prime <- matrix(0, (J - 1), J)
Sigma.prime <- vector("list", J)
for (i in seq_len(J)) {
mu.prime[, i] <- Sigma[-i, i]
Sigma.prime[[i]] <- Sigma[-i, -i] - Sigma[-i, i] %*% t(Sigma[i, -i])
}
    ## Derive the critical boundary cAlpha for the intent-to-treat analysis
crossingProb <- function(c) {
f <- function(i) {
##i=sub-population selected
stats::integrate(
function(x) {
sapply(x, function(x)
den.vs(x, i, mu.prime, Sigma.prime, fut = c))
},
lower = c,
upper = Inf)$value
}
sum(sapply(seq_len(J - 1), function(i) f(i))) +
stats::pnorm(c, lower.tail = FALSE) - alpha
}
c(cAlpha = stats::uniroot(f = crossingProb, lower = 1, upper = 4)$root)
}
#' Return a vector of column names for statistics for a given stage
#'
#' @param stage the trial stage (1 to 3 inclusive).
#' @param J the number of subgroups
#' @return a character vector of the column names
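#' @examples
#' ## Column names recorded at stage 1 of a design with 3 subgroups
#' colNamesForStage(stage = 1, J = 3)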
#' @export
colNamesForStage <- function(stage, J) {
seqJ <- seq_len(J)
c(paste(c("decision", "wcx", "wcx.fut", "Nl"), stage, sep = "_"),
sapply(seqJ, function(group) {
c(sprintf("wcx_%d_%d", stage, group),
sprintf("nc_%d_%d", stage, group),
sprintf("nt_%d_%d", stage, group),
sprintf("muc_%d_%d", stage, group),
sprintf("mut_%d_%d", stage, group),
sprintf("sdc_%d_%d", stage, group),
sprintf("sdt_%d_%d", stage, group))
}))
}
#' A data generation function using a discrete distribution for Rankin
#' score rather than a normal distribution
#'
#' @param prevalence a vector of group prevalences (length denoted by J below)
#' @param N the sample size to generate
#' @param support the support values of the discrete distribution (length K), default 0:6
#' @param ctlDist a probability vector of length K denoting the Rankin score distribution for control.
#' @param trtDist an K x J probability matrix with each column is the Rankin distribution for the associated group
#' @return a three-column data frame of `subGroup`, `trt` (0 or 1), and `score`
#' @examples
#' # Simulate data from a discrete distribution for the Rankin scores,
#' # which are typically ordinal integers from 0 to 6 in the following
#' # simulations. So we define a few scenarios.
#' library(ASSISTant)
#' null.uniform <- rep(1, 7L) ## uniform on 7 support points
#' hourglass <- c(1, 2, 2, 1, 2, 2, 1)
#' inverted.hourglass <- c(2, 1, 1, 2, 1, 1, 2)
#' bottom.heavy <- c(2, 2, 2, 1, 1, 1, 1)
#' bottom.heavier <- c(3, 3, 2, 2, 1, 1, 1)
#' top.heavy <- c(1, 1, 1, 1, 2, 2, 2)
#' top.heavier <- c(1, 1, 1, 2, 2, 3, 3)
#' ctlDist <- null.uniform
#' trtDist <- cbind(null.uniform, null.uniform, hourglass, hourglass) ## 4 groups
#' generateDiscreteData(prevalence = rep(1, 4), N = 10, ctlDist = ctlDist,
#' trtDist = trtDist) ## default support is 0:6
#' trtDist <- cbind(bottom.heavy, bottom.heavy, top.heavy, top.heavy)
#' generateDiscreteData(prevalence = rep(1, 4), N = 10, ctlDist = ctlDist,
#' trtDist = trtDist)
#' support <- c(-2, -1, 0, 1, 2) ## Support of distribution
#' top.loaded <- c(1, 1, 1, 3, 3) ## Top is heavier
#' ctl.dist <- c(1, 1, 1, 1, 1) ## null on 5 support points
#' trt.dist <- cbind(ctl.dist, ctl.dist, top.loaded) ## 3 groups
#' generateDiscreteData(prevalence = rep(1, 3), N = 10, support = support,
#' ctlDist = ctl.dist, trtDist = trt.dist)
#' ## ctl.dist can also be a matrix with different nulls for each subgroup
#' uniform <- rep(1, 5)
#' bot.loaded <- c(3, 3, 1, 1, 1)
#' ctl.dist <- matrix(c(uniform, bot.loaded, top.loaded), nrow = 5)
#' generateDiscreteData(prevalence = rep(1, 3), N = 10, support = support,
#' ctlDist = ctl.dist, trtDist = trt.dist)
#' @export
generateDiscreteData <- function(prevalence, N, support = 0L:6L, ctlDist, trtDist) {
nR <- length(support)
J <- length(prevalence)
if (is.matrix(ctlDist)) {
null <- ctlDist
} else {
null <- matrix(rep(ctlDist, J), ncol = J)
}
if(nrow(null) != nR || nrow(trtDist) != nR || ncol(trtDist) != J) {
stop("generateDiscreteData: wrong dimensions for ctrlDist/trtDist")
}
dists <- cbind(null, trtDist)
if (N == 0) {
data.frame(subGroup = integer(0), trt = integer(0),
score = integer(0))
} else {
subGroup <- sample.int(n = J, size = N, replace = TRUE,
prob = prevalence)
## Scale trt to 0:1 from 1:2.
trt <- sample.int(n = 2L, size = N, replace = TRUE) - 1L
rankin <- sapply(J * trt + subGroup,
function(k) sample(support, size = 1, prob = dists[, k] ))
data.frame(subGroup = subGroup, trt = trt, score = rankin)
}
}
#' A data generation function along the lines of what was used in the
#' Lai, Lavori, Liao paper
#'
#' @param prevalence a vector of group prevalences (length denoted by J below)
#' @param N the sample size to generate
#' @param mean a 2 x J matrix of means under the null (first row) and the alternative (second row) for each group
#' @param sd a 2 x J matrix of standard deviations under the null (first row) and the alternative (second row) for each group
#' @return a three-column data frame of `subGroup`, `trt` (0 or 1), and `score`
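#' @examples
#' ## A small illustrative draw with J = 2 hypothetical groups:
#' generateNormalData(prevalence = c(1, 1), N = 5,
#'                    mean = rbind(rep(0, 2), c(0.5, 0.3)),
#'                    sd = matrix(1, 2, 2))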
#' @export
generateNormalData <- function(prevalence, N, mean, sd) {
J <- length(prevalence)
stopifnot((J == ncol(mean)) && (J == ncol(sd)))
if (N == 0) {
data.frame(subGroup = integer(0), trt = integer(0),
score = numeric(0))
} else {
subGroup <- sample.int(n = J, size = N, replace = TRUE,
prob = prevalence)
trt <- sample.int(n = 2L, size = N, replace = TRUE) - 1L
rankin <- unlist(
Map(function(i, j)
                stats::rnorm(n = 1, mean = mean[i, j], sd = sd[i, j]),
trt + 1, subGroup))
data.frame(subGroup = subGroup, trt = trt, score = rankin)
}
}
#' Compute the mean and sd of a discrete Rankin distribution
#' @param probVec a vector of non-negative weights, of length equal to that
#'     of the support, normalized internally to probabilities; default is uniform
#' @param support a vector of support values (default 0:6 for Rankin Scores)
#' @return a named vector of `mean` and `sd`
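#' @examples
#' computeMeanAndSD()  ## uniform Rankin distribution on 0:6
#' computeMeanAndSD(probVec = c(1, 1, 1, 2, 2, 3, 3))  ## a top-heavier distribution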
#' @export
computeMeanAndSD <- function(probVec = rep(1, 7L), support = 0L:6L) {
stopifnot(all(probVec >= 0))
probVec <- probVec / sum(probVec)
mean <- sum(support * probVec)
sd <- sqrt(sum(probVec * support^2) - mean^2)
c(mean = mean, sd = sd)
}
#' Conform designParameters so that weights are turned into probabilities, the control and treatment distributions are proper matrices, etc.
#' @param plist the parameter list
#' @param discreteData flag if data is discrete
#' @return the modified parameter list
conformParameters <- function(plist, discreteData = FALSE) {
prevalence <- plist$prevalence
J <- length(prevalence)
plist$J <- J
prevalence <- prevalence / sum(prevalence)
names(prevalence) <- paste0("Group", seq_len(J))
plist$prevalence <- prevalence
if (discreteData) {
support <- plist$distSupport
## Assume Rankin is 0:6 unless specified in designParameters
if (is.null(support)) {
support <- 0L:6L
}
plist$distSupport <- support
ctlDist <- plist$ctlDist
if (!is.matrix(ctlDist)) {
ctlDist <- matrix(c(rep(ctlDist, J)), ncol = J)
}
ctlDist <- apply(ctlDist, 2, function(x) x/sum(x))
rownames(ctlDist) <- support
colnames(ctlDist) <- names(prevalence)
plist$ctlDist <- ctlDist
trtDist <- plist$trtDist
trtDist <- apply(trtDist, 2, function(x) x/sum(x))
rownames(trtDist) <- support
colnames(trtDist) <- names(prevalence)
plist$trtDist <- trtDist
} else {
mean <- plist$mean
sd <- plist$sd
rownames(mean) <- rownames(sd) <- c("Null", "Alt")
colnames(mean) <- colnames(sd) <- names(prevalence)
plist$mean <- mean
plist$sd <- sd
}
plist
}
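## Example (illustrative, not run): for continuous data,
## conformParameters(list(prevalence = c(1, 1), mean = matrix(0, 2, 2),
##                        sd = matrix(1, 2, 2)))
## returns the list with the prevalence normalized and named, and with the
## mean/sd matrices labeled by "Null"/"Alt" rows and group columns.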
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/R/utilities.R |
## ----echo=F-------------------------------------------------------------------
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
## ---- eval = FALSE------------------------------------------------------------
# system.file("full_doc", package="ASSISTant")
## -----------------------------------------------------------------------------
library(ASSISTant)
data(LLL.SETTINGS)
str(LLL.SETTINGS)
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S0
designParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters)
print(designA)
## -----------------------------------------------------------------------------
result <- designA$explore(numberOfSimulations = 50, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S1
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S2
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S3
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S4
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S5
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S6
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S7
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S8
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S9
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S10
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S0
designParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters)
print(designA)
## -----------------------------------------------------------------------------
result <- designA$explore(numberOfSimulations = 50, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S1
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S2
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S3
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S4
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S5
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S6
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S7
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S8
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S9
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## -----------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S10
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/inst/doc/ASSISTant.R |
---
title: "Adaptive Subgroup Selection in Sequential Trials"
author: "Tze Leung Lai, Philip W. Lavori, Olivia Liao, Ka Wai Tsang and Balasubramanian Narasimhan"
date: '`r Sys.Date()`'
bibliography: assistant.bib
output:
html_document:
theme: cerulean
toc: yes
toc_depth: 2
vignette: >
%\VignetteIndexEntry{Adaptive Subgroup Selection in Sequential Trials}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r echo=F}
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
```
## Introduction
`ASSISTant` is an R package for **A**daptive **S**ubgroup
**S**election **I**n **S**equential **T**rials. This vignette
reproduces all the simulations in the original paper of Lai, Lavori
and Liao [-@Lai2014191].
_NOTE_ The number of simulations has been drastically reduced in this
vignette in order to avoid taxing CRAN servers. The `full_doc` sources
contain the complete sources and output; see files in the directory
```{r, eval = FALSE}
system.file("full_doc", package="ASSISTant")
```
```{r}
library(ASSISTant)
data(LLL.SETTINGS)
str(LLL.SETTINGS)
```
The `LLL.SETTINGS` list contains all the scenarios described in the
paper.
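Each scenario stores `mean` and `sd` matrices with the null values in the
first row and the alternative in the second, one column per subgroup. For
a quick look at one of them (illustrative only):
```{r}
LLL.SETTINGS$scenarios$S1$mean
```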
## Table 1 Results
### Scenario S0
This is the _null_ setting.
```{r}
scenario <- LLL.SETTINGS$scenarios$S0
designParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters)
print(designA)
```
```{r}
result <- designA$explore(numberOfSimulations = 50, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S1
```{r}
scenario <- LLL.SETTINGS$scenarios$S1
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S2
```{r}
scenario <- LLL.SETTINGS$scenarios$S2
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S3
```{r}
scenario <- LLL.SETTINGS$scenarios$S3
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S4
```{r}
scenario <- LLL.SETTINGS$scenarios$S4
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S5
```{r}
scenario <- LLL.SETTINGS$scenarios$S5
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S6
```{r}
scenario <- LLL.SETTINGS$scenarios$S6
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S7
```{r}
scenario <- LLL.SETTINGS$scenarios$S7
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S8
```{r}
scenario <- LLL.SETTINGS$scenarios$S8
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S9
```{r}
scenario <- LLL.SETTINGS$scenarios$S9
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S10
```{r}
scenario <- LLL.SETTINGS$scenarios$S10
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
## Table 2 Results
### Scenario S0
```{r}
scenario <- LLL.SETTINGS$scenarios$S0
designParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters)
print(designA)
```
```{r}
result <- designA$explore(numberOfSimulations = 50, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S1
```{r}
scenario <- LLL.SETTINGS$scenarios$S1
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S2
```{r}
scenario <- LLL.SETTINGS$scenarios$S2
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S3
```{r}
scenario <- LLL.SETTINGS$scenarios$S3
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S4
```{r}
scenario <- LLL.SETTINGS$scenarios$S4
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S5
```{r}
scenario <- LLL.SETTINGS$scenarios$S5
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S6
```{r}
scenario <- LLL.SETTINGS$scenarios$S6
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S7
```{r}
scenario <- LLL.SETTINGS$scenarios$S7
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S8
```{r}
scenario <- LLL.SETTINGS$scenarios$S8
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S9
```{r}
scenario <- LLL.SETTINGS$scenarios$S9
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S10
```{r}
scenario <- LLL.SETTINGS$scenarios$S10
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
## References
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/inst/doc/ASSISTant.Rmd |
## ----echo=F-------------------------------------------------------------------
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
## -----------------------------------------------------------------------------
library(ASSISTant)
null.uniform <- rep(1, 7L) ## uniform on 7 support points
hourglass <- c(1, 2, 2, 1, 2, 2, 1)
inverted.hourglass <- c(2, 1, 1, 2, 1, 1, 2)
bottom.heavy <- c(2, 2, 2, 1, 1, 1, 1)
bottom.heavier <- c(3, 3, 2, 2, 1, 1, 1)
top.heavy <- c(1, 1, 1, 1, 2, 2, 2)
top.heavier <- c(1, 1, 1, 2, 2, 3, 3)
## -----------------------------------------------------------------------------
ctlDist <- null.uniform
trtDist <- cbind(null.uniform, null.uniform, null.uniform,
hourglass, hourglass, hourglass)
##d <- generateDiscreteRankinScores(rep(1, 6), 10, ctlDist, trtDist)
## -----------------------------------------------------------------------------
data(LLL.SETTINGS)
designParameters <- list(prevalence = rep(1/6, 6),
ctlDist = ctlDist,
trtDist = trtDist)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters, discreteData = TRUE)
print(designA)
## -----------------------------------------------------------------------------
result <- designA$explore(numberOfSimulations = 5000, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/inst/doc/Rankin.R |
---
title: "Using Discrete Rankin Scores"
author: "Tze Leung Lai, Philip W. Lavori, Olivia Liao, Ka Wai Tsang and Balasubramanian Narasimhan"
date: '`r Sys.Date()`'
bibliography: assistant.bib
output:
html_document:
theme: cerulean
toc: yes
toc_depth: 2
vignette: >
%\VignetteIndexEntry{Using Discrete Rankin Scores}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r echo=F}
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
```
## Introduction
We simulate data from a discrete distribution for the Rankin scores,
which are ordinal integers from 0 to 6 in the following
simulations. So we define a few scenarios.
```{r}
library(ASSISTant)
null.uniform <- rep(1, 7L) ## uniform on 7 support points
hourglass <- c(1, 2, 2, 1, 2, 2, 1)
inverted.hourglass <- c(2, 1, 1, 2, 1, 1, 2)
bottom.heavy <- c(2, 2, 2, 1, 1, 1, 1)
bottom.heavier <- c(3, 3, 2, 2, 1, 1, 1)
top.heavy <- c(1, 1, 1, 1, 2, 2, 2)
top.heavier <- c(1, 1, 1, 2, 2, 3, 3)
```
```{r}
ctlDist <- null.uniform
trtDist <- cbind(null.uniform, null.uniform, null.uniform,
hourglass, hourglass, hourglass)
##d <- generateDiscreteRankinScores(rep(1, 6), 10, ctlDist, trtDist)
```
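As a quick sanity check of this setup, we can draw a tiny sample directly
(illustrative only; the draw is random):
```{r}
generateDiscreteData(prevalence = rep(1, 6), N = 8,
                     ctlDist = ctlDist, trtDist = trtDist)
```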
### Scenario S0
This is the _null_ setting.
```{r}
data(LLL.SETTINGS)
designParameters <- list(prevalence = rep(1/6, 6),
ctlDist = ctlDist,
trtDist = trtDist)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters, discreteData = TRUE)
print(designA)
```
```{r}
result <- designA$explore(numberOfSimulations = 5000, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/inst/doc/Rankin.Rmd |
## ----echo=F-------------------------------------------------------------------
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
## -----------------------------------------------------------------------------
library(ASSISTant)
##Fix randomization vector N, errors, eps
trialParameters <- list(N = c(200, 340, 476), type1Error = 0.025,
eps = 1/2, type2Error = 0.1)
## -----------------------------------------------------------------------------
designParameters <- list(
nul0 = list(prevalence = rep(1/6, 6), mean = matrix(0, 2, 6),
sd = matrix(1, 2, 6)),
alt1 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.4, 0.3, 0, 0, 0)),
sd = matrix(1, 2, 6)),
alt2 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.5, 0, 0, 0, 0)),
sd = matrix(1,2, 6)),
alt3 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.36, 6)),
sd = matrix(1,2, 6)),
alt4 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.30, 6)),
sd = matrix(1,2, 6)),
alt5 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.4, 0.3, 0.2, 0, 0, 0)),
sd = matrix(1,2, 6)),
alt6 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.5, 0.3, 0.3, 0.1, 0.1)),
sd = matrix(1,2, 6))
)
## -----------------------------------------------------------------------------
defuse3 <- DEFUSE3Design$new(trialParameters = trialParameters,
numberOfSimulations = 500,
designParameters = designParameters$nul0,
showProgress = FALSE)
print(defuse3)
## -----------------------------------------------------------------------------
result <- defuse3$explore(numberOfSimulations = 500,
showProgress = FALSE,
rngSeed = 28912)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
## -----------------------------------------------------------------------------
result1 <- defuse3$explore(numberOfSimulations = 500,
trueParameters = designParameters$alt1,
showProgress = FALSE,
rngSeed = 737218)
analysis1 <- defuse3$analyze(result1)
print(defuse3$summary(analysis1))
## -----------------------------------------------------------------------------
result2 <- defuse3$explore(numberOfSimulations = 500,
trueParameters = designParameters$alt2,
showProgress = FALSE,
rngSeed = 928812)
analysis2 <- defuse3$analyze(result2)
print(defuse3$summary(analysis2))
## -----------------------------------------------------------------------------
null.uniform <- rep(1, 7L) ## uniform on 7 support points
hourglass <- c(1, 2, 2, 1, 2, 2, 1)
inverted.hourglass <- c(2, 1, 1, 2, 1, 1, 2)
bottom.heavy <- c(2, 2, 2, 1, 1, 1, 1)
bottom.heavier <- c(3, 3, 2, 2, 1, 1, 1)
bottom.loaded <- c(4, 4, 3, 3, 2, 1, 1)
top.heavy <- c(1, 1, 1, 1, 2, 2, 2)
top.heavier <- c(1, 1, 1, 2, 2, 3, 3)
top.loaded <- c(1, 1, 2, 3, 3, 4, 4)
## -----------------------------------------------------------------------------
knitr::kable(
sapply(list(null = null.uniform,
hourglass = hourglass,
inv.hourglass = inverted.hourglass,
bot.heavy = bottom.heavy,
bot.heavier = bottom.heavier,
bot.loaded = bottom.loaded,
top.heavy = top.heavy,
top.heavier = top.heavier,
top.loaded = top.loaded),
computeMeanAndSD)
)
## -----------------------------------------------------------------------------
designParameters <- list(
nul0 = list(prevalence = rep(1, 2),
ctlDist = null.uniform,
trtDist = cbind(null.uniform,
null.uniform)),
alt1 = list(prevalence = rep(1, 2),
ctlDist = null.uniform,
trtDist = cbind(top.loaded,
null.uniform)),
alt2 = list(prevalence = rep(1, 2),
ctlDist = null.uniform,
trtDist = cbind(null.uniform,
top.loaded))
)
## -----------------------------------------------------------------------------
discDefuse3 <- DEFUSE3Design$new(trialParameters = trialParameters,
numberOfSimulations = 5000,
discreteData = TRUE,
designParameters = designParameters$nul0,
showProgress = FALSE)
print(discDefuse3)
## -----------------------------------------------------------------------------
result <- discDefuse3$explore(numberOfSimulations = 50,
showProgress = FALSE,
rngSeed = 3783)
analysis <- discDefuse3$analyze(result)
print(discDefuse3$summary(analysis))
## -----------------------------------------------------------------------------
result1 <- discDefuse3$explore(numberOfSimulations = 50,
trueParameters = designParameters$alt1,
showProgress = FALSE,
rngSeed = 28912)
analysis1 <- discDefuse3$analyze(result1)
print(discDefuse3$summary(analysis1))
## -----------------------------------------------------------------------------
result2 <- discDefuse3$explore(numberOfSimulations = 50,
trueParameters = designParameters$alt2,
showProgress = FALSE,
rngSeed = 931)
analysis2 <- discDefuse3$analyze(result2)
print(discDefuse3$summary(analysis2))
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/inst/doc/defuse3.R |
---
title: "Design of the DEFUSE3 Trial"
author: "Tze Leung Lai, Philip W. Lavori, Olivia Liao, Ka Wai Tsang and Balasubramanian Narasimhan"
date: '`r Sys.Date()`'
bibliography: assistant.bib
output:
html_document:
theme: cerulean
toc: yes
toc_depth: 2
vignette: >
%\VignetteIndexEntry{Design of the DEFUSE3 Trial}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r echo=F}
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
```
## Continuous Rankin Simulations
Here, we present the calculations for the initial design of the
DEFUSE3 trial based on [@Lai2014191] and [@Lai201593]. The trial
parameters are fixed as follows.
```{r}
library(ASSISTant)
##Fix randomization vector N, errors, eps
trialParameters <- list(N = c(200, 340, 476), type1Error = 0.025,
eps = 1/2, type2Error = 0.1)
```
The design parameters are the following for various scenarios.
```{r}
designParameters <- list(
nul0 = list(prevalence = rep(1/6, 6), mean = matrix(0, 2, 6),
sd = matrix(1, 2, 6)),
alt1 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.4, 0.3, 0, 0, 0)),
sd = matrix(1, 2, 6)),
alt2 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.5, 0, 0, 0, 0)),
sd = matrix(1,2, 6)),
alt3 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.36, 6)),
sd = matrix(1,2, 6)),
alt4 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.30, 6)),
sd = matrix(1,2, 6)),
alt5 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.4, 0.3, 0.2, 0, 0, 0)),
sd = matrix(1,2, 6)),
alt6 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.5, 0.3, 0.3, 0.1, 0.1)),
sd = matrix(1,2, 6))
)
```
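A small draw under `alt1` shows the shape of the simulated data
(illustrative only; the draw is random):
```{r}
generateNormalData(prevalence = designParameters$alt1$prevalence,
                   N = 6,
                   mean = designParameters$alt1$mean,
                   sd = designParameters$alt1$sd)
```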
### The NULL Scenario
```{r}
defuse3 <- DEFUSE3Design$new(trialParameters = trialParameters,
numberOfSimulations = 500,
designParameters = designParameters$nul0,
showProgress = FALSE)
print(defuse3)
```
```{r}
result <- defuse3$explore(numberOfSimulations = 500,
showProgress = FALSE,
rngSeed = 28912)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
```
### The ALT1 Scenario
```{r}
result1 <- defuse3$explore(numberOfSimulations = 500,
trueParameters = designParameters$alt1,
showProgress = FALSE,
rngSeed = 737218)
analysis1 <- defuse3$analyze(result1)
print(defuse3$summary(analysis1))
```
### The ALT2 Scenario
```{r}
result2 <- defuse3$explore(numberOfSimulations = 500,
trueParameters = designParameters$alt2,
showProgress = FALSE,
rngSeed = 928812)
analysis2 <- defuse3$analyze(result2)
print(defuse3$summary(analysis2))
```
## Discrete Rankin Simulations
### The Discretized Scenarios
The discretized scenarios are designed to generally mimic the trends
above in the alternatives. However, we have a problem: we cannot
simultaneously match the mean and sd of the alternatives
above. (Actually, we can, but not with Rankin scores 0 through 6. The
software can easily be modified to generate discrete values where the
values are 0 to 6 divided by the standard deviation of the respective
distribution, for example.)
_Also in future versions, I need to allow for more general support
values for the scores, not just 0 through 6. Easy to do, but not done
yet._
Some types of distributions:
```{r}
null.uniform <- rep(1, 7L) ## uniform on 7 support points
hourglass <- c(1, 2, 2, 1, 2, 2, 1)
inverted.hourglass <- c(2, 1, 1, 2, 1, 1, 2)
bottom.heavy <- c(2, 2, 2, 1, 1, 1, 1)
bottom.heavier <- c(3, 3, 2, 2, 1, 1, 1)
bottom.loaded <- c(4, 4, 3, 3, 2, 1, 1)
top.heavy <- c(1, 1, 1, 1, 2, 2, 2)
top.heavier <- c(1, 1, 1, 2, 2, 3, 3)
top.loaded <- c(1, 1, 2, 3, 3, 4, 4)
```
It is instructive to see what the means and standard deviations are.
```{r}
knitr::kable(
sapply(list(null = null.uniform,
hourglass = hourglass,
inv.hourglass = inverted.hourglass,
bot.heavy = bottom.heavy,
bot.heavier = bottom.heavier,
bot.loaded = bottom.loaded,
top.heavy = top.heavy,
top.heavier = top.heavier,
top.loaded = top.loaded),
computeMeanAndSD)
)
```
With this in mind, we can reel off some runs. We use $J = 2$ subgroups
below, as requested.
```{r}
designParameters <- list(
nul0 = list(prevalence = rep(1, 2),
ctlDist = null.uniform,
trtDist = cbind(null.uniform,
null.uniform)),
alt1 = list(prevalence = rep(1, 2),
ctlDist = null.uniform,
trtDist = cbind(top.loaded,
null.uniform)),
alt2 = list(prevalence = rep(1, 2),
ctlDist = null.uniform,
trtDist = cbind(null.uniform,
top.loaded))
)
```
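Similarly, a small draw from the discrete `alt1` configuration can be
inspected directly (illustrative only; the draw is random):
```{r}
generateDiscreteData(prevalence = designParameters$alt1$prevalence,
                     N = 8,
                     ctlDist = designParameters$alt1$ctlDist,
                     trtDist = designParameters$alt1$trtDist)
```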
### The NULL Scenario
```{r}
discDefuse3 <- DEFUSE3Design$new(trialParameters = trialParameters,
numberOfSimulations = 5000,
discreteData = TRUE,
designParameters = designParameters$nul0,
showProgress = FALSE)
print(discDefuse3)
```
```{r}
result <- discDefuse3$explore(numberOfSimulations = 50,
showProgress = FALSE,
rngSeed = 3783)
analysis <- discDefuse3$analyze(result)
print(discDefuse3$summary(analysis))
```
### The ALT1 Scenario
```{r}
result1 <- discDefuse3$explore(numberOfSimulations = 50,
trueParameters = designParameters$alt1,
showProgress = FALSE,
rngSeed = 28912)
analysis1 <- discDefuse3$analyze(result1)
print(discDefuse3$summary(analysis1))
```
### The ALT2 Scenario
```{r}
result2 <- discDefuse3$explore(numberOfSimulations = 50,
trueParameters = designParameters$alt2,
showProgress = FALSE,
rngSeed = 931)
analysis2 <- discDefuse3$analyze(result2)
print(discDefuse3$summary(analysis2))
```
## References
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/inst/doc/defuse3.Rmd |
## ----echo=F--------------------------------------------------------------
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
## ------------------------------------------------------------------------
library(ASSISTant)
data(LLL.SETTINGS)
str(LLL.SETTINGS)
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S0
designParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters)
print(designA)
## ------------------------------------------------------------------------
result <- designA$explore(numberOfSimulations = 5000, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S1
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S2
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S3
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S4
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S5
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S6
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S7
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S8
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S9
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S10
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S0
designParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters)
print(designA)
## ------------------------------------------------------------------------
result <- designA$explore(numberOfSimulations = 5000, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S1
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S2
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S3
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S4
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S5
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S6
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S7
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S8
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S9
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
## ------------------------------------------------------------------------
scenario <- LLL.SETTINGS$scenarios$S10
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 5000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/inst/full_doc/ASSISTant.R |
---
title: "Adaptive Subgroup Selection in Sequential Trials"
author: "Tze Leung Lai, Philip W. Lavori, Olivia Liao, Ka Wai Tsang
and Balasubramanian Narasimhan"
date: '`r Sys.Date()`'
bibliography: assistant.bib
output:
html_document:
theme: cerulean
toc: yes
toc_depth: 2
vignette: >
%\VignetteIndexEntry{Adaptive Subgroup Selection in Sequential Trials}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r echo=F}
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
```
## Introduction
`ASSISTant` is an R package for **A**daptive **S**ubgroup
**S**election **I**n **S**equential **T**rials. This vignette
reproduces all the simulations in the original paper of Lai, Lavori
and Liao [-@Lai2014191].
```{r}
library(ASSISTant)
data(LLL.SETTINGS)
str(LLL.SETTINGS)
```
The `LLL.SETTINGS` list contains all the scenarios used for the null
and alternative cases in Lai, Lavori and Liao [-@Lai2014191].
## Table 1 Results
The results shown here should closely approximate those in Table 1 of
Lai, Lavori and Liao [-@Lai2014191].
### Scenario S0
This is the _null_ setting.
```{r}
scenario <- LLL.SETTINGS$scenarios$S0
designParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters)
print(designA)
```
```{r}
result <- designA$explore(numberOfSimulations = 25000, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S1
```{r}
scenario <- LLL.SETTINGS$scenarios$S1
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S2
```{r}
scenario <- LLL.SETTINGS$scenarios$S2
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S3
```{r}
scenario <- LLL.SETTINGS$scenarios$S3
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S4
```{r}
scenario <- LLL.SETTINGS$scenarios$S4
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S5
```{r}
scenario <- LLL.SETTINGS$scenarios$S5
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S6
```{r}
scenario <- LLL.SETTINGS$scenarios$S6
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S7
```{r}
scenario <- LLL.SETTINGS$scenarios$S7
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S8
```{r}
scenario <- LLL.SETTINGS$scenarios$S8
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S9
```{r}
scenario <- LLL.SETTINGS$scenarios$S9
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S10
```{r}
scenario <- LLL.SETTINGS$scenarios$S10
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
## Table 2 Results
### Scenario S0
```{r}
scenario <- LLL.SETTINGS$scenarios$S0
designParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters)
print(designA)
```
```{r}
result <- designA$explore(numberOfSimulations = 25000, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S1
```{r}
scenario <- LLL.SETTINGS$scenarios$S1
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S2
```{r}
scenario <- LLL.SETTINGS$scenarios$S2
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S3
```{r}
scenario <- LLL.SETTINGS$scenarios$S3
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S4
```{r}
scenario <- LLL.SETTINGS$scenarios$S4
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S5
```{r}
scenario <- LLL.SETTINGS$scenarios$S5
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S6
```{r}
scenario <- LLL.SETTINGS$scenarios$S6
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S7
```{r}
scenario <- LLL.SETTINGS$scenarios$S7
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S8
```{r}
scenario <- LLL.SETTINGS$scenarios$S8
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S9
```{r}
scenario <- LLL.SETTINGS$scenarios$S9
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S10
```{r}
scenario <- LLL.SETTINGS$scenarios$S10
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 25000, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
## References
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/inst/full_doc/ASSISTant.Rmd |
## ----echo=F--------------------------------------------------------------
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
## ------------------------------------------------------------------------
library(ASSISTant)
## Various settings
settings <- list(setting1 = list(N = c(250, 400, 550), type1Error = 0.025,
eps = 1/2, type2Error = 0.1),
setting2 = list(N = c(250, 400, 550), type1Error = 0.05,
eps = 1/2, type2Error = 0.1),
setting3 = list(N = c(250, 400, 550), type1Error = 0.1,
eps = 1/2, type2Error = 0.2),
setting4 = list(N = c(250, 400, 550), type1Error = 0.2,
eps = 1/2, type2Error = 0.3))
## ------------------------------------------------------------------------
scenarios <- list(
scenario0 = list(prevalence = rep(1/6, 6), mean = matrix(0, 2, 6),
sd = matrix(1, 2, 6)),
scenario1 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.4, 0.3, 0, 0, 0)),
sd = matrix(1, 2, 6)),
scenario2 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.3, 0.3, 0, 0, 0, 0)),
sd = matrix(1, 2, 6)),
scenario3 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.3, 6)),
sd = matrix(1, 2, 6)),
scenario4 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.4, 0.3, 0.2, 0, 0, 0)),
sd = matrix(1, 2, 6)),
scenario5 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.5, 0.3, 0.3, 0.1, 0.1)),
sd = matrix(1, 2, 6)),
scenario6 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.6, 0.6, -0.3, -0.3, -0.3, -0.3)),
sd = matrix(1, 2, 6)),
scenario7 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.01, 6)),
sd = matrix(1, 2, 6)), ## very small effect
scenario8 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.3, 6)),
sd = matrix(1, 2, 6)), ## moderate negative effect
scenario9 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.9, 0.3, 0, -0.1, -0.4, -0.7)),
sd = matrix(1, 2, 6)), ## single strong effect with negatives thrown in
scenario10 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(-0.01, 6)),
sd = matrix(1, 2, 6)) ## very small negative effect
)
## ------------------------------------------------------------------------
rngSeed <- 2128783
set.seed(rngSeed)
for (setting in names(settings)) {
trialParameters <- settings[[setting]]
for (scenario in names(scenarios)) {
designParameters <- scenarios[[scenario]]
cat("##############################\n")
print(sprintf("%s/%s", setting, scenario))
cat("##############################\n")
designA <- ASSISTDesign$new(trialParameters = trialParameters,
designParameters = designParameters)
print(designA)
result <- designA$explore(numberOfSimulations = 5000,
rngSeed = rngSeed,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
        rngSeed <- floor(100000 * runif(1))  ## draw a fresh integer seed for the next scenario
}
}
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/inst/full_doc/coverage.R |
---
title: "Confidence Interval Simulations"
author: "Tze Leung Lai, Philip W. Lavori, Olivia Liao, Ka Wai Tsang
and Balasubramanian Narasimhan"
date: '`r Sys.Date()`'
bibliography: assistant.bib
output:
html_document:
theme: cerulean
toc: yes
toc_depth: 2
vignette: >
  %\VignetteIndexEntry{Confidence Interval Simulations}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r echo=F}
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
```
## Introduction
Here, we present the calculations for assessing the coverage
probabilities of the constructed confidence intervals under various
scenarios.
```{r}
library(ASSISTant)
## Various settings
settings <- list(setting1 = list(N = c(250, 400, 550), type1Error = 0.025,
eps = 1/2, type2Error = 0.1),
setting2 = list(N = c(250, 400, 550), type1Error = 0.05,
eps = 1/2, type2Error = 0.1),
setting3 = list(N = c(250, 400, 550), type1Error = 0.1,
eps = 1/2, type2Error = 0.2),
setting4 = list(N = c(250, 400, 550), type1Error = 0.2,
eps = 1/2, type2Error = 0.3))
```
The design parameters are the following for various scenarios.
```{r}
scenarios <- list(
scenario0 = list(prevalence = rep(1/6, 6), mean = matrix(0, 2, 6),
sd = matrix(1, 2, 6)),
scenario1 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.4, 0.3, 0, 0, 0)),
sd = matrix(1, 2, 6)),
scenario2 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.3, 0.3, 0, 0, 0, 0)),
sd = matrix(1, 2, 6)),
scenario3 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.3, 6)),
sd = matrix(1, 2, 6)),
scenario4 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.4, 0.3, 0.2, 0, 0, 0)),
sd = matrix(1, 2, 6)),
scenario5 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.5, 0.3, 0.3, 0.1, 0.1)),
sd = matrix(1, 2, 6)),
scenario6 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.6, 0.6, -0.3, -0.3, -0.3, -0.3)),
sd = matrix(1, 2, 6)),
scenario7 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.01, 6)),
sd = matrix(1, 2, 6)), ## very small effect
scenario8 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.3, 6)),
sd = matrix(1, 2, 6)), ## moderate negative effect
scenario9 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.9, 0.3, 0, -0.1, -0.4, -0.7)),
sd = matrix(1, 2, 6)), ## single strong effect with negatives thrown in
scenario10 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(-0.01, 6)),
sd = matrix(1, 2, 6)) ## very small negative effect
)
```
## The Results
```{r}
rngSeed <- 2128783
set.seed(rngSeed)
for (setting in names(settings)) {
trialParameters <- settings[[setting]]
for (scenario in names(scenarios)) {
designParameters <- scenarios[[scenario]]
cat("##############################\n")
print(sprintf("%s/%s", setting, scenario))
cat("##############################\n")
designA <- ASSISTDesign$new(trialParameters = trialParameters,
designParameters = designParameters)
print(designA)
result <- designA$explore(numberOfSimulations = 5000,
rngSeed = rngSeed,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
        rngSeed <- floor(100000 * runif(1))  ## draw a fresh integer seed for the next scenario
}
}
```
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/inst/full_doc/coverage.Rmd |
## ----echo=F--------------------------------------------------------------
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
## ------------------------------------------------------------------------
library(ASSISTant)
##Fix randomization vector N, errors, eps
trialParameters <- list(N = c(200, 340, 476), type1Error = 0.025,
eps = 1/2, type2Error = 0.1)
## ------------------------------------------------------------------------
designParameters <- list(
nul0 = list(prevalence = rep(1/6, 6), mean = matrix(0, 2, 6),
sd = matrix(1, 2, 6)),
alt1 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.4, 0.3, 0, 0, 0)),
sd = matrix(1, 2, 6)),
alt2 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.5, 0, 0, 0, 0)),
sd = matrix(1,2, 6)),
alt3 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.36, 6)),
sd = matrix(1,2, 6)),
alt4 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.30, 6)),
sd = matrix(1,2, 6)),
alt5 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.4, 0.3, 0.2, 0, 0, 0)),
sd = matrix(1,2, 6)),
alt6 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.5, 0.3, 0.3, 0.1, 0.1)),
sd = matrix(1,2, 6))
)
## ------------------------------------------------------------------------
defuse3 <- DEFUSE3Design$new(trialParameters = trialParameters,
numberOfSimulations = 5000,
designParameters = designParameters$nul0,
showProgress = FALSE)
print(defuse3)
## ------------------------------------------------------------------------
result <- defuse3$explore(numberOfSimulations = 5000,
rngSeed = 283768,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
## ------------------------------------------------------------------------
result <- defuse3$explore(numberOfSimulations = 5000,
rngSeed = 873782,
trueParameters = designParameters$alt1,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
## ------------------------------------------------------------------------
result <- defuse3$explore(numberOfSimulations = 5000,
rngSeed = 45242,
trueParameters = designParameters$alt2,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
## ------------------------------------------------------------------------
result <- defuse3$explore(numberOfSimulations = 5000,
rngSeed = 833722,
trueParameters = designParameters$alt3,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
## ------------------------------------------------------------------------
result <- defuse3$explore(numberOfSimulations = 5000,
rngSeed = 434272,
trueParameters = designParameters$alt4,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
## ------------------------------------------------------------------------
result <- defuse3$explore(numberOfSimulations = 5000,
rngSeed = 132323,
trueParameters = designParameters$alt5,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
## ------------------------------------------------------------------------
result <- defuse3$explore(numberOfSimulations = 5000,
rngSeed = 653221,
trueParameters = designParameters$alt6,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/inst/full_doc/defuse3.R |
---
title: "Design of the DEFUSE3 Trial"
author: "Tze Leung Lai, Philip W. Lavori, Olivia Liao, Ka Wai Tsang
and Balasubramanian Narasimhan"
date: '`r Sys.Date()`'
bibliography: assistant.bib
output:
html_document:
theme: cerulean
toc: yes
toc_depth: 2
vignette: >
%\VignetteIndexEntry{Design of the DEFUSE3 Trial}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r echo=F}
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
```
## Introduction
Here, we present the calculations for the initial design of the
DEFUSE3 trial based on [@Lai2014191] and [@Lai201593]. The trial
parameters are fixed as follows.
```{r}
library(ASSISTant)
##Fix randomization vector N, errors, eps
trialParameters <- list(N = c(200, 340, 476), type1Error = 0.025,
eps = 1/2, type2Error = 0.1)
```
The design parameters are the following for various scenarios.
```{r}
designParameters <- list(
nul0 = list(prevalence = rep(1/6, 6), mean = matrix(0, 2, 6),
sd = matrix(1, 2, 6)),
alt1 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.4, 0.3, 0, 0, 0)),
sd = matrix(1, 2, 6)),
alt2 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.5, 0, 0, 0, 0)),
sd = matrix(1,2, 6)),
alt3 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.36, 6)),
sd = matrix(1,2, 6)),
alt4 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.30, 6)),
sd = matrix(1,2, 6)),
alt5 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.4, 0.3, 0.2, 0, 0, 0)),
sd = matrix(1,2, 6)),
alt6 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.5, 0.3, 0.3, 0.1, 0.1)),
sd = matrix(1,2, 6))
)
```
## The NULL Scenario
```{r}
defuse3 <- DEFUSE3Design$new(trialParameters = trialParameters,
numberOfSimulations = 25000,
designParameters = designParameters$nul0,
showProgress = FALSE)
print(defuse3)
```
```{r}
result <- defuse3$explore(numberOfSimulations = 25000,
rngSeed = 283768,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
```
## The ALT1 Scenario
```{r}
result <- defuse3$explore(numberOfSimulations = 25000,
rngSeed = 873782,
trueParameters = designParameters$alt1,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
```
## The ALT2 Scenario
```{r}
result <- defuse3$explore(numberOfSimulations = 25000,
rngSeed = 45242,
trueParameters = designParameters$alt2,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
```
## The ALT3 Scenario
```{r}
result <- defuse3$explore(numberOfSimulations = 25000,
rngSeed = 833722,
trueParameters = designParameters$alt3,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
```
## The ALT4 Scenario
```{r}
result <- defuse3$explore(numberOfSimulations = 25000,
rngSeed = 434272,
trueParameters = designParameters$alt4,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
```
## The ALT5 Scenario
```{r}
result <- defuse3$explore(numberOfSimulations = 25000,
rngSeed = 132323,
trueParameters = designParameters$alt5,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
```
## The ALT6 Scenario
```{r}
result <- defuse3$explore(numberOfSimulations = 25000,
rngSeed = 653221,
trueParameters = designParameters$alt6,
showProgress = FALSE)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
```
## References
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/inst/full_doc/defuse3.Rmd |
---
title: "Adaptive Subgroup Selection in Sequential Trials"
author: "Tze Leung Lai, Philip W. Lavori, Olivia Liao, Ka Wai Tsang and Balasubramanian Narasimhan"
date: '`r Sys.Date()`'
bibliography: assistant.bib
output:
html_document:
theme: cerulean
toc: yes
toc_depth: 2
vignette: >
%\VignetteIndexEntry{Adaptive Subgroup Selection in Sequential Trials}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r echo=F}
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
```
## Introduction
`ASSISTant` is an R package for **A**daptive **S**ubgroup
**S**election **I**n **S**equential **T**rials. This vignette
reproduces all the simulations in the original paper of Lai, Lavori
and Liao [-@Lai2014191].
_NOTE_ The number of simulations has been drastically reduced in this
vignette in order to avoid taxing CRAN servers. The `full_doc` sources
contain the complete sources and output; see files in the directory
```{r, eval = FALSE}
system.file("full_doc", package="ASSISTant")
```
```{r}
library(ASSISTant)
data(LLL.SETTINGS)
str(LLL.SETTINGS)
```
The `LLL.SETTINGS` list contains all the scenarios described in the
paper.
## Table 1 Results
### Scenario S0
This is the _null_ setting.
```{r}
scenario <- LLL.SETTINGS$scenarios$S0
designParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters)
print(designA)
```
```{r}
result <- designA$explore(numberOfSimulations = 50, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S1
```{r}
scenario <- LLL.SETTINGS$scenarios$S1
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S2
```{r}
scenario <- LLL.SETTINGS$scenarios$S2
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S3
```{r}
scenario <- LLL.SETTINGS$scenarios$S3
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S4
```{r}
scenario <- LLL.SETTINGS$scenarios$S4
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S5
```{r}
scenario <- LLL.SETTINGS$scenarios$S5
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S6
```{r}
scenario <- LLL.SETTINGS$scenarios$S6
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S7
```{r}
scenario <- LLL.SETTINGS$scenarios$S7
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S8
```{r}
scenario <- LLL.SETTINGS$scenarios$S8
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S9
```{r}
scenario <- LLL.SETTINGS$scenarios$S9
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S10
```{r}
scenario <- LLL.SETTINGS$scenarios$S10
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table1,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50,
trueParameters = trueParameters, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
## Table 2 Results
### Scenario S0
```{r}
scenario <- LLL.SETTINGS$scenarios$S0
designParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters)
print(designA)
```
```{r}
result <- designA$explore(numberOfSimulations = 50, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S1
```{r}
scenario <- LLL.SETTINGS$scenarios$S1
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S2
```{r}
scenario <- LLL.SETTINGS$scenarios$S2
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S3
```{r}
scenario <- LLL.SETTINGS$scenarios$S3
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S4
```{r}
scenario <- LLL.SETTINGS$scenarios$S4
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S5
```{r}
scenario <- LLL.SETTINGS$scenarios$S5
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S6
```{r}
scenario <- LLL.SETTINGS$scenarios$S6
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S7
```{r}
scenario <- LLL.SETTINGS$scenarios$S7
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S8
```{r}
scenario <- LLL.SETTINGS$scenarios$S8
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S9
```{r}
scenario <- LLL.SETTINGS$scenarios$S9
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
### Alternative Scenario S10
```{r}
scenario <- LLL.SETTINGS$scenarios$S10
trueParameters <- list(prevalence = LLL.SETTINGS$prevalences$table2,
mean = scenario$mean,
sd = scenario$sd)
result <- designA$explore(numberOfSimulations = 50, trueParameters = trueParameters,
showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
## References
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/vignettes/ASSISTant.Rmd |
---
title: "Using Discrete Rankin Scores"
author: "Tze Leung Lai, Philip W. Lavori, Olivia Liao, Ka Wai Tsang and Balasubramanian Narasimhan"
date: '`r Sys.Date()`'
bibliography: assistant.bib
output:
html_document:
theme: cerulean
toc: yes
toc_depth: 2
vignette: >
%\VignetteIndexEntry{Using Discrete Rankin Scores}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r echo=F}
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
```
## Introduction
We simulate data from a discrete distribution for the Rankin scores,
which are ordinal integers from 0 to 6 in the following
simulations. So we define a few scenarios.
```{r}
library(ASSISTant)
null.uniform <- rep(1, 7L) ## uniform on 7 support points
hourglass <- c(1, 2, 2, 1, 2, 2, 1)
inverted.hourglass <- c(2, 1, 1, 2, 1, 1, 2)
bottom.heavy <- c(2, 2, 2, 1, 1, 1, 1)
bottom.heavier <- c(3, 3, 2, 2, 1, 1, 1)
top.heavy <- c(1, 1, 1, 1, 2, 2, 2)
top.heavier <- c(1, 1, 1, 2, 2, 3, 3)
```
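Each vector above is an unnormalized weight over the seven Rankin scores
0 through 6; dividing by its sum gives the probability placed on each
score. As a quick sanity check (`computeMeanAndSD` is exported by the
package):
```{r}
round(hourglass / sum(hourglass), 3)
computeMeanAndSD(hourglass)
```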
```{r}
ctlDist <- null.uniform
trtDist <- cbind(null.uniform, null.uniform, null.uniform,
hourglass, hourglass, hourglass)
##d <- generateDiscreteRankinScores(rep(1, 6), 10, ctlDist, trtDist)
```
### Scenario S0
This is the _null_ setting.
```{r}
data(LLL.SETTINGS)
designParameters <- list(prevalence = rep(1/6, 6),
ctlDist = ctlDist,
trtDist = trtDist)
designA <- ASSISTDesign$new(trialParameters = LLL.SETTINGS$trialParameters,
designParameters = designParameters, discreteData = TRUE)
print(designA)
```
```{r}
result <- designA$explore(numberOfSimulations = 5000, showProgress = FALSE)
analysis <- designA$analyze(result)
print(designA$summary(analysis))
```
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/vignettes/Rankin.Rmd |
---
title: "Design of the DEFUSE3 Trial"
author: "Tze Leung Lai, Philip W. Lavori, Olivia Liao, Ka Wai Tsang and Balasubramanian Narasimhan"
date: '`r Sys.Date()`'
bibliography: assistant.bib
output:
html_document:
theme: cerulean
toc: yes
toc_depth: 2
vignette: >
%\VignetteIndexEntry{Design of the DEFUSE3 Trial}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r echo=F}
### get knitr just the way we like it
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
error = FALSE,
tidy = FALSE,
cache = FALSE
)
```
## Continuous Rankin Simulations
Here, we present the calculations for the initial design of the
DEFUSE3 trial based on [@Lai2014191] and [@Lai201593]. The trial
parameters are fixed as follows.
```{r}
library(ASSISTant)
##Fix randomization vector N, errors, eps
trialParameters <- list(N = c(200, 340, 476), type1Error = 0.025,
eps = 1/2, type2Error = 0.1)
```
The design parameters are the following for various scenarios.
```{r}
designParameters <- list(
nul0 = list(prevalence = rep(1/6, 6), mean = matrix(0, 2, 6),
sd = matrix(1, 2, 6)),
alt1 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.4, 0.3, 0, 0, 0)),
sd = matrix(1, 2, 6)),
alt2 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.5, 0, 0, 0, 0)),
sd = matrix(1,2, 6)),
alt3 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.36, 6)),
sd = matrix(1,2, 6)),
alt4 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6), rep(0.30, 6)),
sd = matrix(1,2, 6)),
alt5 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.4, 0.3, 0.2, 0, 0, 0)),
sd = matrix(1,2, 6)),
alt6 = list(prevalence = rep(1/6, 6), mean = rbind(rep(0, 6),
c(0.5, 0.5, 0.3, 0.3, 0.1, 0.1)),
sd = matrix(1,2, 6))
)
```
### The NULL Scenario
```{r}
defuse3 <- DEFUSE3Design$new(trialParameters = trialParameters,
numberOfSimulations = 500,
designParameters = designParameters$nul0,
showProgress = FALSE)
print(defuse3)
```
```{r}
result <- defuse3$explore(numberOfSimulations = 500,
showProgress = FALSE,
rngSeed = 28912)
analysis <- defuse3$analyze(result)
print(defuse3$summary(analysis))
```
### The ALT1 Scenario
```{r}
result1 <- defuse3$explore(numberOfSimulations = 500,
trueParameters = designParameters$alt1,
showProgress = FALSE,
rngSeed = 737218)
analysis1 <- defuse3$analyze(result1)
print(defuse3$summary(analysis1))
```
### The ALT2 Scenario
```{r}
result2 <- defuse3$explore(numberOfSimulations = 500,
trueParameters = designParameters$alt2,
showProgress = FALSE,
rngSeed = 928812)
analysis2 <- defuse3$analyze(result2)
print(defuse3$summary(analysis2))
```
## Discrete Rankin Simulations
### The Discretized Scenarios
The discretized scenarios are designed to generally mimic the trends
above in the alternatives. However, we have a problem: we cannot
simultaneously match the mean and sd of the alternatives
above. (Actually, we can, but not with Rankin scores 0 through 6. The
software can easily be modified to generate discrete values where the
values are 0 to 6 divided by the standard deviation of the respective
distribution, for example.)
_Also in future versions, I need to allow for more general support
values for the scores, not just 0 through 6. Easy to do, but not done
yet._
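To make the rescaling idea above concrete, here is a small sketch (not part
of the package, and not used in the simulations below) that divides the
support 0 through 6 by the standard deviation of the discrete distribution
implied by a weight vector, so the rescaled distribution has unit standard
deviation:
```{r}
unit_sd_support <- function(w, support = 0:6) {
  p <- w / sum(w)                       # weights to probabilities
  m <- sum(support * p)                 # mean on the original support
  s <- sqrt(sum((support - m)^2 * p))   # sd on the original support
  support / s                           # rescaled support now has sd = 1
}
unit_sd_support(c(1, 2, 2, 1, 2, 2, 1))
```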
Some types of distributions:
```{r}
null.uniform <- rep(1, 7L) ## uniform on 7 support points
hourglass <- c(1, 2, 2, 1, 2, 2, 1)
inverted.hourglass <- c(2, 1, 1, 2, 1, 1, 2)
bottom.heavy <- c(2, 2, 2, 1, 1, 1, 1)
bottom.heavier <- c(3, 3, 2, 2, 1, 1, 1)
bottom.loaded <- c(4, 4, 3, 3, 2, 1, 1)
top.heavy <- c(1, 1, 1, 1, 2, 2, 2)
top.heavier <- c(1, 1, 1, 2, 2, 3, 3)
top.loaded <- c(1, 1, 2, 3, 3, 4, 4)
```
It is instructive to see what the means and standard deviations are.
```{r}
knitr::kable(
sapply(list(null = null.uniform,
hourglass = hourglass,
inv.hourglass = inverted.hourglass,
bot.heavy = bottom.heavy,
bot.heavier = bottom.heavier,
bot.loaded = bottom.loaded,
top.heavy = top.heavy,
top.heavier = top.heavier,
top.loaded = top.loaded),
computeMeanAndSD)
)
```
With this in mind, we can reel off some runs. Phil, you mentioned you
wanted $J = 2$, which I adhere to, below.
```{r}
designParameters <- list(
nul0 = list(prevalence = rep(1, 2),
ctlDist = null.uniform,
trtDist = cbind(null.uniform,
null.uniform)),
alt1 = list(prevalence = rep(1, 2),
ctlDist = null.uniform,
trtDist = cbind(top.loaded,
null.uniform)),
alt2 = list(prevalence = rep(1, 2),
ctlDist = null.uniform,
trtDist = cbind(null.uniform,
top.loaded))
)
```
### The NULL Scenario
```{r}
discDefuse3 <- DEFUSE3Design$new(trialParameters = trialParameters,
numberOfSimulations = 5000,
discreteData = TRUE,
designParameters = designParameters$nul0,
showProgress = FALSE)
print(discDefuse3)
```
```{r}
result <- discDefuse3$explore(numberOfSimulations = 50,
showProgress = FALSE,
rngSeed = 3783)
analysis <- discDefuse3$analyze(result)
print(discDefuse3$summary(analysis))
```
### The ALT1 Scenario
```{r}
result1 <- discDefuse3$explore(numberOfSimulations = 50,
trueParameters = designParameters$alt1,
showProgress = FALSE,
rngSeed = 28912)
analysis1 <- discDefuse3$analyze(result1)
print(discDefuse3$summary(analysis1))
```
### The ALT2 Scenario
```{r}
result2 <- discDefuse3$explore(numberOfSimulations = 50,
trueParameters = designParameters$alt2,
showProgress = FALSE,
rngSeed = 931)
analysis2 <- discDefuse3$analyze(result2)
print(discDefuse3$summary(analysis2))
```
## References
| /scratch/gouwar.j/cran-all/cranData/ASSISTant/vignettes/defuse3.Rmd |
AST <- function(data.residual, spaceMatrix, par.time=0.5,par.age=1,
weight.coverage=0.9, agecat, minyear,maxyear){
################### err #####
  if( !is.numeric(c(weight.coverage,par.time,par.age,agecat,minyear,maxyear)) ) stop("weight.coverage, par.time, par.age, agecat, minyear, maxyear must be numeric.")
  if( any(c(weight.coverage,par.time,par.age,agecat,minyear,maxyear)<0) ) stop("weight.coverage, par.time, par.age, agecat, minyear, maxyear must be positive numbers")  ## any(), not prod(): a product of two negatives is positive
if( weight.coverage<0 | weight.coverage>1) stop("weight.coverage must be a number between 0 and 1")
if(maxyear<minyear) stop("min year must be lower than max year.")
if(!is.matrix(spaceMatrix)) stop("spaceMatrix must be a matrix format.")
if(!is.data.frame(data.residual)) stop("data.residual must be a data.frame format.")
if( !all( c("age","year","location","residual")%in%colnames(data.residual) ) ) stop("data.residual must contain these variable names: age, year , location, residual ")
if( any(c( !is.numeric(spaceMatrix) , min(spaceMatrix)<0 , max(spaceMatrix)>1 )) ) stop("spaceMatrix must be between 0 and 1 ")
if( is.null(rownames(spaceMatrix) ) ) stop("row names of spaceMatrix is necessary!")
if( any( is.na(
as.numeric(rownames(spaceMatrix))
)
)
)stop("row names of spaceMatrix must be matched with location(number)")
if( c("coverage")%in%colnames(data.residual) ){
t <- data.residual$coverage
if( any(c( !is.numeric(t) , length(unique(t))!=2 , max(t)!=1 , min(t)!=0 )) ) stop("coverage must be a binary variable ")
data.residual <- data.residual[!is.na(data.residual$coverage),]
}else{
data.residual$coverage <- 1
weight.coverage = 1
}
#######################
T=(maxyear-minyear)+1
LOC = as.numeric(rownames(spaceMatrix) )
data.pred = expand.grid(year=minyear:maxyear,age=1:agecat,location=LOC)
data.residual <- data.residual[!is.na(data.residual$year),]
data.residual <- data.residual[!is.na(data.residual$age),]
data.residual <- data.residual[!is.na(data.residual$location),]
data.residual <- data.residual[!is.na(data.residual$residual),]
data.residual <- data.residual[data.residual$age%in%c(1:agecat) & data.residual$location%in%LOC & data.residual$year%in%c(minyear:maxyear) , ]
########################### fun ###########
############age weight
calcage = function (ag)
{
    ## weight between age groups i and j decays exponentially: exp(-par.age * |i - j|)
    z = matrix(NA , nrow=ag , ncol=ag)
A <- as.numeric(rownames(z) <- c(1:ag))
B <- (colnames(z) <- c(1:ag))
for (i in 1:ag)
{
for (j in 1:ag)
{
z[i,j]=1/exp(par.age*(abs(A[i]-B[j])))
}
}
return(z)
}
###############time_weight
calctime = function (T ,minyear,maxyear )
{
y = matrix(NA,nrow=T,ncol=T)
y[1:T,1:T] = matrix(NA,nrow=T,ncol=T)
A <- as.numeric(rownames(y) <- c(minyear:maxyear))
B <- (colnames(y) <- c(minyear:maxyear))
for (i in 1:T)
{
for (j in 1:T)
{
y[i,j]=(1-(abs((A[i])-(B[j]))/T)^par.time)^3
}
}
return (y)
}
###############final weight and rescaling
calcW = function(x,y,z)
{
    ## combine the age (z) and time (y) kernels via a Kronecker product,
    ## rescale each row to sum to one, then expand over the space matrix (x)
    zy = kronecker(z, y)
    weight = apply(zy,1,sum,na.rm=TRUE)  ## TRUE, not T: T holds the number of years here
for (i in 1:nrow(zy))
{
zy[i,]= zy[i,]/weight[i]
}
W = kronecker(x, zy)
return(W)
}
###################weight matrix colnames
calcmatchnames = function(W , Data)
{
colnames(W) = rep (1:ncol(W))
out <- c()
for (i in LOC) for (j in (1:agecat)) for(k in minyear:maxyear) out= c(out,(paste(k,"-",j,"-",i, sep="")))
colnames(W) = out
Data <- Data[order(Data$location, Data$age, Data$year), ]
Data$ID = paste0(Data$year,"-",Data$age,"-",Data$location)
W_Data <- as.matrix( W[, as.character(Data$ID)])
return(W_Data)
}
########################coverage weight
calccove = function(W_Data , Data)
{
data <- Data[order(Data$location, Data$age, Data$year), ]
data$ID = paste0(data$year,data$age,data$location)
data$N = 1:nrow(data)
d = split(data$N, as.factor(data$coverage) )
d1 = as.vector(d$'1')
d2 = as.vector(d$'0')
colnames(W_Data) = rep (1:ncol(W_Data))
W_Data[,d1] = apply(W_Data[,d1], 2, function(x) (x*weight.coverage))
W_Data[,d2] = apply(W_Data[,d2], 2, function(x) (x*(1-weight.coverage)))
w_all = W_Data
return(w_all)
}
########################final rescale
finalw = function(w_all)
{
    weight = apply(w_all,1,sum,na.rm=TRUE)  ## TRUE, not T: T holds the number of years here
for (i in 1:nrow(w_all))
{
w_all[i,]= w_all[i,]/weight[i]
}
fw = w_all
return(fw)
}
#################residual and weight matrix
calcrespred = function (fw , Data)
{
Data <- Data[order(Data$location, Data$age, Data$year), ]
Data$ID = paste0(Data$year,"-",Data$age,"-",Data$location)
resvec = Data[ , c("residual")]
mat.vec <- c(fw %*% resvec)
rownames(fw) = rep (1:nrow(fw))
out.row <- c()
for (i in LOC) for (j in (1:agecat)) for(k in minyear:maxyear) out.row= c(out.row,(paste(k,"-",j,"-",i, sep="")))
rownames(fw) = out.row
mat = cbind(mat.vec , out.row)
mat = as.data.frame(mat)
return(mat)
}
####################
calcpred = function (mat , pData)
{
pData$ID = paste0(pData$year,"-",pData$age,"-",pData$location)
mat$ID = as.character(mat$ID)
colnames(mat)[2] <- "ID"
outdata= merge(mat, pData, by.x= "ID" , by.y="ID" )
return(outdata)
}
################# run #######
timeMat = calctime(T , minyear , maxyear)
ageMat = calcage(agecat)
final_weight = calcW(spaceMatrix ,timeMat , ageMat)
names = calcmatchnames(final_weight ,data.residual)
rm(final_weight)
coverageW = calccove(names,data.residual)
rm(names)
fw = finalw(coverageW)
rm(coverageW)
wmat = calcrespred(fw , data.residual)
rm(fw)
colnames(wmat) <- c("residual_AST","ID")
out = calcpred(wmat , data.pred)
out$residual_AST = as.numeric( as.character(out$residual_AST) )
out = out[order(out$location,out$year,out$age),]
OUT = list()
OUT$adj.res <- out
OUT$Age_weight <- ageMat
OUT$time_weight <- timeMat
return(OUT)
}
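## A minimal usage sketch with hypothetical toy inputs (kept as comments so
## it is not executed at package load). Location codes must match the row
## names of the space matrix; calcSpaceMat() in CalcSpaceMat.R builds that
## matrix from a 0/1 adjacency matrix:
## adj <- matrix(c(0, 1, 1, 0), 2, 2, dimnames = list(c("1", "2"), c("1", "2")))
## spMat <- calcSpaceMat(adj, par.space = 0.9)
## res <- data.frame(year = rep(2000:2004, 4), age = rep(1:2, each = 10),
##                   location = rep(rep(1:2, each = 5), 2),
##                   residual = rnorm(20), coverage = rep(c(1, 0), 10))
## fit <- AST(res, spMat, agecat = 2, minyear = 2000, maxyear = 2004)
## head(fit$adj.res)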
| /scratch/gouwar.j/cran-all/cranData/AST/R/AST.R |
calcSpaceMat<- function (adjacent.mat,par.space=0.9){
if( !is.matrix(adjacent.mat)) stop("space matrix must be a square matrix.")
if(!is.numeric(par.space)) stop("par.space must be a number")
if( par.space<=0 | par.space>1 ) stop("par.space must be a number between 0 and 1.")
if( any(c( !is.numeric(adjacent.mat) , length(table(adjacent.mat)) !=2, max(adjacent.mat)!=1, min(adjacent.mat)!=0 )) ) stop("adjacent.mat must contain 0 or 1 ")
if( is.null(rownames(adjacent.mat) ) ) stop("row names of adjacent.mat is necessary!")
if( any( is.na(
as.numeric(rownames(adjacent.mat))
)
)
)stop("row names of adjacent.mat must be matched with location(number)")
  ## assign the neighbour weights first and the diagonal last; the reverse
  ## order would overwrite a diagonal of 1 whenever par.space equals 1
  adjacent.mat[which(adjacent.mat==1)] <- par.space*(1-par.space)
  diag(adjacent.mat) <- par.space
  return (adjacent.mat)
}
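## Example (a sketch, kept as comments; row names must match the numeric
## location codes used by AST()):
## adj <- matrix(c(0, 1, 0,
##                 1, 0, 1,
##                 0, 1, 0), 3, 3, dimnames = list(c("1","2","3"), c("1","2","3")))
## calcSpaceMat(adj, par.space = 0.9)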
| /scratch/gouwar.j/cran-all/cranData/AST/R/CalcSpaceMat.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
sv_mcmc <- function(return_vector, nSim = NULL, nBurn = NULL, vHyper = NULL) {
.Call(`_ASV_sv_mcmc`, return_vector, nSim, nBurn, vHyper)
}
sv_posterior <- function(H, Theta, Theta_star, Y, iM = NULL, vHyper = NULL) {
.Call(`_ASV_sv_posterior`, H, Theta, Theta_star, Y, iM, vHyper)
}
sv_prior <- function(Theta_star, vHyper = NULL) {
.Call(`_ASV_sv_prior`, Theta_star, vHyper)
}
sv_logML <- function(H, Theta, Theta_star, Y, iI = NULL, iM = NULL, vHyper = NULL) {
.Call(`_ASV_sv_logML`, H, Theta, Theta_star, Y, iI, iM, vHyper)
}
asv_mcmc <- function(return_vector, nSim = NULL, nBurn = NULL, vHyper = NULL) {
.Call(`_ASV_asv_mcmc`, return_vector, nSim, nBurn, vHyper)
}
asv_posterior <- function(H, Theta, Theta_star, Y, iM = NULL, vHyper = NULL) {
.Call(`_ASV_asv_posterior`, H, Theta, Theta_star, Y, iM, vHyper)
}
asv_prior <- function(Theta_star, vHyper = NULL) {
.Call(`_ASV_asv_prior`, Theta_star, vHyper)
}
asv_logML <- function(H, Theta, Theta_star, Y, iI = NULL, iM = NULL, vHyper = NULL) {
.Call(`_ASV_asv_logML`, H, Theta, Theta_star, Y, iI, iM, vHyper)
}
sv_pf <- function(mu, phi, sigma_eta, Y, I) {
.Call(`_ASV_sv_pf`, mu, phi, sigma_eta, Y, I)
}
sv_apf <- function(mu, phi, sigma_eta, Y, I) {
.Call(`_ASV_sv_apf`, mu, phi, sigma_eta, Y, I)
}
asv_pf <- function(mu, phi, sigma_eta, rho, Y, I) {
.Call(`_ASV_asv_pf`, mu, phi, sigma_eta, rho, Y, I)
}
asv_apf <- function(mu, phi, sigma_eta, rho, Y, I) {
.Call(`_ASV_asv_apf`, mu, phi, sigma_eta, rho, Y, I)
}
| /scratch/gouwar.j/cran-all/cranData/ASV/R/RcppExports.R |
#' Report summary statistics and plots for MCMC outputs
#'
#' Summary statistics, convergence diagnostics and plots for MCMC output,
#' by Yasuhiro Omori, University of Tokyo.
#'
#' @param mx
#' MCMC output
#' n x K matrix where n: number of samples, K: number of parameters
#'
#' @param dBm
#' Bandwidth to compute the variance of the sample mean
#' Also used for the lag of sample autocorrelation functions
#'
#' @param vname
#' labels for parameters
#'
#' @return Invisibly returns \code{NULL}; summary tables are printed and
#'   trace, autocorrelation and density plots are drawn as side effects.
#' @export
#' @importFrom freqdom spectral.density
#' @importFrom stats acf density pnorm quantile sd var
#' @importFrom graphics par
#'
#' @examples
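#' # A minimal sketch on simulated draws; "mu" and "sigma" are
#' # illustrative labels only, not estimates from a fitted model.
#' mx <- cbind(rnorm(1000), abs(rnorm(1000)))
#' ReportMCMC(mx, vname = c("mu", "sigma"))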
#'
ReportMCMC <-function(mx, dBm=NULL, vname=NULL)
{
oldpar <- graphics::par(no.readonly = TRUE)
on.exit(graphics::par(oldpar));
cRep = NROW(mx)
cdim = NCOL(mx)
if(is.null(dBm)){dBm = 2*( floor( sqrt(cRep) ) +1 );}
if(is.null(vname))
{
vname = "Param1"
if(cdim > 1)
{
for(i in 2:cdim){vname = cbind(vname, paste("Param",i,sep=""));}
}
}
# Compute the estimate for the variance of sample mean of time series
fTsvar <-function(vx,dbm)
{
dsp = as.double(freqdom::spectral.density(vx,freq=c(0),q=dbm,weights="Parzen")$operators[1,,])
# dsp = spec.ar(vx,plot=FALSE)$spec[1]
# dsp = spectrum0.ar(vx)$spec # using coda library
return(dsp)
}
# Function to compute the p-value of Convergence Diagnostics by comparing
# two means (for the first 10% and the last 50% of the series)
# H0:two means are equal (convergence)
fCD <-function(vx, dbm)
{
cT = length(vx); cn1 = floor(0.1*cT); cn2 = floor(0.5*cT);
vx1 = vx[1:cn1]; vx2 = vx[cn2:cT];
dx1bar = mean(vx1); dx2bar = mean(vx2);
dvar1=fTsvar(vx1, floor( 2*sqrt(cn1) ) +1 )
dvar2=fTsvar(vx2, floor( 2*sqrt(cn2) ) +1 )
dz=(dx1bar - dx2bar)/sqrt(dvar1/cn1+dvar2/cn2)
return(2*stats::pnorm(abs(dz), lower.tail=FALSE));
}
################################################################
# Estimation results
################################################################
result1 = matrix(0, cdim, 5)
result2 = matrix(0, cdim, 4)
vx = matrix(0, cRep, 1)
for(i in 1:cdim)
{
if( cdim==1 ){ vx = mx;}
else { vx = mx[,i];}
# Compute Inefficiency Factor
dIF = fTsvar(vx, dBm) / stats::var(vx)
if(dIF < 1) {dIF = 1; }
if(dIF < 10) {dIF.rounded = round(dIF, digits=1); }
else {dIF.rounded = round(dIF, digits=0); }
# Summary Statistics
result1[i,1] = mean(vx)
result1[i,2] = stats::sd(vx)
result1[i,3:5] = stats::quantile(vx, c(0.025, 0.5, 0.975))
result2[i,1] = round(cRep/dIF, digits=0) # ESS
result2[i,2] = dIF.rounded
result2[i,3] = round(fCD(vx,dBm),digits=3)
result2[i,4] = round(sum(vx>0)/cRep, digits=4)
}
colnames(result1) = c("Mean", "Std Dev", "95%L", "Median", "95%U")
rownames(result1) = vname
colnames(result2) = c("ESS", "IF", "CD", "Pr(+)")
rownames(result2) = vname
# Output on screen
print(result1, digit=5); print(result2, digit=5);
if(cdim == 1){vdisp=c(1,1);}
else if(cdim == 2){vdisp=c(2,1);}
else if(cdim == 3){vdisp=c(3,1);}
else if(cdim == 4){vdisp=c(2,2);}
else {vdisp=c(4,2);}
################################################################
# Sample path
################################################################
# Output on screen
graphics::par(mfrow=vdisp, plt = c(0.2,0.8,0.4,0.8))
for(i in 1:cdim)
{
if( cdim==1 ){ vx = mx;}
else { vx = mx[,i];}
# plot thinned MCMC samples
vx.ind = seq(1, length(vx), by = max(1,floor(length(vx)/1000)))
vx.thinned = vx[vx.ind]
plot(vx.ind, vx.thinned, xlab="Iteration", ylab=vname[i], main="", type="l")
}
################################################################
# Sample autocorrelation function
################################################################
# Output on screen
graphics::par(mfrow=vdisp, plt = c(0.2,0.8,0.4,0.8))
for(i in 1:cdim)
{
if( cdim==1 ){ vx = mx;}
else { vx = mx[,i];}
stats::acf(vx, lag.max=floor(length(vx)*0.05), main=vname[i]);
}
################################################################
# Estimated posterior density
################################################################
# Output on screen
graphics::par(mfrow=vdisp, plt = c(0.2,0.8,0.4,0.8))
for(i in 1:cdim)
{
if( cdim==1 ){ vx = mx;}
else { vx = mx[,i];}
plot(stats::density(vx), xlab=vname[i], main ="")
}
}
| /scratch/gouwar.j/cran-all/cranData/ASV/R/ReportMCMC.R |
#' ATAforecasting: Automatic Time Series Analysis and Forecasting using Ata Method with Box-Cox Power Transformations Family and Seasonal Decomposition Techniques
#'
#' @description Returns ATA(p,q,phi)(E,T,S) applied to the data.
#' The Ata method based on the modified simple exponential smoothing as described in Yapar, G. (2016) <doi:10.15672/HJMS.201614320580> ,
#' Yapar G., Capar, S., Selamlar, H. T., Yavuz, I. (2017) <doi:10.15672/HJMS.2017.493> and Yapar G., Selamlar, H. T., Capar, S., Yavuz, I. (2019)
#' <doi:10.15672/hujms.461032> is a new univariate time series forecasting method which provides innovative solutions to issues faced during
#' the initialization and optimization stages of existing methods.
#' Forecasting performance of the Ata method is superior to existing methods both in terms of easy implementation and accurate forecasting.
#' It can be applied to non-seasonal or seasonal time series which can be decomposed into four components (remainder, level, trend and seasonal).
#' This methodology performed well on the M3 and M4-competition data.
#'
#' @docType package
#'
#' @name ATAforecasting-package
#'
#' @author Ali Sabri Taylan and Hanife Taylan Selamlar
#'
#' Maintainer: [email protected]
#'
#' @keywords package
NULL # Instead of "_PACKAGE" to remove inclusion of \alias{ATAforecasting}
# "_PACKAGE"
## Generic Ata Methods functions
## Part of ATAforecasting package
#' Automatic Time Series Analysis and Forecasting using Ata Method with Box-Cox Power Transformations Family and Seasonal Decomposition Techniques
#'
#' \code{ATA} is a generic function for Ata Method forecasting.
#' The Ata method based on the modified simple exponential smoothing as described in Yapar, G. (2016) <doi:10.15672/HJMS.201614320580> ,
#' Yapar G., Capar, S., Selamlar, H. T., Yavuz, I. (2017) <doi:10.15672/HJMS.2017.493> and Yapar G., Selamlar, H. T., Capar, S., Yavuz, I. (2019)
#' <doi:10.15672/hujms.461032> is a new univariate time series forecasting method which provides innovative solutions to issues faced during
#' the initialization and optimization stages of existing methods.
#' Forecasting performance of the Ata method is superior to existing methods both in terms of easy implementation and accurate forecasting.
#' It can be applied to non-seasonal or seasonal time series which can be decomposed into four components (remainder, level, trend and seasonal).
#' This methodology performed well on the M3 and M4-competition data.
#'
#' Returns ATA(p,q,phi)(E,T,S) applied to \code{X}.
#'
#' @param X A numeric vector or time series of class \code{ts} or \code{msts} for in-sample.
#' @param Y A numeric vector or time series of class \code{ts} or \code{msts} for out-sample. If you do not have out-sample data, you can split in-sample data into training and test dataset with \code{train_test_split} argument.
#' @param parP Value of Level parameter \code{p}. If NULL or "opt", it is estimated. \code{p} has all integer values from 1 to \code{length(X)}.
#' @param parQ Value of Trend parameter \code{q}. If NULL or "opt", it is estimated. \code{q} has all integer values from 0 to \code{p}.
#' @param parPHI Value of Damping Trend parameter \code{phi}. If NULL or "opt", it is estimated. phi has all values from 0 to 1.
#' @param model.type An one-character string identifying method using the framework terminology. The letter "A" for additive model, the letter "M" for multiplicative model.
#' If NULL, both letters will be tried and the best model (according to the accuracy measure \code{accuracy.type}) returned.
#' @param seasonal.test Testing for stationary and seasonality. If TRUE, the method firstly uses \code{test="adf"}, Augmented Dickey-Fuller, unit-root test then the test returns the least number of differences required to pass the test at level \code{alpha}.
#' After the unit-root test, seasonal test applies on the stationary \code{X}.
#' @param seasonal.model A string identifying the method for seasonal decomposition. If NULL, "decomp" is the default. The available methods, c("none", "decomp", "stl", "stlplus", "tbats", "stR", "x13", "x11"), denote
#' \itemize{
#' \item{none} : seasonal decomposition is not required.
#' \item{decomp} : classical seasonal decomposition. If \code{decomp}, the \code{stats} package will be used.
#' \item{stl} : seasonal-trend decomposition procedure based on loess developed by Cleveland et al. (1990). If \code{stl}, the \code{stats} and \code{forecast} packages will be used. Multiple seasonal periods are allowed.
#' \item{stlplus} : seasonal-trend decomposition procedure based on loess developed by Cleveland et al. (1990). If \code{stlplus}, the \code{stlplus} package will be used.
#' \item{tbats} : exponential smoothing state space model with Box-Cox transformation, ARMA errors, trend and seasonal components.
#' as described in De Livera, Hyndman & Snyder (2011). Parallel processing is used by default to speed up the computations. If \code{tbats}, the \code{forecast} package will be used. Multiple seasonal periods are allowed.
#' \item{stR} : seasonal-trend decomposition procedure based on regression developed by Dokumentov and Hyndman (2015). If \code{stR}, the \code{stR} package will be used. Multiple seasonal periods are allowed.
#' \item{x13} : seasonal-trend decomposition procedure based on X13ARIMA/SEATS. If \code{x13}, the \code{seasonal} package will be used.
#' \item{x11} : seasonal-trend decomposition procedure based on X11. If \code{x11}, the \code{seasonal} package will be used.
#' }
#' @param seasonal.period Value(s) of seasonal periodicity. If NULL, \code{frequency} of X is the default. If \code{seasonal.period} is not an integer, \code{X} must be a \code{msts} time series object. Use c(s1,s2,s3,...) for multiple periods. If \code{X} has multiple periodicity, the "tbats" or "stR" seasonal model has to be selected.
#' @param seasonal.type An one-character string identifying method for the seasonal component framework. The letter "A" for additive model, the letter "M" for multiplicative model.
#' If NULL, both letters will be tried and the best model (according to the accuracy measure \code{accuracy.type}) returned.
#' If seasonal decomposition methods except \code{decomp} with "M", Box-Cox transformation with \code{lambda}=0 is selected.
#' @param seasonal.test.attr Attributes set for unit root, seasonality tests, X13ARIMA/SEATS and X11. If NULL, corrgram.tcrit=1.28, uroot.test="adf", suroot.test="correlogram", suroot.uroot=TRUE, uroot.type="trend", uroot.alpha=0.05, suroot.alpha=0.05, uroot.maxd=2, suroot.maxD=1, suroot.m=frequency(X), uroot.pkg="urca", multi.period="min", x13.estimate.maxiter=1500, x13.estimate.tol=1.0e-5, x11.estimate.maxiter=1500, x11.estimate.tol=1.0e-5. If you want to change, please use \code{ATA.SeasAttr} function and its output.
#' For example, you can use \code{seasonal.test.attr = ATA.SeasAttr(corrgram.tcrit=1.65)} equation in \code{ATA} function.
#' @param find.period Find seasonal period(s) automatically. If NULL, 0 is default. When \code{find.period},
#' \itemize{
#' \item{0} : none
#' \item{1} : single period with find.freq
#' \item{2} : single period with \code{forecast::findfrequency}
#' \item{3} : multiple period with find.freq & stR
#' \item{4} : multiple period with find.freq & tbats
#' \item{5} : multiple period with find.freq & stl
#' }
#' @param accuracy.type Accuracy measure for optimization of the best ATA Method forecasting. If NULL, \code{sMAPE} is the default.
#' \itemize{
#' \item{lik} : maximum likelihood functions
#' \item{sigma} : residual variance.
#' \item{MAE} : mean absolute error.
#' \item{MSE} : mean square error.
#' \item{AMSE} : Average MSE over first `nmse` forecast horizons using k-step forecast.
#' \item{GAMSE} : Average MSE over first `nmse` forecast horizons using one-step forecast.
#' \item{RMSE} : root mean squared error.
#' \item{MPE} : mean percentage error.
#' \item{MAPE} : mean absolute percentage error.
#' \item{sMAPE} : symmetric mean absolute percentage error.
#' \item{MASE} : mean absolute scaled error.
#' \item{OWA} : overall weighted average of MASE and sMAPE.
#' \item{MdAE} : median absolute error.
#' \item{MdSE} : median square error.
#' \item{RMdSE} : root median squared error.
#' \item{MdPE} : median percentage error.
#' \item{MdAPE} : median absolute percentage error.
#' \item{sMdAPE} : symmetric median absolute percentage error.
#' }
#' @param nmse If accuracy.type == "AMSE" or "GAMSE", nmse provides the number of steps for average multistep MSE (`2<=nmse<=30`).
#' @param level.fixed If TRUE, "pStarQ" --> First, fits ATA(p,0) where p = p* is optimized for q=0. Then, fits ATA(p*,q) where q is optimized for p = p*.
#' @param trend.opt When \code{trend.opt},
#' \itemize{
#' \item{none} : none
#' \item{fixed} : "pBullet" --> Fits ATA(p,1) where p = p* is optimized for q = 1.
#' \item{search} : "qBullet" --> Fits ATA(p,q) where p = p* is optimized for q = q* (q > 0). Then, fits ATA(p*,q) where q is optimized for p = p*.
#' }
#' @param h The forecast horizon.
#' When the parameter is NULL: if the frequency of \code{X} is 4, the parameter is set to 8; if the frequency of \code{X} is 12, it is set to 18; otherwise it is set to 6.
#' @param train_test_split If \code{Y} is NULL, this parameter divides \code{X} into two parts: training set (in-sample) and test set (out-sample). \code{train_test_split} is number of periods for forecasting and size of test set.
#' If the value is between 0 and 1, percentage of length is active.
#' @param holdout Default is FALSE. If TRUE, ATA Method uses the holdout forecasting for accuracy measure to select the best model. In holdout forecasting, the last few data points are removed from the data series.
#' The remaining historical data series is called in-sample data (training set), and the holdout data is called validation set (holdout set).
#' If TRUE, holdout.set_size will used for holdout data.
#' @param holdout.adjustedP Default is TRUE. If TRUE, parP will be adjusted by length of training - validation sets and in-sample set when the holdout forecasting is active.
#' @param holdout.set_size If \code{holdout} is TRUE, this parameter will be the same as \code{h} for defining the holdout set.
#' @param holdout.onestep Default is FALSE. if TRUE, the dynamic forecast strategy uses a one-step model multiple times (\code{h} forecast horizon) where the holdout prediction for the prior time step is used as an input for making a prediction on the following time step.
#' @param holdin Default is FALSE. If TRUE, ATA Method uses the hold-in forecasting for accuracy measure to select the best model. In hold-in forecasting, the last h-length data points are used for accuracy measure.
#' @param transform.order If "before", Box-Cox transformation family will be applied and then seasonal decomposition techniques will be applied. If "after", seasonal decomposition techniques will be applied and then Box-Cox transformation family will be applied.
#' @param transform.method Transformation method --> "Box_Cox", "Sqrt", "Reciprocal", "Log", "NegLog", "Modulus", "BickelDoksum", "Manly", "Dual", "YeoJohnson", "GPower", "GLog". If the transformation process needs shift parameter,
#' \code{ATA.Transform} will calculate required shift parameter automatically.
#' @param transform.attr Attributes set for Box-Cox transformation. If NULL, bcMethod = "loglik", bcLower = 0, bcUpper = 1, bcBiasAdj = FALSE. If you want to change, please use \code{ATA.BoxCoxAttr} function and its output.
#' @param lambda Box-Cox power transformation family parameter. Default is NULL. When "transform.method" is selected and lambda is set as NULL, required "lambda" parameter will be calculated automatically based on "transform.attr".
#' @param shift Box-Cox power transformation family shifting parameter. Default is 0. When "transform.method" is selected, required shifting parameter will be calculated automatically according to dataset.
#' @param initial.level "none" is default,
#' \itemize{
#' \item{none} : ATA Method calculates the pth observation in \code{X} for level.
#'    \item{mean}  : ATA Method calculates the average of the first p values in \code{X} for level.
#'    \item{median}: ATA Method calculates the median of the first p values in \code{X} for level.
#' }
#' @param initial.trend "none" is default,
#' \itemize{
#' \item{none} : ATA Method calculates the qth observation in \code{X} for trend.
#'    \item{mean}  : ATA Method calculates the average of the first q values in \code{X(T)-X(T-1)} for trend.
#'    \item{median}: ATA Method calculates the median of the first q values in \code{X(T)-X(T-1)} for trend.
#' }
#' @param ci.level Confidence Interval levels for forecasting.
#' @param start.phi Lower boundary for searching \code{parPHI}.If NULL, 0 is default.
#' @param end.phi Upper boundary for searching \code{parPHI}. If NULL, 1 is is default.
#' @param size.phi Increment step for searching \code{parPHI}. If NULL, the step size will be determined as the value that allows the bounds for the optimised value of \code{parPHI} to be divided into 20 equal parts.
#' @param negative.forecast Negative values are allowed for forecasting. Default value is TRUE. If FALSE, all negative values for forecasting are set to 0.
#' @param onestep Default is FALSE. if TRUE, the dynamic forecast strategy uses a one-step model multiple times (\code{h} forecast horizon) where the prediction for the prior time step is used as an input for making a prediction on the following time step.
#' @param print.out Default is TRUE. If FALSE, summary of ATA Method is not shown.
#' @param plot.out Default is TRUE. If FALSE, graphics of ATA Method are not shown.
#'
#' @return Returns an object of class \code{ata}. The generic accessor functions \code{ATA.Forecast} and \code{ATA.Accuracy} extract useful features of the value returned by \code{ATA} and associated functions.
#' \code{ata} object is a list containing at least the following elements
#' \itemize{
#' \item{actual} : The original time series.
#'    \item{fitted} : Fitted values (one-step forecasts). The mean of the fitted values is calculated over the ensemble.
#' \item{level} : Estimated level values.
#' \item{trend} : Estimated trend values.
#' \item{residuals} : Original values minus fitted values.
#' \item{coefp} : The weights attached to level observations.
#' \item{coefq} : The weights attached to trend observations.
#' \item{p} : Optimum level parameter.
#' \item{q} : Optimum trend parameter.
#' \item{phi} : Optimum damped trend parameter.
#' \item{model.type}: Form of trend.
#' \item{h} : The number of steps to forecast ahead.
#' \item{forecast} : Point forecasts as a time series.
#' \item{out.sample}: Test set as a time series.
#' \item{method} : The name of the optimum forecasting method as a character string for ATA(P,Q,PHI)(Error,Trend,Season).
#' \item{initial.level} : Selected initial level values for the time series forecasting method.
#' \item{initial.trend} : Selected initial trend values for the time series forecasting method.
#' \item{level.fixed} : A choice of optional level-fixed trended methods.
#' \item{trend.opt} : A choice of optional trend and level optimized trended methods (none, trend.fixed or trend.search).
#' \item{transform.method} : Box-Cox power transformation family method --> Box_Cox, Sqrt, Reciprocal, Log, NegLog, Modulus, BickelDoksum, Manly, Dual, YeoJohnson, GPower, GLog.
#' \item{transform.order} : Define how to apply Box-Cox power transformation techniques, before or after seasonal decomposition.
#' \item{lambda} : Box-Cox power transformation family parameter.
#' \item{shift} : Box-Cox power transformation family shifting parameter.
#' \item{accuracy.type} : Accuracy measure that is chosen for model selection.
#' \item{nmse} : The number of steps for average multistep MSE.
#' \item{accuracy} : In and out sample accuracy measures and its descriptives that are calculated for optimum model are given.
#' \item{par.specs} : Parameter sets for Information Criteria.
#' \item{holdout} : Holdout forecasting is TRUE or FALSE.
#' \item{holdout.training} : Training set in holdout forecasting.
#' \item{holdout.validation}: Validation set in holdout forecasting.
#' \item{holdout.forecast} : Holdout forecast.
#' \item{holdout.accuracy} : Accuracy measure chosen for model selection in holdout forecasting.
#' \item{holdin} : Hold-in forecasting is TRUE or FALSE.
#' \item{is.season} : Indicates whether it contains seasonal pattern.
#' \item{seasonal.model} : The name of the selected decomposition method.
#' \item{seasonal.type} : Form of seasonality.
#' \item{seasonal.period} : The number of seasonality periods.
#' \item{seasonal.index} : Weights of seasonality.
#' \item{seasonal} : Estimated seasonal values.
#' \item{seasonal.adjusted} : Deseasonalized time series values.
#' \item{execution.time} : The real and CPU time 'in seconds' spent by the system executing that task, including the time spent executing run-time or system services on its behalf.
#' \item{calculation.time} : How much real time 'in seconds' the currently running R process has already taken.
#' }
#'
#' @author Ali Sabri Taylan and Hanife Taylan Selamlar
#'
#' @seealso \code{forecast}, \code{stlplus}, \code{stR}, \code{\link[stats]{stl}}, \code{\link[stats]{decompose}},
#' \code{tbats}, \code{seasadj}, \code{seasonal}.
#'
#' @references
#'
#' \insertRef{yapar2017mses}{ATAforecasting}
#'
#' \insertRef{yapar2018mhes}{ATAforecasting}
#'
#' \insertRef{yapar2018mses}{ATAforecasting}
#'
#' \insertRef{yapar2019ata}{ATAforecasting}
#'
#' @keywords Ata forecast accuracy ts msts
#'
#' @importFrom forecast findfrequency
#' @importFrom stats cycle frequency ts tsp tsp<-
#' @importFrom Rdpack reprompt
#'
#' @examples
#' trainATA <- head(touristTR, 84)
#' testATA <- window(touristTR, start = 2015, end = 2016.917)
#' ata_fit <- ATA(trainATA, h=24, parQ = 1, seasonal.test = TRUE, seasonal.model = "stl")
#' ata_fc <- ATA.Forecast(ata_fit, out.sample = testATA)
#' ata_accry <- ATA.Accuracy(ata_fc)
#'
#' @export
ATA <- function(X, Y = NULL,
parP = NULL,
parQ = NULL,
parPHI = NULL,
model.type = NULL,
seasonal.test = NULL,
seasonal.model = "decomp",
seasonal.period = NULL,
seasonal.type = NULL,
seasonal.test.attr = NULL,
find.period = NULL,
accuracy.type = NULL,
nmse = 3,
level.fixed = FALSE,
trend.opt = "none",
h = NULL,
train_test_split = NULL,
holdout = FALSE,
holdout.adjustedP = TRUE,
holdout.set_size = NULL,
holdout.onestep = FALSE,
holdin = FALSE,
transform.order = "before",
transform.method = NULL,
transform.attr = NULL,
lambda = NULL,
shift = 0,
initial.level = "none",
initial.trend = "none",
ci.level = 95,
start.phi = NULL,
end.phi = NULL,
size.phi = NULL,
negative.forecast = TRUE,
onestep = FALSE,
print.out = TRUE,
plot.out = TRUE)
{
if (is.null(parQ)){
parQ <- "opt"
}
if (is.null(parP)){
parP <- "opt"
}
if (is.null(parPHI)){
parPHI <- "opt"
}
if (parPHI == "opt"){
if (is.null(start.phi)){
start.phi <- 0.4
}
if (is.null(end.phi)){
end.phi <- 1
}
if (is.null(size.phi)){
size.phi <- signif((end.phi - start.phi) / 20, 6)
}
}else {
start.phi <- parPHI
end.phi <- parPHI
size.phi <- 1
}
if (!is.null(seasonal.period)){
find.period <- 0
s.frequency <- seasonal.period
X <- forecast::msts(X, seasonal.periods = seasonal.period)
}else{
if (is.null(find.period)){
find.period <- 0
}
X_len <- length(X)
if ("msts" %in% class(X)) {
X_msts <- attributes(X)$msts
if (any(X_msts >= X_len / 2)) {
warning("Dropping seasonal components with fewer than two full periods.")
X_msts <- X_msts[X_msts < X_len / 2]
X <- forecast::msts(X, seasonal.periods = X_msts)
}
s.frequency <- seasonal.period <- sort(X_msts, decreasing = FALSE)
}else if ("ts" %in% class(X)) {
s.frequency <- seasonal.period <- frequency(X)
}else {
X <- as.ts(X)
s.frequency <- seasonal.period <- 1L
}
}
if (find.period!=0){
if(find.period==1){
seasonal.period <- find.freq(X)
seasonal.test <- TRUE
}else if(find.period==2){
seasonal.period <- forecast::findfrequency(X)
seasonal.test <- TRUE
}else if (find.period==3){
seasonal.period <- find.multi.freq(X)
      seasonal.model <- "stR"
seasonal.test <- TRUE
}else if (find.period==4){
seasonal.period <- find.multi.freq(X)
      seasonal.model <- "tbats"
seasonal.test <- TRUE
}else if (find.period==5){
seasonal.period <- find.multi.freq(X)
      seasonal.model <- "stl"
seasonal.test <- TRUE
}else {
stop("find.period must be integer and between 0 and 5. ATAforecasting was terminated!")
}
s.frequency <- seasonal.period
}
if (length(s.frequency)>1 & length(seasonal.model)==1){
if (seasonal.model != "tbats" & seasonal.model != "stR" & seasonal.model != "stl"){
seasonal.model <- "stl"
seasonal.test <- TRUE
warning("Seasonal decompostion method has been set to 'stl' because invalid method is chosen.")
}
}else if (length(s.frequency)>1 & is.null(seasonal.model)){
seasonal.model <- c("stl","stR","tbats")
warning("Seasonal decompostion method has been set to c('stl', 'stR', 'tbats').")
}else {
if (s.frequency > 1 & is.null(seasonal.model)){
seasonal.model <- "decomp"
seasonal.test <- TRUE
warning("Seasonal decompostion method has been set to 'decomp'.")
}
}
if (is.null(accuracy.type)){
accuracy.type <- "sMAPE"
}
if (trend.opt=="search"){
trend.search <- TRUE
level.fixed <- FALSE
trend.fixed <- FALSE
warning("level.fixed parameter has been turned FALSE as trend.opt is set 'search'")
}else if (trend.opt=="fixed"){
trend.search <- FALSE
level.fixed <- FALSE
trend.fixed <- TRUE
warning("level.fixed parameter has been turned FALSE as trend.opt is set 'search'")
}else if (trend.opt=="none"){
trend.search <- FALSE
trend.fixed <- FALSE
}else {
}
if (is.null(initial.level)){
initial.level = "none"
}
if (is.null(initial.trend)){
    initial.trend = "none"
}
if (is.null(seasonal.test.attr)) {
seas_attr_set <- ATA.SeasAttr()
}else {
seas_attr_set <- seasonal.test.attr
}
if (!is.null(seasonal.type)){
if (is.null(seasonal.test)){
seasonal.test <- TRUE
}
}else {
seasonal.test <- FALSE
}
if (min(s.frequency) == 1 & seasonal.test == TRUE){
stop("'period' can not be equal 1 if 'seasonal.test' is set TRUE.")
}
if (is.null(transform.attr)) {
boxcox_attr_set <- ATA.BoxCoxAttr()
}else {
boxcox_attr_set <- transform.attr
}
if (holdout == TRUE & holdin == TRUE){
return("Only one parameter of the two parameters (holdout or holdin) must be selected. Please choose one one of them. ATAforecasting was terminated!")
}
if (holdout == TRUE & (accuracy.type == "AMSE" | accuracy.type == "GAMSE")) {
accuracy.type <- "sMAPE"
warning("ATA Method does not support 'AMSE' for 'holdout' forecasting. 'accuracy.type' is set to 'sMAPE'.")
}
if (nmse > 30 & (accuracy.type == "AMSE" | accuracy.type == "GAMSE")) {
nmse <- 30
warning("'nmse' must be less than 30. 'nmse' is set to 30.")
}else if ((is.null(nmse) | nmse <= 1) & (accuracy.type == "AMSE" | accuracy.type == "GAMSE")) {
nmse <- 3
warning("'nmse' must be greater than 1. 'nmse' is set to 3.")
}else{
}
Qlen <- length(parQ)
Plen <- length(parP)
if (inherits(parP, "character") & parP!="opt"){
stop("p value must be integer and between 1 and length of input. ATAforecasting was terminated!")
}else if ((inherits(parP, "numeric") | inherits(parP, "integer")) & Plen>1 & (max(parP)>length(X))){
stop("p value must be integer and between 1 and length of input. ATAforecasting was terminated!")
}else{
}
if (inherits(parQ, "character") & parQ!="opt"){
stop("p value must be integer and between 0 and p. ATAforecasting was terminated!")
}else if ((inherits(parQ, "numeric") | inherits(parQ, "integer")) & Qlen>1 & (max(parQ)>=max(parP))){
stop("q value must be integer and between 0 and p. ATAforecasting was terminated!")
}else{
}
if (inherits(parPHI, "character") & parPHI!="opt"){
stop("phi value must be numeric and between 0 and 1. ATAforecasting was terminated!")
  }else if ((inherits(parPHI, "numeric") | inherits(parPHI, "integer")) & (any(parPHI < 0) | any(parPHI > 1) | length(parPHI) > 1)){
stop("phi value must be numeric and between 0 and 1. ATAforecasting was terminated!")
}
if (!is.null(seasonal.type)){
if ((seasonal.type != "A" & seasonal.type != "M") | !is.character(seasonal.type) | length(seasonal.type) > 1){
stop("Seasonal Type value must be string. A for additive or M for multiplicative. ATAforecasting was terminated!")
}
}
if (!is.null(seasonal.model)){
if (length(seasonal.model) == 1){
if ((seasonal.model != "none" & seasonal.model != "decomp" & seasonal.model != "stl" & seasonal.model != "stlplus" & seasonal.model != "tbats" & seasonal.model != "stR" & seasonal.model != "x13" & seasonal.model != "x11") | !is.character(seasonal.model)){
stop("Seasonal Decomposition Model value must be string: decomp, stl, stlplus, tbats, stR. ATAforecasting was terminated!")
}
}else {
if(any(seasonal.model %in% c("decomp","stl", "stlplus", "stR", "tbats", "x13", "x11"))){
}else {
stop("Seasonal Decomposition Model value must be string: decomp, stl, stlplus, tbats, stR. ATAforecasting was terminated!")
}
}
}
if ((accuracy.type != "lik" & accuracy.type != "sigma" & accuracy.type != "MAE" & accuracy.type != "MSE" & accuracy.type != "AMSE" & accuracy.type != "GAMSE" & accuracy.type != "RMSE" &
accuracy.type != "MPE" & accuracy.type != "MAPE" & accuracy.type != "sMAPE" & accuracy.type != "MASE" & accuracy.type != "OWA" & accuracy.type != "MdAE" &
accuracy.type != "MdSE" & accuracy.type != "MdPE" & accuracy.type != "MdAPE" & accuracy.type != "sMdAPE") | !is.character(accuracy.type) | length(accuracy.type) > 1){
stop("Accuracy Type value must be string and it must get one value: MAE or MSE or AMSE or GAMSE or MPE or MAPE or sMAPE or MASE or MdAE or MdSE or MdPE or MdAPE or sMdAPE. ATAforecasting was terminated!")
}
if (!is.null(model.type)){
if ((model.type != "A" & model.type != "M") | !is.character(model.type) | length(model.type) > 1){
stop("Model Type value must be string. A for additive or M for multiplicative or NULL for both of them. ATAforecasting was terminated!")
}
}
if (!is.null(initial.level)){
if (initial.level != "none" & initial.level != "mean" & initial.level != "median") {
stop("Initial value for Level must get one value: 'none' or 'mean' or 'median'. ATAforecasting was terminated!")
}
}
if (!is.null(initial.trend)){
if (initial.trend != "none" & initial.trend != "mean" & initial.trend != "median") {
stop("Initial value for Trend must get one value: 'none' or 'mean' or 'median'. ATAforecasting was terminated!")
}
}
if (!is.null(transform.order)){
if ((transform.order != "before" & transform.order != "after") | !is.character(transform.order) | length(transform.order) > 1){
stop("Transformation Order value must be string. 'before' for Transformation --> Decompostion or 'after' for Decomposition --> Transformation. ATAforecasting was terminated!")
}
}
if (!is.null(transform.method)){
if ((transform.method != "Box_Cox" & transform.method != "Modulus" & transform.method != "BickelDoksum" & transform.method != "Dual" & transform.method != "Manly" & transform.method != "Sqrt" &
transform.method != "YeoJohnson" & transform.method != "GPower" & transform.method != "GLog" & transform.method != "Log" & transform.method != "Reciprocal" &
transform.method != "NegLog") | !is.character(transform.method) | length(transform.method) > 1){
stop("Transform Method value must be string. Please select a valid Box-Cox transformation technique. ATAforecasting was terminated!")
}
}
if (!inherits(seas_attr_set, "ataoptim")){
stop("Attributes set for unit root and seasonality tests are not suitable set. ATAforecasting was terminated!")
}
if (!inherits(boxcox_attr_set, "ataoptim")){
stop("Attributes set for Box-Cox transformation are not suitable set. ATAforecasting was terminated!")
}
if (is.null(shift)){
shift <- 0
warning("'shift' is set to 0 to calculate automatically.")
}else{
if (shift<0){
shift <- 0
warning("'shift' is set to 0 to calculate automatically.")
}
}
WD <- getwd()
start.time <- Sys.time()
ptm <- proc.time()
train_set <- main_set <- forecast::msts(X, start = start(X), seasonal.periods = s.frequency)
tspX <- tsp(main_set)
if (!is.null(Y[1])){
test_set <- Y
h <- length(Y)
}else {
if (!is.null(train_test_split)){
test_len <- part_h <- as.integer(ifelse(train_test_split > 0 & train_test_split < 1, floor(length(main_set) * train_test_split), train_test_split))
mainset_len <- length(main_set)
train_len <- mainset_len - test_len
test_set <- main_set[(train_len+1):mainset_len]
test_set <- forecast::msts(test_set, start = end(train_set) - ifelse(tspX[3]>1, (part_h - 1) * (1/tspX[3]), (part_h - 1) * 1), seasonal.periods = s.frequency)
main_set <- train_set <- main_set[1:train_len]
train_set <- forecast::msts(train_set, start = start(main_set), seasonal.periods = s.frequency)
main_set <- forecast::msts(main_set, start = start(main_set), seasonal.periods = s.frequency)
h <- length(test_set)
}else {
m <- max(s.frequency)
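      # Default horizon: two full seasonal cycles for common frequencies, otherwise 6 steps ahead.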
if (is.null(h)){
if (m==4){
h <- 8
}else if (m==5){
h <- 10
}else if (m==7){
h <- 14
}else if (m==12){
h <- 24
}else if (m==24){
h <- 48
}else {
h <- 6
}
}
test_set <- rep(NA,times=h)
}
}
test_set <- forecast::msts(test_set, start = end(train_set) + ifelse(tspX[3]>1, 1/tspX[3], 1), seasonal.periods = s.frequency)
freqYh <- cycle(test_set)
par.specs <- list("p" = parP, "q" = parQ, "phi" = parPHI,
"trend" = ifelse(is.null(model.type), "opt", ifelse(parQ==0, "N", ifelse(parPHI==0, "N", model.type))),
"seasonal" = ifelse(is.null(seasonal.type), "opt", seasonal.type),
"period" = s.frequency,
"decomp_model" = seasonal.model,
"initial_level" = ifelse(initial.level=="none", NA, TRUE),
"initial_trend" = ifelse(initial.trend=="none", NA, TRUE))
par_specs <- c(stats::na.omit(unlist(par.specs)))
np <- length(par_specs)
if (np >= length(train_set) - 1) {
stop("Not enough data to estimate this ATA method.")
}
if (transform.order == "before"){
trfm_train_set <- ATA.Transform(train_set,tMethod=transform.method, tLambda=lambda, tShift=shift, bcMethod = boxcox_attr_set$bcMethod, bcLower = boxcox_attr_set$bcLower, bcUpper = boxcox_attr_set$bcUpper)
train_set <- forecast::msts(trfm_train_set$trfmX, start = start(main_set), seasonal.periods = s.frequency)
lambda <- trfm_train_set$tLambda
shift <- trfm_train_set$tShift
if (length(seasonal.type)==1 & length(seasonal.model)==1){
my_list <- SubATA_Single_Before(train_set, parP, parQ, model.type, seasonal.test, seasonal.model, seasonal.type, s.frequency, h, accuracy.type,
level.fixed, trend.fixed, trend.search, start.phi, end.phi, size.phi, initial.level, initial.trend, transform.method,
lambda, shift, main_set, test_set, seas_attr_set, freqYh, ci.level, negative.forecast, boxcox_attr_set, holdout,
holdout.set_size, holdout.adjustedP, holdin, nmse, onestep, holdout.onestep)
}else {
my_list <- SubATA_Multi_Before(train_set, parP, parQ, model.type, seasonal.test, seasonal.model, seasonal.type, s.frequency, h, accuracy.type,
level.fixed, trend.fixed, trend.search, start.phi, end.phi, size.phi, initial.level, initial.trend, transform.method,
lambda, shift, main_set, test_set, seas_attr_set, freqYh, ci.level, negative.forecast, boxcox_attr_set, holdout,
holdout.set_size, holdout.adjustedP, holdin, nmse, onestep, holdout.onestep)
}
}else {
if (!is.null(transform.method)){
model.type <- "M"
warning("model.type parameter has been set as 'M' because of a transformation techniques from Box-Cox power transformation family selected.")
}
if (length(seasonal.type)==1 & length(seasonal.model)==1){
my_list <- SubATA_Single_After(train_set, parP, parQ, model.type, seasonal.test, seasonal.model, seasonal.type, s.frequency, h, accuracy.type,
level.fixed, trend.fixed, trend.search, start.phi, end.phi, size.phi, initial.level, initial.trend, transform.method,
lambda, shift, main_set, test_set, seas_attr_set, freqYh, ci.level, negative.forecast, boxcox_attr_set, holdout,
holdout.set_size, holdout.adjustedP, holdin, nmse, onestep, holdout.onestep)
}else {
my_list <- SubATA_Multi_After(train_set, parP, parQ, model.type, seasonal.test, seasonal.model, seasonal.type, s.frequency, h, accuracy.type,
level.fixed, trend.fixed, trend.search, start.phi, end.phi, size.phi, initial.level, initial.trend, transform.method,
lambda, shift, main_set, test_set, seas_attr_set, freqYh, ci.level, negative.forecast, boxcox_attr_set, holdout,
holdout.set_size, holdout.adjustedP, holdin, nmse, onestep, holdout.onestep)
}
}
my_list$transform.order <- transform.order
my_list$trend.opt <- trend.opt
  my_list$holdout.onestep <- holdout.onestep
executionTime <- proc.time() - ptm
end.time <- Sys.time()
my_list$execution.time <- executionTime
my_list$calculation.time <- round(as.double(difftime(end.time, start.time,units="sec")),4)
if (plot.out==TRUE) {
ATA.Plot(my_list)
}
if (print.out==TRUE) {
ATA.Print(my_list)
}
my_list<-my_list[order(names(my_list))]
attr(my_list, "class") <- "ata"
gc()
return(my_list)
}
#' Specialized Screen Print Function of The ATAforecasting
#'
#' @param object an object of \code{ata}
#' @param ... other inputs
#'
#' @return a summary for the results of the ATAforecasting
#'
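#' @examples
#' \dontrun{
#' # A minimal usage sketch: fit quietly, then print the summary on demand.
#' ata_fit <- ATA(touristTR, h = 24, print.out = FALSE, plot.out = FALSE)
#' ATA.Print(ata_fit)
#' }
#'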
#' @export
ATA.Print <- function(object,...)
{
opscipen <- options("scipen" = 100, "digits"=7)
on.exit(options(opscipen))
x <- object
cat(x$method,"\n\n")
if (x$level.fixed==TRUE){
cat(" level.fixed: TRUE","\n\n")
}
if (x$trend.opt!="none"){
cat(paste(" trend optimization method: trend.", x$trend.opt, "\n\n", sep=""))
}
if(!is.null(x$transform.method)){
cat(paste(" '",x$transform.method, "' transformation method was selected.","\n\n", sep=""))
}
if(!is.null(x$lambda)){
cat(" Box-Cox transformation: lambda=",round(x$lambda,4), "\n\n")
}
cat(paste(" model.type:",x$model.type, "\n\n"))
if (x$is.season==FALSE){
cat(" seasonal.model: no seasonality","\n\n")
}else {
cat(paste(" seasonal.model:",x$seasonal.model, "\n\n"))
}
if (x$is.season==TRUE){
cat(paste(" seasonal.type:",x$seasonal.type, "\n\n"))
}
cat(paste(" forecast horizon:",x$h, "\n\n"))
cat(paste(" accuracy.type:",x$accuracy.type, "\n\n"))
cat("Model Fitting Measures:","\n")
stats <- c(x$accuracy$fits$sigma2, x$accuracy$fits$loglik, x$accuracy$MAE$inSample$MAE, x$accuracy$MSE$inSample$MSE, x$accuracy$MSE$inSample$RMSE, x$accuracy$MPE$inSample$MPE, x$accuracy$MAPE$inSample$MAPE, x$accuracy$sMAPE$inSample$sMAPE, x$accuracy$MASE$inSample$MASE, x$accuracy$OWA$inSample$OWA)
names(stats) <- c("sigma2", "loglik", "MAE", "MSE", "RMSE", "MPE", "MAPE", "sMAPE", "MASE", "OWA")
cat("\n")
print(stats)
cat("\n")
cat("In-Sample Accuracy Measures:","\n")
stats <- c(x$accuracy$MAE$inSample$MdAE, x$accuracy$MSE$inSample$MdSE, x$accuracy$MSE$inSample$RMdSE, x$accuracy$MPE$inSample$MdPE, x$accuracy$MAPE$inSample$MdAPE, x$accuracy$sMAPE$inSample$sMdAPE)
names(stats) <- c("MdAE", "MdSE", "RMdSE", "MdPE", "MdAPE", "sMdAPE")
cat("\n")
print(stats)
cat("\n")
cat("Out-Sample Accuracy Measures:","\n")
stats <- c(x$accuracy$MAE$outSample$MAE, x$accuracy$MSE$outSample$MSE, x$accuracy$MSE$outSample$RMSE, x$accuracy$MPE$outSample$MPE, x$accuracy$MAPE$outSample$MAPE, x$accuracy$sMAPE$outSample$sMAPE, x$accuracy$MASE$outSample$MASE, x$accuracy$OWA$outSample$OWA)
names(stats) <- c("MAE", "MSE", "RMSE", "MPE", "MAPE", "sMAPE", "MASE", "OWA")
cat("\n")
print(stats)
cat("\n")
cat("Out-Sample Accuracy Measures:","\n")
stats <- c(x$accuracy$MAE$outSample$MdAE, x$accuracy$MSE$outSample$MdSE, x$accuracy$MSE$outSample$RMdSE, x$accuracy$MPE$outSample$MdPE, x$accuracy$MAPE$outSample$MdAPE, x$accuracy$sMAPE$outSample$sMdAPE)
names(stats) <- c("MdAE", "MdSE", "RMdSE", "MdPE", "MdAPE", "sMdAPE")
cat("\n")
print(stats)
cat("\n")
cat("Information Criteria:","\n")
stats <- c(x$accuracy$fits$AIC, x$accuracy$fits$AICc, x$accuracy$fits$BIC)
names(stats) <- c("AIC", "AICc", "BIC")
cat("\n")
print(stats)
cat("\n")
stats <- c(x$execution.time[1], x$execution.time[2], x$execution.time[3])
names(stats) <- c("user","system","elapsed")
cat("\n")
print(stats)
cat("\n")
cat(paste("calculation.time:",x$calculation.time, "\n\n"))
cat("\n")
cat("Forecasts:","\n")
print(x$forecast)
cat("\n\n")
}
#' Specialized Plot Function of The ATAforecasting
#'
#' @param object an object of \code{ata}
#' @param fcol line color
#' @param flty line type
#' @param flwd line width
#' @param ... other inputs
#'
#' @return a graphic output for the components of the ATAforecasting
#'
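#' @examples
#' \dontrun{
#' # A minimal usage sketch: fit without the automatic plot, then draw the components.
#' ata_fit <- ATA(touristTR, h = 24, plot.out = FALSE)
#' ATA.Plot(ata_fit)
#' }
#'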
#' @importFrom stats cycle frequency ts tsp tsp<-
#' @importFrom graphics axis legend layout lines mtext par plot polygon
#'
#' @export
ATA.Plot <- function(object, fcol=4, flty = 2, flwd = 2, ...)
{
x <- object
oldpar <- par(no.readonly = TRUE)# save default, for resetting...
on.exit(par(oldpar))
caption <- x$method
xx <- x$actual
hpred <- length(x$forecast)
freq <- frequency(xx)
xxx <- ts(c(x$actual, rep(NA,hpred)), end=tsp(xx)[2] + hpred/freq, frequency=freq)
xxy <- ts(c(x$fitted, rep(NA,hpred)), end=tsp(xx)[2] + hpred/freq, frequency=freq)
min_y <- min(x$actual, x$fitted, x$out.sample, x$forecast, x$forecast.lower, na.rm=TRUE)
max_y <- max(x$actual, x$fitted, x$out.sample, x$forecast, x$forecast.upper, na.rm=TRUE)
range_y <- abs(max_y - min_y)
min_last <- floor(min_y - range_y * 0.20)
max_last <- ceiling(max_y + range_y * 0.20)
range_last <- signif(abs(max_last - min_last),6)
rnd_par <- ifelse(range_last<10, 1, 0)
dataset <- cbind(xxx,xxy)
colnames(dataset, do.NULL = FALSE)
colnames(dataset) <- c("actual","fitted")
legend_names <- c("actual","fitted","out-sample","forecast")
tmp <- seq(from = tsp(x$forecast)[1], by = 1/freq, length = hpred)
if (x$is.season==FALSE){
layout(matrix(c(1, 2, 3, 4), 2, 2, byrow=TRUE))
par(mar = c(bottom=1, 4.1, top=2, 1.1))
plot(dataset,plot.type="s", ylim=c(min_last, max_last), col=1:ncol(dataset), xlab=NULL, ylab="fitted", yaxt="n")
axis(side=2,at=seq(min_last, max_last,round(range_last/10, rnd_par)), labels=seq(min_last, max_last,round(range_last/10, rnd_par)), las=1, lwd=1)
polygon(x=c(tmp, rev(tmp)), y=c(x$forecast.lower, rev(x$forecast.upper)), col="lightgray", border=NA)
lines(x$forecast, lty = flty, lwd = flwd, col = fcol)
lines(x$out.sample, lty = 1, lwd = flwd, col = fcol+2)
legend("topleft", legend_names, col=c(1,2,fcol+2,fcol), lty=1, cex=.80, box.lty=0, text.font=2, ncol=2, bg="transparent")
mtext(caption, side = 3, line = -1.5, outer = TRUE)
par(mar = c(bottom=1, 4.1, top=2, 1.1))
plot(x$trend, ylab="trend")
par(mar = c(bottom=2, 4.1, top=2, 1.1))
plot(x$level, ylab="level")
par(mar = c(bottom=2, 4.1, top=2, 1.1))
plot(x$residuals, ylab="residuals")
}else {
layout(matrix(c(1, 2, 3, 4, 5, 6), 3, 2, byrow=TRUE))
par(mar = c(bottom=1, 4.1, top=2, 1.1))
plot(dataset,plot.type="s", ylim=c(min_last, max_last), col=1:ncol(dataset), xlab=NULL, ylab="fitted", yaxt="n")
axis(side=2,at=seq(min_last, max_last,round(range_last/10, rnd_par)), labels=seq(min_last, max_last,round(range_last/10, rnd_par)), las=1, lwd=1)
polygon(x=c(tmp, rev(tmp)), y=c(x$forecast.lower, rev(x$forecast.upper)), col="lightgray", border=NA)
lines(x$forecast, lty = flty, lwd = flwd, col = fcol)
lines(x$out.sample, lty = 1, lwd = flwd, col = fcol+2)
legend("topleft", legend_names, col=c(1,2,fcol+2,fcol), lty=1, cex=.80, box.lty=0, text.font=2, ncol=2, bg="transparent")
mtext(paste(caption, " with ", ifelse(x$seasonal.model=="decomp","classical",x$seasonal.model), " decomposition method", sep=""), side = 3, line = -1.5, outer = TRUE)
par(mar = c(bottom=1, 4.1, top=2, 1.1))
plot(x$seasonal.adjusted,ylab="deseasonalized")
par(mar = c(bottom=1, 4.1, top=2, 1.1))
plot(x$level, ylab="level")
par(mar = c(bottom=1, 4.1, top=2, 1.1))
plot(x$trend,ylab="trend")
par(mar = c(bottom=2, 4.1, top=2, 1.1))
plot(x$seasonal,ylab="seasonality")
par(mar = c(bottom=2, 4.1, top=2, 1.1))
plot(x$residuals, ylab="residuals")
}
}
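# Internal helper: count the digits in the character representation of a number,
# excluding the decimal point (e.g. find.precision("123.45") gives 5).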
find.precision <- function(x)
{
results <- nchar(sub(".", "", x, fixed=TRUE))
return(results)
}
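# Internal helper: count the digits after the decimal point
# (e.g. find.scale("123.45") gives 2).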
find.scale <- function(x)
{
results <- nchar(sub("\\d+\\.?(.*)$", "\\1", x))
return(results)
}
| /scratch/gouwar.j/cran-all/cranData/ATAforecasting/R/ATA.R |
#' Accuracy Measures for The ATAforecasting
#'
#' @description Returns ATA(p,q,phi)(E,T,S) applied to `ata` \code{object}.
#' Accuracy measures for a forecast model
#' Returns range of summary measures of the forecast accuracy. If \code{out.sample} is
#' provided, the function measures test set forecast accuracy.
#' If \code{out.sample} is not provided, the function only produces
#' training set accuracy measures.
#' The measures calculated are:
#' \itemize{
#' \item{lik} : value of the maximum likelihood function.
#' \item{sigma} : residual variance.
#' \item{MAE} : mean absolute error.
#' \item{MSE} : mean square error.
#' \item{RMSE} : root mean squared error.
#' \item{MPE} : mean percentage error.
#' \item{MAPE} : mean absolute percentage error.
#' \item{sMAPE} : symmetric mean absolute percentage error.
#' \item{MASE} : mean absolute scaled error.
#' \item{OWA} : overall weighted average of MASE and sMAPE.
#' \item{MdAE} : median absolute error.
#' \item{MdSE} : median square error.
#' \item{RMdSE} : root median squared error.
#' \item{MdPE} : median percentage error.
#' \item{MdAPE} : median absolute percentage error.
#' \item{sMdAPE} : symmetric median absolute percentage error.
#' }
#'
#' @param object An object of class \code{ata} is required.
#' @param out.sample A numeric vector or time series of class \code{ts} or \code{msts} for out-sample.
#' @param print.out Default is TRUE. If FALSE, summary of ATA Method's accuracy measures is not shown.
#'
#' @return Matrix giving forecast accuracy measures.
#'
#' @author Ali Sabri Taylan and Hanife Taylan Selamlar
#'
#' @seealso \code{forecast}, \code{stlplus}, \code{stR}, \code{\link[stats]{stl}}, \code{\link[stats]{decompose}}, \code{tbats}, \code{seasadj}.
#'
#' @references
#'
#' \insertRef{hyndmanandkoehler2006}{ATAforecasting}
#'
#' \insertRef{hyndman2019forecasting}{ATAforecasting}
#'
#'
#' @keywords Ata forecast accuracy ts msts
#'
#' @importFrom stats median var
#' @importFrom timeSeries colKurtosis colSkewness
#' @importFrom Rdpack reprompt
#'
#' @examples
#' trainATA <- head(touristTR, 84)
#' testATA <- window(touristTR, start = 2015, end = 2016.917)
#' ata_fit <- ATA(trainATA, h=24, seasonal.test = TRUE, seasonal.model = "decomp")
#' ata_accuracy <- ATA.Accuracy(ata_fit, testATA)
#'
#' @export
ATA.Accuracy <- function(object, out.sample=NULL, print.out = TRUE)
{
if (!inherits(object, "ata")){
stop("The Input must be 'ata' object. Please use ATA() function to produce 'ata' object.")
}
train_set <- object$actual
ts_fit <- object$fitted
ts_fc <- object$forecast
ts_act <- as.numeric(train_set[-1])
ts_fit <- as.numeric(ts_fit[-1])
if (is.null(out.sample)) {
test_set <- NA
}else {
test_set <- as.numeric(out.sample)
}
ata.error <- ts_act - ts_fit
ata.pe <- ata.error / ts_act * 100
pre_mae <- abs(ata.error)
pre_mse <- ata.error^2
pre_mpe <- ata.pe
pre_mape <- abs(ata.pe)
pre_smape <- abs(ts_act - ts_fit)/(abs(ts_act) + abs(ts_fit)) * 200
  if (!is.na(test_set[1])){
ata.error.os <- test_set - ts_fc
ata.pe.os <- ata.error.os / test_set * 100
pre_mae_os <- abs(ata.error.os)
pre_mse_os <- ata.error.os^2
pre_mpe_os <- ata.pe.os
pre_mape_os <- abs(ata.pe.os)
pre_smape_os <- abs(test_set - ts_fc)/(abs(test_set) + abs(ts_fc)) * 200
}else {
pre_mae_os <- NA
pre_mse_os <- NA
pre_mpe_os <- NA
pre_mape_os <- NA
pre_smape_os <- NA
}
np <- length(c(stats::na.omit(unlist(object$par.specs))))
ny <- length(train_set)
mae <- round(mean(pre_mae, na.rm=TRUE),8)
mse <- round(mean(pre_mse, na.rm=TRUE),8)
lik <- ny * round(log(sum(pre_mse, na.rm=TRUE)),8)
rmse <- round(sqrt(mse),8)
mpe <- round(mean(pre_mpe, na.rm=TRUE),8)
mape <- round(mean(pre_mape, na.rm=TRUE),8)
smape <- round(mean(pre_smape, na.rm=TRUE),8)
mdae <- round(median(pre_mae, na.rm=TRUE),8)
mdse <- round(median(pre_mse, na.rm=TRUE),8)
rmdse <- round(sqrt(mdse),8)
mdpe <- round(median(pre_mpe, na.rm=TRUE),8)
mdape <- round(median(pre_mape, na.rm=TRUE),8)
smdape <- round(median(pre_smape, na.rm=TRUE),8)
if (object$is.season==TRUE){
Dec <- stats::decompose(train_set,type="multiplicative")
des_input <- train_set/Dec$seasonal
}else{
des_input <- train_set
}
if (length(object$seasonal.period) > 1){
naive1Accry <- NaiveSV_Accry(as.double(ts_act), as.double(object$seasonal.period), 1)
naive2Accry <- NaiveSV_Accry(as.double(des_input), as.double(object$seasonal.period), 1)
}else {
naive1Accry <- NaiveSD_Accry(as.double(ts_act), as.double(object$seasonal.period), 1)
naive2Accry <- NaiveSD_Accry(as.double(des_input), as.double(object$seasonal.period), 1)
}
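  # MASE scales the in-sample MAE by the accuracy of a (seasonal) naive benchmark;
  # OWA averages the sMAPE and MASE ratios, each scaled by the benchmark value
  # computed on the deseasonalized series.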
mase <- mae / naive1Accry
owa <- ((smape / naive2Accry) + (mase / naive2Accry)) / 2
aic <- lik + 2 * np
bic <- lik + log(ny) * np
aicc <- aic + 2 * np * (np + 1) / (ny - np - 1)
stdDev_mae <- round(sqrt(var(pre_mae, na.rm=TRUE)),8)
skew_mae <- round(timeSeries::colSkewness(pre_mae),8)
kurt_mae <- round(timeSeries::colKurtosis(pre_mae),8)
stdDev_mse <- round(sqrt(var(pre_mse, na.rm=TRUE)),8)
skew_mse <- round(timeSeries::colSkewness(pre_mse),8)
kurt_mse <- round(timeSeries::colKurtosis(pre_mse),8)
stdDev_mpe <- round(sqrt(var(pre_mpe, na.rm=TRUE)),8)
skew_mpe <- round(timeSeries::colSkewness(pre_mpe),8)
kurt_mpe <- round(timeSeries::colKurtosis(pre_mpe),8)
stdDev_mape <- round(sqrt(var(pre_mape, na.rm=TRUE)),8)
skew_mape <- round(timeSeries::colSkewness(pre_mape),8)
kurt_mape <- round(timeSeries::colKurtosis(pre_mape),8)
stdDev_smape <- round(sqrt(var(pre_smape, na.rm=TRUE)),8)
skew_smape <- round(timeSeries::colSkewness(pre_smape),8)
kurt_smape <- round(timeSeries::colKurtosis(pre_smape),8)
if (!is.na(test_set[1])){
mae_os <- round(mean(pre_mae_os, na.rm=TRUE),8)
mse_os <- round(mean(pre_mse_os, na.rm=TRUE),8)
rmse_os <- round(sqrt(mse_os),8)
mpe_os <- round(mean(pre_mpe_os, na.rm=TRUE),8)
mape_os <- round(mean(pre_mape_os, na.rm=TRUE),8)
smape_os <- round(mean(pre_smape_os, na.rm=TRUE),8)
mdae_os <- round(median(pre_mae_os, na.rm=TRUE),8)
mdse_os <- round(median(pre_mse_os, na.rm=TRUE),8)
rmdse_os <- round(sqrt(mdse_os),8)
mdpe_os <- round(median(pre_mpe_os, na.rm=TRUE),8)
mdape_os <- round(median(pre_mape_os, na.rm=TRUE),8)
smdape_os <- round(median(pre_smape_os, na.rm=TRUE),8)
mase_os <- mae_os / naive1Accry
owa_os <- ((smape_os / naive2Accry) + (mase_os / naive2Accry)) / 2
}else {
mae_os <- NA
mse_os <- NA
lik_os <- NA
rmse_os <- NA
mpe_os <- NA
mape_os <- NA
smape_os <- NA
mdae_os <- NA
mdse_os <- NA
rmdse_os <- NA
mdpe_os <- NA
mdape_os <- NA
smdape_os <- NA
mase_os <- NA
owa_os <- NA
}
RawAccuracy_is <- list("MAE"=pre_mae, "MSE"=pre_mse, "MPE"= pre_mpe, "MAPE"=pre_mape, "sMAPE"=pre_smape)
RawAccuracy_os <- list("MAE"=pre_mae_os, "MSE"=pre_mse_os, "MPE"= pre_mpe_os, "MAPE"=pre_mape_os, "sMAPE"=pre_smape_os)
RawAccuracy_all <- list("inSample"=RawAccuracy_is, "outSample"=RawAccuracy_os)
MAE_is <- list("MAE"=mae, "MdAE"=mdae, "stdDev.MAE"=stdDev_mae, "skewness.MAE"=skew_mae, "kurtosis.MAE"=kurt_mae)
MSE_is <- list("MSE"=mse, "MdSE"=mdse, "RMSE" = rmse, "RMdSE" = rmdse, "stdDev.MSE"=stdDev_mse, "skewness.MSE"=skew_mse, "kurtosis.MSE"=kurt_mse)
MPE_is <- list("MPE"=mpe, "MdPE"=mdpe, "stdDev.MPE"=stdDev_mpe, "skewness.MPE"=skew_mpe, "kurtosis.MPE"=kurt_mpe)
MAPE_is <- list("MAPE"=mape, "MdAPE"=mdape, "stdDev.MAPE"=stdDev_mape, "skewness.MAPE"=skew_mape, "kurtosis.MAPE"=kurt_mape)
sMAPE_is <- list("sMAPE"=smape, "sMdAPE"=smdape, "stdDev.sMAPE"=stdDev_smape, "skewness.sMAPE"=skew_smape, "kurtosis.sMAPE"=kurt_smape)
MASE_is <- list("MASE"=round(mase, 8), "MdASE"=NA, "stdDev.MASE"=NA, "skewness.MASE"=NA, "kurtosis.MASE"=NA)
OWA_is <- list("OWA"=round(owa, 8), "stdDev.OWA"=NA, "skewness.OWA"=NA, "kurtosis.OWA"=NA)
MAE_os <- list("MAE"=mae_os, "MdAE"=mdae_os)
MSE_os <- list("MSE"=mse_os, "MdSE"=mdse_os, "RMSE" = rmse_os, "RMdSE" = rmdse_os)
MPE_os <- list("MPE"=mpe_os, "MdPE"=mdpe_os)
MAPE_os <- list("MAPE"=mape_os, "MdAPE"=mdape_os)
sMAPE_os <- list("sMAPE"=smape_os, "sMdAPE"=smdape_os)
MASE_os <- list("MASE"=round(mase_os, 8), "MdASE"=NA)
OWA_os <- list("OWA"=round(owa_os, 8))
MAE_all <- list("inSample"=MAE_is, "outSample"=MAE_os)
MSE_all <- list("inSample"=MSE_is, "outSample"=MSE_os)
MPE_all <- list("inSample"=MPE_is, "outSample"=MPE_os)
MAPE_all <- list("inSample"=MAPE_is, "outSample"=MAPE_os)
sMAPE_all <- list("inSample"=sMAPE_is, "outSample"=sMAPE_os)
MASE_all <- list("inSample"=MASE_is, "outSample"=MASE_os)
OWA_all <- list("inSample"=OWA_is, "outSample"=OWA_os)
fits <- list("sigma2" = round(sum(pre_mse, na.rm=TRUE),8) / (ny - np),
"loglik" = -0.5 * lik,
"AIC" = aic,
"AICc" = aicc,
"BIC" = bic,
"MSE" = mse,
"MAE" = mae,
"sMAPE" = smape,
"MASE" = mase,
"OWA" = owa)
my_list <- list("MAE"=MAE_all, "MSE"=MSE_all, "MPE"= MPE_all, "MAPE"=MAPE_all, "sMAPE"=sMAPE_all, "MASE"=MASE_all, "OWA"=OWA_all, "RawAccuracy"=RawAccuracy_all, "fits"=fits)
if (print.out==TRUE) {
opscipen <- options("scipen" = 100, "digits"=4)
on.exit(options(opscipen))
cat("Model Fitting Measures:","\n")
print_out <- c(fits$sigma2, fits$loglik, MAE_all$inSample$MAE, MSE_all$inSample$MSE, MSE_all$inSample$RMSE, MPE_all$inSample$MPE, MAPE_all$inSample$MAPE, sMAPE_all$inSample$sMAPE, MASE_all$inSample$MASE, OWA_all$inSample$OWA)
names(print_out) <- c("sigma2", "loglik", "MAE", "MSE", "RMSE", "MPE", "MAPE", "sMAPE", "MASE", "OWA")
cat("\n")
print(print_out)
cat("\n")
cat("Out-Sample Accuracy Measures:","\n")
print_out <- c(MAE_all$outSample$MAE, MSE_all$outSample$MSE, MSE_all$outSample$RMSE, MPE_all$outSample$MPE, MAPE_all$outSample$MAPE, sMAPE_all$outSample$sMAPE, MASE_all$outSample$MASE, OWA_all$outSample$OWA)
names(print_out) <- c("MAE", "MSE", "RMSE", "MPE", "MAPE", "sMAPE", "MASE", "OWA")
cat("\n")
print(print_out)
cat("\n\n")
}
return(my_list)
}
| /scratch/gouwar.j/cran-all/cranData/ATAforecasting/R/ATA_Accuracy.R |
#' The ATA.BoxCoxAttr function works with many different types of inputs.
#'
#' @param bcMethod Choose method to be used in calculating lambda. "guerrero" (Guerrero, V.M., 1993) is the default. The other method is "loglik".
#' @param bcLower Lower limit for possible lambda values. The lower value is limited by -5. Default value is 0.
#' @param bcUpper Upper limit for possible lambda values. The upper value is limited by 5. Default value is 5.
#' @param bcBiasAdj Use adjusted back-transformed mean for Box-Cox transformations.
#' If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts.
#' If bcBiasAdj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.
#' If \code{bcBiasAdj=TRUE}, it can either be the forecast variance, or a list containing the interval \code{level} and the corresponding \code{upper} and \code{lower} intervals.
#'
#' @return An object of class \code{ataoptim}.
#'
#' @author Ali Sabri Taylan and Hanife Taylan Selamlar
#'
#' @seealso \code{\link{BoxCox}}, \code{\link{InvBoxCox}}, \code{\link{BoxCox.lambda}}
#'
#' @references
#'
#' \insertRef{boxcox1964}{ATAforecasting}
#'
#' \insertRef{guerrero1993}{ATAforecasting}
#'
#'
#'
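#' @examples
#' # A minimal usage sketch: build an attribute set and pass it to ATA() via transform.attr.
#' bc_attr <- ATA.BoxCoxAttr(bcMethod = "loglik", bcLower = 0, bcUpper = 1)
#' \dontrun{
#' ata_fit <- ATA(touristTR, h = 24, transform.method = "Box_Cox", transform.attr = bc_attr)
#' }
#'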
#' @export
ATA.BoxCoxAttr <- function(bcMethod = "guerrero", bcLower = 0, bcUpper = 5, bcBiasAdj = FALSE)
{
if ((bcMethod != "guerrero" & bcMethod != "loglik") | !is.character(bcMethod)){
warning("Selected method for calculating lambda must be string. guerrero or loglik for calculating lambda.")
bcMethod <- "guerrero"
}
  if(bcLower < 0){
    warning("Specified lower value is less than the minimum, setting bcLower=0")
    bcLower <- 0
  }
  if(bcUpper > 5){
    warning("Specified upper value is larger than the maximum, setting bcUpper=5")
    bcUpper <- 5
  }
if (!is.logical(bcBiasAdj)) {
warning("bcBiasAdj information not found, defaulting to FALSE.")
    bcBiasAdj <- FALSE
}
mylist <- list("bcMethod"=bcMethod, "bcLower"=bcLower, "bcUpper"=bcUpper, "bcBiasAdj"=bcBiasAdj)
attr(mylist, "class") <- "ataoptim"
return(mylist)
}
| /scratch/gouwar.j/cran-all/cranData/ATAforecasting/R/ATA_BoxCoxAttributes.r |
#' Confidence Interval function for the ATA Method
#'
#' @param object An \code{ATA} object is required.
#' @param ci.level Confidence level, for example: 90, 95 or 99.
#'
#' @return The confidence interval output for the ATA forecasts
#'
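#' @examples
#' \dontrun{
#' # A minimal usage sketch: widen the default 95% interval to 99%.
#' ata_fit <- ATA(touristTR, h = 12, print.out = FALSE, plot.out = FALSE)
#' ci99 <- ATA.CI(ata_fit, ci.level = 99)
#' }
#'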
#' @importFrom stats qnorm qt sd ts tsp tsp<-
#'
#' @export
#'
ATA.CI <- function(object, ci.level = 95)
{
ata.output <- object
if (!inherits(ata.output, "ata")){
return("The Input must be 'ata' object. Please use ATA function to produce 'ata' object. Calculation of Confidence Intervals of ATA Forecasts will terminate!")
}
ci.alpha <- 1 - (ci.level/100)
length_resid <- length(ata.output$residuals[!is.na(ata.output$residuals)])
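  # Use Student-t quantiles for short residual series (n <= 30), normal quantiles otherwise.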
if (length_resid<=30){
ci.ZTvalue <- stats::qt(ci.alpha/2, df=length_resid)
}else {
ci.ZTvalue <- stats::qnorm(ci.alpha/2, lower.tail=FALSE)
}
  std_resid <- sd(ata.output$residuals, na.rm=TRUE)
ci.value <- sqrt(1:ata.output$h) * abs(ci.ZTvalue) * std_resid
tsp_F <- tsp(ata.output$forecast)
forecast.lower <- ts(ata.output$forecast - ci.value, frequency = tsp_F[3], start = tsp_F[1])
forecast.upper <- ts(ata.output$forecast + ci.value, frequency = tsp_F[3], start = tsp_F[1])
my_list <- list("forecast"=ata.output$forecast, "forecast.lower"=forecast.lower, "forecast.upper"=forecast.upper)
return(my_list)
}
| /scratch/gouwar.j/cran-all/cranData/ATAforecasting/R/ATA_CI.r |
#' The core algorithm of the ATA Method
#'
#' @param X A numeric vector or time series.
#' @param pk Value of Level parameter.
#' @param qk Value of Trend parameter.
#' @param phik Value of Damping Trend parameter.
#' @param mdlType An one-character string identifying method using the framework terminology.
#' @param initialLevel "none" is default,
#' \itemize{
#' \item{none} : ATA Method calculates the pth observation in \code{X} for level.
#' \item{mean} : ATA Method calculates average of first p value in \code{X}for level.
#' \item{median}: ATA Method calculates median of first p value in \code{X}for level.
#' }
#' @param initialTrend "none" is default,
#' \itemize{
#' \item{none} : ATA Method calculates the qth observation in \code{X} for trend.
#' \item{mean} : ATA Method calculates average of first q value in \code{X(T)-X(T-1)} for trend.
#' \item{median}: ATA Method calculates median of first q value in \code{X(T)-X(T-1)} for trend.
#' }
#' @return Returns an object of class "\code{ATA}"
#'
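#' @examples
#' # A minimal sketch on simulated data: additive ATA with p = 2, q = 1, phi = 1.
#' set.seed(42)
#' core_fit <- ATA.Core(ts(cumsum(rnorm(60, 1, 2)) + 100), pk = 2, qk = 1, phik = 1,
#'                      mdlType = "A", initialLevel = "none", initialTrend = "none")
#'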
#' @importFrom forecast msts
#' @importFrom stats as.ts ts tsp tsp<-
#'
#' @export
ATA.Core <- function(X, pk, qk, phik, mdlType, initialLevel, initialTrend)
{
tspX <- tsp(X)
lenX <- length(X)
if ("msts" %in% class(X)) {
X_msts <- attributes(X)$msts
if (any(X_msts >= lenX / 2)) {
X_msts <- X_msts[X_msts < lenX / 2]
}
X_msts <- sort(X_msts, decreasing = FALSE)
}else if ("ts" %in% class(X)) {
X_ts <- frequency(X)
}else {
X_ts <- 1L
}
  is_msts <- "msts" %in% class(X)  # record the class now; as.numeric() below discards it
  X <- as.numeric(X)
ata.S <- rep(NA, lenX)
ata.T <- rep(NA, lenX)
ata.error <- rep(NA, lenX)
ata.fitted <- rep(NA, lenX)
ata.coefp <- rep(NA, lenX)
ata.coefq <- rep(NA, lenX)
S_1 <- NA
T_1 <- NA
  if (initialTrend != "none"){
if (mdlType=="A"){
IT_0 <- X-ATA.Shift(X,1)
}else {
IT_0 <- X/ATA.Shift(X,1)
}
}
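  # ATA smoothing weights shrink with the observation index t: the level weight is p/t
  # and the trend weight is q/t, so the weight on the newest observation decays as
  # more data accumulate.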
for(i in 1:(lenX-1)){
Xh = X[i+1]
if (i==1) {
Xlag = X[i]
Xobs = X[i]
}else {
if (initialLevel=="mean" & i<=pk){
Xlag = mean(X[1:i-1])
Xobs = mean(X[1:i])
}else if (initialLevel=="median" & i<=pk){
Xlag = median(X[1:i-1])
Xobs = median(X[1:i])
}else {
if(initialLevel=="mean" & ((i-1)<=pk)){
Xlag = mean(X[1:i-1])
}else if (initialLevel=="median" & ((i-1)<=pk)){
Xlag = median(X[1:i-1])
}else{
Xlag = X[i-1]
}
Xobs = X[i]
}
}
if (mdlType=="A") {
T_0 = Xobs - Xlag
}else {
T_0 = Xobs / Xlag
}
if (i == 1){
if (mdlType=="A"){
ata.coefp[i] <- NA
ata.coefq[i] <- NA
ata.S[i] <- S <- Xobs
ata.T[i] <- T <- 0
ata.fitted[i] <- FF <- S + (phik * T)
ata.error[i] <- Xh - FF
S_1 <- S
T_1 <- T
}
if (mdlType=="M"){
ata.coefp[i] <- NA
ata.coefq[i] <- NA
ata.S[i] <- S <- Xobs
ata.T[i] <- T <- 1
ata.fitted[i] <- FF <- S * (T^phik)
ata.error[i] <- Xh - FF
S_1 <- S
T_1 <- T
}
}else if (i<=pk & i<=qk & pk>=qk){
if (mdlType=="A"){
ata.coefp[i] <- NA
ata.coefq[i] <- NA
ata.S[i] <- S <- Xobs
if (initialTrend=="mean"){
ata.T[i] <- T <- mean(IT_0[1:i])
}else if (initialTrend=="median"){
ata.T[i] <- T <- median(IT_0[1:i])
}else {
ata.T[i] <- T <- T_0
}
ata.fitted[i] <- FF <- S + (phik * T)
ata.error[i] <- Xh - FF
S_1 <- S
T_1 <- T
}
if (mdlType=="M"){
ata.coefp[i] <- NA
ata.coefq[i] <- NA
ata.S[i] <- S <- Xobs
if (initialTrend=="mean"){
ata.T[i] <- T <- mean(IT_0[1:i])
}else if (initialTrend=="median"){
ata.T[i] <- T <- median(IT_0[1:i])
}else {
ata.T[i] <- T <- T_0
}
ata.fitted[i] <- FF <- S * (T^phik)
ata.error[i] <- Xh - FF
S_1 <- S
T_1 <- T
}
}else if (i<=pk & i>qk & pk>=qk){
if (mdlType=="A"){
ata.coefp[i] <- NA
ata.coefq[i] <- coefqk <- abs(qk/i)
ata.S[i] <- S <- Xobs
ata.T[i] <- T <- (coefqk * (S-S_1)) + ((1-coefqk) * phik * T_1)
ata.fitted[i] <- FF <- S + (phik * T)
ata.error[i] <- Xh - FF
S_1 <- S
T_1 <- T
}
if (mdlType=="M"){
ata.coefp[i] <- NA
ata.coefq[i] <- coefqk <- abs(qk/i)
ata.S[i] <- S <- Xobs
ata.T[i] <- T <- (coefqk * (S/S_1)) + ((1-coefqk) * (T_1^phik))
ata.fitted[i] <- FF <- S * (T^phik)
ata.error[i] <- Xh - FF
S_1 <- S
T_1 <- T
}
}else if (i>pk & i<=qk & pk>=qk){
if (mdlType=="A"){
ata.coefp[i] <- coefpk <- abs(pk/i)
ata.coefq[i] <- NA
ata.S[i] <- S <- (coefpk * Xobs) + ((1-coefpk) * (S_1 + (phik * T_1)))
if (initialTrend=="mean"){
ata.T[i] <- T <- mean(IT_0[1:i])
}else if (initialTrend=="median"){
ata.T[i] <- T <- median(IT_0[1:i])
}else {
ata.T[i] <- T <- T_0
}
ata.fitted[i] <- FF <- S + (phik * T)
ata.error[i] <- Xh - FF
S_1 <- S
T_1 <- T
}
if (mdlType=="M"){
ata.coefp[i] <- coefpk <- abs(pk/i)
ata.coefq[i] <- NA
ata.S[i] <- S <- (coefpk * Xobs) + ((1-coefpk) * S_1 * (T_1^phik))
if (initialTrend=="mean"){
ata.T[i] <- T <- mean(IT_0[1:i])
}else if (initialTrend=="median"){
ata.T[i] <- T <- median(IT_0[1:i])
}else {
ata.T[i] <- T <- T_0
}
ata.fitted[i] <- FF <- S * (T^phik)
ata.error[i] <- Xh - FF
S_1 <- S
T_1 <- T
}
}else if (i>pk & i>qk & pk>=qk){
if (mdlType=="A"){
ata.coefp[i] <- coefpk <- abs(pk/i)
ata.coefq[i] <- coefqk <- abs(qk/i)
ata.S[i] <- S <- (coefpk * Xobs) + ((1-coefpk) * (S_1 + (phik * T_1)))
ata.T[i] <- T <- (coefqk * (S-S_1)) + ((1-coefqk) * phik * T_1)
ata.fitted[i] <- FF <- S + (phik * T)
ata.error[i] <- Xh - FF
S_1 <- S
T_1 <- T
}
if (mdlType=="M"){
ata.coefp[i] <- coefpk <- abs(pk/i)
ata.coefq[i] <- coefqk <- abs(qk/i)
ata.S[i] <- S <- (coefpk * Xobs) + ((1-coefpk) * S_1 * (T_1^phik))
ata.T[i] <- T <- (coefqk * (S/S_1)) + ((1-coefqk) * (T_1^phik))
ata.fitted[i] <- FF <- S * (T^phik)
ata.error[i] <- Xh - FF
S_1 <- S
T_1 <- T
}
}else {
ata.coefp[i] <- NA
ata.coefq[i] <- NA
ata.S[i] <- NA
ata.T[i] <- NA
ata.fitted[i] <- NA
ata.error[i] <- NA
S_1 <- NA
T_1 <- NA
}
}
ata.fitted <- ATA.Shift(ata.fitted,-1)
ata.error <- ATA.Shift(ata.error,-1)
if ("msts" %in% class(X)) {
X <- forecast::msts(X, start = tspX[1], seasonal.periods = X_msts)
ata.fitted <- forecast::msts(ata.fitted, start = tspX[1], seasonal.periods = X_msts)
ata.error <- forecast::msts(ata.error, start = tspX[1], seasonal.periods = X_msts)
ata.S <- forecast::msts(ata.S, start = tspX[1], seasonal.periods = X_msts)
ata.T <- forecast::msts(ata.T, start = tspX[1], seasonal.periods = X_msts)
ata.coefp <- forecast::msts(ata.coefp, start = tspX[1], seasonal.periods = X_msts)
ata.coefq <- forecast::msts(ata.coefq, start = tspX[1], seasonal.periods = X_msts)
}else {
X <- ts(X, frequency = X_ts, start = tspX[1])
ata.fitted <- ts(ata.fitted, frequency = X_ts, start = tspX[1])
ata.error <- ts(ata.error, frequency = X_ts, start = tspX[1])
ata.S <- ts(ata.S, frequency = X_ts, start = tspX[1])
ata.T <- ts(ata.T, frequency = X_ts, start = tspX[1])
ata.coefp <- ts(ata.coefp, frequency = X_ts, start = tspX[1])
ata.coefq <- ts(ata.coefq, frequency = X_ts, start = tspX[1])
}
my_list <- list("actual" = X, "fitted" = ata.fitted , "level" = ata.S, "trend" = ata.T, "residuals" = ata.error, "coefp" = ata.coefp, "coefq" = ata.coefq,
"p" = as.integer(pk), "q" = as.integer(qk), "phi" = signif(phik,6), "model.type" = mdlType)
attr(my_list, 'class') <- "ata"
return(my_list)
}
| /scratch/gouwar.j/cran-all/cranData/ATAforecasting/R/ATA_Core.R |
#' Seasonal Decomposition for The ATAforecasting
#'
#' @description Automatic seasonal decomposition for ATA Method is called \code{ATA.Decomposition} function in ATAforecasting package.
#' The function returns seasonally adjusted data constructed by removing the seasonal component. The methodology is fully automatic.
#' The \code{ATA.Decomposition} function works with many different types of inputs.
#' @param input It must be a \code{ts}, \code{msts} or \code{numeric} object. If it is a \code{numeric} object, \code{findPeriod} must be 1, 2, 3 or 4. If it is an \code{msts} object, \code{findPeriod} must be 3 or 4.
#' @param s.model A string identifying the method for seasonal decomposition. If NULL, the "decomp" method is the default. The available methods are "none", "decomp", "stl", "stlplus", "tbats", "stR", "x13" and "x11":
#' \itemize{
#' \item{none} : seasonal decomposition is not required.
#' \item{decomp} : classical seasonal decomposition. If \code{decomp}, the \code{stats} package will be used.
#' \item{stl} : seasonal-trend decomposition procedure based on loess developed by Cleveland et al. (1990). If \code{stl}, the \code{stats} and \code{forecast} packages will be used. Multiple seasonal periods are allowed.
#' \item{stlplus} : seasonal-trend decomposition procedure based on loess developed by Cleveland et al. (1990). If \code{stlplus}, the \code{stlplus} package will be used.
#' \item{tbats} : exponential smoothing state space model with Box--Cox transformation, ARMA errors, trend and seasonal components.
#' as described in De Livera, Hyndman & Snyder (2011). Parallel processing is used by default to speed up the computations. If \code{tbats}, the \code{forecast} package will be used. Multiple seasonal periods are allowed.
#' \item{stR} : seasonal-trend decomposition procedure based on regression developed by Dokumentov and Hyndman (2015). If \code{stR}, the \code{stR} package will be used. Multiple seasonal periods are allowed.
#' \item{x13} : seasonal-trend decomposition procedure based on X13ARIMA/SEATS. If \code{x13}, the \code{seasonal} package will be used.
#' \item{x11} : seasonal-trend decomposition procedure based on X11. If \code{x11}, the \code{seasonal} package will be used.
#' }
#' @param s.type A one-character string identifying method for the seasonal component framework. If NULL, "M" is default. The letter "A" for additive model, the letter "M" for multiplicative model.
#' @param s.frequency Value(s) of seasonal periodicity. If \code{s.frequency} is not integer, \code{X} must be \code{msts} time series object. c(s1,s2,s3,...) for multiple period. If \code{X} has multiple periodicity, "tbats" or "stR" seasonal model have to be selected.
#' @param seas_attr_set Assign from \code{ATA.SeasAttr} function. Attributes set for unit root and seasonality tests.
#' For example: period of the input data which have one seasonal pattern --> 12 for monthly / 4 for quarterly / 7 for daily / 5 for business days. periods of the input data which have complex/multiple seasonal patterns --> c(7,354.37,365.25).
#'
#' @return Seasonal components of the univariate time series.
#' \code{ATA.Decomposition} is a list containing at least the following elements:
#' \item{AdjustedX}{Deseasonalized data}
#' \item{SeasIndex}{Particular weights of seasonality given cycle/frequency}
#' \item{SeasActual}{Seasonality given original data}
#' \item{SeasType}{Seasonal decomposition technique}
#'
#' @author Ali Sabri Taylan and Hanife Taylan Selamlar
#' @seealso \code{\link[stats]{stl}}, \code{\link[stats]{decompose}}, \code{\link[seasonal]{seas}},
#' \code{\link[forecast]{tbats}}, \code{\link{stlplus}}, \code{\link[stR]{AutoSTR}}.
#'
#' @keywords Ata seasonal decomposition forecast accuracy ts msts mstl
#'
#' @references
#'
#' \insertRef{shishkin1967}{ATAforecasting}
#'
#' \insertRef{dagum1988}{ATAforecasting}
#'
#' \insertRef{cleveland1990stl}{ATAforecasting}
#'
#' \insertRef{hafen2010local}{ATAforecasting}
#'
#' \insertRef{delivera2011}{ATAforecasting}
#'
#' \insertRef{dokumentov2015}{ATAforecasting}
#'
#' \insertRef{dokumentov2020str}{ATAforecasting}
#'
#' \insertRef{monsell2003toward}{ATAforecasting}
#'
#' \insertRef{monsell2007x}{ATAforecasting}
#'
#' \insertRef{artseasonal2018}{ATAforecasting}
#'
#'
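#' @examples
#' # A minimal usage sketch: classical multiplicative decomposition of a monthly
#' # series, using the default test attributes from ATA.SeasAttr().
#' seas_attr <- ATA.SeasAttr()
#' dec <- ATA.Decomposition(AirPassengers, s.model = "decomp", s.type = "M",
#'                          s.frequency = 12, seas_attr_set = seas_attr)
#' dec$SeasIndex
#'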
#' @importFrom forecast mstl msts tbats tbats.components
#' @importFrom stats cycle decompose frequency ts tsp tsp<- stl
#' @importFrom stlplus stlplus
#' @importFrom stR AutoSTR
#' @importFrom seasonal seas series udg
#' @importFrom Rdpack reprompt
#'
#' @export
#'
ATA.Decomposition <- function(input, s.model, s.type, s.frequency, seas_attr_set)
{
tsp_input <- tsp(input)
last_seas_type <- s.type
if (s.model == "none" | min(s.frequency)==1){
if (s.type=="A"){
adjX <- input
SeasActual <- rep(0,times=length(input))
SeasActual <- ts(SeasActual, frequency = tsp_input[3], start = tsp_input[1])
s.frequency <- frequency(input)
SeasIndex <- rep(0,times=s.frequency)
}else {
adjX <- input
SeasActual <- rep(1,times=length(input))
SeasActual <- ts(SeasActual, frequency = tsp_input[3], start = tsp_input[1])
s.frequency <- frequency(input)
SeasIndex <- rep(1,times=s.frequency)
}
}else {
if (class(input)[1]!="ts" & class(input)[1]!="msts"){
return("The data set must be time series object (ts or msts) ATA Method was terminated!")
}
input <- forecast::msts(input, start=tsp_input[1], seasonal.periods = s.frequency)
tsp_input <- tsp(input)
if (s.model=="decomp"){ # Do classical decomposition
if (s.type=="A"){
desX <- stats::decompose(input, type = c("additive"))
adjX <- forecast::seasadj(desX)
SeasActual <- desX$seasonal
SeasIndex <- rep(NA,times=s.frequency)
for (s in 1:s.frequency){
SeasIndex[s] <- as.numeric(SeasActual[cycle(SeasActual)==s][1])
}
}else {
desX <- stats::decompose(input, type = c("multiplicative"))
adjX <- forecast::seasadj(desX)
SeasActual <- desX$seasonal
SeasIndex <- rep(NA,times=s.frequency)
for (s in 1:s.frequency){
SeasIndex[s] <- as.numeric(SeasActual[cycle(SeasActual)==s][1])
}
}
}else if (s.model=="stl"){ # Do STL decomposition
if (length(s.frequency)==1){
stldesX <- stats::stl(input, s.window = "per", robust=TRUE)
adjX <- forecast::seasadj(stldesX)
SeasActual <- forecast::seasonal(stldesX)
SeasIndex <- rep(NA,times=s.frequency)
for (s in 1:s.frequency){
SeasIndex[s] <- as.numeric(SeasActual[cycle(SeasActual)==s][1])
}
}else {
stldesX <- forecast::mstl(input, lambda = NULL, s.window = "per")
nameCol <- colnames(stldesX)
nameCol <- grep('Season', nameCol, value=TRUE)
if (length(nameCol)==0){
if (s.type=="A"){
adjX <- input
SeasActual <- forecast::msts(rep(0,times=length(input)), start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(0,times=max(s.frequency))
}else {
adjX <- input
SeasActual <- forecast::msts(rep(1,times=length(input)), start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(1,times=max(s.frequency))
}
}else {
adjX <- forecast::seasadj(stldesX)
if (length(s.frequency)==1){
SeasActual <- stldesX[,nameCol]
SeasIndex <- rep(NA,times=s.frequency)
for (s in 1:s.frequency){
SeasIndex[s] <- as.numeric(SeasActual[cycle(SeasActual)==s][1])
}
}else {
SeasActual <- rowSums(stldesX[,nameCol],na.rm=TRUE)
SeasActual <- forecast::msts(SeasActual, start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(NA,times=max(s.frequency))
for (s in 1:max(s.frequency)){
SeasIndex[s] <- as.numeric(SeasActual[cycle(SeasActual)==s][1])
}
}
}
}
}else if (s.model=="stlplus"){ # Do STLPlus decomposition
stlplusdesX <- stlplus::stlplus(input, s.window = "per", robust=TRUE)
adjX <- input - stlplusdesX$data$seasonal
SeasActual <- stlplusdesX$data$seasonal
SeasActual <- forecast::msts(SeasActual, start=tsp_input[1], seasonal.periods = s.frequency)
SeasIndex <- rep(NA,times=s.frequency)
for (s in 1:s.frequency){
SeasIndex[s] <- as.numeric(SeasActual[cycle(SeasActual)==s][1])
}
}else if (s.model=="stR"){ # Do stR decomposition
if (length(input)>1600){
stRdesX <- stR::AutoSTR(input)
}else {
stRdesX <- stR::AutoSTR(input, robust=TRUE)
}
stRcomp <- stR_components(stRdesX)
nameCol <- colnames(stRcomp)
nameCol <- grep('Seasonal', nameCol, value=TRUE)
if (length(nameCol)==0){
if (s.type=="A"){
adjX <- input
SeasActual <- forecast::msts(rep(0,times=length(input)), start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(0,times=max(s.frequency))
}else {
adjX <- input
SeasActual <- forecast::msts(rep(1,times=length(input)), start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(1,times=max(s.frequency))
}
}else {
adjX <- stR_seasadj(stRdesX)
if (length(s.frequency)==1){
SeasActual <- stRcomp[,nameCol]
SeasIndex <- rep(NA,times=s.frequency)
for (s in 1:s.frequency){
SeasIndex[s] <- as.numeric(SeasActual[cycle(SeasActual)==s][1])
}
}else {
SeasActual <- rowSums(stRcomp[,nameCol],na.rm=TRUE)
SeasActual <- forecast::msts(SeasActual, start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(NA,times=max(s.frequency))
for (s in 1:max(s.frequency)){
SeasIndex[s] <- as.numeric(SeasActual[cycle(SeasActual)==s][1])
}
}
}
}else if (s.model=="tbats"){ # Do tbats decomposition
tbatsdesX <- forecast::tbats(input, use.box.cox = FALSE)
tbatscomp <- forecast::tbats.components(tbatsdesX)
nameCol <- colnames(tbatscomp)
nameCol <- grep('season', nameCol, value=TRUE)
if (length(nameCol)==0){
if (s.type=="A"){
adjX <- input
SeasActual <- forecast::msts(rep(0,times=length(input)), start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(0,times=max(s.frequency))
}else {
adjX <- input
SeasActual <- forecast::msts(rep(1,times=length(input)), start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(1,times=max(s.frequency))
}
}else {
adjX <- forecast::seasadj(tbatsdesX)
if (length(s.frequency)==1){
SeasActual <- tbatscomp[,nameCol]
SeasIndex <- rep(NA,times=s.frequency)
for (s in 1:s.frequency){
SeasIndex[s] <- as.numeric(SeasActual[cycle(SeasActual)==s][1])
}
}else {
SeasActual <- rowSums(tbatscomp[,nameCol],na.rm=TRUE)
SeasActual <- forecast::msts(SeasActual, start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(NA,times=max(s.frequency))
for (s in 1:max(s.frequency)){
SeasIndex[s] <- as.numeric(mean(SeasActual[cycle(SeasActual)==s]))
}
}
}
}else if (s.model=="x13"){ # Do X13ARIMA/SEATS decomposition
x13desX <- seasonal::seas(input, transform.function="none", estimate.maxiter=seas_attr_set$x13.estimate.maxiter, estimate.tol=seas_attr_set$x13.estimate.tol)
SeasActual <- seasonal::series(x13desX,"seats.adjustfac")
      s.type <- ifelse(seasonal::udg(x13desX, stats = "finmode")=="additive", "A", "M")
if (is.null(SeasActual)) {
if (s.type=="A"){
adjX <- input
SeasActual <- forecast::msts(rep(0,times=length(input)), start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(0,times=max(s.frequency))
}else {
adjX <- input
SeasActual <- forecast::msts(rep(1,times=length(input)), start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(1,times=max(s.frequency))
}
}else {
adjX <- seasonal::series(x13desX,"seats.seasonaladj")
SeasIndex <- rep(NA,times=s.frequency)
for (s in 1:s.frequency){
SeasIndex[s] <- as.numeric(mean(SeasActual[cycle(SeasActual)==s]))
}
}
}else if (s.model=="x11"){ # Do X13ARIMA/SEATS X11 decomposition
x11desX <- seasonal::seas(input, x11 = "", transform.function="none", estimate.maxiter=seas_attr_set$x11.estimate.maxiter, estimate.tol=seas_attr_set$x11.estimate.tol)
SeasActual <- seasonal::series(x11desX,"x11.adjustfac")
      s.type <- ifelse(seasonal::udg(x11desX, stats = "finmode")=="additive", "A", "M")
if (is.null(SeasActual)) {
if (s.type=="A"){
adjX <- input
SeasActual <- forecast::msts(rep(0,times=length(input)), start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(0,times=max(s.frequency))
}else {
adjX <- input
SeasActual <- forecast::msts(rep(1,times=length(input)), start=tsp_input[1], seasonal.periods = tsp_input[3])
SeasIndex <- rep(1,times=max(s.frequency))
}
}else {
adjX <- seasonal::series(x11desX,"x11.seasadj")
SeasIndex <- rep(NA,times=s.frequency)
for (s in 1:s.frequency){
SeasIndex[s] <- as.numeric(mean(SeasActual[cycle(SeasActual)==s]))
}
}
}
}
my_list <- list("AdjustedX" = adjX, "SeasIndex" = SeasIndex, "SeasActual" = SeasActual, "SeasType" = s.type)
gc()
return(my_list)
}
### Extract STR components
stR_components <- function(object)
{
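# Assemble the decomposition into a ts matrix: column 1 holds the input
# data, the middle columns the fitted STR predictors, and the last
# column the remainder ("Random").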
len_y <- length(object$input$data)
len_x <- length(object$output$predictors) + 2
str_cmp <- matrix(0, len_y, len_x)
str_cmp[, 1] <- as.vector(object$input$data)
str_cmp[, ncol(str_cmp)] <- as.vector(object$output$random$data)
names <- rep("", ncol(str_cmp))
names[c(1, ncol(str_cmp))] = c("Data", "Random")
for(i in seq_along(object$output$predictors)) {
str_cmp[, i+1] <- object$output$predictors[[i]]$data
names[i+1] <- object$input$predictors[[i]]$name
}
colnames(str_cmp) <- names
str_cmp <- ts(str_cmp)
if("ts" %in% class(object$input$data))
tsp(str_cmp) <- tsp(object$input$data)
return(str_cmp)
}
### Seasonal adjustment based on STR
stR_seasadj <- function(object, include = c("Trend", "Random"))
{
str_cmp <- stR_components(object)
nameTrend <- colnames(str_cmp)[2]
if(is.null(nameTrend) || is.na(nameTrend) || nchar(nameTrend) == 0) {
warning("Trend component is not specified by name, using the first component as the Trend component.")
colnames(str_cmp)[2] <- "Trend"
}
for(cmpname in include[!(include %in% colnames(str_cmp))]) {
warning(paste(cmpname, "is not one of the components of the decomposition, skipping..."))
}
result <- NULL
for(i in include[include %in% colnames(str_cmp)]) {
if(is.null(result)) {
result <- str_cmp[,i]
} else {
result <- result + str_cmp[,i]
}
}
return(result)
}
# ---- ATAforecasting/R/ATA_Decomposition.r ----
#' Find Frequency Using Spectral Density Of A Time Series From AR Fit
#'
#' @param x a univariate time series
#'
#' @return frequency/cycle of the given time series
#'
#' @importFrom stats spec.ar
#'
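#' @examples
#' # A minimal sketch: a strongly seasonal monthly series such as
#' # AirPassengers is expected to yield a period of 12.
#' find.freq(AirPassengers)
#'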
#' @export
#'
find.freq <- function(x)
{
n <- length(x)
spec <- stats::spec.ar(c(x),plot=FALSE)
if(max(spec$spec)>10) # Arbitrary threshold chosen by trial and error.
{
period <- round(1/spec$freq[which.max(spec$spec)])
if(period==Inf) # Find next local maximum
{
j <- which(diff(spec$spec)>0)
if(length(j)>0)
{
nextmax <- j[1] + which.max(spec$spec[j[1]:500])
period <- round(1/spec$freq[nextmax])
}
else
period <- 1
}
}
else
period <- 1
return(period)
}
# ---- ATAforecasting/R/ATA_Find_Freq.R ----
#' Find Frequency Using Periodogram
#'
#' @param x a univariate time series
#'
#' @return frequency/cycle of the given data
#'
#' @importFrom TSA periodogram
#' @importFrom utils head
#'
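#' @examples
#' # A minimal sketch: candidate periods (below 367) taken from the five
#' # largest periodogram peaks; periodogram() also draws a plot.
#' find.freq.fourier(AirPassengers)
#'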
#' @export
#'
find.freq.fourier <- function(x)
{
pppx <- TSA::periodogram(x)
dddx = data.frame(freq=pppx$freq, spec=pppx$spec)
orderpppx = dddx[order(-dddx$spec),]
top5X = utils::head(orderpppx, 5)
freq_all <- 1/top5X$freq
period <- sort(freq_all)
period <- period[period < 367]
return(period)
}
# ---- ATAforecasting/R/ATA_Find_Freq_Fourier.R ----
#' Find Multi Frequency Using Spectral Density Of A Time Series From AR Fit
#'
#' @param x a univariate time series
#'
#' @return multi frequencies/cycles of the given data
#'
#' @importFrom xts period.apply
#'
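#' @examples
#' \dontrun{
#' # A sketch: nested seasonal periods found by repeatedly aggregating the
#' # series at each detected frequency (wrapped in dontrun because the
#' # xts-based aggregation step is comparatively heavy).
#' find.multi.freq(AirPassengers)
#' }
#'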
#' @export
#'
find.multi.freq <- function(x)
{
f = find.freq(x)
if (is.na(f)){
f <- 1
}
freqs=c(f)
while(f>1){
start=1 #also try start=f;
x=xts::period.apply(x,seq(start,length(x),f),mean)
f=find.freq(x)
if (is.na(f)){
f <- 1
}
freqs=c(freqs,f)
}
if(length(freqs)==1){ return(freqs) }
for(i in 2:length(freqs)){
freqs[i]=freqs[i]*freqs[i-1]
}
freqs[1:(length(freqs)-1)]
}
# ---- ATAforecasting/R/ATA_Find_Multi_Freq.R ----
#' Forecasting Method for The ATAforecasting
#'
#' @description \code{ATA.Forecast} is a generic function for forecasting of the ATA Method.
#'
#' @param object An \code{ata} object is required for forecast.
#' @param h Number of periods for forecasting.
#' @param out.sample A numeric vector or time series of class \code{ts} or \code{msts} for out-sample.
#' @param ci.level Confidence Interval levels for forecasting. Default value is 95.
#' @param negative.forecast Negative values are allowed for forecasting. Default value is TRUE. If FALSE, all negative values for forecasting are set to 0.
#' @param onestep Default is FALSE. if TRUE, the dynamic forecast strategy uses a one-step model multiple times (\code{h} forecast horizon) where the prediction for the prior time step is used as an input for making a prediction on the following time step.
#' @param print.out Default is TRUE. If FALSE, forecast summary of ATA Method is not shown.
#'
#' @return An object of class \code{ata} and forecast values.
#'
#' @author Ali Sabri Taylan and Hanife Taylan Selamlar
#'
#' @seealso \code{forecast}, \code{stlplus}, \code{stR}, \code{\link[stats]{stl}}, \code{\link[stats]{decompose}},
#' \code{tbats}, \code{seasadj}.
#'
#' @references
#'
#' \insertRef{yapar2017mses}{ATAforecasting}
#'
#' \insertRef{yapar2018mhes}{ATAforecasting}
#'
#' \insertRef{yapar2018mses}{ATAforecasting}
#'
#' \insertRef{yapar2019ata}{ATAforecasting}
#'
#'
#' @keywords Ata forecast accuracy ts msts
#'
#' @importFrom stats cycle end frequency start ts tsp tsp<- var
#' @importFrom Rdpack reprompt
#' @importFrom forecast msts
#'
#' @examples
#' trainATA <- head(touristTR, 84)
#' ata_fit <- ATA(trainATA, parPHI = 1, seasonal.test = TRUE, seasonal.model = "decomp")
#' ata_fc <- ATA.Forecast(ata_fit, h=12)
#'
#' @export
ATA.Forecast <- function(object, h=NULL, out.sample=NULL, ci.level=95, negative.forecast=TRUE, onestep = FALSE, print.out = TRUE)
{
if (!inherits(object, "ata")){
stop("The Input must be 'ata' object. Please use ATA() function to produce 'ata' object.")
}
m <- frequency(object$actual)
if(!is.null(out.sample)){
if(!is.na(out.sample[1])){
h <- length(out.sample)
message("Forecast horizon has been set to the length of out.sample data.")
}
}else{
if (is.null(h)){
if (m==4){
h <- 8
}else if (m==5){
h <- 10
}else if (m==7){
h <- 14
}else if (m==12){
h <- 24
}else if (m==24){
h <- 48
}else {
h <- 6
}
message(paste("Input forecast horizon has been changed with ", h))
}
}
tsp_y <- tsp(object$actual)
fsample <- forecast::msts(rep(NA,h), start = end(object$actual) + ifelse(tsp_y[3]>1, 1/tsp_y[3], 1), seasonal.periods = object$seasonal.period)
freqYh <- cycle(fsample)
if (object$is.season==FALSE & object$seasonal.type=="A"){
OS_SIValue <- rep(0,times=h)
}else if (object$is.season==FALSE & object$seasonal.type=="M"){
OS_SIValue <- rep(1,times=h)
}else if (object$is.season==TRUE){
OS_SIValue <- rep(NA,times=h)
for (k in 1:h){
OS_SIValue[k] <- object$seasonal.index[freqYh[k]]
}
}
if (onestep == FALSE){
y <- SubATA.Forecast(object, hh = h)
}else {
y <- SubATA.OneStepForecast(object, outSample = out.sample, hh = h)
}
object$onestep.forecast <- y$onestep.forecast
if(object$seasonal.type=="A"){
ATA_forecast <- y$forecast + OS_SIValue
}else {
ATA_forecast <- y$forecast * OS_SIValue
}
if (negative.forecast==TRUE){
object$forecast <- forecast::msts(ATA_forecast, start = end(object$actual) + ifelse(tsp_y[3]>1, 1/tsp_y[3], 1), seasonal.periods = object$seasonal.period)
}else {
ATA_forecast[ATA_forecast<0] <- 0
object$forecast <- forecast::msts(ATA_forecast, start = end(object$actual) + ifelse(tsp_y[3]>1, 1/tsp_y[3], 1), seasonal.periods = object$seasonal.period)
}
object$h <- h
accuracy.ata <- ATA.Accuracy(object, out.sample = out.sample, print.out = FALSE)
object$accuracy <- accuracy.ata
# if() instead of ifelse(): ifelse() would truncate the out-sample series to its first element
object$out.sample <- if (is.null(out.sample)) fsample else out.sample
ci.output <- ATA.CI(object = object, ci.level = ci.level)
object$ci.level <- ci.level
if (negative.forecast==TRUE){
object$forecast.lower <- ci.output$forecast.lower
object$forecast.upper <- ci.output$forecast.upper
}else {
ci_low <- ci.output$forecast.lower
ci_up <- ci.output$forecast.upper
ci_low[ci_low<0] <- 0
ci_up[ci_up<0] <- 0
object$forecast.lower <- ci_low
object$forecast.upper <- ci_up
}
object$onestep <- onestep
attr(object, "class") <- "ata"
if (print.out==TRUE) {
opscipen <- options("scipen" = 100, "digits"=4)
on.exit(options(opscipen))
print_out <- cbind(object$forecast.lower, object$forecast, object$forecast.upper)
colnames(print_out) <- c("lower", "forecast", "upper")
print(print_out)
}
gc()
return(object)
}
# ---- ATAforecasting/R/ATA_Forecast.R ----
#' Attributes Set For Unit Root and Seasonality Tests
#'
#' @description This function is a class of seasonality tests using \code{corrgram_test} from the ATAforecasting package and the \code{ndiffs} and \code{nsdiffs} functions from the forecast package.
#' Also, this function is a modified version of \code{ndiffs} and \code{nsdiffs} from Hyndman et al.'s \code{forecast} package.
#' Please review manual and vignette documents of latest \code{forecast} package. According to \code{forecast} package,
#' \code{ndiffs} and \code{nsdiffs} functions to estimate the number of differences required to make a given time series stationary.
#' \code{ndiffs} uses unit root tests to determine the number of differences required for time series to be made trend stationary. Several different tests are available:
#' \itemize{
#' \item {uroot.test = 'kpss'} : the KPSS test is used with the null hypothesis that \code{x} has a stationary root against a unit-root alternative. Then the test returns the least number of differences required to pass the test at the level \code{uroot.alpha}.
#' \item {uroot.test = 'adf'} : the Augmented Dickey-Fuller test is used.
#' \item {uroot.test = 'pp'} : the Phillips-Perron test is used. In both of these cases, the null hypothesis is that \code{x} has a unit root against a stationary root alternative. Then the test returns the least number of differences required to fail the test at the level \code{uroot.alpha}.
#' }
#' \code{nsdiffs} uses seasonal unit root tests to determine the number of seasonal differences required for time series to be made stationary. Several different tests are available:
#' \itemize{
#' \item {suroot.test = 'seas'} : a measure of seasonal strength is used, where differencing is selected if the seasonal strength (Wang, Smith & Hyndman, 2006) exceeds 0.64 (based on minimizing MASE when forecasting using auto.arima on M3 and M4 data).
#' \item {suroot.test = 'ch'} : the Canova-Hansen (1995) test is used (with null hypothesis of deterministic seasonality)
#' \item {suroot.test = 'hegy'} : the Hylleberg, Engle, Granger & Yoo (1990) test is used.
#' \item {suroot.test = 'ocsb'} : the Osborn-Chui-Smith-Birchenhall (1988) test is used (with null hypothesis that a seasonal unit root exists).
#' \item {suroot.test = 'correlogram'} : this function is written based on M4 Competition Seasonality Test.
#' }
#'
#' @param corrgram.tcrit t-value for autocorrelogram.
#' @param uroot.test Type of unit root test before all type seasonality test. Possible values are "adf", "pp" and "kpss".
#' @param suroot.test Type of seasonal unit root test to use. Possible values are "correlogram", "seas", "hegy", "ch" and "ocsb".
#' @param suroot.uroot If TRUE, a unit root test for stationarity is applied before the seasonal unit root test.
#' @param uroot.type Specification of the deterministic component in the regression for unit root test. Possible values are "level" and "trend".
#' @param uroot.alpha Significant level of the unit root test, possible values range from 0.01 to 0.1.
#' @param suroot.alpha Significant level of the seasonal unit root test, possible values range from 0.01 to 0.1
#' @param uroot.maxd Maximum number of non-seasonal differences allowed.
#' @param suroot.maxD Maximum number of seasonal differences allowed.
#' @param suroot.m Deprecated. Length of seasonal period: frequency of data for nsdiffs.
#' @param uroot.pkg Using \code{urca} or \code{tseries} packages for unit root test. The default value is "tseries".
#' @param multi.period Selection type of multi seasonal period. \code{min} or \code{max} function for selection
#' @param x13.estimate.maxiter Maximum iteration for X13ARIMA/SEATS estimation
#' @param x13.estimate.tol Convergence tolerance for X13ARIMA/SEATS estimation
#' @param x11.estimate.maxiter Maximum iteration for X11 estimation
#' @param x11.estimate.tol Convergence tolerance for X11 estimation
#'
#' @return An object of class \code{ataoptim}.
#'
#' @author Ali Sabri Taylan and Hanife Taylan Selamlar
#' @seealso \code{forecast}, \code{stlplus}, \code{stR}, \code{\link[stats]{stl}}, \code{\link[stats]{decompose}}, \code{tbats}, \code{seasadj}.
#'
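#' @examples
#' # A sketch of overriding the defaults: KPSS unit root test from the
#' # urca package with a stricter significance level.
#' seas_opts <- ATA.SeasAttr(uroot.test = "kpss", uroot.pkg = "urca", uroot.alpha = 0.01)
#'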
#' @export
ATA.SeasAttr <- function(corrgram.tcrit=1.28, uroot.test="adf", suroot.test="correlogram", suroot.uroot=TRUE, uroot.type="level", uroot.alpha=0.05, suroot.alpha=0.05, uroot.maxd=2, suroot.maxD=1, suroot.m=NULL, uroot.pkg="tseries", multi.period="min", x13.estimate.maxiter=1500, x13.estimate.tol=1.0e-5, x11.estimate.maxiter=1500, x11.estimate.tol=1.0e-5)
{
if ((uroot.test != "adf" & uroot.test != "pp" & uroot.test != "kpss") | !is.character(uroot.test)){
warning("Selection method of unit root test must be string. adf, pp or kpss test for searching unit root.")
uroot.test <- "adf"
}
if ((suroot.test != "correlogram" & suroot.test != "seas" & suroot.test != "hegy" & suroot.test != "ch" & suroot.test != "ocsb") | !is.character(suroot.test)){
warning("Selection method of seasonal unit root test must be string. correlogram, seas, hegy, ch or ocsb test for searching seasonal unit root.")
suroot.test <- "correlogram"
}
if ((uroot.type != "level" & uroot.type != "trend") | !is.character(uroot.type)){
warning("Selection type of deterministic component in the regression for unit root test must be string. level or trend for searching unit root.")
uroot.type <- "trend"
}
if ((multi.period != "min" & multi.period != "max" ) | !is.character(multi.period)){
warning("Selection type of multi seasonal period must be string. min or max function for selection.")
multi.period <- "min"
}
if ((uroot.pkg != "urca" & uroot.pkg != "tseries") | !is.character(uroot.pkg)){
warning("Selection package of unit root test must be string. urca or tseries packages for searching unit root.")
uroot.pkg <- "tseries"
}
if(corrgram.tcrit < -10){
warning("Specified tcrit value is less than the minimum, setting corrgram.tcrit=1.28")
corrgram.tcrit <- 1.28
}else if(corrgram.tcrit > 10){
warning("Specified tcrit value is larger than the maximum, setting corrgram.tcrit=1.28")
corrgram.tcrit <- 1.28
}
if(uroot.alpha < 0.01){
warning("Specified alpha value is less than the minimum, setting uroot.alpha=0.01")
uroot.alpha <- 0.01
}else if(uroot.alpha > 0.1){
warning("Specified alpha value is larger than the maximum, setting uroot.alpha=0.1")
uroot.alpha <- 0.1
}
if(suroot.alpha < 0.01){
warning("Specified alpha value is less than the minimum, setting suroot.alpha=0.01")
suroot.alpha <- 0.01
}else if(suroot.alpha > 0.1){
warning("Specified alpha value is larger than the maximum, setting suroot.alpha=0.1")
suroot.alpha <- 0.1
}
mylist <- list("corrgram.tcrit"=corrgram.tcrit, "uroot.test"=uroot.test, "suroot.test"=suroot.test, "suroot.uroot"=suroot.uroot, "uroot.type"=uroot.type, "uroot.alpha"=uroot.alpha, "suroot.alpha"=suroot.alpha, "uroot.maxd"=uroot.maxd, "suroot.maxD"=suroot.maxD, "suroot.m"=suroot.m, "uroot.pkg"=uroot.pkg, "multi.period"=multi.period, "x13.estimate.maxiter"=x13.estimate.maxiter, "x13.estimate.tol"=x13.estimate.tol, "x11.estimate.maxiter"=x11.estimate.maxiter, "x11.estimate.tol"=x11.estimate.tol)
attr(mylist, "class") <- "ataoptim"
return(mylist)
}
# ---- ATAforecasting/R/ATA_SeasAttributes.R ----
#' Seasonality Tests for The ATAforecasting
#'
#' @description This function is a class of seasonality tests using \code{corrgram_test} from ATAforecasting package, \code{ndiffs} and \code{nsdiffs} functions from forecast package.
#' Also, this function is a modified version of \code{ndiffs} and \code{nsdiffs} from Hyndman et al.'s \code{forecast} package.
#' Please review manual and vignette documents of latest \code{forecast} package. According to \code{forecast} package,
#' \code{ndiffs} and \code{nsdiffs} functions to estimate the number of differences required to make a given time series stationary.
#' \code{ndiffs} uses unit root tests to determine the number of differences required for time series to be made trend stationary. Several different tests are available:
#' \itemize{
#' \item {uroot.test = 'kpss'} : the KPSS test is used with the null hypothesis that \code{x} has a stationary root against a unit-root alternative. Then the test returns the least number of differences required to pass the test at the level \code{uroot.alpha}.
#' \item {uroot.test = 'adf'} : the Augmented Dickey-Fuller test is used.
#' \item {uroot.test = 'pp'} : the Phillips-Perron test is used. In both of these cases, the null hypothesis is that \code{x} has a unit root against a stationary root alternative. Then the test returns the least number of differences required to fail the test at the level \code{uroot.alpha}.
#' }
#' \code{nsdiffs} uses seasonal unit root tests to determine the number of seasonal differences required for time series to be made stationary. Several different tests are available:
#' \itemize{
#' \item {suroot.test = 'seas'} : a measure of seasonal strength is used, where differencing is selected if the seasonal strength (Wang, Smith & Hyndman, 2006) exceeds 0.64 (based on minimizing MASE when forecasting using auto.arima on M3 and M4 data).
#' \item {suroot.test = 'ch'} : the Canova-Hansen (1995) test is used (with null hypothesis of deterministic seasonality)
#' \item {suroot.test = 'hegy'} : the Hylleberg, Engle, Granger & Yoo (1990) test is used.
#' \item {suroot.test = 'ocsb'} : the Osborn-Chui-Smith-Birchenhall (1988) test is used (with null hypothesis that a seasonal unit root exists).
#' \item {suroot.test = 'correlogram'} : this function is written based on M4 Competition Seasonality Test.
#' }
#'
#' @param input The data.
#' @param ppy Frequency of the data.
#' @param attr_set Assign from \code{ATA.SeasAttr} function. Attributes set for unit root, seasonality tests.
#'
#' @return \code{TRUE} if the serie has seasonality. Otherwise, \code{FALSE}.
#'
#' @author Ali Sabri Taylan and Hanife Taylan Selamlar
#'
#' @seealso \code{forecast}, \code{urca}, \code{tseries}, \code{uroot}, \code{stlplus}, \code{stR},
#' \code{\link[stats]{stl}}, \code{\link[stats]{decompose}}, \code{tbats}, \code{seasadj}.
#'
#' @keywords ata ADF Canova-Hansen correlogram HEGY KPSS Phillips-Perron OCSB seasonal unit-root
#'
#' @references
#'
#' \insertRef{dickey1979}{ATAforecasting}
#'
#' \insertRef{said1984}{ATAforecasting}
#'
#' \insertRef{dhf1984}{ATAforecasting}
#'
#' \insertRef{phillips1988}{ATAforecasting}
#'
#' \insertRef{ocsb1988}{ATAforecasting}
#'
#' \insertRef{hegy1990}{ATAforecasting}
#'
#' \insertRef{kpss1992}{ATAforecasting}
#'
#' \insertRef{ch1995}{ATAforecasting}
#'
#' \insertRef{seas2006}{ATAforecasting}
#'
#'
#' @importFrom forecast ndiffs nsdiffs
#' @importFrom Rdpack reprompt
#'
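#' @examples
#' # A sketch: monthly airline passengers tested with the default
#' # correlogram-based procedure.
#' ATA.Seasonality(AirPassengers, 12, ATA.SeasAttr())
#'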
#' @export
#'
ATA.Seasonality <- function(input, ppy, attr_set)
{
if (max(ppy)==1){
test_seasonal <- FALSE
}else {
test <- attr_set$suroot.test
if (test=="correlogram"){
test_seasonal <- corrgram_test(input, ppy, attr_set)
}else {
if (length(ppy)>1){
if (attr_set$multi.period=="max"){
ppy <- max(ppy)
}else {
ppy <- min(ppy)
}
}
if (attr_set$suroot.uroot==TRUE){
uroot.test <- attr_set$uroot.test
uroot.type <- attr_set$uroot.type
uroot.alpha <- attr_set$uroot.alpha
uroot.pkg <- attr_set$uroot.pkg
uroot.maxd <- attr_set$uroot.maxd
#Used to determine whether a time series is stationary (trend)
if (uroot.pkg=="urca") {
d <- forecast::ndiffs(input, alpha=uroot.alpha, test=uroot.test, type=uroot.type, max.d=uroot.maxd)
}else {
d <- ndiffs_tseries(input, alpha=uroot.alpha, test=uroot.test, max.d=uroot.maxd)
}
if (d > 0){
input <- diff(input, differences=d, lag=1)
}
}
suroot.alpha <- attr_set$suroot.alpha
suroot.maxD <- attr_set$suroot.maxD
suroot.m <- attr_set$suroot.m
if (!is.null(attr_set$suroot.m)){
D <- forecast::nsdiffs(input, alpha=suroot.alpha, m=suroot.m, test=test, max.D=suroot.maxD)
}else {
D <- forecast::nsdiffs(input, alpha=suroot.alpha, test=test, max.D=suroot.maxD)
}
test_seasonal <- (D != 0)
}
}
return(test_seasonal)
}
#' @importFrom forecast ndiffs nsdiffs
#' @importFrom stats acf
corrgram_test <- function(input, ppy, attr_set)
{
if (max(ppy)==1){
test_seasonal <- FALSE
}else {
corrgram.tcrit <- attr_set$corrgram.tcrit
uroot.test <- attr_set$uroot.test
uroot.type <- attr_set$uroot.type
uroot.alpha <- attr_set$uroot.alpha
uroot.pkg <- attr_set$uroot.pkg
uroot.maxd <- attr_set$uroot.maxd
if (length(ppy) > 1){
if (attr_set$multi.period=="max"){
ppy <- max(ppy)
}else {
ppy <- min(ppy)
}
}
#Used to determine whether a time series is stationary (trend)
if (uroot.pkg=="urca") {
d <- forecast::ndiffs(input, alpha=uroot.alpha, test=uroot.test, type=uroot.type, max.d=uroot.maxd)
}else {
d <- ndiffs_tseries(input, alpha=uroot.alpha, test=uroot.test, max.d=uroot.maxd)
}
if (d > 0){
input <- diff(input, differences=d, lag=1)
}
#Used to determine whether a time series is seasonal
if (length(input) < 3 * ppy){
test_seasonal <- FALSE
}else {
if (stats::acf(input, plot = FALSE)$acf[1] == 1){
xacf <- stats::acf(input, plot = FALSE)$acf[-1, 1, 1]
}else {
xacf <- stats::acf(input, plot = FALSE)$acf
}
clim <- corrgram.tcrit / sqrt(length(input)) * sqrt(cumsum(c(1, 2 * xacf^2)))
test_seasonal <- (abs(xacf[ppy]) > clim[ppy])
if (is.na(test_seasonal) == TRUE){
test_seasonal <- FALSE
}
}
}
return(test_seasonal)
}
#' @importFrom stats acf na.omit
#' @importFrom forecast is.constant
#' @importFrom tseries adf.test kpss.test pp.test
ndiffs_tseries <- function(x, alpha = 0.05, test = c("kpss","adf","pp"), max.d=2)
{
#ndiffs function using tseries package
test <- match.arg(test)
x <- c(na.omit(c(x)))
d <- 0
if(is.constant(x))
return(d)
if(test == "kpss")
suppressWarnings(dodiff <- tseries::kpss.test(x)$p.value < alpha)
else if(test == "adf")
suppressWarnings(dodiff <- tseries::adf.test(x)$p.value > alpha)
else if(test == "pp")
suppressWarnings(dodiff <- tseries::pp.test(x)$p.value > alpha)
else
stop("This shouldn't happen")
if(is.na(dodiff))
{
return(d)
}
while(dodiff & d < max.d)
{
d <- d+1
x <- diff(x)
if(is.constant(x))
return(d)
if(test == "kpss")
suppressWarnings(dodiff <- tseries::kpss.test(x)$p.value < alpha)
else if(test == "adf")
suppressWarnings(dodiff <- tseries::adf.test(x)$p.value > alpha)
else if(test == "pp")
suppressWarnings(dodiff <- tseries::pp.test(x)$p.value > alpha)
else
stop("This shouldn't happen")
if(is.na(dodiff))
return(d-1)
}
return(d)
}
# ---- ATAforecasting/R/ATA_Seasonality.r ----
#' Lag/Lead (Shift) Function for Univariate Series
#'
#' @param x given vector
#' @param shift_by lag or lead parameter
#' @param fill a value to be used to fill the rows
#'
#' @return The lag/lead (shifted) vector
#' @importFrom utils head tail
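#'
#' @examples
#' # A minimal sketch: positive shift_by leads the series (drops the
#' # first values and pads the end), negative shift_by lags it.
#' ATA.Shift(1:10, 2)
#' ATA.Shift(1:10, -2)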
#' @export
ATA.Shift <- function(x, shift_by, fill = NA){
stopifnot(is.numeric(shift_by))
stopifnot(is.numeric(x))
if (length(shift_by) > 1)
return(sapply(shift_by, ATA.Shift, x=x))
out <- NULL
abs_shift_by <- abs(shift_by)
if (shift_by > 0 )
out <- c(tail(x,-abs_shift_by),rep(fill,abs_shift_by))
else if (shift_by < 0 )
out <- c(rep(fill,abs_shift_by), utils::head(x,-abs_shift_by))
else
out <- x
return(out)
}
#' Lag/Lead (Shift) Function for Multivariate Series
#'
#' @param X given matrix
#' @param direction direction of shifting. Default is "down".
#' @param shift_by number of rows to be shifed upwards/downwards
#' @param fill a value to be used to fill the rows
#'
#' @return The lag/lead (shifted) matrix
#'
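#' @examples
#' # A minimal sketch: shift a 3x3 matrix down by one row, padding with NA.
#' ATA.Shift_Mat(matrix(1:9, nrow = 3), direction = "down", shift_by = 1)
#'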
#' @export
ATA.Shift_Mat <- function(X, direction = "down", shift_by = 1, fill = NA)
{
if (direction != "down" & direction != "up"){
stop("'direction' is not a 'down' or 'up'")
}
if (!is.matrix(X)) {
stop("X is not a matrix")
}
if (!is.numeric(X)) {
stop("X is not a numeric matrix")
}
if (shift_by < 0)
stop("'shift_by' is not positive")
if (shift_by != trunc(shift_by))
stop("'shift_by' is not an integer")
if (direction == "down"){
if (shift_by > 0)
return(ATA.Shift_Mat(rbind(rep(fill, ncol(X)), X[-nrow(X), , drop = FALSE]), direction = "down", shift_by - 1, fill))
}else {
if (shift_by > 0)
return(ATA.Shift_Mat(rbind(X[-1, , drop = FALSE], rep(fill, ncol(X))), direction = "up", shift_by - 1, fill))
}
return(X)
}
# ---- ATAforecasting/R/ATA_Shift.r ----
#' Transformation Techniques for The ATAforecasting
#'
#' @description The function provides the applicability of different types of transformation techniques for the data to which the Ata method will be applied.
#' The \code{ATA.Transform} function works with many different types of inputs.
#'
#' @param X a numeric vector or time series of class \code{ts} or \code{msts} for in-sample.
#' @param tMethod Box-Cox power transformation family is consist of "Box_Cox", "Sqrt", "Reciprocal", "Log", "NegLog",
#' "Modulus", "BickelDoksum", "Manly", "Dual", "YeoJohnson", "GPower", "GLog" in ATAforecasting package. If the transformation process needs shift parameter,
#' \code{ATA.Transform} will calculate required shift parameter automatically.
#' @param tLambda Box-Cox power transformation family parameter. Default is NULL. When lambda is set as NULL, required "lambda" parameter will be calculated automatically based on "bcMethod, bcLower, and bcUpper".
#' @param tShift Box-Cox power transformation family shifting parameter. Default is 0. When "transform.method" is selected, required shifting parameter will be calculated automatically according to dataset.
#' @param bcMethod Choose method to be used in calculating lambda. "loglik" is default. Other method is "guerrero" (Guerrero, V.M. (1993)).
#' @param bcLower Lower limit for possible lambda values. The lower value is limited by -5. Default value is 0.
#' @param bcUpper Upper limit for possible lambda values. The upper value is limited by 5. Default value is 5.
#'
#' @return A list object consisting of the transformation parameters and the transformed data.
#' \code{ATA.Transform} is a list containing at least the following elements:
#' \itemize{
#' \item{trfmX} : Transformed data
#' \item{tLambda} : Box-Cox power transformation family parameter
#' \item{tShift} : Box-Cox power transformation family shifting parameter
#'}
#'
#' @references
#'
#' \insertRef{tukey1957}{ATAforecasting}
#'
#' \insertRef{boxcox1964}{ATAforecasting}
#'
#' \insertRef{manly1976}{ATAforecasting}
#'
#' \insertRef{johndraper1980}{ATAforecasting}
#'
#' \insertRef{bickeldoksum1982}{ATAforecasting}
#'
#' \insertRef{sakia1992}{ATAforecasting}
#'
#' \insertRef{guerrero1993}{ATAforecasting}
#'
#' \insertRef{yeojohn2000}{ATAforecasting}
#'
#' \insertRef{glog2002}{ATAforecasting}
#'
#' \insertRef{neglog2005}{ATAforecasting}
#'
#' \insertRef{yang2006}{ATAforecasting}
#'
#' \insertRef{gpower2013}{ATAforecasting}
#'
#' @keywords Ata Bickel--Doksum Box--Cox dual glog gpower Guerrero Manly neglog transformation Yeo--Johnson
#'
#' @importFrom forecast BoxCox.lambda
#' @importFrom Rdpack reprompt
#'
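#' @examples
#' # A minimal sketch: Box-Cox transformation with lambda selected
#' # automatically by the default log-likelihood method.
#' trf <- ATA.Transform(AirPassengers, tMethod = "Box_Cox", tLambda = NULL)
#' trf$tLambda
#'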
#' @export
ATA.Transform <- function(X
, tMethod = c("Box_Cox", "Sqrt", "Reciprocal", "Log", "NegLog", "Modulus", "BickelDoksum", "Manly", "Dual", "YeoJohnson", "GPower", "GLog")
, tLambda
, tShift = 0
, bcMethod = c("loglik", "guerrero")
, bcLower = 0
, bcUpper = 5)
{
bcMethod <- match.arg(bcMethod)  # collapse the default vector to a single choice before the scalar comparison below
if (is.null(tMethod)){
my_list <- list("trfmX" = X, "tLambda" = tLambda, "tShift" = tShift)
}else {
if (is.null(tLambda)){
ntShift <- calc_shift(min(X),tShift)
tX <- X + ntShift
if (bcMethod == "guerrero") {
tLambda <- forecast::BoxCox.lambda(tX, method = "guerrero", lower=bcLower, upper=bcUpper)
} else {
tLambda <- forecast::BoxCox.lambda(tX, method = "loglik", lower=bcLower, upper=bcUpper)
}
}
out_list <- SubATA.Transform(X, tMethod = tMethod, tType = "Vanilla", tLambda = tLambda, tShift = tShift)
my_list <- list("trfmX" = out_list$tX, "tLambda" = out_list$tLambda, "tShift" = out_list$tShift)
}
return(my_list)
}
#' Back Transformation Techniques for The ATAforecasting
#'
#' @description The function provides the applicability of different types of back transformation techniques for the transformed data to which the Ata method will be applied.
#' The \code{ATA.BackTransform} function works with many different types of inputs.
#' @param X a numeric vector or time series of class \code{ts} or \code{msts} for in-sample.
#' @param tMethod Box-Cox power transformation family is consist of "Box_Cox", "Sqrt", "Reciprocal", "Log", "NegLog",
#' "Modulus", "BickelDoksum", "Manly", "Dual", "YeoJohnson", "GPower", "GLog" in ATAforecasting package.
#' @param tLambda Box-Cox power transformation family parameter that was used when the data were transformed.
#' @param tShift Box-Cox power transformation family shifting parameter that was used when the data were transformed.
#' @param tbiasadj Use adjusted back-transformed mean for Box-Cox transformations using \code{forecast::BoxCox}. If transformed data is used to produce forecasts and fitted values,
#' a regular back transformation will result in median forecasts. If tbiasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.
#' @param tfvar Optional parameter required if tbiasadj=TRUE. Can either be the forecast variance, or a list containing the interval \code{level}, and the
#' corresponding \code{upper} and \code{lower} intervals.
#'
#' @return A numeric vector or time series of the back-transformed data.
#'
#' @references
#'
#' \insertRef{tukey1957}{ATAforecasting}
#'
#' \insertRef{boxcox1964}{ATAforecasting}
#'
#' \insertRef{manly1976}{ATAforecasting}
#'
#' \insertRef{johndraper1980}{ATAforecasting}
#'
#' \insertRef{bickeldoksum1982}{ATAforecasting}
#'
#' \insertRef{sakia1992}{ATAforecasting}
#'
#' \insertRef{guerrero1993}{ATAforecasting}
#'
#' \insertRef{yeojohn2000}{ATAforecasting}
#'
#' \insertRef{glog2002}{ATAforecasting}
#'
#' \insertRef{neglog2005}{ATAforecasting}
#'
#' \insertRef{yang2006}{ATAforecasting}
#'
#' \insertRef{gpower2013}{ATAforecasting}
#'
#' @keywords Ata Bickel--Doksum Box--Cox dual glog gpower Guerrero Manly neglog transformation Yeo--Johnson
#'
#'
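#' @examples
#' # A sketch: round-tripping a log transformation.
#' trf <- ATA.Transform(AirPassengers, tMethod = "Log", tLambda = NULL)
#' back <- ATA.BackTransform(trf$trfmX, tMethod = "Log",
#'                           tLambda = trf$tLambda, tShift = trf$tShift)
#'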
#' @export
ATA.BackTransform <- function(X, tMethod, tLambda, tShift, tbiasadj=FALSE, tfvar=NULL)
{
if (is.null(tMethod)){
trfmX <- X
}else {
out_list <- SubATA.Transform(tX = X, tMethod = tMethod, tType = "Back", tLambda = tLambda, tShift = tShift)
trfmX <- out_list$tX
}
return(trfmX)
}
# ---- ATAforecasting/R/ATA_Transform.R ----
#' @keywords internal
"_PACKAGE"
#' @import Rcpp
#'
#' @importFrom graphics axis legend layout lines mtext par plot polygon
#' @importFrom forecast BoxCox.lambda findfrequency is.constant mstl msts ndiffs nsdiffs seasadj seasonal tbats tbats.components
#' @importFrom Rdpack reprompt
#' @importFrom seasonal seas series udg
#' @importFrom stats acf as.ts cycle decompose frequency median na.omit qnorm qt sd ts tsp tsp<- spec.ar stl var
#' @importFrom stlplus stlplus
#' @importFrom stR AutoSTR
#' @importFrom timeSeries colKurtosis colSkewness
#' @importFrom TSA periodogram
#' @importFrom tseries adf.test kpss.test pp.test
#' @importFrom utils head tail
#' @importFrom xts period.apply
#' @exportPattern("^[[:alpha:]]+")
#'
#' @useDynLib ATAforecasting, .registration = TRUE
NULL
# ---- ATAforecasting/R/ATAforecasting-package.R ----
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
meanIT <- function(x, t) {
.Call(`_ATAforecasting_meanIT`, x, t)
}
medianIT <- function(x, t) {
.Call(`_ATAforecasting_medianIT`, x, t)
}
calc_amse <- function(IAX) {
.Call(`_ATAforecasting_calc_amse`, IAX)
}
NaiveSD_Accry <- function(train_set, frqx, accry) {
.Call(`_ATAforecasting_NaiveSD_Accry`, train_set, frqx, accry)
}
NaiveSV_Accry <- function(train_set, frqx, accry) {
.Call(`_ATAforecasting_NaiveSV_Accry`, train_set, frqx, accry)
}
NaiveSD_Accry_hin <- function(train_set, frqx, accry, h) {
.Call(`_ATAforecasting_NaiveSD_Accry_hin`, train_set, frqx, accry, h)
}
NaiveSV_Accry_hin <- function(train_set, frqx, accry, h) {
.Call(`_ATAforecasting_NaiveSV_Accry_hin`, train_set, frqx, accry, h)
}
SubATACore <- function(IAZ, IZP, IZQ, IZPHI, IZMO, IZAC, IZIL, IZIT, IZTA_0, IZTM_0, IZFRQ, IZNMSE) {
.Call(`_ATAforecasting_SubATACore`, IAZ, IZP, IZQ, IZPHI, IZMO, IZAC, IZIL, IZIT, IZTA_0, IZTM_0, IZFRQ, IZNMSE)
}
SubATADamped <- function(IAX, IXP, IXQ, IXMO, IXAC, IXLF, IXTF, IXTS, IXPHIS, IXPHIE, IXPHISS, IXIL, IXIT, IXTA_0, IXTM_0, IXFRQ, IXNMSE) {
.Call(`_ATAforecasting_SubATADamped`, IAX, IXP, IXQ, IXMO, IXAC, IXLF, IXTF, IXTS, IXPHIS, IXPHIE, IXPHISS, IXIL, IXIT, IXTA_0, IXTM_0, IXFRQ, IXNMSE)
}
SubATA <- function(IAX, IXP, IXQ, IXMO, IXAC, IXLF, IXTF, IXTS, IXPHIS, IXPHIE, IXPHISS, IXIL, IXIT, IXTA_0, IXTM_0, IXSMO, IXST, max_smo, max_st, IXFRQ, IXNMSE) {
.Call(`_ATAforecasting_SubATA`, IAX, IXP, IXQ, IXMO, IXAC, IXLF, IXTF, IXTS, IXPHIS, IXPHIE, IXPHISS, IXIL, IXIT, IXTA_0, IXTM_0, IXSMO, IXST, max_smo, max_st, IXFRQ, IXNMSE)
}
SubATACoreHoldout <- function(IAZ, IZP, IZQ, IZPHI, IZMO, IZAC, IZIL, IZIT, IZTA_0, IZTM_0, IZFRQ, IAZout, onestep) {
.Call(`_ATAforecasting_SubATACoreHoldout`, IAZ, IZP, IZQ, IZPHI, IZMO, IZAC, IZIL, IZIT, IZTA_0, IZTM_0, IZFRQ, IAZout, onestep)
}
SubATADampedHoldout <- function(IAX, IXP, IXQ, IXMO, IXAC, IXLF, IXTF, IXTS, IXPHIS, IXPHIE, IXPHISS, IXIL, IXIT, IXTA_0, IXTM_0, IXFRQ, IAXout, onestep) {
.Call(`_ATAforecasting_SubATADampedHoldout`, IAX, IXP, IXQ, IXMO, IXAC, IXLF, IXTF, IXTS, IXPHIS, IXPHIE, IXPHISS, IXIL, IXIT, IXTA_0, IXTM_0, IXFRQ, IAXout, onestep)
}
SubATAHoldout <- function(IAX, IXP, IXQ, IXMO, IXAC, IXLF, IXTF, IXTS, IXPHIS, IXPHIE, IXPHISS, IXIL, IXIT, IXTA_0, IXTM_0, IXSMO, IXST, max_smo, max_st, IXFRQ, IAXout, onestep) {
.Call(`_ATAforecasting_SubATAHoldout`, IAX, IXP, IXQ, IXMO, IXAC, IXLF, IXTF, IXTS, IXPHIS, IXPHIE, IXPHISS, IXIL, IXIT, IXTA_0, IXTM_0, IXSMO, IXST, max_smo, max_st, IXFRQ, IAXout, onestep)
}
ATAHoldoutForecast <- function(IAZ, IZP, IZQ, IZPHI, IZMO, IZIL, IZIT, IZTA_0, IZTM_0, IZFRQ, IAZout, onestep) {
.Call(`_ATAforecasting_ATAHoldoutForecast`, IAZ, IZP, IZQ, IZPHI, IZMO, IZIL, IZIT, IZTA_0, IZTM_0, IZFRQ, IAZout, onestep)
}
SubATACoreHoldhin <- function(IAZ, IZP, IZQ, IZPHI, IZMO, IZAC, IZIL, IZIT, IZTA_0, IZTM_0, IZFRQ, IZH, IZNMSE) {
.Call(`_ATAforecasting_SubATACoreHoldhin`, IAZ, IZP, IZQ, IZPHI, IZMO, IZAC, IZIL, IZIT, IZTA_0, IZTM_0, IZFRQ, IZH, IZNMSE)
}
SubATADampedHoldhin <- function(IAX, IXP, IXQ, IXMO, IXAC, IXLF, IXTF, IXTS, IXPHIS, IXPHIE, IXPHISS, IXIL, IXIT, IXTA_0, IXTM_0, IXFRQ, IXH, IXNMSE) {
.Call(`_ATAforecasting_SubATADampedHoldhin`, IAX, IXP, IXQ, IXMO, IXAC, IXLF, IXTF, IXTS, IXPHIS, IXPHIE, IXPHISS, IXIL, IXIT, IXTA_0, IXTM_0, IXFRQ, IXH, IXNMSE)
}
SubATAHoldhin <- function(IAX, IXP, IXQ, IXMO, IXAC, IXLF, IXTF, IXTS, IXPHIS, IXPHIE, IXPHISS, IXIL, IXIT, IXTA_0, IXTM_0, IXSMO, IXST, max_smo, max_st, IXFRQ, IXH, IXNMSE) {
.Call(`_ATAforecasting_SubATAHoldhin`, IAX, IXP, IXQ, IXMO, IXAC, IXLF, IXTF, IXTS, IXPHIS, IXPHIE, IXPHISS, IXIL, IXIT, IXTA_0, IXTM_0, IXSMO, IXST, max_smo, max_st, IXFRQ, IXH, IXNMSE)
}
# ---- ATAforecasting/R/RcppExports.R ----
#' @importFrom stats frequency
SubATA.Damped <- function(train_set, pb, qb, model.Type, accuracy.Type, level.fix, trend.fix, trend.Search, phiStart, phiEnd, phiSize,
initialLevel, initialTrend, main_set, Holdout, HoldoutSet, Adjusted_P, h, Holdin, nmse, seas_periods, holdout_onestep)
{
Xdata <- as.numeric(train_set)
TA_0 <- Xdata-ATA.Shift(Xdata,1)
TM_0 <- Xdata/ATA.Shift(Xdata,1)
model.Type <- ifelse(is.null(model.Type),"B",model.Type)
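# Note: "opt" is encoded as the sentinel -1 so the compiled routines
# optimize over that parameter; fixed integer values pass through as-is.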
if (Holdout==TRUE){
output <- SubATADampedHoldout(as.double(Xdata)
, as.integer(ifelse(pb=="opt", -1, pb))
, as.integer(ifelse(qb=="opt", -1, qb))
, as.integer(switch(model.Type,"B"=0,"A"=1,"M"=2))
, as.integer(switch(accuracy.Type,"MAE"=1,"MdAE"=2,"MSE"=3,"MdSE"=4,"MPE"=5,"MdPE"=6,"MAPE"=7,"MdAPE"=8,"sMAPE"=9,"sMdAPE"=10,"RMSE"=11,"MASE"=12,"OWA"=13,"AMSE"=14,"lik"=15,"sigma"=16,"GAMSE"=17))
, as.integer(ifelse(level.fix, 1, 0))
, as.integer(ifelse(trend.fix, 1, 0))
, as.integer(ifelse(trend.Search, 1, 0))
, as.double(phiStart)
, as.double(phiEnd)
, as.double(phiSize)
, as.integer(switch(initialLevel,"none"=0,"mean"=1,"median"=2))
, as.integer(switch(initialTrend,"none"=0,"mean"=1,"median"=2))
, as.double(TA_0)
, as.double(TM_0)
, as.integer(seas_periods)
, as.double(HoldoutSet)
, as.integer(holdout_onestep))
}else if (Holdin==TRUE){
output <- SubATADampedHoldhin(as.double(Xdata)
, as.integer(ifelse(pb=="opt", -1, pb))
, as.integer(ifelse(qb=="opt", -1, qb))
, as.integer(switch(model.Type,"B"=0,"A"=1,"M"=2))
, as.integer(switch(accuracy.Type,"MAE"=1,"MdAE"=2,"MSE"=3,"MdSE"=4,"MPE"=5,"MdPE"=6,"MAPE"=7,"MdAPE"=8,"sMAPE"=9,"sMdAPE"=10,"RMSE"=11,"MASE"=12,"OWA"=13,"AMSE"=14,"lik"=15,"sigma"=16,"GAMSE"=17))
, as.integer(ifelse(level.fix, 1, 0))
, as.integer(ifelse(trend.fix, 1, 0))
, as.integer(ifelse(trend.Search, 1, 0))
, as.double(phiStart)
, as.double(phiEnd)
, as.double(phiSize)
, as.integer(switch(initialLevel,"none"=0,"mean"=1,"median"=2))
, as.integer(switch(initialTrend,"none"=0,"mean"=1,"median"=2))
, as.double(TA_0)
, as.double(TM_0)
, as.integer(seas_periods)
, as.integer(h)
, as.integer(nmse))
}else {
output <- SubATADamped(as.double(Xdata)
, as.integer(ifelse(pb=="opt", -1, pb))
, as.integer(ifelse(qb=="opt", -1, qb))
, as.integer(switch(model.Type,"B"=0,"A"=1,"M"=2))
, as.integer(switch(accuracy.Type,"MAE"=1,"MdAE"=2,"MSE"=3,"MdSE"=4,"MPE"=5,"MdPE"=6,"MAPE"=7,"MdAPE"=8,"sMAPE"=9,"sMdAPE"=10,"RMSE"=11,"MASE"=12,"OWA"=13,"AMSE"=14,"lik"=15,"sigma"=16,"GAMSE"=17))
, as.integer(ifelse(level.fix, 1, 0))
, as.integer(ifelse(trend.fix, 1, 0))
, as.integer(ifelse(trend.Search, 1, 0))
, as.double(phiStart)
, as.double(phiEnd)
, as.double(phiSize)
, as.integer(switch(initialLevel,"none"=0,"mean"=1,"median"=2))
, as.integer(switch(initialTrend,"none"=0,"mean"=1,"median"=2))
, as.double(TA_0)
, as.double(TM_0)
, as.integer(seas_periods)
, as.integer(nmse))
}
new_pk <- if (Holdout==TRUE & Adjusted_P==TRUE) round((output[1] * length(main_set))/ length(train_set)) else output[1]
ATA.last <- ATA.Core(main_set, pk = new_pk, qk = output[2], phik = output[3], mdlType = ifelse(output[4]==1,"A","M"), initialLevel = initialLevel, initialTrend = initialTrend)
ATA.last$holdout <- Holdout
ATA.last$holdin <- Holdin
if(Holdout==TRUE){
ATA.last$holdout.accuracy <- output[5]
ATA.last$holdout.forecast <- ATAHoldoutForecast(as.double(Xdata)
, as.integer(output[1])
, as.integer(output[2])
, as.double(output[3])
, as.integer(output[4])
, as.integer(switch(initialLevel,"none"=0,"mean"=1,"median"=2))
, as.integer(switch(initialTrend,"none"=0,"mean"=1,"median"=2))
, as.double(TA_0)
, as.double(TM_0)
, as.integer(frequency(train_set))
, as.double(HoldoutSet)
, as.integer(holdout_onestep))
}
return(ATA.last)
}
# ---- ATAforecasting/R/SubATA_Damped.R ----
#' @importFrom stats ts tsp tsp<-
SubATA.Forecast <- function(ataModel, hh=NULL)
{
X <- as.numeric(ataModel$actual)
ph <- ataModel$p
qh <- ataModel$q
phih <- ataModel$phi
modelType <- ataModel$model.type
lenX <- length(X)
if(is.null(hh)){
hh <- ataModel$h
}
Xobs <- X[lenX]
multistep.fitted <- rep(NA, hh)
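# Level and trend are updated once more with the last observation, then
# extrapolated: additive forecasts are S + (phi + ... + phi^h) * T and
# multiplicative forecasts are S * T^(phi + ... + phi^h).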
if (modelType=="A"){
coefph <- abs(ph/lenX)
coefqh <- abs(qh/lenX)
T_1 <- ataModel$trend[lenX-1]
S_1 <- ataModel$level[lenX-1]
ataModel$level[lenX] <- S <- coefph * Xobs + (1-coefph)*(S_1 + phih * T_1)
ataModel$trend[lenX] <- T <- coefqh * (S-S_1) + (1-coefqh) * (phih * T_1)
multistep.fitted[1] <- S + (phih * T)
phiTotal <- phih
if (hh > 1){
for (h in 2:hh){
phiTotal <- phiTotal + (phih^h)
multistep.fitted[h] <- S + (phiTotal * T)
}
}
}
if (modelType=="M"){
coefph <- abs(ph/lenX)
coefqh <- abs(qh/lenX)
T_1 <- ataModel$trend[lenX-1]
S_1 <- ataModel$level[lenX-1]
ataModel$level[lenX] <- S <- coefph * Xobs + (1-coefph)* S_1 * (T_1^phih)
ataModel$trend[lenX] <- T <- coefqh * (S/S_1) + (1-coefqh) * (T_1^phih)
multistep.fitted[1] <- S * (T^phih)
phiTotal <- phih
if (hh > 1){
for (h in 2:hh){
phiTotal <- phiTotal + (phih^h)
multistep.fitted[h] <- S * (T^phiTotal)
}
}
}
my_list <- ataModel
my_list$forecast <- multistep.fitted
my_list$onestep.forecast <- NA
return(my_list)
}
# ---- ATAforecasting/R/SubATA_Forecast.R ----
#' @importFrom forecast msts
#' @importFrom stats end start ts tsp tsp<- var
SubATA_Multi_Before <- function(train_set, pb, qb, model.type, seasonal.Test, seasonal.Model, seasonal.Type, seasonal.Frequency, h, accuracy.Type,
level.Fix, trend.Fix, trend.Search, phiStart, phiEnd, phiSize, initialLevel, initialTrend, transform.Method, Lambda, Shift, main_set,
test_set, seas_attr_set, freqYh, ci.Level, negative.Forecast, boxcox_attr_set, Holdout, hold_set_size, Adjusted_P, Holdin, nmse, onestep, holdout_onestep)
{
tspX <- tsp(train_set)
firstTspX <- tsp(main_set)
if (is.null(seasonal.Test)){
is.season <- ATA.Seasonality(train_set, seasonal.Frequency, seas_attr_set)
}else if (seasonal.Test==TRUE){
is.season <- ATA.Seasonality(train_set, seasonal.Frequency, seas_attr_set)
}else {
if (max(seasonal.Frequency)==1){
is.season <- FALSE
seasonal.Type <- "A"
}else {
is.season <- TRUE
}
}
if (is.null(seasonal.Model)){
if (is.season==TRUE & length(seasonal.Frequency)>1){
seas.model <- c("stl","stR","tbats")
}else if (is.season==TRUE & length(seasonal.Frequency)==1){
seas.model <- c("decomp","stl","stR","tbats")
}else {
if (is.season==FALSE | max(seasonal.Frequency)==1){
seas.model <- "none"
seasonal.Type <- "A"
}else {
if (seasonal.Frequency!=12){
seas.model <- c("decomp","stl", "stlplus", "stR", "tbats")
}else {
seas.model <- c("decomp","stl", "stlplus", "stR", "tbats", "x13", "x11")
}
}
}
}else {
if (is.season==FALSE | max(seasonal.Frequency)==1){
seas.model <- "none"
seasonal.Type <- "A"
}else if (length(seasonal.Frequency)>1){
seas.model <- seasonal.Model[!(seasonal.Model %in% "decomp")]
}else {
seas.model <- seasonal.Model
}
}
if (is.null(seasonal.Type)) seas.type <- c("A","M") else seas.type <- seasonal.Type
model.Type <- ifelse(is.null(model.type), "B", model.type)
max_smo <- length(seas.model)
if (length(seas.type)==1){
max_st <- 1
}else {
max_st <- 2
}
if (is.season==TRUE){
train_set_mat <- rep(NA,length(train_set))
DeSI <- rep(NA,max(seasonal.Frequency))
DeSA <- rep(NA,length(train_set))
TA_0 <- rep(NA,length(train_set))
TM_0 <- rep(NA,length(train_set))
typeName <- as.data.frame("omit")
for (smo in 1:max_smo){
for (st in 1:max_st){
if (seas.model[smo]!="none"){
org.seas.Type <- seas.type[st]
if (seas.model[smo]!="decomp" & seas.type[st]=="M"){
out.transform <- ATA.Transform(train_set, tMethod = "Box_Cox", tLambda = 0, tShift = 0) # lambda = 0 for multiplicative model
seas_train_set <- forecast::msts(out.transform$trfmX, start = start(train_set), seasonal.periods = seasonal.Frequency)
seas.Type <- "A"
seas.Model <- seas.model[smo]
seas.Lambda <- out.transform$tLambda
seas.Shift <- out.transform$tShift
seas.Transform <- "Box_Cox"
}else {
seas_train_set <- train_set
seas.Type <- seas.type[st]
seas.Model <- seas.model[smo]
seas.Lambda <- NULL
seas.Shift <- 0
seas.Transform <- NULL
}
}else {
seas_train_set <- train_set
seas.Type <- "A"
seas.Model <- "none"
seas.Lambda <- NULL
seas.Shift <- 0
seas.Transform <- NULL
}
ata.seasonal.component <- ATA.Decomposition(seas_train_set, s.model=seas.Model, s.type=seas.Type, s.frequency=seasonal.Frequency, seas_attr_set=seas_attr_set)
seasadj_train_set <- ATA.BackTransform(X=ata.seasonal.component$AdjustedX, tMethod=seas.Transform, tLambda=seas.Lambda, tShift=seas.Shift)
AdjSI <- ATA.BackTransform(X=ata.seasonal.component$SeasIndex, tMethod=seas.Transform, tLambda=seas.Lambda, tShift=seas.Shift)
AdjSA <- ATA.BackTransform(X=ata.seasonal.component$SeasActual, tMethod=seas.Transform, tLambda=seas.Lambda, tShift=seas.Shift)
if (seas.Model=="x13" | seas.Model=="x11"){
seas.Type <- ata.seasonal.component$SeasType
}
train_set_mat <- as.matrix.data.frame(cbind(train_set_mat, as.numeric(seasadj_train_set)))
DeSI <- as.matrix.data.frame(cbind(DeSI, as.numeric(AdjSI)))
DeSA <- as.matrix.data.frame(cbind(DeSA, as.numeric(AdjSA)))
typeName <- cbind(typeName, seas.Type)
TA_0 <- cbind(TA_0, as.double(seasadj_train_set - ATA.Shift(seasadj_train_set,1)))
TM_0 <- cbind(TM_0, as.double(seasadj_train_set / ATA.Shift(seasadj_train_set,1)))
}
}
main_train_set_mat <- train_set_mat <- train_set_mat[,-1]
DeSI <- DeSI[,-1]
DeSA <- DeSA[,-1]
TA_0 <- TA_0[,-1]
TM_0 <- TM_0[,-1]
if (Holdout == TRUE){
holdout_part <- ifelse(hold_set_size > 0 & hold_set_size < 1, floor(length(train_set) * hold_set_size), hold_set_size)
valid_len <- length(train_set) - holdout_part
train_len <- length(train_set)
train_set_mat <- forecast::msts(main_train_set_mat[1:valid_len,], start = start(main_set), seasonal.periods = seasonal.Frequency)
validation_set <- forecast::msts(main_train_set_mat[(valid_len+1):train_len,], start = end(train_set_mat) - ifelse(tspX[3]>1, (holdout_part - 1) * (1/tspX[3]), (holdout_part - 1) * 1), seasonal.periods = seasonal.Frequency)
output <- SubATAHoldout(as.matrix.data.frame(train_set_mat)
, as.integer(ifelse(pb=="opt", -1, pb))
, as.integer(ifelse(qb=="opt", -1, qb))
, as.integer(switch(model.Type,"B"=0,"A"=1,"M"=2))
, as.integer(switch(accuracy.Type,"MAE"=1,"MdAE"=2,"MSE"=3,"MdSE"=4,"MPE"=5,"MdPE"=6,"MAPE"=7,"MdAPE"=8,"sMAPE"=9,"sMdAPE"=10,"RMSE"=11,"MASE"=12,"OWA"=13,"AMSE"=14,"lik"=15,"sigma"=16,"GAMSE"=17))
, as.integer(ifelse(level.Fix, 1, 0))
, as.integer(ifelse(trend.Fix, 1, 0))
, as.integer(ifelse(trend.Search, 1, 0))
, as.double(phiStart)
, as.double(phiEnd)
, as.double(phiSize)
, as.integer(switch(initialLevel,"none"=0,"mean"=1,"median"=2))
, as.integer(switch(initialTrend,"none"=0,"mean"=1,"median"=2))
, as.matrix.data.frame(TA_0)
, as.matrix.data.frame(TM_0)
, as.integer(sapply(seas.model, switch, "none"=0,"decomp"=1,"stl"=2,"stlplus"=3,"stR"=4,"tbats"=5,"x13"=6,"x11"=7))
, as.integer(sapply(seas.type, switch, "A"=0,"M"=1))
, as.integer(max_smo)
, as.integer(max_st)
, as.double(seasonal.Frequency)
, as.matrix.data.frame(validation_set)
, as.integer(holdout_onestep))
}else if (Holdin == TRUE){
validation_set <- NA
output <- SubATAHoldhin(as.matrix.data.frame(train_set_mat)
, as.integer(ifelse(pb=="opt", -1, pb))
, as.integer(ifelse(qb=="opt", -1, qb))
, as.integer(switch(model.Type,"B"=0,"A"=1,"M"=2))
, as.integer(switch(accuracy.Type,"MAE"=1,"MdAE"=2,"MSE"=3,"MdSE"=4,"MPE"=5,"MdPE"=6,"MAPE"=7,"MdAPE"=8,"sMAPE"=9,"sMdAPE"=10,"RMSE"=11,"MASE"=12,"OWA"=13,"AMSE"=14,"lik"=15,"sigma"=16,"GAMSE"=17))
, as.integer(ifelse(level.Fix, 1, 0))
, as.integer(ifelse(trend.Fix, 1, 0))
, as.integer(ifelse(trend.Search, 1, 0))
, as.double(phiStart)
, as.double(phiEnd)
, as.double(phiSize)
, as.integer(switch(initialLevel,"none"=0,"mean"=1,"median"=2))
, as.integer(switch(initialTrend,"none"=0,"mean"=1,"median"=2))
, as.matrix.data.frame(TA_0)
, as.matrix.data.frame(TM_0)
, as.integer(sapply(seas.model, switch, "none"=0,"decomp"=1,"stl"=2,"stlplus"=3,"stR"=4,"tbats"=5,"x13"=6,"x11"=7))
, as.integer(sapply(seas.type, switch, "A"=0,"M"=1))
, as.integer(max_smo)
, as.integer(max_st)
, as.double(seasonal.Frequency)
, as.integer(h)
, as.integer(nmse))
}else {
validation_set <- NA
output <- SubATA(as.matrix.data.frame(train_set_mat)
, as.integer(ifelse(pb=="opt", -1, pb))
, as.integer(ifelse(qb=="opt", -1, qb))
, as.integer(switch(model.Type,"B"=0,"A"=1,"M"=2))
, as.integer(switch(accuracy.Type,"MAE"=1,"MdAE"=2,"MSE"=3,"MdSE"=4,"MPE"=5,"MdPE"=6,"MAPE"=7,"MdAPE"=8,"sMAPE"=9,"sMdAPE"=10,"RMSE"=11,"MASE"=12,"OWA"=13,"AMSE"=14,"lik"=15,"sigma"=16,"GAMSE"=17))
, as.integer(ifelse(level.Fix, 1, 0))
, as.integer(ifelse(trend.Fix, 1, 0))
, as.integer(ifelse(trend.Search, 1, 0))
, as.double(phiStart)
, as.double(phiEnd)
, as.double(phiSize)
, as.integer(switch(initialLevel,"none"=0,"mean"=1,"median"=2))
, as.integer(switch(initialTrend,"none"=0,"mean"=1,"median"=2))
, as.matrix.data.frame(TA_0)
, as.matrix.data.frame(TM_0)
, as.integer(sapply(seas.model, switch, "none"=0,"decomp"=1,"stl"=2,"stlplus"=3,"stR"=4,"tbats"=5,"x13"=6,"x11"=7))
, as.integer(sapply(seas.type, switch, "A"=0,"M"=1))
, as.integer(max_smo)
, as.integer(max_st)
, as.double(seasonal.Frequency)
, as.integer(nmse))
}
#output[1] = d_opt_p
#output[2] = d_opt_q
#output[3] = d_opt_phi
#output[4] = d_opt_mo
#output[5] = LastIXSMO
#output[6] = LastIXST
#output[7] = mod_clmn
#output[8] = holdout.accuracy
AdjInput <- forecast::msts(as.numeric(main_train_set_mat[,output[7]]), start = start(main_set), seasonal.periods = seasonal.Frequency)
SeasonalActual <- forecast::msts(as.numeric(DeSA[,output[7]]), start = start(main_set), seasonal.periods = seasonal.Frequency)
SeasonalIndex <- as.numeric(DeSI[,output[7]])
if (is.season==FALSE & output[6]==0){
OS_SIValue <- rep(0,times=h)
}else if (is.season==FALSE & output[6]==1){
OS_SIValue <- rep(1,times=h)
}else if (is.season==TRUE){
OS_SIValue <- rep(NA,times=h)
for (k in 1:h){
OS_SIValue[k] <- SeasonalIndex[freqYh[k]]
}
}
new_pk <- if (Holdout==TRUE & Adjusted_P==TRUE) round((output[1] * length(train_set))/ length(train_set_mat[,output[7]])) else output[1]
ATA.last <- ATA.Core(AdjInput, pk = new_pk, qk = output[2], phik = output[3], mdlType = ifelse(output[4]==1,"A","M"), initialLevel = initialLevel, initialTrend = initialTrend)
ATA.last$holdout <- Holdout
ATA.last$holdin <- Holdin
if(Holdout==TRUE){
ATA.last$holdout.accuracy <- output[8]
ATA.last$holdout.forecast <- ATAHoldoutForecast(as.double(train_set_mat[,output[7]])
, as.integer(output[1])
, as.integer(output[2])
, as.double(output[3])
, as.integer(output[4])
, as.integer(switch(initialLevel,"none"=0,"mean"=1,"median"=2))
, as.integer(switch(initialTrend,"none"=0,"mean"=1,"median"=2))
, as.double(TA_0[,output[7]])
, as.double(TM_0[,output[7]])
, as.integer(frequency(train_set))
, as.matrix.data.frame(validation_set)
, as.integer(holdout_onestep))
}
}else {
seas.Type <- "A"
OS_SIValue <- rep(0,times=h)
seas.Model <- "none"
seas.Lambda <- NULL
seas.Shift <- 0
seas.Transform <- NULL
ata.seasonal.component <- ATA.Decomposition(train_set, s.model=seas.Model, s.type=seas.Type, s.frequency=seasonal.Frequency, seas_attr_set=seas_attr_set)
SeasonalActual <- ata.seasonal.component$SeasActual
SeasonalIndex <- ata.seasonal.component$SeasIndex
AdjInput <- seasadj_train_set <- ata.seasonal.component$AdjustedX
if (Holdout == TRUE){
holdout_part <- ifelse(hold_set_size > 0 & hold_set_size < 1, floor(length(train_set) * hold_set_size), hold_set_size)
valid_len <- length(train_set) - holdout_part
train_len <- length(train_set)
train_set_mat <- forecast::msts(seasadj_train_set[1:valid_len], start = start(train_set), seasonal.periods = seasonal.Frequency)
validation_set <- forecast::msts(seasadj_train_set[(valid_len+1):train_len], start = end(train_set_mat) - ifelse(tspX[3]>1, (holdout_part - 1) * (1/tspX[3]), (holdout_part - 1) * 1), seasonal.periods = seasonal.Frequency)
}else {
train_set_mat <- seasadj_train_set
validation_set <- NA
}
ATA.last <- SubATA.Damped(train_set_mat, pb = pb, qb = qb, model.Type = model.Type, accuracy.Type = accuracy.Type, level.fix = level.Fix, trend.fix = trend.Fix,
trend.Search = trend.Search, phiStart = phiStart, phiEnd = phiEnd, phiSize = phiSize, initialLevel = initialLevel, initialTrend = initialTrend,
main_set = seasadj_train_set, Holdout = Holdout, HoldoutSet = validation_set, Adjusted_P = Adjusted_P, h = h, Holdin = Holdin, nmse = nmse,
seas_periods = seasonal.Frequency, holdout_onestep = holdout_onestep)
}
ATA.last$h <- h
if (onestep == FALSE){
ATA.last <- SubATA.Forecast(ATA.last, hh=h)
}else {
ATA.last <- SubATA.OneStepForecast(ATA.last, test_set, hh=h)
}
ATA.last$actual <- main_set
fit_ata <- ATA.last$fitted
forecast_ata <- ATA.last$forecast
ATA.last$level <- forecast::msts(ATA.BackTransform(X=ATA.last$level, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=ifelse(boxcox_attr_set$bcBiasAdj==FALSE, NULL, var(ATA.last$residuals))),
start = start(main_set), seasonal.periods = seasonal.Frequency)
ATA.last$trend <- forecast::msts(ATA.BackTransform(X=ATA.last$trend, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=ifelse(boxcox_attr_set$bcBiasAdj==FALSE, NULL, var(ATA.last$residuals))),
start = start(main_set), seasonal.periods = seasonal.Frequency)
crit_a <- ifelse(is.season==TRUE, ifelse(output[6]==0,"A","M"), seas.Type)
crit_a <- ifelse(is.season==FALSE, "A", crit_a)
if(crit_a=="A"){
ATA.fitted <- ATA.BackTransform(X = fit_ata + SeasonalActual, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=ifelse(boxcox_attr_set$bcBiasAdj==FALSE, NULL, var(ATA.last$residuals)))
ATA.forecast <- ATA.BackTransform(X = forecast_ata + OS_SIValue, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=ifelse(boxcox_attr_set$bcBiasAdj==FALSE, NULL, var(ATA.last$residuals)))
if (Holdout == TRUE){
ATA.last$holdout.forecast <- forecast::msts(ATA.BackTransform(X = ATA.last$holdout.forecast + SeasonalActual[(valid_len+1):train_len], tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=ifelse(boxcox_attr_set$bcBiasAdj==FALSE, NULL, var(ATA.last$residuals))),
start = end(train_set_mat) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = seasonal.Frequency)
}
}else {
ATA.fitted <- ATA.BackTransform(X = fit_ata * SeasonalActual, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=ifelse(boxcox_attr_set$bcBiasAdj==FALSE, NULL, var(ATA.last$residuals)))
ATA.forecast <- ATA.BackTransform(X = forecast_ata * OS_SIValue, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=ifelse(boxcox_attr_set$bcBiasAdj==FALSE, NULL, var(ATA.last$residuals)))
if (Holdout == TRUE){
ATA.last$holdout.forecast <- forecast::msts(ATA.BackTransform(X = ATA.last$holdout.forecast * SeasonalActual[(valid_len+1):train_len], tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=ifelse(boxcox_attr_set$bcBiasAdj==FALSE, NULL, var(ATA.last$residuals))),
start = end(train_set_mat) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = seasonal.Frequency)
}
}
ATA.last$fitted <- forecast::msts(ATA.fitted, start = start(main_set), seasonal.periods = seasonal.Frequency)
if (negative.Forecast==TRUE){
ATA.last$forecast <- forecast::msts(ATA.forecast, start = end(main_set) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = seasonal.Frequency)
}else {
ATA.forecast[ATA.forecast<0] <- 0
ATA.last$forecast <- forecast::msts(ATA.forecast, start = end(main_set) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = seasonal.Frequency)
}
ATA.last$residuals <- ATA.last$actual - ATA.last$fitted
if (Holdout == TRUE){
ATA.last$holdout.training <- forecast::msts(ATA.last$actual[1:valid_len], start = start(main_set), seasonal.periods = seasonal.Frequency)
ATA.last$holdout.validation <- forecast::msts(ATA.last$actual[(valid_len+1):train_len], start = end(ATA.last$holdout.training) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = seasonal.Frequency)
}
my_list <- ATA.last
my_list$out.sample <- forecast::msts(test_set, start = end(main_set) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = seasonal.Frequency)
if (level.Fix==TRUE){
method <- paste("ATA(",my_list$p, ",", my_list$q,",", my_list$phi, ")", sep="")
}else if (trend.Fix==TRUE){
method <- paste("ATA(", my_list$p, ",1," ,my_list$phi, ")", sep="")
}else if (trend.Search==TRUE){
method <- paste("ATA(",my_list$p, ",", my_list$q,",", my_list$phi, ")", sep="")
}else {
method <- paste("ATA(", my_list$p, "," ,my_list$q, ",", my_list$phi, ")", sep="")
}
my_list$initial.level <- initialLevel
my_list$initial.trend <- initialTrend
my_list$level.fixed <- level.Fix
my_list$trend.fixed <- trend.Fix
my_list$trend.search <- trend.Search
my_list$transform.method <- transform.Method
my_list$lambda <- Lambda
my_list$shift <- Shift
my_list$bcLower <- boxcox_attr_set$bcLower
my_list$bcUpper <- boxcox_attr_set$bcUpper
my_list$bcBiasAdj <- boxcox_attr_set$bcBiasAdj
my_list$accuracy.type <- accuracy.Type
my_list$nmse <- nmse
my_list$is.season <- is.season
my_list$seasonal.model <- ifelse(is.season==TRUE, switch(output[5]+1, "none", "decomp", "stl", "stlplus", "stR", "tbats", "x13", "x11"), "none")
if (!is.null(seasonal.Type)){
my_list$seasonal.type <- seasonal.Type
}else {
my_list$seasonal.type <- crit_a
}
if(my_list$q==0){
trend_mthd <- "N"
}else if (my_list$q!=0 & my_list$phi!=1 & my_list$phi>0){
trend_mthd <- paste(my_list$model.type, "d", sep="")
}else{
trend_mthd <- my_list$model.type
}
if(my_list$seasonal.model == "none"){
seas_mthd <- "N"
}else{
seas_mthd <- my_list$seasonal.type
}
method <- paste(method, " (A,", trend_mthd, ",", seas_mthd, ")", sep="")
my_list$method <- method
my_list$seasonal.period <- seasonal.Frequency
my_list$seasonal.index <- ATA.BackTransform(X=SeasonalIndex, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ATA.last$residuals))
my_list$seasonal <- forecast::msts(ATA.BackTransform(X=SeasonalActual, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ATA.last$residuals)),
start = start(main_set), seasonal.periods = seasonal.Frequency)
my_list$seasonal.adjusted <- forecast::msts(ATA.BackTransform(X=AdjInput, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ATA.last$residuals)),
start = start(main_set), seasonal.periods = seasonal.Frequency)
ci.output <- ATA.CI(object = my_list, ci.level = ci.Level)
my_list$ci.level <- ci.Level
if (negative.Forecast==TRUE){
my_list$forecast.lower <- ci.output$forecast.lower
my_list$forecast.upper <- ci.output$forecast.upper
}else {
ci_low <- ci.output$forecast.lower
ci_up <- ci.output$forecast.upper
ci_low[ci_low<0] <- 0
ci_up[ci_up<0] <- 0
my_list$forecast.lower <- ci_low
my_list$forecast.upper <- ci_up
}
my_list$par.specs <- list("p" = my_list$p, "q" = my_list$q, "phi" = my_list$phi,
"trend" = trend_mthd,
"seasonal" = seas_mthd,
"period" = seasonal.Frequency,
"decomp_model" = ifelse(seas_mthd == "N", NA, my_list$seasonal.model),
"initial_level" = ifelse(my_list$initial.level=="none", NA, TRUE),
"initial_trend" = ifelse(my_list$initial.trend=="none", NA, TRUE))
accuracy_ata <- ATA.Accuracy(my_list, test_set, print.out = FALSE)
my_list$accuracy <- accuracy_ata
my_list$onestep <- onestep
return(my_list)
}
| /scratch/gouwar.j/cran-all/cranData/ATAforecasting/R/SubATA_Multiple.R |
#' @importFrom forecast msts
#' @importFrom stats end start ts tsp tsp<- var
SubATA_Multi_After <- function(train_set, pb, qb, model.type, seasonal.Test, seasonal.Model, seasonal.Type, seasonal.Frequency, h, accuracy.Type,
level.Fix, trend.Fix, trend.Search, phiStart, phiEnd, phiSize, initialLevel, initialTrend, transform.Method, Lambda, Shift, main_set,
test_set, seas_attr_set, freqYh, ci.Level, negative.Forecast, boxcox_attr_set, Holdout, hold_set_size, Adjusted_P, Holdin, nmse, onestep, holdout_onestep)
{
tspX <- tsp(train_set)
firstTspX <- tsp(main_set)
if (is.null(seasonal.Test)){
is.season <- ATA.Seasonality(train_set, seasonal.Frequency, seas_attr_set)
}else if (seasonal.Test==TRUE){
is.season <- ATA.Seasonality(train_set, seasonal.Frequency, seas_attr_set)
}else {
if (max(seasonal.Frequency)==1){
is.season <- FALSE
seasonal.Type <- "A"
}else {
is.season <- TRUE
}
}
if (is.null(seasonal.Model)){
if (is.season==TRUE & length(seasonal.Frequency)>1){
seas.model <- c("stl","stR","tbats")
}else if (is.season==TRUE & length(seasonal.Frequency)==1){
seas.model <- c("decomp","stl","stR","tbats")
}else {
if (is.season==FALSE | max(seasonal.Frequency)==1){
seas.model <- "none"
seasonal.Type <- "A"
}else {
if (seasonal.Frequency!=12){
seas.model <- c("decomp","stl", "stlplus", "stR", "tbats")
}else {
seas.model <- c("decomp","stl", "stlplus", "stR", "tbats", "x13", "x11")
}
}
}
}else {
if (is.season==FALSE | max(seasonal.Frequency)==1){
seas.model <- "none"
seasonal.Type <- "A"
}else if (length(seasonal.Frequency)>1){
seas.model <- seasonal.Model[!(seasonal.Model %in% "decomp")]
}else {
seas.model <- seasonal.Model
}
}
if (is.null(seasonal.Type)) seas.type <- c("A","M") else seas.type <- seasonal.Type
model.Type <- ifelse(is.null(model.type), "B", model.type)
max_smo <- length(seas.model)
if (length(seas.type)==1){
max_st <- 1
}else {
max_st <- 2
}
if (is.season==TRUE){
train_set_mat <- rep(NA,length(train_set))
DeSI <- rep(NA,max(seasonal.Frequency))
DeSA <- rep(NA,length(train_set))
TA_0 <- rep(NA,length(train_set))
TM_0 <- rep(NA,length(train_set))
typeName <- as.data.frame("omit")
for (smo in 1:max_smo){
for (st in 1:max_st){
if (seas.model[smo]!="none"){
org.seas.Type <- seas.type[st]
if (seas.model[smo]!="decomp" & seas.type[st]=="M"){
out.transform <- ATA.Transform(train_set, tMethod = "Box_Cox", tLambda = 0, tShift = 0) # lambda = 0 for multiplicative model
seas_train_set <- forecast::msts(out.transform$trfmX, start = start(train_set), seasonal.periods = seasonal.Frequency)
seas.Type <- "A"
seas.Model <- seas.model[smo]
seas.Lambda <- out.transform$tLambda
seas.Shift <- out.transform$tShift
seas.Transform <- "Box_Cox"
}else {
seas_train_set <- train_set
seas.Type <- seas.type[st]
seas.Model <- seas.model[smo]
seas.Lambda <- NULL
seas.Shift <- 0
seas.Transform <- NULL
}
}else {
seas_train_set <- train_set
seas.Type <- "A"
seas.Model <- "none"
seas.Lambda <- NULL
seas.Shift <- 0
seas.Transform <- NULL
}
ata.seasonal.component <- ATA.Decomposition(seas_train_set, s.model=seas.Model, s.type=seas.Type, s.frequency=seasonal.Frequency, seas_attr_set=seas_attr_set)
seasadj_train_set <- ATA.BackTransform(X=ata.seasonal.component$AdjustedX, tMethod=seas.Transform, tLambda=seas.Lambda, tShift = seas.Shift)
AdjSI <- ATA.BackTransform(X=ata.seasonal.component$SeasIndex, tMethod=seas.Transform, tLambda=seas.Lambda, tShift = seas.Shift)
AdjSA <- ATA.BackTransform(X=ata.seasonal.component$SeasActual, tMethod=seas.Transform, tLambda=seas.Lambda, tShift = seas.Shift)
if (seas.Model=="x13" | seas.Model=="x11"){
seas.Type <- ata.seasonal.component$SeasType
}
ChgX <- ATA.Transform(seasadj_train_set, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, bcMethod = boxcox_attr_set$bcMethod, bcLower = boxcox_attr_set$bcLower, bcUpper = boxcox_attr_set$bcUpper)
seasadj_train_set <- ChgX$trfmX
Lambda <- ChgX$tLambda
Shift <- ChgX$tShift
train_set_mat <- as.matrix.data.frame(cbind(train_set_mat, as.numeric(seasadj_train_set)))
DeSI <- as.matrix.data.frame(cbind(DeSI, as.numeric(AdjSI)))
DeSA <- as.matrix.data.frame(cbind(DeSA, as.numeric(AdjSA)))
typeName <- cbind(typeName, seas.Type)
TA_0 <- cbind(TA_0, as.double(seasadj_train_set - ATA.Shift(seasadj_train_set,1)))
TM_0 <- cbind(TM_0, as.double(seasadj_train_set / ATA.Shift(seasadj_train_set,1)))
}
}
main_train_set_mat <- train_set_mat <- train_set_mat[,-1]
DeSI <- DeSI[,-1]
DeSA <- DeSA[,-1]
TA_0 <- TA_0[,-1]
TM_0 <- TM_0[,-1]
if (Holdout == TRUE){
holdout_part <- ifelse(hold_set_size > 0 & hold_set_size < 1, floor(length(train_set) * hold_set_size), hold_set_size)
valid_len <- length(train_set) - holdout_part
train_len <- length(train_set)
train_set_mat <- forecast::msts(main_train_set_mat[1:valid_len,], start = start(main_set), seasonal.periods = seasonal.Frequency)
validation_set <- forecast::msts(main_train_set_mat[(valid_len+1):train_len,], start = end(train_set_mat) - ifelse(tspX[3]>1, (holdout_part - 1) * (1/tspX[3]), (holdout_part - 1) * 1), seasonal.periods = seasonal.Frequency)
output <- SubATAHoldout(as.matrix.data.frame(train_set_mat)
, as.integer(ifelse(pb=="opt", -1, pb))
, as.integer(ifelse(qb=="opt", -1, qb))
, as.integer(switch(model.Type,"B"=0,"A"=1,"M"=2))
, as.integer(switch(accuracy.Type,"MAE"=1,"MdAE"=2,"MSE"=3,"MdSE"=4,"MPE"=5,"MdPE"=6,"MAPE"=7,"MdAPE"=8,"sMAPE"=9,"sMdAPE"=10,"RMSE"=11,"MASE"=12,"OWA"=13,"AMSE"=14,"lik"=15,"sigma"=16,"GAMSE"=17))
, as.integer(ifelse(level.Fix, 1, 0))
, as.integer(ifelse(trend.Fix, 1, 0))
, as.integer(ifelse(trend.Search, 1, 0))
, as.double(phiStart)
, as.double(phiEnd)
, as.double(phiSize)
, as.integer(switch(initialLevel,"none"=0,"mean"=1,"median"=2))
, as.integer(switch(initialTrend,"none"=0,"mean"=1,"median"=2))
, as.matrix.data.frame(TA_0)
, as.matrix.data.frame(TM_0)
, as.integer(sapply(seas.model, switch, "none"=0,"decomp"=1,"stl"=2,"stlplus"=3,"stR"=4,"tbats"=5,"x13"=6,"x11"=7))
, as.integer(sapply(seas.type, switch, "A"=0,"M"=1))
, as.integer(max_smo)
, as.integer(max_st)
, as.double(seasonal.Frequency)
, as.matrix.data.frame(validation_set)
, as.integer(holdout_onestep))
}else if (Holdin == TRUE){
validation_set <- NA
output <- SubATAHoldhin(as.matrix.data.frame(train_set_mat)
, as.integer(ifelse(pb=="opt", -1, pb))
, as.integer(ifelse(qb=="opt", -1, qb))
, as.integer(switch(model.Type,"B"=0,"A"=1,"M"=2))
, as.integer(switch(accuracy.Type,"MAE"=1,"MdAE"=2,"MSE"=3,"MdSE"=4,"MPE"=5,"MdPE"=6,"MAPE"=7,"MdAPE"=8,"sMAPE"=9,"sMdAPE"=10,"RMSE"=11,"MASE"=12,"OWA"=13,"AMSE"=14,"lik"=15,"sigma"=16,"GAMSE"=17))
, as.integer(ifelse(level.Fix, 1, 0))
, as.integer(ifelse(trend.Fix, 1, 0))
, as.integer(ifelse(trend.Search, 1, 0))
, as.double(phiStart)
, as.double(phiEnd)
, as.double(phiSize)
, as.integer(switch(initialLevel,"none"=0,"mean"=1,"median"=2))
, as.integer(switch(initialTrend,"none"=0,"mean"=1,"median"=2))
, as.matrix.data.frame(TA_0)
, as.matrix.data.frame(TM_0)
, as.integer(sapply(seas.model, switch, "none"=0,"decomp"=1,"stl"=2,"stlplus"=3,"stR"=4,"tbats"=5,"x13"=6,"x11"=7))
, as.integer(sapply(seas.type, switch, "A"=0,"M"=1))
, as.integer(max_smo)
, as.integer(max_st)
, as.double(seasonal.Frequency)
, as.integer(h)
, as.integer(nmse))
}else {
validation_set <- NA
output <- SubATA(as.matrix.data.frame(train_set_mat)
, as.integer(ifelse(pb=="opt", -1, pb))
, as.integer(ifelse(qb=="opt", -1, qb))
, as.integer(switch(model.Type,"B"=0,"A"=1,"M"=2))
, as.integer(switch(accuracy.Type,"MAE"=1,"MdAE"=2,"MSE"=3,"MdSE"=4,"MPE"=5,"MdPE"=6,"MAPE"=7,"MdAPE"=8,"sMAPE"=9,"sMdAPE"=10,"RMSE"=11,"MASE"=12,"OWA"=13,"AMSE"=14,"lik"=15,"sigma"=16,"GAMSE"=17))
, as.integer(ifelse(level.Fix, 1, 0))
, as.integer(ifelse(trend.Fix, 1, 0))
, as.integer(ifelse(trend.Search, 1, 0))
, as.double(phiStart)
, as.double(phiEnd)
, as.double(phiSize)
, as.integer(switch(initialLevel,"none"=0,"mean"=1,"median"=2))
, as.integer(switch(initialTrend,"none"=0,"mean"=1,"median"=2))
, as.matrix.data.frame(TA_0)
, as.matrix.data.frame(TM_0)
, as.integer(sapply(seas.model, switch, "none"=0,"decomp"=1,"stl"=2,"stlplus"=3,"stR"=4,"tbats"=5,"x13"=6,"x11"=7))
, as.integer(sapply(seas.type, switch, "A"=0,"M"=1))
, as.integer(max_smo)
, as.integer(max_st)
, as.double(seasonal.Frequency)
, as.integer(nmse))
}
#output[1] = d_opt_p
#output[2] = d_opt_q
#output[3] = d_opt_phi
#output[4] = d_opt_mo
#output[5] = LastIXSMO
#output[6] = LastIXST
#output[7] = mod_clmn
#output[8] = holdout.accuracy
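#output[7] is used below to pick the winning seasonal-model/seasonal-type column from main_train_set_mat, DeSA and DeSI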
AdjInput <- forecast::msts(as.numeric(main_train_set_mat[,output[7]]), start = start(main_set), seasonal.periods = seasonal.Frequency)
SeasonalActual <- forecast::msts(as.numeric(DeSA[,output[7]]), start = start(main_set), seasonal.periods = seasonal.Frequency)
SeasonalIndex <- as.numeric(DeSI[,output[7]])
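# OS_SIValue holds the out-of-sample seasonal component for each forecast step:
# 0 (additive) or 1 (multiplicative) when no seasonality is present, otherwise
# the seasonal index matched to each horizon step via freqYh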
if (is.season==FALSE & output[6]==0){
OS_SIValue <- rep(0,times=h)
}else if (is.season==FALSE & output[6]==1){
OS_SIValue <- rep(1,times=h)
}else if (is.season==TRUE){
OS_SIValue <- rep(NA,times=h)
for (k in 1:h){
OS_SIValue[k] <- SeasonalIndex[freqYh[k]]
}
}
if (Holdout==TRUE & Adjusted_P==TRUE) new_pk <- round((output[1] * length(train_set)) / length(train_set_mat[,output[7]])) else new_pk <- output[1]
ATA.last <- ATA.Core(AdjInput, pk = new_pk, qk = output[2], phik = output[3], mdlType = ifelse(output[4]==1,"A","M"), initialLevel = initialLevel, initialTrend = initialTrend)
ATA.last$holdout <- Holdout
ATA.last$holdin <- Holdin
if(Holdout==TRUE){
ATA.last$holdout.accuracy <- output[8]
ATA.last$holdout.forecast <- ATAHoldoutForecast(as.double(train_set_mat[,output[7]])
, as.integer(output[1])
, as.integer(output[2])
, as.double(output[3])
, as.integer(output[4])
, as.integer(switch(initialLevel,"none"=0,"mean"=1,"median"=2))
, as.integer(switch(initialTrend,"none"=0,"mean"=1,"median"=2))
, as.double(TA_0)
, as.double(TM_0)
, as.integer(frequency(train_set))
, as.matrix.data.frame(validation_set)
, as.integer(holdout_onestep))
}
}else {
seas.Type <- "A"
OS_SIValue <- rep(0,times=h)
seas.Model <- "none"
seas.Lambda <- NULL
seas.Shift <- 0
seas.Transform <- NULL
ata.seasonal.component <- ATA.Decomposition(train_set, s.model=seas.Model, s.type=seas.Type, s.frequency=seasonal.Frequency, seas_attr_set=seas_attr_set)
SeasonalActual <- ata.seasonal.component$SeasActual
SeasonalIndex <- ata.seasonal.component$SeasIndex
ChgX <- ATA.Transform(ata.seasonal.component$AdjustedX, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, bcMethod = boxcox_attr_set$bcMethod, bcLower = boxcox_attr_set$bcLower, bcUpper = boxcox_attr_set$bcUpper)
AdjInput <- seasadj_train_set <- ChgX$trfmX
Lambda <- ChgX$tLambda
Shift <- ChgX$tShift
if (Holdout == TRUE){
holdout_part <- ifelse(hold_set_size > 0 & hold_set_size < 1, floor(length(train_set) * hold_set_size), hold_set_size)
valid_len <- length(train_set) - holdout_part
train_len <- length(train_set)
train_set_mat <- forecast::msts(seasadj_train_set[1:valid_len], start = start(train_set), seasonal.periods = seasonal.Frequency)
validation_set <- forecast::msts(seasadj_train_set[(valid_len+1):train_len], start = end(train_set_mat) - ifelse(tspX[3]>1, (holdout_part - 1) * (1/tspX[3]), (holdout_part - 1) * 1), seasonal.periods = seasonal.Frequency)
}else {
train_set_mat <- seasadj_train_set
validation_set <- NA
}
ATA.last <- SubATA.Damped(train_set_mat, pb = pb, qb = qb, model.Type = model.Type, accuracy.Type = accuracy.Type, level.fix = level.Fix, trend.fix = trend.Fix,
trend.Search = trend.Search, phiStart = phiStart, phiEnd = phiEnd, phiSize = phiSize, initialLevel = initialLevel, initialTrend = initialTrend,
main_set = seasadj_train_set, Holdout = Holdout, HoldoutSet = validation_set, Adjusted_P = Adjusted_P, h = h, Holdin = Holdin, nmse = nmse,
seas_periods = seasonal.Frequency, holdout_onestep = holdout_onestep)
}
ATA.last$h <- h
if (onestep == FALSE){
ATA.last <- SubATA.Forecast(ATA.last, hh=h)
}else {
ATA.last <- SubATA.OneStepForecast(ATA.last, test_set, hh=h)
}
ATA.last$actual <- main_set
fit_ata <- ATA.BackTransform(X=ATA.last$fitted, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ATA.last$residuals))
forecast_ata <- ATA.BackTransform(X=ATA.last$forecast, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ATA.last$residuals))
ATA.last$level <- forecast::msts(ATA.BackTransform(X=ATA.last$level, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ATA.last$residuals)),
start = start(main_set), seasonal.periods = seasonal.Frequency)
ATA.last$trend <- forecast::msts(ATA.BackTransform(X=ATA.last$trend, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ATA.last$residuals)),
start = start(main_set), seasonal.periods = seasonal.Frequency)
crit_a <- ifelse(is.season==TRUE, ifelse(output[6]==0,"A","M"), seas.Type)
crit_a <- ifelse(is.season==FALSE, "A", crit_a)
if(crit_a=="A"){
ATA.fitted <- fit_ata + SeasonalActual
ATA.forecast <- forecast_ata + OS_SIValue
if (Holdout == TRUE){
holdout.ata <- ATA.BackTransform(X = ATA.last$holdout.forecast, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ATA.last$residuals))
ATA.last$holdout.forecast <- forecast::msts(holdout.ata + SeasonalActual[(valid_len+1):train_len], start = end(train_set_mat) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = seasonal.Frequency)
}
}else {
ATA.fitted <- fit_ata * SeasonalActual
ATA.forecast <- forecast_ata * OS_SIValue
if (Holdout == TRUE){
holdout.ata <- ATA.BackTransform(X = ATA.last$holdout.forecast, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ATA.last$residuals))
ATA.last$holdout.forecast <- forecast::msts(holdout.ata * SeasonalActual[(valid_len+1):train_len], start = end(train_set_mat) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = seasonal.Frequency)
}
}
ATA.last$fitted <- forecast::msts(ATA.fitted, start = start(main_set), seasonal.periods = seasonal.Frequency)
if (negative.Forecast==TRUE){
ATA.last$forecast <- forecast::msts(ATA.forecast, start = end(main_set) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = seasonal.Frequency)
}else {
ATA.forecast[ATA.forecast<0] <- 0
ATA.last$forecast <- forecast::msts(ATA.forecast, start = end(main_set) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = seasonal.Frequency)
}
ATA.last$residuals <- ATA.last$actual - ATA.last$fitted
if (Holdout == TRUE){
ATA.last$holdout.training <- forecast::msts(ATA.last$actual[1:valid_len], start = start(main_set), seasonal.periods = seasonal.Frequency)
ATA.last$holdout.validation <- forecast::msts(ATA.last$actual[(valid_len+1):train_len], start = end(ATA.last$holdout.training) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = seasonal.Frequency)
}
my_list <- ATA.last
my_list$out.sample <- forecast::msts(test_set, start = end(main_set) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = seasonal.Frequency)
if (level.Fix==TRUE){
method <- paste("ATA(",my_list$p, ",", my_list$q,",", my_list$phi, ")", sep="")
}else if (trend.Fix==TRUE){
method <- paste("ATA(", my_list$p, ",1," ,my_list$phi, ")", sep="")
}else if (trend.Search==TRUE){
method <- paste("ATA(",my_list$p, ",", my_list$q,",", my_list$phi, ")", sep="")
}else {
method <- paste("ATA(", my_list$p, "," ,my_list$q, ",", my_list$phi, ")", sep="")
}
my_list$initial.level <- initialLevel
my_list$initial.trend <- initialTrend
my_list$level.fixed <- level.Fix
my_list$trend.fixed <- trend.Fix
my_list$trend.search <- trend.Search
my_list$transform.method <- transform.Method
my_list$lambda <- Lambda
my_list$shift <- Shift
my_list$bcLower <- boxcox_attr_set$bcLower
my_list$bcUpper <- boxcox_attr_set$bcUpper
my_list$bcBiasAdj <- boxcox_attr_set$bcBiasAdj
my_list$accuracy.type <- accuracy.Type
my_list$nmse <- nmse
my_list$is.season <- is.season
my_list$seasonal.model <- ifelse(is.season==TRUE, switch(output[5]+1, "none", "decomp", "stl", "stlplus", "stR", "tbats", "x13", "x11"), "none")
if (!is.null(seasonal.Type)){
my_list$seasonal.type <- seasonal.Type
}else {
my_list$seasonal.type <- crit_a
}
if(my_list$q==0){
trend_mthd <- "N"
}else if (my_list$q!=0 & my_list$phi!=1 & my_list$phi>0){
trend_mthd <- paste(my_list$model.type, "d", sep="")
}else{
trend_mthd <- my_list$model.type
}
if(my_list$seasonal.model == "none"){
seas_mthd <- "N"
}else{
seas_mthd <- my_list$seasonal.type
}
method <- paste(method, " (A,", trend_mthd, ",", seas_mthd, ")", sep="")
my_list$method <- method
my_list$seasonal.period <- seasonal.Frequency
my_list$seasonal.index <- SeasonalIndex
my_list$seasonal <- forecast::msts(SeasonalActual, start = start(main_set), seasonal.periods = seasonal.Frequency)
my_list$seasonal.adjusted <- forecast::msts(ATA.BackTransform(X=AdjInput, tMethod=transform.Method, tLambda=Lambda, tShift=Shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ATA.last$residuals)),
start = start(main_set), seasonal.periods = seasonal.Frequency)
ci.output <- ATA.CI(object = my_list, ci.level = ci.Level)
my_list$ci.level <- ci.Level
if (negative.Forecast==TRUE){
my_list$forecast.lower <- ci.output$forecast.lower
my_list$forecast.upper <- ci.output$forecast.upper
}else {
ci_low <- ci.output$forecast.lower
ci_up <- ci.output$forecast.upper
ci_low[ci_low<0] <- 0
ci_up[ci_up<0] <- 0
my_list$forecast.lower <- ci_low
my_list$forecast.upper <- ci_up
}
my_list$par.specs <- list("p" = my_list$p, "q" = my_list$q, "phi" = my_list$phi,
"trend" = trend_mthd,
"seasonal" = seas_mthd,
"period" = seasonal.Frequency,
"decomp_model" = ifelse(seas_mthd == "N", NA, my_list$seasonal.model),
"initial_level" = ifelse(my_list$initial.level=="none", NA, TRUE),
"initial_trend" = ifelse(my_list$initial.trend=="none", NA, TRUE))
accuracy_ata <- ATA.Accuracy(my_list, test_set, print.out = FALSE)
my_list$accuracy <- accuracy_ata
my_list$onestep <- onestep
return(my_list)
}
| /scratch/gouwar.j/cran-all/cranData/ATAforecasting/R/SubATA_MultipleO.R |
#' @importFrom stats ts tsp tsp<-
SubATA.OneStepForecast <- function(ataModel, outSample, hh=NULL)
{
X <- as.numeric(ataModel$actual)
pk <- ataModel$p
qk <- ataModel$q
phik <- ataModel$phi
modelType <- ataModel$model.type
lenX <- length(X)
outflag <- FALSE  # default when no out-of-sample data is supplied; avoids use before definition below
if(!is.null(outSample)){
if(!is.na(outSample[1])){
hh <- length(outSample)
ataModel$h <- hh
outflag <- TRUE
}
}
if(is.null(hh)){
hh <- ataModel$h
}
onestep.X <- rep(NA, lenX + hh)
onestep.X[1:lenX] <- as.numeric(ataModel$actual)
if(outflag){
onestep.X[(lenX + 1):(lenX + hh)] <- outSample
}
onestep.S <- rep(NA, lenX + hh)
onestep.S[1:lenX] <- as.numeric(ataModel$level)
onestep.T <- rep(NA, lenX + hh)
onestep.T[1:lenX] <- as.numeric(ataModel$trend)
onestep.fitted <- rep(NA, lenX + hh)
onestep.fitted[1:lenX] <- as.numeric(ataModel$fitted)
onestep.coefp <- rep(NA, lenX + hh)
onestep.coefp[1:lenX] <- as.numeric(ataModel$coefp)
onestep.coefq <- rep(NA, lenX + hh)
onestep.coefq[1:lenX] <- as.numeric(ataModel$coefq)
T_1 <- onestep.T[lenX-1]
S_1 <- onestep.S[lenX-1]
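# One-step-ahead ATA recursions used in the loop below, with coefp = p/t and coefq = q/t.
# Additive model: S_t = coefp * X_t + (1 - coefp) * (S_{t-1} + phi * T_{t-1}),
# T_t = coefq * (S_t - S_{t-1}) + (1 - coefq) * phi * T_{t-1}, one-step fit = S_t + phi * T_t.
# Multiplicative model: sums/differences become products/ratios and phi acts as an exponent on T.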
for(i in lenX:(lenX + hh - 1)){
Xobs = onestep.X[i]
if (modelType=="A"){
onestep.coefp[i] <- coefpk <- abs(pk/i)
onestep.coefq[i] <- coefqk <- abs(qk/i)
onestep.S[i] <- S <- (coefpk * Xobs) + ((1-coefpk) * (S_1 + (phik * T_1)))
onestep.T[i] <- T <- (coefqk * (S-S_1)) + ((1-coefqk) * phik * T_1)
if(outflag){
onestep.fitted[i+1] <- S + (phik * T)
}else{
onestep.fitted[i+1] <- onestep.X[i+1] <- S + (phik * T)
}
S_1 <- S
T_1 <- T
}
if (modelType=="M"){
onestep.coefp[i] <- coefpk <- abs(pk/i)
onestep.coefq[i] <- coefqk <- abs(qk/i)
onestep.S[i] <- S <- (coefpk * Xobs) + ((1-coefpk) * S_1 * (T_1^phik))
onestep.T[i] <- T <- (coefqk * (S/S_1)) + ((1-coefqk) * (T_1^phik))
if(outflag){
onestep.fitted[i+1] <- S * (T^phik)
}else{
onestep.fitted[i+1] <- onestep.X[i+1] <- S * (T^phik)
}
S_1 <- S
T_1 <- T
}
}
my_list <- ataModel
if(outflag){
my_list$forecast <- onestep.fitted[(lenX + 1):(lenX + hh)]
my_list$onestep.forecast <- list("level" = onestep.S[lenX:(lenX + hh - 1)],
"trend" = onestep.T[lenX:(lenX + hh - 1)],
"coefp" = onestep.coefp[lenX:(lenX + hh - 1)],
"coefq" = onestep.coefq[lenX:(lenX + hh - 1)])
}else{
my_list$onestep.forecast <- NA
}
return(my_list)
}
| /scratch/gouwar.j/cran-all/cranData/ATAforecasting/R/SubATA_OneStepForecast.R |
#' @importFrom forecast msts
#' @importFrom stats end start ts tsp tsp<- var
SubATA_Single_Before <- function(train_set, parP, parQ, model.type, seasonal.test, seasonal.model, seasonal.type, s.frequency, h, accuracy.type,
level.fixed, trend.fixed, trend.search, start.phi, end.phi, size.phi, initial.level, initial.trend, transform.method, lambda, shift, main_set,
test_set, seas_attr_set, freqYh, ci.level, negative.forecast, boxcox_attr_set, holdout, hold_set_size, holdout.adjustedP, holdin, nmse, onestep, holdout.onestep)
{
tspX <- tsp(train_set)
firstTspX <- tsp(main_set)
if (seasonal.model=="none"){
is.season <- FALSE
seasonal.type <- "A"
}else {
if (seasonal.test==FALSE){
if (max(s.frequency)==1){
is.season <- FALSE
seasonal.type <- "A"
}else {
is.season <- TRUE
}
}else {
is.season <- ATA.Seasonality(train_set, s.frequency, seas_attr_set)
}
}
if (is.season==TRUE){
if (seasonal.model!="decomp" & seasonal.type=="M"){
out.transform <- ATA.Transform(train_set, tMethod = "Box_Cox", tLambda = 0, tShift = 0) # lambda = 0 for multiplicative model
seas_train_set <- forecast::msts(out.transform$trfmX, start = start(train_set), seasonal.periods = s.frequency)
seas.type <- "A"
seas.lambda <- out.transform$tLambda
seas.shift <- out.transform$tShift
seas.transform <- "Box_Cox"
}else {
seas_train_set <- train_set
seas.lambda <- NULL
seas.transform <- NULL
seas.type <- seasonal.type
seas.shift <- 0
}
}else {
seas_train_set <- train_set
seasonal.model <- "none"
seas.type <- seasonal.type <- "A"
seas.lambda <- NULL
seas.shift <- 0
seas.transform <- NULL
}
ata.seasonal.component <- ATA.Decomposition(seas_train_set, s.model=seasonal.model, s.type=seas.type, s.frequency=s.frequency, seas_attr_set=seas_attr_set)
seasadj_train_set <- ATA.BackTransform(X=ata.seasonal.component$AdjustedX, tMethod=seas.transform, tLambda=seas.lambda, tShift = seas.shift)
SeasonalIndex <- ATA.BackTransform(X=ata.seasonal.component$SeasIndex, tMethod=seas.transform, tLambda=seas.lambda, tShift = seas.shift)
SeasonalActual <- ATA.BackTransform(X=ata.seasonal.component$SeasActual, tMethod=seas.transform, tLambda=seas.lambda, tShift = seas.shift)
if (seasonal.model=="x13" | seasonal.model=="x11"){
seasonal.type <- ata.seasonal.component$SeasType
}
if (is.season==FALSE & seasonal.type=="A"){
OS_SIValue <- rep(0,times=h)
}else if (is.season==FALSE & seasonal.type=="M"){
OS_SIValue <- rep(1,times=h)
}else if (is.season==TRUE){
OS_SIValue <- rep(NA,times=h)
for (k in 1:h){
OS_SIValue[k] <- SeasonalIndex[freqYh[k]]
}
}
if (holdout == TRUE){
holdout_part <- ifelse(hold_set_size > 0 & hold_set_size < 1, floor(length(seasadj_train_set) * hold_set_size), hold_set_size)
valid_len <- length(seasadj_train_set) - holdout_part
train_len <- length(seasadj_train_set)
train_set_mat <- forecast::msts(seasadj_train_set[1:valid_len], start = start(train_set), seasonal.periods = s.frequency)
validation_set <- forecast::msts(seasadj_train_set[(valid_len+1):train_len], start = end(train_set_mat) - ifelse(tspX[3]>1, (holdout_part - 1) * (1/tspX[3]), (holdout_part - 1) * 1), seasonal.periods = s.frequency)
}else {
train_set_mat <- seasadj_train_set
validation_set <- NA
}
ata.output <- SubATA.Damped(train_set_mat, pb = parP, qb = parQ, model.Type = model.type, accuracy.Type = accuracy.type, level.fix = level.fixed, trend.fix = trend.fixed,
trend.Search = trend.search, phiStart = start.phi, phiEnd = end.phi, phiSize = size.phi, initialLevel = initial.level, initialTrend = initial.trend,
main_set = seasadj_train_set, Holdout = holdout, HoldoutSet = validation_set, Adjusted_P = holdout.adjustedP, h = h, Holdin = holdin, nmse = nmse,
seas_periods = s.frequency, holdout_onestep = holdout.onestep)
ata.output$h <- h
if (onestep == FALSE){
ata.output <- SubATA.Forecast(ata.output, hh=h)
}else {
ata.output <- SubATA.OneStepForecast(ata.output, test_set, hh=h)
}
ata.output$actual <- main_set
fit_ata <- ata.output$fitted
forecast_ata <- ata.output$forecast
ata.output$level <- forecast::msts(ATA.BackTransform(X=ata.output$level, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals)),
start = start(main_set), seasonal.periods = s.frequency)
ata.output$trend <- forecast::msts(ATA.BackTransform(X=ata.output$trend, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals)),
start = start(main_set), seasonal.periods = s.frequency)
if(seasonal.type == "A"){
ATA.fitted <- ATA.BackTransform(X=fit_ata + SeasonalActual, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals))
ATA.forecast <- ATA.BackTransform(X=forecast_ata + OS_SIValue, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals))
if (holdout == TRUE){
ata.output$holdout.forecast <- forecast::msts(ATA.BackTransform(X = ata.output$holdout.forecast + SeasonalActual[(valid_len+1):train_len], tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals)),
start = end(train_set_mat) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = s.frequency)
}
}else {
ATA.fitted <- ATA.BackTransform(X=fit_ata * SeasonalActual, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals))
ATA.forecast <- ATA.BackTransform(X=forecast_ata * OS_SIValue, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals))
if (holdout == TRUE){
ata.output$holdout.forecast <- forecast::msts(ATA.BackTransform(X = ata.output$holdout.forecast * SeasonalActual[(valid_len+1):train_len], tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals)),
start = end(train_set_mat) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = s.frequency)
}
}
ata.output$fitted <- forecast::msts(ATA.fitted, start = start(main_set), seasonal.periods = s.frequency)
SeasonalActual <- ATA.BackTransform(X=SeasonalActual, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals))
SeasonalIndex <- ATA.BackTransform(X=SeasonalIndex, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals))
if (negative.forecast==TRUE){
ata.output$forecast <- forecast::msts(ATA.forecast, start = end(main_set) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = s.frequency)
}else {
ATA.forecast[ATA.forecast<0] <- 0
ata.output$forecast <- forecast::msts(ATA.forecast, start = end(main_set) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = s.frequency)
}
ata.output$residuals <- ata.output$actual - ata.output$fitted
if (holdout == TRUE){
ata.output$holdout.training <- forecast::msts(ata.output$actual[1:valid_len], start = start(main_set), seasonal.periods = s.frequency)
ata.output$holdout.validation <- forecast::msts(ata.output$actual[(valid_len+1):train_len], start = end(ata.output$holdout.training) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = s.frequency)
}
my_list <- ata.output
my_list$out.sample <- forecast::msts(test_set, start = end(main_set) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = s.frequency)
if (level.fixed==TRUE){
method <- paste("ATA(",my_list$p, ",", my_list$q,",", my_list$phi, ")", sep="")
}else if (trend.fixed==TRUE){
method <- paste("ATA(", my_list$p, ",1," ,my_list$phi, ")", sep="")
}else if (trend.search==TRUE){
method <- paste("ATA(",my_list$p, ",", my_list$q,",", my_list$phi, ")", sep="")
}else {
method <- paste("ATA(", my_list$p, "," ,my_list$q, ",", my_list$phi, ")", sep="")
}
my_list$initial.level <- initial.level
my_list$initial.trend <- initial.trend
my_list$level.fixed <- level.fixed
my_list$trend.fixed <- trend.fixed
my_list$trend.search <- trend.search
my_list$transform.method <- transform.method
my_list$lambda <- lambda
my_list$shift <- shift
my_list$bcLower <- boxcox_attr_set$bcLower
my_list$bcUpper <- boxcox_attr_set$bcUpper
my_list$bcBiasAdj <- boxcox_attr_set$bcBiasAdj
my_list$accuracy.type <- accuracy.type
my_list$nmse <- nmse
my_list$is.season <- is.season
my_list$seasonal.model <- seasonal.model
my_list$seasonal.type <- seasonal.type
if(my_list$q==0){
trend_mthd <- "N"
}else if (my_list$q!=0 & my_list$phi!=1 & my_list$phi>0){
trend_mthd <- paste(my_list$model.type, "d", sep="")
}else{
trend_mthd <- my_list$model.type
}
if(my_list$seasonal.model == "none"){
seas_mthd <- "N"
}else{
seas_mthd <- my_list$seasonal.type
}
method <- paste(method, " (A,", trend_mthd, ",", seas_mthd, ")", sep="")
my_list$method <- method
my_list$seasonal.period <- s.frequency
my_list$seasonal.index <- SeasonalIndex
my_list$seasonal <- forecast::msts(SeasonalActual, start = start(main_set), seasonal.periods = s.frequency)
my_list$seasonal.adjusted <- forecast::msts(ATA.BackTransform(X=seasadj_train_set, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals)),
start = start(main_set), seasonal.periods = s.frequency)
ci.output <- ATA.CI(object = my_list, ci.level = ci.level)
my_list$ci.level <- ci.level
if (negative.forecast==TRUE){
my_list$forecast.lower <- ci.output$forecast.lower
my_list$forecast.upper <- ci.output$forecast.upper
}else {
ci_low <- ci.output$forecast.lower
ci_up <- ci.output$forecast.upper
ci_low[ci_low<0] <- 0
ci_up[ci_up<0] <- 0
my_list$forecast.lower <- ci_low
my_list$forecast.upper <- ci_up
}
my_list$par.specs <- list("p" = my_list$p, "q" = my_list$q, "phi" = my_list$phi,
"trend" = trend_mthd,
"seasonal" = seas_mthd,
"period" = s.frequency,
"decomp_model" = ifelse(seas_mthd == "N", NA, my_list$seasonal.model),
"initial_level" = ifelse(my_list$initial.level=="none", NA, TRUE),
"initial_trend" = ifelse(my_list$initial.trend=="none", NA, TRUE))
accuracy_ata <- ATA.Accuracy(my_list, test_set, print.out = FALSE)
my_list$accuracy <- accuracy_ata
my_list$onestep <- onestep
return(my_list)
}
| /scratch/gouwar.j/cran-all/cranData/ATAforecasting/R/SubATA_Single.R |
#' @importFrom forecast msts
#' @importFrom stats end start ts tsp tsp<- var
SubATA_Single_After <- function(train_set, parP, parQ, model.type, seasonal.test, seasonal.model, seasonal.type, s.frequency, h, accuracy.type,
level.fixed, trend.fixed, trend.search, start.phi, end.phi, size.phi, initial.level, initial.trend, transform.method, lambda, shift, main_set,
test_set, seas_attr_set, freqYh, ci.level, negative.forecast, boxcox_attr_set, holdout, hold_set_size, holdout.adjustedP, holdin, nmse, onestep, holdout.onestep)
{
tspX <- tsp(train_set)
firstTspX <- tsp(main_set)
if (seasonal.model=="none"){
is.season <- FALSE
seasonal.type <- "A"
}else {
if (seasonal.test==FALSE){
if (max(s.frequency)==1){
is.season <- FALSE
seasonal.type <- "A"
}else {
is.season <- TRUE
}
}else {
is.season <- ATA.Seasonality(train_set, s.frequency, seas_attr_set)
}
}
if (is.season==TRUE){
if (seasonal.model!="decomp" & seasonal.type=="M"){
out.transform <- ATA.Transform(train_set, tMethod = "Box_Cox", tLambda = 0, tShift = 0) # lambda = 0 for multiplicative model
seas_train_set <- forecast::msts(out.transform$trfmX, start = start(train_set), seasonal.periods = s.frequency)
seas.type <- "A"
seas.lambda <- out.transform$tLambda
seas.shift <- out.transform$tShift
seas.transform <- "Box_Cox"
}else {
seas_train_set <- train_set
seas.lambda <- NULL
seas.transform <- NULL
seas.type <- seasonal.type
seas.shift <- 0
}
}else {
seas_train_set <- train_set
seasonal.model <- "none"
seas.type <- seasonal.type <- "A"
seas.lambda <- NULL
seas.shift <- 0
seas.transform <- NULL
}
ata.seasonal.component <- ATA.Decomposition(seas_train_set, s.model=seasonal.model, s.type=seas.type, s.frequency=s.frequency, seas_attr_set=seas_attr_set)
seasadj_train_set <- ATA.BackTransform(X=ata.seasonal.component$AdjustedX, tMethod=seas.transform, tLambda=seas.lambda, tShift = seas.shift)
SeasonalIndex <- ATA.BackTransform(X=ata.seasonal.component$SeasIndex, tMethod=seas.transform, tLambda=seas.lambda, tShift = seas.shift)
SeasonalActual <- ATA.BackTransform(X=ata.seasonal.component$SeasActual, tMethod=seas.transform, tLambda=seas.lambda, tShift = seas.shift)
if (seasonal.model=="x13" | seasonal.model=="x11"){
seasonal.type <- ata.seasonal.component$SeasType
}
ChgX <- ATA.Transform(seasadj_train_set, tMethod = transform.method, tLambda = lambda, tShift = shift, bcMethod = boxcox_attr_set$bcMethod, bcLower = boxcox_attr_set$bcLower, bcUpper = boxcox_attr_set$bcUpper)
seasadj_train_set <- ChgX$trfmX
lambda <- ChgX$tLambda
shift <- ChgX$tShift
if (is.season==FALSE & seasonal.type=="A"){
OS_SIValue <- rep(0,times=h)
}else if (is.season==FALSE & seasonal.type=="M"){
OS_SIValue <- rep(1,times=h)
}else if (is.season==TRUE){
OS_SIValue <- rep(NA,times=h)
for (k in 1:h){
OS_SIValue[k] <- SeasonalIndex[freqYh[k]]
}
}
if (holdout == TRUE){
holdout_part <- ifelse(hold_set_size > 0 & hold_set_size < 1, floor(length(seasadj_train_set) * hold_set_size), hold_set_size)
valid_len <- length(seasadj_train_set) - holdout_part
train_len <- length(seasadj_train_set)
train_set_mat <- forecast::msts(seasadj_train_set[1:valid_len], start = start(train_set), seasonal.periods = s.frequency)
validation_set <- forecast::msts(seasadj_train_set[(valid_len+1):train_len], start = end(train_set_mat) - ifelse(tspX[3]>1, (holdout_part - 1) * (1/tspX[3]), (holdout_part - 1) * 1), seasonal.periods = s.frequency)
}else {
train_set_mat <- seasadj_train_set
validation_set <- NA
}
ata.output <- SubATA.Damped(train_set_mat, pb = parP, qb = parQ, model.Type = model.type, accuracy.Type = accuracy.type, level.fix = level.fixed, trend.fix = trend.fixed,
trend.Search = trend.search, phiStart = start.phi, phiEnd = end.phi, phiSize = size.phi, initialLevel = initial.level, initialTrend = initial.trend,
main_set = seasadj_train_set, Holdout = holdout, HoldoutSet = validation_set, Adjusted_P = holdout.adjustedP, h = h, Holdin = holdin, nmse = nmse,
seas_periods = s.frequency, holdout_onestep = holdout.onestep)
ata.output$h <- h
if (onestep == FALSE){
ata.output <- SubATA.Forecast(ata.output, hh=h)
}else {
ata.output <- SubATA.OneStepForecast(ata.output, test_set, hh=h)
}
ata.output$actual <- main_set
fit_ata <- ATA.BackTransform(X=ata.output$fitted, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals))
forecast_ata <- ATA.BackTransform(X=ata.output$forecast, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals))
ata.output$level <- forecast::msts(ATA.BackTransform(X=ata.output$level, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals)),
start = start(main_set), seasonal.periods = s.frequency)
ata.output$trend <- forecast::msts(ATA.BackTransform(X=ata.output$trend, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals)),
start = start(main_set), seasonal.periods = s.frequency)
if(seasonal.type == "A"){
ATA.fitted <- fit_ata + SeasonalActual
ATA.forecast <- forecast_ata + OS_SIValue
if (holdout == TRUE){
holdout.ata <- ATA.BackTransform(X=ata.output$holdout.forecast, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals))
ata.output$holdout.forecast <- forecast::msts(holdout.ata + SeasonalActual[(valid_len+1):train_len], start = end(train_set_mat) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = s.frequency)
}
}else {
ATA.fitted <- fit_ata * SeasonalActual
ATA.forecast <- forecast_ata * OS_SIValue
if (holdout == TRUE){
holdout.ata <- ATA.BackTransform(X=ata.output$holdout.forecast, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals))
ata.output$holdout.forecast <- forecast::msts(holdout.ata * SeasonalActual[(valid_len+1):train_len], start = end(train_set_mat) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = s.frequency)
}
}
ata.output$fitted <- forecast::msts(ATA.fitted, start = start(main_set), seasonal.periods = s.frequency)
if (negative.forecast==TRUE){
ata.output$forecast <- forecast::msts(ATA.forecast, start = end(main_set) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = s.frequency)
}else {
ATA.forecast[ATA.forecast<0] <- 0
ata.output$forecast <- forecast::msts(ATA.forecast, start = end(main_set) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = s.frequency)
}
ata.output$residuals <- ata.output$actual - ata.output$fitted
if (holdout == TRUE){
ata.output$holdout.training <- forecast::msts(ata.output$actual[1:valid_len], start = start(main_set), seasonal.periods = s.frequency)
ata.output$holdout.validation <- forecast::msts(ata.output$actual[(valid_len+1):train_len], start = end(ata.output$holdout.training) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = s.frequency)
}
my_list <- ata.output
my_list$out.sample <- forecast::msts(test_set, start = end(main_set) + ifelse(firstTspX[3]>1, 1/firstTspX[3], 1), seasonal.periods = s.frequency)
if (level.fixed==TRUE){
method <- paste("ATA(",my_list$p, ",", my_list$q,",", my_list$phi, ")", sep="")
}else if (trend.fixed==TRUE){
method <- paste("ATA(", my_list$p, ",1," ,my_list$phi, ")", sep="")
}else if (trend.search==TRUE){
method <- paste("ATA(",my_list$p, ",", my_list$q,",", my_list$phi, ")", sep="")
}else {
method <- paste("ATA(", my_list$p, "," ,my_list$q, ",", my_list$phi, ")", sep="")
}
my_list$initial.level <- initial.level
my_list$initial.trend <- initial.trend
my_list$level.fixed <- level.fixed
my_list$trend.fixed <- trend.fixed
my_list$trend.search <- trend.search
my_list$transform.method <- transform.method
my_list$lambda <- lambda
my_list$shift <- shift
my_list$bcLower <- boxcox_attr_set$bcLower
my_list$bcUpper <- boxcox_attr_set$bcUpper
my_list$bcBiasAdj <- boxcox_attr_set$bcBiasAdj
my_list$accuracy.type <- accuracy.type
my_list$nmse <- nmse
my_list$is.season <- is.season
my_list$seasonal.model <- seasonal.model
my_list$seasonal.type <- seasonal.type
if(my_list$q==0){
trend_mthd <- "N"
}else if (my_list$q!=0 & my_list$phi!=1 & my_list$phi>0){
trend_mthd <- paste(my_list$model.type, "d", sep="")
}else{
trend_mthd <- my_list$model.type
}
if(my_list$seasonal.model == "none"){
seas_mthd <- "N"
}else{
seas_mthd <- my_list$seasonal.type
}
method <- paste(method, " (A,", trend_mthd, ",", seas_mthd, ")", sep="")
my_list$method <- method
my_list$seasonal.period <- s.frequency
my_list$seasonal.index <- SeasonalIndex
my_list$seasonal <- forecast::msts(SeasonalActual, start = start(main_set), seasonal.periods = s.frequency)
my_list$seasonal.adjusted <- forecast::msts(ATA.BackTransform(X=seasadj_train_set, tMethod=transform.method, tLambda=lambda, tShift=shift, tbiasadj=boxcox_attr_set$bcBiasAdj, tfvar=if (boxcox_attr_set$bcBiasAdj==FALSE) NULL else var(ata.output$residuals)),
start = start(main_set), seasonal.periods = s.frequency)
ci.output <- ATA.CI(object = my_list, ci.level = ci.level)
my_list$ci.level <- ci.level
if (negative.forecast==TRUE){
my_list$forecast.lower <- ci.output$forecast.lower
my_list$forecast.upper <- ci.output$forecast.upper
}else {
ci_low <- ci.output$forecast.lower
ci_up <- ci.output$forecast.upper
ci_low[ci_low<0] <- 0
ci_up[ci_up<0] <- 0
my_list$forecast.lower <- ci_low
my_list$forecast.upper <- ci_up
}
my_list$par.specs <- list("p" = my_list$p, "q" = my_list$q, "phi" = my_list$phi,
"trend" = trend_mthd,
"seasonal" = seas_mthd,
"period" = s.frequency,
"decomp_model" = ifelse(seas_mthd == "N", NA, my_list$seasonal.model),
"initial_level" = ifelse(my_list$initial.level=="none", NA, TRUE),
"initial_trend" = ifelse(my_list$initial.trend=="none", NA, TRUE))
accuracy_ata <- ATA.Accuracy(my_list, test_set, print.out = FALSE)
my_list$accuracy <- accuracy_ata
my_list$onestep <- onestep
return(my_list)
}
| /scratch/gouwar.j/cran-all/cranData/ATAforecasting/R/SubATA_SingleO.R |
SubATA.Transform <- function(tX
, tMethod = c("Box_Cox", "Sqrt", "Reciprocal", "Log", "NegLog", "Modulus", "BickelDoksum", "Manly", "Dual", "YeoJohnson", "GPower", "GLog")
, tType = c("Vanilla", "Back")
, tLambda = NULL
, tShift = 0)
{
out_transform <- list("tX" = tX, "tType" = tType, "tLambda" = tLambda, "tShift" = tShift)
switch(tType,
Vanilla = {
switch(tMethod,
Box_Cox = {
out_transform$tShift <- ntShift <- calc_shift(min(tX),tShift)
if (abs(tLambda) <= 0.00000000001) {
out_transform$tX <- log(tX + ntShift)
}else {
out_transform$tX <- (((tX + ntShift)^tLambda) - 1) / tLambda
}
},
Modulus = {
mdls <- abs(tX) + 1
if (abs(tLambda) <= 0.00000000001) {
out_transform$tX <- sign(tX) * log(mdls)
}else {
out_transform$tX <- sign(tX) * ((mdls^tLambda) - 1) / tLambda
}
},
BickelDoksum = {
if (tLambda > 0.00000000001) {
out_transform$tX <- ((abs(tX)^tLambda) * sign(tX) - 1)/ tLambda
}else {
stop("The lambda parameter must be positive for the Bickel-Doksum transformation.")
}
},
Manly = {
if (abs(tLambda) <= 0.00000000001) {
out_transform$tX <- tX
}else {
out_transform$tX <- (exp(tX * tLambda) - 1) / tLambda
}
},
Dual = {
out_transform$tShift <- ntShift <- calc_shift(min(tX),tShift)
if (abs(tLambda) <= 0.00000000001) {
out_transform$tX <- log(tX + ntShift)
}else {
out_transform$tX <- (((tX + ntShift)^tLambda) - ((tX + ntShift)^(-tLambda))) / (2 * tLambda)
}
},
YeoJohnson = {
lentX <- length(tX)
tZ <- rep(NA, lentX)
negtX <- which(tX < 0)
postX <- which(tX >= 0)
if (abs(tLambda) <= 0.00000000001) {
tZ[postX] <- log(tX[postX] + 1)
}else {
tZ[postX] <- (((tX[postX] + 1)^tLambda) - 1) / tLambda
}
if (abs(tLambda - 2) <= 0.00000000001) {
tZ[negtX] <- -log(1 - tX[negtX])
}else {
tZ[negtX] <- (((1 - tX[negtX])^(2 - tLambda)) - 1)/(tLambda - 2)
}
out_transform$tX <- tZ
},
NegLog = {
mdls <- abs(tX) + 1
out_transform$tX <- sign(tX) * log(mdls)
out_transform$tLambda <- NULL
},
GLog = {
out_transform$tShift <- ntShift <- calc_shift(min(tX),tShift)
out_transform$tX <- log((tX + ntShift) + sqrt(((tX + ntShift)^2) + 1))
out_transform$tLambda <- NULL
},
GPower = {
out_transform$tShift <- ntShift <- calc_shift(min(tX),tShift)
if (abs(tLambda) <= 0.00000000001) {
out_transform$tX <- log((tX + ntShift) + sqrt(((tX + ntShift)^2) + 1))
}else {
out_transform$tX <- ((((tX + ntShift) + sqrt(((tX + ntShift)^2) + 1))^tLambda) - 1) / tLambda
}
},
Sqrt = {
out_transform$tShift <- ntShift <- calc_shift(min(tX),tShift)
out_transform$tX <- sqrt(tX + ntShift)
out_transform$tLambda <- NULL
},
Log = {
out_transform$tShift <- ntShift <- calc_shift(min(tX),tShift)
out_transform$tLambda <- tLambda <- 0
out_transform$tX <- log(tX + ntShift)
},
Reciprocal = {
out_transform$tLambda <- NULL
out_transform$tX <- 1/tX
}
)
},
Back = {
switch(tMethod,
Box_Cox = {
if (abs(tLambda) <= 0.00000000001) {
out_transform$tX <- exp(tX) - tShift
}else {
out_transform$tX <- (tLambda * tX + 1)^(1 / tLambda) - tShift
}
},
Modulus = {
if (abs(tLambda) <= 0.00000000001) {
out_transform$tX <- sign(tX) * (exp(abs(tX)) - 1)
}else {
out_transform$tX <- sign(tX) * ((abs(tX)*tLambda + 1)^(1/tLambda) - 1)
}
},
BickelDoksum = {
# invert z = (sign(x) * abs(x)^tLambda - 1) / tLambda:
# sign(x) * abs(x)^tLambda = tLambda * z + 1 =: u, so x = sign(u) * abs(u)^(1 / tLambda).
# Splitting on the sign of the transformed value (as before) yields NaN whenever
# z < 0 but tLambda * z + 1 > 0; splitting on the sign of u gives the correct inverse.
u <- tLambda * tX + 1
out_transform$tX <- sign(u) * (abs(u)^(1 / tLambda))
},
Manly = {
if (abs(tLambda) <= 0.00000000001) {
out_transform$tX <- tX
}else {
out_transform$tX <- log(tLambda * tX + 1) / tLambda
}
},
Dual = {
if (abs(tLambda) <= 0.00000000001) {
out_transform$tX <- exp(tX) - tShift
}else {
out_transform$tX <- ((tLambda * tX + sqrt(1 + tLambda^2 * tX^2))^(1/tLambda)) - tShift
}
},
YeoJohnson = {
lentX <- length(tX)
tZ <- rep(NA, lentX)
negtX <- which(tX < 0)
postX <- which(tX >= 0)
if (abs(tLambda) <= 0.00000000001) {
tZ[postX] <- exp(tX[postX]) - 1
}else {
tZ[postX] <- ((tX[postX] * tLambda + 1)^(1 / tLambda)) - 1
}
if (abs(tLambda - 2) <= 0.00000000001) {
tZ[negtX] <- (-1) * (exp(-tX[negtX]) - 1)
}else {
tZ[negtX] <- (-1) * ((tX[negtX] * (tLambda - 2) + 1)^(1/(2 - tLambda)) - 1)
}
out_transform$tX <- tZ
},
NegLog = out_transform$tX <- sign(tX) * (exp(abs(tX)) - 1),
GLog = out_transform$tX <- ((-(1 - exp(tX * 2))) / (2 * exp(tX))) - tShift,
GPower = {
if (abs(tLambda) <= 0.00000000001) {
out_transform$tX <- ((-(1 - exp(tX * 2))) / (2 * exp(tX))) - tShift
}else {
gpX <- (tX * tLambda + 1)^(1 / tLambda)
out_transform$tX <- ((-(1 - gpX^2)) / (2 * gpX)) - tShift
}
},
Sqrt = out_transform$tX <- tX^2 - tShift,
Log = out_transform$tX <- exp(tX) - tShift,
Reciprocal = out_transform$tX <- 1 / tX
)
}
)
return(out_transform)
}
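# A minimal round-trip sketch (comments only, not run; `x` is a made-up series):
# tType = "Vanilla" applies the transformation and tType = "Back" inverts it,
# reusing the lambda and shift returned by the forward call.
#   x <- c(3, 5, 8, 13)
#   fwd <- SubATA.Transform(x, tMethod = "Box_Cox", tType = "Vanilla", tLambda = 0.5, tShift = 0)
#   bck <- SubATA.Transform(fwd$tX, tMethod = "Box_Cox", tType = "Back",
#                           tLambda = fwd$tLambda, tShift = fwd$tShift)
#   all.equal(bck$tX, x)  # TRUE up to numerical tolerance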
calc_shift <- function(mintX, tshft) {
if (mintX <= 0) {
if (tshft >=0 & tshft < abs(mintX)) {
newtShift <- abs(mintX) + 1
warning("The data has negative values. Besides, the shift parameter is smaller than minimum value of the data. ATAforecasting changed the shift parameter to absolute minimum value of the data.")
}else if (tshft >=0 & tshft >= abs(mintX)) {
newtShift <- tshft + 1
}else {
newtShift <- abs(mintX) + 1
warning("The data has negative values. Besides, the shift parameter must be positive value. ATAforecasting changed the shift parameter to 0.")
}
}else {
if (tshft < 0 & abs(tshft) >= mintX) {
newtShift <- 0
warning("The shift parameter is negative and bigger than minimum value of the data. ATAforecasting changed the shift parameter to 0.")
}else {
newtShift <- tshft
}
}
return(newtShift)
}
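# Worked examples (comments only, not run): calc_shift(-4, 0) returns 5, so
# x + 5 is strictly positive before a log/Box-Cox transform; calc_shift(3, 1)
# returns the user-supplied shift of 1 unchanged since the data are already positive.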
| /scratch/gouwar.j/cran-all/cranData/ATAforecasting/R/SubATA_Transform.R |
#' Monthly number of tourists arriving in Turkey
#'
#' Monthly number of tourists arriving in Turkey: from Jan 2008 to Dec 2020.
#'
#' @docType data
#'
#' @usage data(touristTR)
#'
#' @format Time series data
#' @source The Central Bank of the Republic of Turkey -- CBRT.
#' @keywords datasets
#' @examples
#' plot(touristTR)
#'
"touristTR"
#' Weekly Net Funding Level of the Central Bank of the Republic of Turkey
#'
#' Weekly net funding level of the Central Bank of the Republic of Turkey: from Jan 7, 2011 to Jan 8, 2021.
#'
#' @docType data
#'
#' @usage data(fundingTR)
#'
#' @format Time series data
#' @source The Central Bank of the Republic of Turkey -- CBRT.
#' @keywords datasets
#' @examples
#' plot(fundingTR)
#'
"fundingTR" | /scratch/gouwar.j/cran-all/cranData/ATAforecasting/R/data.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @name Scaled
#' @title Store parameters and functions associated with the scaled version of ATN
#' @description Type the name of the class to see its methods
#' @field nb_s Total number of species
#' @field nb_b Number of basal species
#' @field c double: interference competition
#' @field X Vector of metabolic rates (length = number of species)
#' @field max_feed Vector of maximum feeding rates (length = number of consumers)
#' @field e Vector of assimilation efficiencies (length = number of species)
#' @field r Vector of producers maximum growth rates (length = number of basal species)
#' @field BM Vector of body masses (length = number of species)
#' @field dB Vector of local derivatives (length = number of species)
#' @field B0 Vector of half saturation densities (length = number of consumers)
#' @field fw Adjacency matrix of the food-web (dim = number of species * number of species)
#' @field w Matrix of relative consumption rates (dim = number of species * number of consumers)
#' @field F Matrix of per-capita feeding rates (dim = number of species * number of consumers)
#' @field q Hill exponent for the type of functional response
#' @field K Carrying capacity of basal species
#' @field ext Extinction threshold for species
#' @field alpha Plant resource competition
#' @field ODE Calculate the derivatives for the scaled version of the ATN model \itemize{
#' \item Parameter: bioms - Local species biomasses
#' \item Parameter: t - Integration time point
#' \item Returns a vector of growth rates, one for each species at time t
#' }
NULL
#' @name Scaled_loops
#' @title Store parameters and functions associated with the scaled version of ATN
#' @description Do not use; for testing purposes only. Please use Rcpp_Scaled instead.
NULL
#' @name Unscaled
#' @title Store parameters and functions associated with the unscaled version of ATN
#' @description Type the name of the class to see its methods
#' @field nb_s Total number of species
#' @field nb_b Number of basal species
#' @field c double: interference competition
#' @field X Vector of metabolic rates (length = number of species)
#' @field a Matrix of attack rates (dim = number of species * number of consumers)
#' @field h Matrix of handling times (dim = number of species * number of consumers)
#' @field e Vector of assimilation efficiencies (length = number of species)
#' @field r Vector of producers maximum growth rates (length = number of basal species)
#' @field BM Vector of body masses (length = number of species)
#' @field dB Vector of local derivatives (length = number of species)
#' @field fw Adjacency matrix of the food-web (dim = number of species * number of species)
#' @field F Matrix of per-capita feeding rates (dim = number of species * number of consumers)
#' @field q Hill exponent for the type of functional response
#' @field K Carrying capacity of basal species
#' @field alpha Plant resource competition
#' @field ext Extinction threshold for species
#' @field ODE Calculate the derivatives for the unscaled version of the ATN model \itemize{
#' \item Parameter: bioms - Local species biomasses
#' \item Parameter: t - Integration time point
#' \item Returns a vector of growth rates, one for each species at time t
#' }
NULL
#' @name Unscaled_loops
#' @title Store parameters and functions associated with the unscaled version of ATN
#' @description Do not use; for testing purposes only. Please use Rcpp_Unscaled instead.
NULL
#' @name Unscaled_nuts
#' @title Store parameters and functions associated with the unscaled version of ATN including nutrient dynamics
#' @description Type the name of the class to see its methods
#' @field nb_s Total number of species
#' @field nb_b Number of basal species
#' @field nb_n Number of nutrient pool
#' @field c double: interference competition
#' @field b Matrix of attack rates (dim = number of species * number of consumers)
#' @field h Matrix of handling times (dim = number of species * number of consumers)
#' @field X vector of metabolic rates (length = number of species)
#' @field K matrix of plant nutrient efficiencies (dim = number of nutrients * number of plants)
#' @field V matrix of plant relative nutrient content (dim = number of nutrients * number of plants)
#' @field S Vector of maximum nutrient concentration (length = number of plants)
#' @field r Vector of maximum growth rate of plant species (length = number of plant species)
#' @field e Vector of assimilation efficiencies (length = number of species)
#' @field BM Vector of body masses (length = number of species)
#' @field dB Vector of local derivatives (length = number of species)
#' @field fw Adjacency matrix of the food-web (dim = number of species * number of species)
#' @field w Matrix of relative consumption rates (dim = number of species * number of consumers)
#' @field F Matrix of per-capita feeding rates (dim = number of species * number of consumers)
#' @field q Hill exponent for the type of functional response
#' @field ext Extinction threshold for species
#' @field ODE Calculate the derivatives for the unscaled version of the ATN model with nutrient dynamics \itemize{
#' \item Parameter: bioms - Local species biomasses
#' \item Parameter: t - Integration time point
#' \item Returns a vector of growth rates, one for each species at time t
#' }
NULL
#' @name Unscaled_nuts_loops
#' @title Store parameters and functions associated with the unscaled version of ATN including nutrient dynamics
#' @description Do not use; for testing purposes only. Please use Rcpp_Unscaled_nuts instead.
NULL
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/RcppExports.R |
#' @keywords internal
.runge_kutta4 <- function(t, biomasses, model){
bioms <- matrix(NA, ncol = length(biomasses), nrow = length(t))
biom.step <- biomasses
delta.t <- t[2] - t[1]  # assumes a uniform time grid
for (i in seq_along(t)){
bioms[i, ] <- biom.step
# classical 4th-order Runge-Kutta step from t[i] to t[i] + delta.t
# (k4 is evaluated at t[i] + delta.t; irrelevant for the autonomous ATN
# ODEs, but required for a time-dependent right-hand side)
k1 <- model$ODE(biom.step, t[i])
k2 <- model$ODE(biom.step + 0.5*delta.t * k1, t[i] + 0.5*delta.t)
k3 <- model$ODE(biom.step + 0.5*delta.t * k2, t[i] + 0.5*delta.t)
k4 <- model$ODE(biom.step + delta.t * k3, t[i] + delta.t)
biom.step <- biom.step + (delta.t/6) * (k1 + 2*k2 + 2*k3 + k4)
}
return(cbind(t, bioms))
}
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/Runge-Kutta4.R |
#' @title Detect whether a food web is composed of several disconnected sub-networks
#'
#' @description Run a depth-first search algorithm (DFS)
#'
#' @param fw binary adjacency matrix of the food web.
#'
#' @return Boolean: TRUE if the food web is connected, FALSE if several disconnected sub-networks are detected.
#'
#' @examples
#'
#' library(ATNr)
#' set.seed(123)
#' # number of species, nutrients, and body masses
#' n_species <- 20
#' n_basal <- 5
#' n_nutrients <- 3
#' masses <- sort(10^runif(n_species, 2, 6)) #body mass of species
#' # create food web matrix
#' L <- create_Lmatrix(masses, n_basal)
#' L[, 1:n_basal] <- 0
#' fw <- L
#' fw[fw > 0] <- 1
#' connected <- is_connected(fw)
is_connected <- function(fw){ #BUG: this has conflicts with igraph::is_connected
  # m is the undirected food web.
  # i is the first index, default = 1.
  # visited is the TRUE/FALSE vector to check if nodes were visited.
  # env is the parent environment of the DFS function.
  DFS <- function(m, i, visited, env = envf) {
    # loop over successors
    for (n in which(m[i, ] > 0)){
      if (!env$visited[n]){
        # if n not visited, apply recursion
        env$visited[n] = TRUE
        DFS(m, n, env$visited)
      }
    }
    return(invisible(NULL))
  }
  # make the network undirected (symmetric matrix) to simply use a DFS algorithm
  fw.s <- fw
  fw.s[lower.tri(fw.s)] <- t(fw.s)[lower.tri(fw.s)]
  # create the Boolean vector of visited nodes
  visited <- rep(FALSE, nrow(fw.s))
  visited[1] <- TRUE
  # call depth-first search alg. on the first node (should it be randomly chosen?)
  envf <- environment()
  # DFS updates 'visited' in this environment as a side effect; its return
  # value must not overwrite the vector
  DFS(fw.s, 1, visited, envf)
  # check if all nodes were reached by the DFS, and return:
  return(all(visited))
}
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/connected_component.R |
#' @title Initialize an ATN model, following Schneider et al. 2016, Nature Communication
#'
#' @param nb_s integer, number of total species.
#' @param nb_b integer, number of basal species.
#' @param nb_n integer, number of nutrients.
#' @param BM float vector, body mass of species.
#' @param fw binary adjacency matrix of the food web.
#'
#' @export
#'
#' @details A model is defined by the total number of species
#' (\emph{nb_s}), the number of basal species (\emph{nb_b}),
#' the number of nutrients (\emph{nb_n}), the body masses
#' (\emph{BM}) of species, and the adjacency matrix (\emph{fw})
#' representing species interactions.
#' Nutrients are not counted as species.
#'
#' @return An object of class \emph{ATN (Rcpp_Unscaled_nuts)}.
#'
#' @examples
#' library(ATNr)
#' set.seed(123)
#' n_species <- 50
#' n_basal <- 20
#' n_nutrients <- 2
#' masses <- sort(10^runif(n_species, 2, 6)) #body mass of species
#' L <- create_Lmatrix(masses, n_basal)
#' fw <- L
#' fw[fw > 0] <- 1
#' mod <- create_model_Unscaled_nuts(n_species, n_basal, n_nutrients, masses, fw)
create_model_Unscaled_nuts <- function(
nb_s,
nb_b,
nb_n = 2,
BM,
fw
) {
# check input is correct
if (any(c(nb_s, nb_b, nb_n) %% 1 != 0)) {
stop("nb_s, nb_b and nb_n must all be integers")
}
if (length(BM) != nb_s) {
stop("BM should have length equal to nb_s (", nb_s, ")")
}
if (dim(fw)[1] != dim(fw)[2]) {
stop("Food web matrix is not a square matrix")
}
if (nb_s != ncol(fw)) {
stop("Number of species and food web matrix do not match")
}
model <- methods::new(Unscaled_nuts, nb_s, nb_b, nb_n)
# THIS WE CAN EVEN PUT IN THE CONSTRUCTOR, PERHAPS?
model[["BM"]] <- BM
model[["fw"]] <- fw
return(model)
}
#' @title Initialize an ATN model, following Delmas et al. 2017, Methods in Ecology and Evolution
#'
#' @param nb_s integer, number of total species.
#' @param nb_b integer, number of basal species.
#' @param BM float vector, body mass of species.
#' @param fw binary adjacency matrix of the food web.
#'
#' @export
#'
#' @details A model is defined by the total number of species
#' (\emph{nb_s}), the number of basal species (\emph{nb_b}),
#' the body masses (\emph{BM}) of species, and the adjacency
#' matrix (\emph{fw}) representing species interactions.
#'
#' @return An object of class \emph{ATN (Rcpp_Scaled)}.
#'
#' @references Delmas, E., Brose, U., Gravel, D., Stouffer, D.B. and Poisot, T.
#' (2017), Simulations of biomass dynamics in community food webs. Methods
#' Ecol Evol, 8: 881-886. https://doi.org/10.1111/2041-210X.12713
#'
#' @examples
#' library(ATNr)
#' set.seed(123)
#' n_species <- 50
#' n_basal <- 20
#' masses <- sort(10^runif(n_species, 2, 6)) #body mass of species
#' L <- create_Lmatrix(masses, n_basal)
#' fw <- L
#' fw[fw > 0] <- 1
#' mod <- create_model_Scaled(n_species, n_basal, masses, fw)
create_model_Scaled <- function(
nb_s,
nb_b,
BM,
fw
) {
# check input is correct
if (any(c(nb_s, nb_b) %% 1 != 0)) {
stop("nb_s, nb_b and nb_n must all be integers")
}
if (length(BM) != nb_s) {
stop("BM should have length equal to nb_s (", nb_s, ")")
}
if (dim(fw)[1] != dim(fw)[2]) {
stop("Food web matrix is not a square matrix")
}
if (nb_s != ncol(fw)) {
stop("Number of species and food web matrix do not match")
}
model <- methods::new(Scaled, nb_s, nb_b)
# THIS WE CAN EVEN PUT IN THE CONSTRUCTOR, PERHAPS?
model[["BM"]] <- BM
model[["log_BM"]] <- log10(BM)
model[["fw"]] <- fw
return(model)
}
#' @title Initialize an ATN model, following Binzer et al. 2016, Global Change Biology
#'
#' @param nb_s integer, number of total species.
#' @param nb_b integer, number of basal species.
#' @param BM float vector, body mass of species.
#' @param fw binary adjacency matrix of the food web.
#'
#' @export
#'
#' @details A model is defined by the total number of species
#' (\emph{nb_s}), the number of basal species (\emph{nb_b}),
#' the body masses (\emph{BM}) of species, and the adjacency
#' matrix (\emph{fw}) representing species interactions.
#'
#' @return An object of class \emph{ATN (Rcpp_Unscaled)}.
#'
#' @references Binzer, A., Guill, C., Rall, B.C. and Brose, U. (2016),
#' Interactive effects of warming, eutrophication and size structure: impacts on biodiversity and food-web structure.
#' Glob Change Biol, 22: 220-227. https://doi.org/10.1111/gcb.13086
#' Gauzens, B., Rall, B.C., Mendonca, V. et al.
#' Biodiversity of intertidal food webs in response to warming across latitudes.
#' Nat. Clim. Chang. 10, 264-269 (2020). https://doi.org/10.1038/s41558-020-0698-z
#'
#' @examples
#' library(ATNr)
#' set.seed(123)
#' n_species <- 50
#' n_basal <- 20
#' masses <- sort(10^runif(n_species, 1, 6)) #body mass of species
#' L <- create_Lmatrix(masses, n_basal)
#' fw <- L
#' fw[fw > 0] <- 1
#' mod <- create_model_Unscaled(n_species, n_basal, masses, fw)
create_model_Unscaled <- function(
nb_s,
nb_b,
BM,
fw
) {
# check input is correct
if (any(c(nb_s, nb_b) %% 1 != 0)) {
stop("nb_s, nb_b and nb_n must all be integers")
}
if (length(BM) != nb_s) {
stop("BM should have length equal to nb_s (", nb_s, ")")
}
if (dim(fw)[1] != dim(fw)[2]) {
stop("Food web matrix is not a square matrix")
}
if (nb_s != ncol(fw)) {
stop("Number of species and food web matrix do not match")
}
model <- methods::new(Unscaled, nb_s, nb_b)
# THIS WE CAN EVEN PUT IN THE CONSTRUCTOR, PERHAPS?
model[["BM"]] <- BM
model[["log_BM"]] <- log10(BM)
model[["fw"]] <- fw
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/create_model.R |
#' Default parameters as in Schneider et al. (2016)
#'
#' A dataset containing the default parameters as in the Schneider et al. (2016)
#' and used to parametrize the default models. See also
#' \code{create_model_Unscaled_nuts}, \code{create_Lmatrix},
#' \code{initialise_default_Unscaled_nuts}.
#'
#' @format A list with the default parameters:
#' \describe{
#' \item{Temperature}{ambient temperature in Celsius}
#' \item{T.K}{default temperature, 20 degree Celsius in Kelvin}
#' \item{k}{Boltzmann's constant}
#' \item{T0}{20 degree Celsius in Kelvin, used to estimate scaling law of metabolic rates}
#' \item{q}{Hill's exponent of the functional response}
#' \item{Ropt}{consumer/resource optimal body mass ratio}
#' \item{gamma}{shape of the Ricker function}
#' \item{mu_c}{average predator interference}
#' \item{sd_c}{standard deviation of predator interference}
#' \item{E.c}{Activation energy for interference}
#' \item{h0}{scaling constant of the power-law of handling time with consumer and resource body mass}
#' \item{hpred}{exponent associated to predator body mass for the allometric scaling of handling time}
#' \item{hprey}{exponent associated to prey body mass for the allometric scaling of handling time}
#' \item{E.h}{Activation energy for handling time}
#' \item{b0}{normalisation constant for capture coefficient}
#' \item{bprey}{exponent associated to prey body mass for the allometric scaling of capture coefficient}
#' \item{bpred}{exponent associated to predator body mass for the allometric scaling of capture coefficient}
#' \item{E.b}{Activation energy for capture coefficient}
#' \item{e_P}{Assimilation efficiency associated to the consumption of a plant species}
#' \item{e_A}{Assimilation efficiency associated to the consumption of an animal species}
#' \item{x_P}{scaling constant of the power-law of metabolic demand per unit of plant biomass}
#' \item{x_A}{scaling constant of the power-law of metabolic demand per unit of animal biomass}
#' \item{E.x}{Activation energy for metabolic rates}
#' \item{expX}{TBD}
#' \item{D}{turnover rate of nutrients}
#' \item{nut_up_min}{Minimum uptake efficiency of plants}
#' \item{nut_up_max}{Maximum uptake efficiency of plants}
#' \item{mu_nut}{Average maximum nutrient concentration}
#' \item{sd_nut}{standard deviation of maximum nutrient concentration}
#' \item{v}{relative content of nutrient 1 in plant biomass}
#' }
#'
#' @references Schneider, F. D., Brose, U., Rall, B. C., & Guill, C. (2016).
#' Animal diversity and ecosystem functioning in dynamic food webs. Nature
#' Communications, 7(1), 1-8.
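#'
#' @examples
#' # a quick look at some of the defaults (assumes the dataset lazy-loads
#' # with the package)
#' schneider$q
#' schneider$Ropt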
"schneider" | /scratch/gouwar.j/cran-all/cranData/ATNr/R/data.R |
#' @title Make parameter matrix
#'
#' @param BM float vector, body mass of species.
#' @param b0 numeric, normalisation constant of the allometric power-law.
#' @param bprey numeric, exponent associated to prey (resource) body mass.
#' @param bpred numeric, exponent associated to predator (consumer) body mass.
#' @param E numeric, activation energy.
#' @param T.K numeric, ambient temperature in Kelvin.
#' @param T0 numeric, reference temperature in Kelvin.
#' @param k numeric, Boltzmann constant.
#'
#' @return A matrix filled with estimated values
#' for a model parameter that depends on prey and predator body masses (see details)
#'
#' @export
#'
#' @details Make a parameter matrix that depends on both predators
#' and prey and that is used to define attack rates and handling
#' times based on the general allometric equation:
#' \deqn{p_{i,j} = b_0 * BM_i^{bprey} * BM_j^{bpred} * exp(-E * (T0-T.K) / (k * T.K * T0))}
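#'
#' @examples
#' # A minimal sketch; the constants are illustrative assumptions, loosely
#' # based on the handling-time defaults stored in the 'schneider' dataset.
#' bm <- matrix(10 ^ (1:4), ncol = 1) # body masses as a one-column matrix
#' h <- create_matrix_parameter(bm, b0 = 0.4, bprey = -0.66, bpred = -0.48,
#'                              E = 0.26, T.K = 293.15, T0 = 293.15,
#'                              k = 8.6173324e-5)
#' dim(h) # one row per resource, one column per consumer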
create_matrix_parameter <- function(
BM,
b0,
bprey,
bpred,
E,
T.K,
T0,
k
) {
# create a matrix of link specific parameters (i.e that depend on both prey and predators).
# Classically, this is needed to define attack rates or handling times.
# based on the general allometric equation:
# p_{i,j} = b_0*BM_i^{bprey}*BM_j^{bpred}*exp(-E*(T0-T.K)/(k*T.K*T0))
nb_s <- length(BM)
M <- matrix(1, nrow = nb_s, ncol = nb_s)
return(b0 * ((M * BM[, 1])^bprey * t(M * BM[, 1])^bpred) *
exp(-E * (T0 - T.K) / (k * T.K * T0)))
}
#' @title Default model parameters as in Schneider et al. 2016
#'
#' @description Initialise the default parametrisation for the model for
#' Schneider et al. (2016).
#' @param model an object of class \emph{ATN (Rcpp_Unscaled_nuts}.
#' @param L.mat numeric matrix, probability of a consumer to attack and capture an encountered resource. See \code{\link{create_Lmatrix}}.
#' @param temperature numeric, ambient temperature of the ecosystem in Celsius.
#'
#' @export
#'
#' @references Schneider, F. D., Brose, U., Rall, B. C., & Guill, C. (2016).
#' Animal diversity and ecosystem functioning in dynamic food webs. Nature
#' Communications, 7(1), 1-8.
#'
#' @return An object of class \emph{ATN (Rcpp_Unscaled_nuts)} with default
#' parameters as in Schneider et al. (2016).
#'
#' @examples
#' library(ATNr)
#' set.seed(123)
#' masses <- runif(20, 10, 100) #body mass of species
#' L <- create_Lmatrix(masses, 10, Ropt = 10)
#' L[L > 0] <- 1
#' mod <- create_model_Unscaled_nuts(20, 10, 3, masses, L)
#' mod <- initialise_default_Unscaled_nuts(mod, L)
#'
initialise_default_Unscaled_nuts <- function(
model,
L.mat,
temperature = 20
) {
utils::data("schneider", envir = environment())
schneider[["nb_s"]] <- model$nb_s
schneider[["nb_b"]] <- model$nb_b
schneider[["nb_n"]] <- model$nb_n
schneider[["BM"]] <- model$BM
schneider[["T.K"]] <- temperature + 273.15
  # w parameter: how a predator splits its foraging time on the different
  # species. By default a predator splits its foraging time equally among
  # its prey; the sum of w values should equal 1 for a given predator.
model$ext <- 1e-6
w <- sweep(x = model$fw, MARGIN = 2, FUN = "/", colSums(model$fw))
model$w <- w[, (model$nb_b + 1):model$nb_s]
# Plant nutrient uptake efficiency
model$K <- with(schneider,
matrix(stats::runif(nb_b * nb_n, nut_up_min, nut_up_max),
nrow = nb_n, ncol = nb_b))
# turnover rate of the nutrients
model$D <- schneider$D
# maximal nutrient level
model$S <- with(schneider, stats::rnorm(nb_n, mu_nut, sd_nut))
# growth rate of the basal species
model$r <- with(schneider, BM[1 : nb_b] ^ -0.25 * exp(-0.22 * (T0 - T.K) / (k * T.K * T0)))
# per gram metabolic rate
model$X <- with(schneider,
c(rep(x_P, nb_b), rep(x_A, nb_s - nb_b)) *
BM^-0.25 * exp(-0.69 * (T0 - T.K) / (k * T.K * T0)))
# species efficiencies
model$e <- with(schneider, c(rep(e_P, nb_b), rep(e_A, nb_s - nb_b)))
  # species specific capture rate (encounter rate * predation success)
model$b <- with(schneider, create_matrix_parameter(BM, b0, bprey, bpred, E.b, T.K, T0, k)) * L.mat
model$b <- model$b[, (model$nb_b + 1):model$nb_s]
# specific values for plants: BM^beta = 20
model$b[1:model$nb_b, ] <- with(schneider,
t(replicate(model$nb_b, 20 * model$BM[(model$nb_b + 1):nrow(model$BM), 1]^bpred)) *
L.mat[1:model$nb_b, (model$nb_b + 1):model$nb_s]
)
# interference competition
model$c <- with(schneider, stats::rnorm(nb_s - nb_b, mu_c, sd_c) * exp(-0.65 * (T0 - T.K) / (k * T.K * T0)))
# handling time
model$h <- with(schneider, create_matrix_parameter(BM, h0, hprey, hpred, E.h, T.K, T0, k))
model$h <- model$h[, (model$nb_b + 1):model$nb_s]
# Hill exponent
model$q <- stats::rnorm(model$nb_s - model$nb_b, 1.5, 0.2)
  # plant stoichiometry (relative nutrient content). NOTE: to update; currently assumes 2 nutrients only
model$V <- with(schneider,
matrix(stats::runif(nb_b * nb_n, 1, 2), nrow = nb_n, ncol = nb_b))
model$V <- sweep(x = model$V, MARGIN = 2, FUN = "/", colSums(model$V))
  # growth rate of plant species. NOTE: temperature independent right now
model$r <- with(schneider, BM[1:nb_b]^-0.25)
# initialisation of the matrix of feeding rates.
# all values are 0 for now. Updated at each call of the ODEs estimations.
model$F <- with(schneider, matrix(0.0, nrow = nb_s, ncol = nb_s - nb_b))
return(model)
}
#' @title Default parameters for the scaled version of ATN as in Delmas et al.
#' 2016
#'
#' @description Initialise the default parametrisation for the scaled version of
#' the ATN model as in Delmas et al. (2016).
#'
#' @param model an object of class \emph{Rcpp_Scaled}.
#'
#' @export
#'
#' @references Delmas, E., Brose, U., Gravel, D., Stouffer, D.B. and Poisot, T.
#' (2017), Simulations of biomass dynamics in community food webs. Methods
#' Ecol Evol, 8: 881-886. https://doi.org/10.1111/2041-210X.12713
#'
#' @return An object of class \emph{Rcpp_Scaled} with default
#' parameters as in Delmas et al. (2017).
#'
#' @examples
#' library(ATNr)
#' set.seed(123)
#' masses <- runif(20, 10, 100) #body mass of species
#' L <- create_Lmatrix(masses, 10, Ropt = 10)
#' L[L > 0] <- 1
#' mod <- create_model_Scaled(20, 10, BM = masses, fw = L)
#' mod <- initialise_default_Scaled(mod)
#'
initialise_default_Scaled <- function(model) {
utils::data("schneider", envir = environment())
schneider[["nb_s"]] <- model$nb_s
schneider[["nb_b"]] <- model$nb_b
schneider[["BM"]] <- model$BM
# allometric constant for growth rate:
ar <- 1
# Carrying capacity for basal species i.e. nutrient pool accessible to ALL species simultaneously
K <- 10
  # w parameter: how a predator splits its foraging time on the different
  # species. By default a predator splits its foraging time equally among
  # its prey; the sum of w values should equal 1 for a given predator.
w <- sweep(x = model$fw, MARGIN = 2, FUN = "/", colSums(model$fw))
model$w <- w[, (model$nb_b + 1):model$nb_s]
# per gram metabolic rate
min.BM = with(schneider, min(BM[1:nb_b]))
model$X <- with(schneider, 0.314 * BM^-0.25 / min.BM^-0.25)
# model$X[1:schneider$nb_b] <- 0.0
# species efficiencies
model$e <- with(schneider, c(rep(e_P, nb_b), rep(e_A, nb_s - nb_b)))
# interference competition
model$c <- rep(0.8, model$nb_s - model$nb_b)
# max feeding rate
model$max_feed <- rep(8, model$nb_s - model$nb_b)
  # half saturation density:
model$B0 <- rep(0.5, model$nb_s - model$nb_b)
# Hill exponent
model$q <- rep(1.2, model$nb_s - model$nb_b)
# max growth rate of plant species
model$r <- with(schneider, (ar * BM[1:nb_b]^-0.25) / (ar * min.BM^-0.25))
# max carrying capacity of all plant species
model$K <- 10
# initialisation of the matrix of feeding rates.
# all values are 0 for now. Updated at each call of the ODEs estimations.
model$F <- with(schneider, matrix(0.0, nrow = model$nb_s, ncol = model$nb_s - model$nb_b))
  # plant resource competition, matrix should be symmetric by default
# model$alpha = matrix(runif(model$nb_b*model$nb_b, 0.5, 1), nrow = model$nb_b, ncol = model$nb_b)
# model$alpha[lower.tri(model$alpha)] = t(model$alpha)[lower.tri(model$alpha)]
model$alpha <- matrix(0, nrow = model$nb_b, ncol = model$nb_b)
diag(model$alpha) = 1
model$ext <- 1e-6
return(model)
}
#' @title Default parameters for the scaled version of ATN as in Binzer et al.
#' 2016, with updates from Gauzens et al. 2020
#'
#' @description Initialise the default parametrisation for the scaled version of
#' the ATN model as in Binzer et al. (2016), with updates from Gauzens et al. 2020
#'
#' @param model an object of class \emph{ATN (Rcpp_Unscaled)}.
#' @param temperature numeric, ambient temperature of the ecosystem in Celsius.
#'
#' @export
#'
#' @references Binzer, A., Guill, C., Rall, B. C. & Brose, U.
#' Interactive effects of warming, eutrophication and size structure: impacts on biodiversity and food-web structure.
#' Glob. Change Biol. 22, 220-227 (2016).
#' Gauzens, B., Rall, B.C., Mendonca, V. et al.
#' Biodiversity of intertidal food webs in response to warming across latitudes.
#' Nat. Clim. Chang. 10, 264-269 (2020). https://doi.org/10.1038/s41558-020-0698-z
#'
#' @return An object of class \emph{ATN (Rcpp_Unscaled)} with default
#' parameters as in Binzer et al. (2016).
#'
#' @examples
#' library(ATNr)
#' set.seed(123)
#' masses <- runif(20, 10, 100) #body mass of species
#' L <- create_Lmatrix(masses, 10, Ropt = 10)
#' L[L > 0] <- 1
#' mod <- create_model_Unscaled(20, 10, masses, L)
#' mod <- initialise_default_Unscaled(mod)
initialise_default_Unscaled <- function(model, temperature = 20){
k <- 8.6173324e-5
T0 <- 293.15
T.K <- temperature + 273.15
model$X <- exp(-16.54) * model$BM^-0.31 * exp(-0.69 * (T0 - T.K) / (k * T.K * T0))
e_P <- 0.545
e_A <- 0.906
model$e <- c(rep(e_P, model$nb_b), rep(e_A, model$nb_s - model$nb_b))
model$c <- rep(0.8, model$nb_s - model$nb_b) * exp(-0.65 * (T0 - T.K) / (k * T.K * T0))
model$r <- exp(-15.68) * model$BM[1:model$nb_b]^-0.25 * exp(-0.84 * (T0 - T.K) / (k * T.K * T0))
model$K <- 40 * model$BM[1:model$nb_b]^0.28 * exp(0.71 * (T0 - T.K) / (k * T.K * T0))
  # here attack rates decrease with consumer BM, and handling times increase
  # with consumer BM. Is it an error in Binzer et al.?
model$a <- create_matrix_parameter(model$BM, exp(-13.1), 0.25, -0.8, -0.38, T.K, T0, k)
model$a <- model$a * model$fw
model$a <- model$a[, (model$nb_b + 1):model$nb_s]
model$h <- create_matrix_parameter(model$BM, exp(9.66), -0.45, 0.47, -0.26, T.K, T0, k)
model$h <- model$h * model$fw
model$h <- model$h[, (model$nb_b + 1):model$nb_s]
model$q <- rep(1.2, model$nb_s - model$nb_b)
  # plant resource competition, matrix should be symmetric by default
# model$alpha = matrix(runif(model$nb_b*model$nb_b, 0.5, 1), nrow = model$nb_b, ncol = model$nb_b)
# model$alpha[lower.tri(model$alpha)] = t(model$alpha)[lower.tri(model$alpha)]
model$alpha <- matrix(0, nrow = model$nb_b, ncol = model$nb_b)
diag(model$alpha) = 1
model$ext <- 1e-6
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/default_initialisations.R |
# Create default parameters data.
# This is only for development purposes: do not use directly.
###############################################################
##### definition of global parameters #########################
###############################################################
# Temperature <- 20
# schneider <- list(
# Temperature = Temperature,
# T.K = Temperature + 273.15,
# k = 8.6173324e-5,
# T0 = 293.15,
#
# q = 1.2,
#
# # used to generate L, foodWeb, bm
# # optimal consumer-resource body-mass ratio
# Ropt = 100,
# # Ricker function width
# gamma = 2,
#
# # used in feeding function
#   # consumer interference, proportion of time a consumer spends encountering con-specifics
# mu_c = 0.8,
# sd_c = 0.2,
# E.c = -0.65,
# # used in Handling Time (h)
# h0 = 0.4,
# hpred = rnorm(1,-0.48,0.03),
# hprey = rnorm(1,-0.66,0.02),
# E.h = 0.26,
#
# # feeding rates:
# b0 = 50,
# bprey = rnorm(1, 0.15, 0.03),
# bpred = rnorm(1, 0.47, 0.04),
# E.b = -0.38,
# # used in calculating change in biomass of Animal or Plant
# # NOTE called conversion efficiency in Schneider et al., 2016
# # assimilation efficiency herbivore
# e_P = 0.545, # NOTE From Lang et al., 2017 ORIGINAL value 0.45 from Schneider et al., 2016
# # assimilation efficiency carnivore
# e_A = 0.906, # NOTE From Lang et al., 2017 ORIGINAL value 0.85 from Schneider et al., 2016
# # metabolic rate scaling constant of plants
# x_P = 0.141, # ORIGINAL value 0.138 from Schneider et al., 2016
# # metabolic rate scaling constant of animals
# x_A = 0.314, # NOTE ORIGINAL value 0.314 from Schneider, 0.305 From Ehmes et al., 2011
# E.x = -0.69,
# expX = -0.305,
#
# #used in calculating change in nutrient concentration
# # global nutrient turn over rate (rate of replenishment)
# D = 0.25,
# # min and max nutrient uptake efficiencies
# nut_up_min = 0.1,
# nut_up_max = 0.2,
# # nutrient 'densities'
# mu_nut = 10,
# sd_nut = 2,
# # plant nutrient proportions:
# v1 = 1,
# v2 = 0.5
# )
#
# usethis::use_data(schneider, overwrite = TRUE)
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/default_parameters.R |
#' Filter Extinct Species
#'
#' @param df deSolve matrix as returned from lsoda_wrapper().
#' @param model ATNr model object, from which extinction threshold is extracted.
#'
#' @details Set to zero species biomass that are below the extinction threshold.
#'
#' @return df with biomasses set to zero for each species from the time it first drops below the extinction threshold.
.filter_extinct <- function(df, model) {
  # indices (row = time step, col = species) where biomass is below the threshold
  ext <- which(df[, -1] < model$ext, arr.ind = TRUE)
  # for each such species, zero its biomass from the first time it drops below
  # the threshold until the end of the simulation (j + 1 skips the time column)
  for (j in unique(ext[, 2])) {
    df[min(ext[ext[, 2] == j, 1]):nrow(df), j + 1] <- 0
  }
  return(df)
}
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/filter_extinct.R |
#' @title Create a food web based on the niche model
#'
#' @description Function to generate a food web based on the niche model
#' (Williams and Martinez, 2000) based on the number of species and
#' connectance. Corrections from Allesina et al. (2008) are used.
#' @details If at least one species has no resource or consumer (i.e. it is an
#'   isolated species), if trophic levels cannot be computed, or if the web is
#'   disconnected, another food web is generated, up to a maximum of 100
#'   iterations.
#' @param S integer, number of species.
#' @param C numeric, connectance i.e. the number of realized links over all
#'   possible links.
#' @export
#' @return A (square) matrix with zeros (no interaction) and ones (species j
#' consume species i).
#' @references Williams, R. J., & Martinez, N. D. (2000). Simple rules yield
#' complex food webs. Nature, 404(6774), 180-183.
#'
#' Allesina, S., Alonso, D., & Pascual, M. (2008). A general model for food
#' web structure. science, 320(5876), 658-661.
#' @examples
#' set.seed(123)
#' web_niche <- create_niche_model(30, .1)
#' image(t(web_niche))
create_niche_model <- function(S, C) {
stopifnot(S > 0 && C > 0)
niche_model <- function(S, C) {
# niches of species
niche <- sort(stats::runif(S))
# feeding ranges, using the correction from Allesina et al. (2008)
if (((S - 1) / (2 * S * C)) - 1 < 0) {
stop("Beta distribution parameter < 0. Try to decrease C.")
}
diet <- stats::rbeta(S, 1, ((S - 1) / (2 * S * C)) - 1) * niche
# feeding center, using the correction from Allesina et al. (2008)
center <- sapply(seq_len(S),
function(i) {
n <- niche[i]
r <- diet[i]
ifelse(n + r / 2 <= 1,
stats::runif(1, r / 2, n),
stats::runif(1, r / 2, 1 - r / 2)
)
})
species <- seq_len(S)
# create food web adjacency matrix
fw <- matrix(rep(0, S ^ 2), S, S)
for (sp in species) {
preys <- (center[sp] - diet[sp] / 2 <= niche) &
(niche <= center[sp] + diet[sp] / 2)
fw[preys, sp] <- 1
}
return(fw)
}
fw <- niche_model(S, C)
# check for isolated species
  isolated <- any(colSums(fw) + rowSums(fw) == 0)
# check if trophic levels can be calculated
tro_lev <- tryCatch(ATNr::TroLev(fw), error = function(e) NULL)
# check is fw is connected
connected <- is_connected(fw)
i <- 0
while((isolated | is.null(tro_lev) | !connected) & i < 100) {
fw <- niche_model(S, C)
    # check first if isolated, then TL calculation, then detection of connected components;
    # no need to run all 3 each time as one failing check is enough to reject
    isolated <- any(colSums(fw) + rowSums(fw) == 0)
if (!isolated) {
tro_lev <- tryCatch(ATNr::TroLev(fw), error = function(e) NULL)
}
if (!isolated & !is.null(tro_lev)){
connected <- is_connected(fw)
}
i <- i + 1
}
if (isolated) warning("Presence of an isolated species after 100 iterations.")
  if (is.null(tro_lev)) warning("Trophic levels cannot be calculated after 100 iterations.")
  if (!is_connected(fw)) warning("Several connected components detected.")
# reorder matrix to put basal species first
basals <- which(colSums(fw) == 0)
consumers <- which(colSums(fw) > 0)
fw <- fw[c(basals, consumers), c(basals, consumers)]
return(fw)
}
#' @title Make L matrix
#'
#' @param BM float vector, body mass of species.
#' @param nb_b integer, number of basal species.
#' @param Ropt numeric, consumer/resource optimal body mass ratio.
#' @param gamma numeric, code for the width of the Ricker function.
#' @param th float, the threshold below which attack rates are considered = 0.
#' @export
#' @details The L matrix contains the probability for an attack event to be
#' successful based on allometric rules and a Ricker function defined by
#'   \emph{Ropt} and \emph{gamma}. If at least one species has no resource or
#'   consumer (i.e. it is an isolated species), if a consumer has no prey, or if
#'   the web is disconnected, a warning is raised.
#'
#' @return A numeric matrix with the probability for an attack event between two
#' species to be successful.
#'
#' @examples
#' set.seed(123)
#' mass <- sort(10 ^ runif(30, 2, 6))
#' L <- create_Lmatrix(mass, nb_b = 10, Ropt = 100)
#' image(t(L))
create_Lmatrix <- function(
BM,
nb_b,
Ropt = 100,
gamma = 2,
th = 0.01
) {
stopifnot(all(BM > 0) && nb_b >= 0 && Ropt > 0 && gamma > 0 && th >= 0)
Lmatrix <- function(BM, nb_b, Ropt, gamma, th) {
s <- length(BM)
L <- matrix(rep(BM, s), s, s, byrow = TRUE) /
(matrix(rep(BM, s), s, s) * Ropt)
L <- (L * exp(1 - L)) ^ gamma
L[L < th] <- 0
L[, 1:nb_b] <- 0
return(L)
}
s <- length(BM)
L <- Lmatrix(BM, nb_b, Ropt, gamma, th)
# check for isolated species
  isolated <- any(colSums(L) + rowSums(L) == 0)
# check for isolated consumers
  cons_no_prey <- any(colSums(L[, (nb_b + 1) : s]) == 0)
# check if trophic levels can be calculated
tro_lev <- tryCatch(ATNr::TroLev(L), error = function(e) NULL)
# check for different connected components
connected <- ATNr::is_connected(L) #BUG - add namespace to avoid conflicts with igraph::is_connected()
if (is.null(tro_lev)) warning("Cannot compute trophic levels.")
if (isolated) warning("Presence of an isolated species.")
if (cons_no_prey) warning("Presence of consumer without prey.")
  if (!connected) warning("Several connected components detected.")
return(L)
}
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/fw_generative_models.R |
#' @title Estimate the Jacobian matrix of a ODE system
#'
#' @param bioms float vector, biomass of species.
#' @param ODE function that computes the ODEs from one of the model available
#' @param eps float, scale precision of the numerical approximation.
#' @export
#' @details The function provides a numerical estimation of the Jacobian matrix
#' based on the five-point stencil method. The precision of the method is in \eqn{O(h^4)},
#' where \eqn{h = eps*bioms}. The choice of eps should ensure that \eqn{h^4}
#' is always lower than the extinction threshold.
#'
#' The dimensions of the Jacobian matrix do not always match the number of species in the system.
#' This is because we consider that a perturbation cannot correspond to the recolonisation of an extinct species.
#' Therefore, extinct species are removed from the system to calculate the Jacobian matrix.
#' @return A matrix corresponding to the Jacobian of the system estimated at the parameter biomasses
#'
#'
#' @examples
#' library(ATNr)
#' set.seed(123)
#' # first run a model to reach equilibrium
#' masses <- runif(20, 10, 100) #body mass of species
#' L <- create_Lmatrix(masses, 10, Ropt = 10)
#' L[L > 0] <- 1
#' mod <- create_model_Unscaled_nuts(20, 10, 3, masses, L)
#' mod <- initialise_default_Unscaled_nuts(mod, L)
#' biomasses <- masses ^ -0.75 * 10 ^ 4 #biomasses of species
#' biomasses <- append(runif(3, 20, 30), biomasses)
#' times <- seq(0, 100, 1)
#' sol <- lsoda_wrapper(times, biomasses, mod)
#' # get the final biomasses
#' final.bioms = sol[nrow(sol), -1]
#' # estimate jacobian
#' jacobian(final.bioms, mod$ODE)
jacobian <- function(bioms, ODE, eps = 1e-6){
nb_s <- length(bioms)
Jacob <- matrix(NA, nrow = nb_s, ncol = nb_s)
for (cons in 1:nb_s){
# h: magnitude of the perturbation
h <- eps * bioms[cons];
# generate the vectors with a slight variation applied
hs <- bioms
hs2 <- bioms
mhs <- bioms
mhs2 <- bioms
    # here I consider that extinct species can't perturb the system (they are gone)
    # so I apply the perturbation only for non-extinct species
if (bioms[cons] > 0){
hs[cons] <- hs[cons] + h;
hs2[cons] <- hs2[cons] + 2 * h;
mhs[cons] <- mhs[cons] - h;
mhs2[cons] <- mhs2[cons] - 2 * h;
}
# compute the local derivatives
res.values <- (-ODE(hs2, 0.0) + 8*ODE(hs, 0.0) - 8*ODE(mhs, 0.0) + ODE(mhs2, 0.0)) / (12*h)
# fill the jacobian matrix
Jacob[, cons] <- res.values
}
return(Jacob[bioms > 0, bioms > 0])
}
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/jacobian.R |
#' @title Plot food web dynamics
#'
#' @description Plot solution of the ODE for the food web. Currently only
#' species and not nutrients are plotted.
#'
#' @param x matrix with solutions. First row should be the time vector.
#' @param nb_s numeric, number of species as in the model (e.g.,
#' \code{create_model_Unscaled_nuts}).
#'
#' @return No return value, called for side effects.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' library(ATNr)
#' library(deSolve)
#' set.seed(123)
#' # number of species, nutrients, and body masses
#' n_species <- 20
#' n_basal <- 5
#' n_nutrients <- 3
#' masses <- sort(10^runif(n_species, 2, 6)) #body mass of species
#' # create food web matrix
#' L <- create_Lmatrix(masses, n_basal)
#' L[, 1:n_basal] <- 0
#' fw <- L
#' fw[fw > 0] <- 1
#' model <- create_model_Unscaled_nuts(
#' n_species,
#' n_basal,
#' n_nutrients,
#' masses,
#' fw
#' )
#' # initialize model as default in Schneider et al. (2016)
#' model <- initialise_default_Unscaled_nuts(model, L)
#' # defining integration time
#' times <- seq(0, 500, 5)
#' biomasses <- runif(n_species + n_nutrients, 2, 3)
#' sol <- lsoda_wrapper(times, biomasses, model, verbose = FALSE)
#' plot_odeweb(sol, model$nb_s)
#' }
plot_odeweb <- function(x, nb_s) {
stopifnot((ncol(x) - 1) >= nb_s)
pal <- grDevices::colorRampPalette(c("blue", "red"))(nb_s)
pal <- grDevices::adjustcolor(pal, alpha.f = .5)
plot(c(0, max(x[, 1])), #xlim
c(0, max(x[, c((ncol(x) - nb_s + 1) : ncol(x))])), #ylim
frame = FALSE,
xlab = "Time",
ylab = "Biomass",
col = NA)
for (i in seq(ncol(x) - nb_s + 1, ncol(x))) {
graphics::points(x[, 1], x[, i], col = pal[i - ncol(x) + nb_s], pch = 20, cex = .5)
    graphics::lines(x[, 1], x[, i], col = pal[i - ncol(x) + nb_s], lwd = 1)
}
}
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/plot_odeweb.R |
#' @title Function to remove species from a model class
#'
#' @param species integer vector, the indices of species to remove.
#' @param model model object
#' @param nuts integer vector, the indices of nutrients to remove. Parameter
#' specific to the Unscaled_nuts model.
#' @export
#' @return A model object where the data structure has been updated to remove
#'   the species given in parameters.
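#' @examples
#' # A minimal sketch using the Scaled model (values are illustrative):
#' library(ATNr)
#' set.seed(123)
#' masses <- runif(20, 10, 100)
#' L <- create_Lmatrix(masses, 10, Ropt = 10)
#' L[L > 0] <- 1
#' mod <- create_model_Scaled(20, 10, BM = masses, fw = L)
#' mod <- initialise_default_Scaled(mod)
#' # remove the two largest consumers (species 19 and 20)
#' mod2 <- remove_species(c(19, 20), mod)
#' mod2$nb_s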
remove_species = function(species, model, nuts = NULL){
if (class(model)[1] %in% c("Rcpp_Scaled", "Rcpp_Scaled_loops")){
model2 = .remove_Scaled(species, model)
} else if (class(model)[1] %in% c("Rcpp_Unscaled_nuts", "Rcpp_Unscaled_nuts_loops")) {
model2 = .remove_Unscaled_nuts(species, model, nuts)
} else if (class(model)[1] %in% c("Rcpp_Scaled", "Rcpp_Scaled_loops")) {
model2 = .remove_Scaled(species, model)
} else {
stop("model must be a model class from the ATNr package")
}
return(model2)
}
#' Internal function to remove species from a Scaled model
#'
#'@keywords internal
#'
.remove_Scaled <- function(species, model) {
  # consumers: indices of removed consumers, relative to consumer-only arrays (basal species excluded)
consumers <- species[species > model$nb_b] - model$nb_b
# index of basal species
basals <- species[species <= model$nb_b]
  new.nb_b <- model$nb_b - length(basals)
new.nb_s <- model$nb_s - length(species)
model2 <- methods::new(Scaled, new.nb_s, new.nb_b)
model2$fw <- model$fw[-species, -species]
model2$BM <- model$BM[-species]
model2$X <- model$X[-species]
model2$e <- model$e[-species]
  # c and q are defined per consumer and must be subset accordingly
  if (length(consumers) > 0) {
    model2$c <- model$c[-consumers]
    model2$q <- model$q[-consumers]
  } else {
    model2$c <- model$c
    model2$q <- model$q
  }
model2$dB <- model$dB[-species]
  # mat[-x, -y] or vec[-x] return empty objects when x or y are numeric(0),
  # hence the explicit checks on consumers and basals below.
if (length(consumers) > 0) {
model2$B0 <- model$B0[-consumers]
model2$F <- model$F[-species, -consumers]
model2$max_feed <- model$max_feed[-consumers]
model2$w <- model$w[-species, -consumers]
}else{
model2$B0 <- model$B0
model2$F <- model$F[-species, ]
model2$max_feed <- model$max_feed
model2$w <- model$w[-species, ]
}
# same checks for basals
if (length(basals) > 0) {
model2$r <- model$r[-basals]
}else{
model2$r <- model$r
}
return(model2)
}
#' Internal function to remove species from a Unscaled_nuts model
#'
#'@keywords internal
.remove_Unscaled_nuts <- function(species, model, nuts = NULL) {
  # consumers: indices of removed consumers, relative to consumer-only arrays (basal species excluded)
# index of basal species
basals <- species[species <= model$nb_b]
  new.nb_b <- model$nb_b - length(basals)
new.nb_s <- model$nb_s - length(species)
new.nb_n <- model$nb_n
  if (length(nuts) > 0) {
    new.nb_n <- model$nb_n - length(nuts)
  }
model2 <- methods::new(Unscaled_nuts, new.nb_s, new.nb_b, new.nb_n)
model2$fw <- model$fw[-species, -species]
model2$BM <- model$BM[-species]
model2$X <- model$X[-species]
model2$e <- model$e[-species]
  # c and q are defined per consumer and must be subset accordingly
  if (length(consumers) > 0) {
    model2$c <- model$c[-consumers]
    model2$q <- model$q[-consumers]
  } else {
    model2$c <- model$c
    model2$q <- model$q
  }
  # dB contains nutrients first, then species (hence the nb_n offset)
  if (is.null(nuts)) {
    model2$dB <- model$dB[-(species + model$nb_n)]
  } else {
    model2$dB <- model$dB[-c(nuts, species + model$nb_n)]
  }
  # mat[-x, -y] or vec[-x] return empty objects when x or y are numeric(0),
  # hence the explicit checks on consumers and basals below.
if (length(consumers) > 0){
model2$F <- model$F[-species, -consumers]
model2$b <- model$b[-species,-consumers]
model2$w <- model$w[-species, -consumers]
model2$h <- model$h[-species,-consumers]
  }else{
    model2$F <- model$F[-species, ]
    model2$w <- model$w[-species, ]
    model2$b <- model$b[-species, ]
    model2$h <- model$h[-species, ]
  }
# same checks for basals
if (length(basals) > 0){
    model2$r <- model$r[-basals]
model2$K <- model$K[, -basals]
model2$V <- model$V[, -basals]
}else{
model2$r <- model$r
model2$K <- model$K
model2$V <- model$V
}
  # now nutrients; apply on model2 so that the basal subsetting above is kept
  if (!is.null(nuts)) {
    model2$K <- model2$K[-nuts, , drop = FALSE]
    model2$S <- model$S[-nuts]
    model2$V <- model2$V[-nuts, , drop = FALSE]
  }
return(model2)
}
#' Internal function to remove species from a Unscaled model
#'
#' @keywords internal
.remove_Unscaled = function(species, model) {
  # consumers: indices of removed consumers, relative to consumer-only arrays (basal species excluded)
# index of basal species
basals <- species[species <= model$nb_b]
  new.nb_b <- model$nb_b - length(basals)
new.nb_s <- model$nb_s - length(species)
  model2 <- methods::new(Unscaled, new.nb_s, new.nb_b)
model2$fw <- model$fw[-species, -species]
model2$BM <- model$BM[-species]
model2$X <- model$X[-species]
model2$e <- model$e[-species]
  # c and q are defined per consumer and must be subset accordingly
  if (length(consumers) > 0) {
    model2$c <- model$c[-consumers]
    model2$q <- model$q[-consumers]
  } else {
    model2$c <- model$c
    model2$q <- model$q
  }
model2$dB <- model$dB[-species]
  # mat[-x, -y] or vec[-x] return empty objects when x or y are numeric(0),
  # hence the explicit checks on consumers and basals below.
if (length(consumers) > 0) {
model2$F <- model$F[-species, -consumers]
model2$a <- model$a[-species, -consumers]
model2$h <- model$h[-species, -consumers]
  }else{
    model2$F <- model$F[-species, ]
    model2$a <- model$a[-species, ]
    model2$h <- model$h[-species, ]
  }
# same checks for basals
if (length(basals) > 0) {
    model2$r <- model$r[-basals]
model2$K <- model$K[-basals]
}else{
model2$r <- model$r
model2$K <- model$K
}
return(model2)
}
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/remove_species.R |
#' @title Run checks on model parameters
#'
#' @description Check if the dimensions of vectors and matrices used in the model are correct.
#' If any dimension is not correct, an error message is returned.
#'
#' @param model a model object.
#' @param verbose Boolean, whether a message should be printed when all checks were successful
#'
#' @return No return value, only throw an error if parameters are inconsistent.
#'
#' @export
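#'
#' @examples
#' # A minimal sketch with a Scaled model (values are illustrative):
#' library(ATNr)
#' set.seed(123)
#' masses <- runif(20, 10, 100)
#' L <- create_Lmatrix(masses, 10, Ropt = 10)
#' L[L > 0] <- 1
#' mod <- create_model_Scaled(20, 10, BM = masses, fw = L)
#' mod <- initialise_default_Scaled(mod)
#' run_checks(mod)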
run_checks <- function(model, verbose = TRUE) {
if (class(model)[1] == "Rcpp_Scaled") {
with(model, {
      if (length(X) != nb_s) stop(" vector of metabolic rates ($X) misspecified")
      else if (length(max_feed) != nb_s - nb_b) stop(" vector of maximum feeding rates ($max_feed) misspecified")
      else if (length(e) != nb_s) stop(" vector of assimilation efficiencies ($e) misspecified")
      else if (length(r) != nb_b) stop(" vector of plant growth rates ($r) misspecified")
      else if (length(BM) != nb_s) stop(" vector of body masses ($BM) misspecified")
      else if (length(B0) != nb_s - nb_b) stop(" vector of half saturation densities ($B0) misspecified")
      else if (any(dim(fw) != c(nb_s, nb_s))) stop(" food web ($fw) dimension is incorrect")
      else if (any(dim(w) != c(nb_s, nb_s - nb_b))) stop(" dimensions of $w are incorrect")
      else if (any(dim(alpha) != c(nb_b, nb_b))) stop(" dimensions of plant competition matrix ($alpha) are incorrect")
})
if (verbose) message("All checks successful")
}
else if (class(model)[1] == "Rcpp_Unscaled_nuts") {
with(model, {
      if (any(dim(K) != c(nb_n, nb_b))) stop(" matrix of plant nutrient uptake efficiencies ($K) misspecified")
      else if (length(e) != nb_s) stop(" vector of assimilation efficiencies ($e) misspecified")
      else if (length(r) != nb_b) stop(" vector of plant growth rates ($r) misspecified")
      else if (length(BM) != nb_s) stop(" vector of body masses ($BM) misspecified")
      else if (length(S) != nb_n) stop(" vector of maximal nutrient levels ($S) misspecified")
      else if (any(dim(fw) != c(nb_s, nb_s))) stop(" food web ($fw) dimension is incorrect")
      else if (any(dim(w) != c(nb_s, nb_s - nb_b))) stop(" dimensions of $w are incorrect")
      else if (any(dim(b) != c(nb_s, nb_s - nb_b))) stop(" dimensions of attack rates matrix ($b) are incorrect")
      else if (any(dim(h) != c(nb_s, nb_s - nb_b))) stop(" dimensions of handling times matrix ($h) are incorrect")
      else if (length(X) != nb_s) stop(" vector of metabolic rates ($X) misspecified")
      else if (any(dim(V) != c(nb_n, nb_b))) stop(" matrix of relative nutrient content in the plant species' biomass ($V) misspecified")
})
if (verbose) message("All checks successfull")
}
else if (class(model)[1] == "Rcpp_Unscaled") {
with(model, {
      if (length(e) != nb_s) stop(" vector of assimilation efficiencies ($e) misspecified")
      else if (length(r) != nb_b) stop(" vector of plant growth rates ($r) misspecified")
      else if (length(BM) != nb_s) stop(" vector of body masses ($BM) misspecified")
      else if (any(dim(fw) != c(nb_s, nb_s))) stop(" food web ($fw) dimension is incorrect")
      else if (any(dim(a) != c(nb_s, nb_s - nb_b))) stop(" dimensions of attack rates matrix ($a) are incorrect")
      else if (any(dim(h) != c(nb_s, nb_s - nb_b))) stop(" dimensions of handling times matrix ($h) are incorrect")
      else if (length(X) != nb_s) stop(" vector of metabolic rates ($X) misspecified")
      else if (any(dim(alpha) != c(nb_b, nb_b))) stop(" dimensions of plant competition matrix ($alpha) are incorrect")
})
    if (verbose) message("All checks successful")
}
  # The following models are present for unit testing only. Not to be used.
  else if (!(class(model)[1] %in% c("Rcpp_Scaled_loops", "Rcpp_Unscaled_nuts_loops", "Rcpp_Unscaled_loops"))) stop(class(model)[1], " is not supported.")
}
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/run_checks.R |
#' @title Sort custom input
#'
#' @param BM numeric vector, body mass of species.
#' @param fw adjacency matrix of the food web.
#'
#' @export
#'
#' @details Body masses and food web matrix should be arranged with the first
#'   elements/columns being for basal species. This is a requirement for the Cpp
#'   class and must be enforced before initializing the model objects (e.g.
#'   Rcpp_Unscaled_nuts, Rcpp_Scaled).
#'
#' @return A list with sorted body masses (\emph{body.mass}) and food web
#' matrix (\emph{food.web}).
#'
#' @examples
#' bm <- runif(10, 10, 50)
#' fw <- matrix(as.numeric(runif(100) > .9), 10, 10)
#' sort_input(bm, fw)
sort_input <- function(BM, fw) {
stopifnot(nrow(fw) == ncol(fw))
stopifnot(length(BM) == nrow(fw))
basals <- which(colSums(fw) == 0)
consumers <- which(colSums(fw) > 0)
adj <- fw[c(basals, consumers), c(basals, consumers)]
bm <- BM[c(basals, consumers)]
return(list("body.mass" = bm, "food.web" = adj))
}
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/sort_input.R |
#' @title Calculate trophic level of species
#'
#' @param fw numeric matrix, the matrix of the food web.
#'
#' @export
#'
#' @return A numeric vector of species' trophic level.
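#'
#' @examples
#' # A minimal sketch: a three-species chain (species j consumes species i
#' # when fw[i, j] = 1); expected trophic levels are 1, 2 and 3
#' fw <- matrix(0, 3, 3)
#' fw[1, 2] <- 1
#' fw[2, 3] <- 1
#' TroLev(fw)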
TroLev <- function(fw) {
  # transpose so that rows correspond to consumers
  fw <- t(fw)
  # number of prey per species; set to 1 for basal species to avoid division by 0
  nn <- rowSums(fw); nn[nn == 0] <- 1
  # diet proportion matrix: each consumer's row sums to 1
  ww <- diag(1 / nn)
  L1 <- ww %*% fw
  # solve TL = 1 + mean trophic level of the prey, i.e. (L1 - I) %*% TL = -1
  L2 <- L1 - diag(rep(1, length(nn)))
  b <- -1 * rep(1, length(nn))
  Tro.lev <- solve(L2) %*% b
  return(Tro.lev)
}
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/trophic_level.R |
#' @title Wrapper for lsoda
#'
#' @description This is a wrapper to call \code{lsoda} from
#' \emph{deSolve} and solve the ODE.
#' Package \code{deSolve} needs to be installed to run
#' this wrapper.
#'
#' @param t vector of times.
#' @param y vector of biomasses.
#' @param model object of one of the ATN model classes (\emph{Rcpp_Scaled}, \emph{Rcpp_Unscaled} or \emph{Rcpp_Unscaled_nuts}).
#' @param verbose Boolean, whether a message should be printed when all checks were successful
#' @param ... additional arguments to pass to `lsoda`
#' @export
#'
#' @return A matrix for the ODE solution with species as columns and
#' times as rows.
#'
#' @examples
#' library(ATNr)
#' set.seed(123)
#' masses <- runif(20, 10, 100) #body mass of species
#' L <- create_Lmatrix(masses, 10, Ropt = 10)
#' L[L > 0] <- 1
#' mod <- create_model_Unscaled_nuts(20, 10, 3, masses, L)
#' mod <- initialise_default_Unscaled_nuts(mod, L)
#' biomasses <- masses ^ -0.75 * 10 ^ 4 #biomasses of species
#' biomasses <- append(runif(3, 20, 30), biomasses)
#' times <- seq(0, 100, 1)
#' sol <- lsoda_wrapper(times, biomasses, mod)
#' range(sol[, -1])
#' mod$ext <- 1e-3
#' sol <- lsoda_wrapper(times, biomasses, mod)
lsoda_wrapper <- function(t, y, model, verbose = FALSE, ...) {
if (is(model, "Rcpp_Unscaled") || is(model, "Rcpp_Scaled")) {
stopifnot(model$nb_s == length(y))
} else if (is(model, "Rcpp_Unscaled_nuts")) {
stopifnot(model$nb_s + model$nb_n == length(y))
} else {
stop("The model does not seem to be an ATNr model.")
}
  if (length(model$q) == 1){
    model$q = rep(model$q, model$nb_s - model$nb_b)
    warning("q is expected to be a vector of length = number of consumers, not a scalar. The same value was used for all consumers.")
  }
model$initialisations()
run_checks(model, verbose)
ans <- deSolve::lsoda(
y,
t,
func = function(t, y, pars) list(pars$ODE(y, t)),
model,
...
)
ans <- .filter_extinct(ans, model)
  return(ans)
}
# #' @title Wrapper for sundial
# #'
# #' @keywords internal
# #'
# #' @description This is a wrapper to call \code{cvode} from
# #' \emph{sundialr} and solve the ODE.
# #' Package \code{sundialr} needs to be installed to run
# #' this wrapper.
# #'
# #' @param t vector of times.
# #' @param y vector of biomasses.
# #' @param model object of class \emph{ATN (Rcpp_parameters_prefs)}.
# #'
# #' @return A matrix for the ODE solution with species as columns and
# #' times as rows.
# #'
# #' @examples
# #' library(ATNr)
# #' masses <- runif(50, 10, 100) #body mass of species
# #' L <- create_Lmatrix(masses, 10, Ropt = 50)
# #' L[L > 0] <- 1
# #' mod <- create_model_Unscaled_nuts(20, 10, 3, masses, L)
# #' mod <- initialise_default_Unscaled_nuts(mod, L)
# #' biomasses <- masses ^ -0.75 * 10 ^ 4 #biomasses of species
# #' biomasses <- append(runif(3, 20, 30), biomasses)
# #' times <- seq(0, 100, 1)
# #' sol <- sundial_wrapper(times, biomasses, mod)
# #' t <- times
# #' y <- biomasses
#
# #' sundialr::cvode(
# #' time_vector = 0.0, #time vectors
# #' IC = y, #initial conditions
# #' input_function = function(t, y, p) mod$ODE(y, t), #anonymous function to reorder input
# #' Parameters = c(0, 0) #this does nothing, but is necessary for compatibility
# #' )
#
# sundial_wrapper <- function(t, y, model) {
# wrapper.ODE <- function(t, y, p) {
# return(model$ODE(y, t))
# }
# sundialr::cvode(
# t,
# y,
# input_function = wrapper.ODE,
# c(0, 0)
# )
# }
| /scratch/gouwar.j/cran-all/cranData/ATNr/R/wrap_solvers.R |
Rcpp::loadModule("UnscaledModule", TRUE)
Rcpp::loadModule("Unscaled_nutsModule", TRUE)
Rcpp::loadModule("ScaledModule", TRUE)
Rcpp::loadModule("Unscaled_loopsModule", TRUE)
Rcpp::loadModule("Scaled_loopsModule", TRUE)
Rcpp::loadModule("Unscaled_nuts_loopsModule", TRUE) | /scratch/gouwar.j/cran-all/cranData/ATNr/R/zzz.R |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- include=FALSE, echo=FALSE-----------------------------------------------
oldpar <- par()
# if (!nzchar(Sys.getenv("_R_CHECK_LIMIT_CORES_", ""))) {
# ## Possible values: 'TRUE' 'false', 'warn', 'error'
# Sys.setenv("_R_CHECK_LIMIT_CORES_" = "TRUE")
# }
Sys.setenv("OMP_NUM_THREADS" = 1)
## -----------------------------------------------------------------------------
library(ATNr)
set.seed(123)
n_species <- 20 # number of species
conn <- 0.3 # connectance
fw <- create_niche_model(n_species, conn)
# The number of basal species can be calculated:
n_basal <- sum(colSums(fw) == 0)
## -----------------------------------------------------------------------------
TL = TroLev(fw) #trophic levels
masses <- 1e-2 * 10 ^ (TL - 1)
## -----------------------------------------------------------------------------
n_species <- 20
n_basal <- 5
masses <- sort(10^runif(n_species, 2, 6)) #body mass of species
L <- create_Lmatrix(masses, n_basal)
## -----------------------------------------------------------------------------
fw <- L
fw[fw > 0] <- 1
## -----------------------------------------------------------------------------
# initialisation of the model object. It is possible to create an ODE model corresponding to
# Schneider et al. 2016, Delmas et al. 2016 or Binzer et al. 2016:
# 1) Schneider et al. 2016
n_nutrients <- 3
model_unscaled_nuts <- create_model_Unscaled_nuts(n_species, n_basal, n_nutrients, masses, fw)
# 2) Delmas et al. 2016:
model_scaled <- create_model_Scaled(n_species, n_basal, masses, fw)
# 3) Binzer et al. 2016
model_unscaled <- create_model_Unscaled(n_species, n_basal, masses, fw)
## -----------------------------------------------------------------------------
# updating the Hill coefficient of consumers in the Unscaled_nuts model:
model_unscaled_nuts$q <- rep(1.4, model_unscaled_nuts$nb_s - model_unscaled_nuts$nb_b)
# Changing the assimilation efficiencies of all species to 0.5 in the Scaled model:
model_scaled$e = rep(0.5, model_scaled$nb_s)
# print the different fields that can be updated and their values:
# str(model_unscaled_nuts)
## -----------------------------------------------------------------------------
# for a model created by create_model_Unscaled_nuts():
model_unscaled_nuts <- initialise_default_Unscaled_nuts(model_unscaled_nuts, L)
# for a model created by create_model_Scaled():
model_scaled <- initialise_default_Scaled(model_scaled)
# for a model created by create_model_Unscaled():
model_unscaled <- initialise_default_Unscaled(model_unscaled)
## ----wrappers-----------------------------------------------------------------
biomasses <-runif(n_species, 2, 3) # starting biomasses
biomasses <- append(runif(3, 20, 30), biomasses) # nutrient concentration
# defining the desired integration time
times <- seq(0, 1500, 5)
sol <- lsoda_wrapper(times, biomasses, model_unscaled_nuts)
## ----deSolve------------------------------------------------------------------
# running simulations for the Schneider model
model_unscaled_nuts$initialisations()
sol <- deSolve::lsoda(
biomasses,
times,
function(t, y, params) {
return(list(params$ODE(y, t)))
},
model_unscaled_nuts
)
## ----plot_odeweb, fig.width=4, fig.height=3, fig.align='center'---------------
par(mar = c(4, 4, 1, 1))
plot_odeweb(sol, model_unscaled_nuts$nb_s)
## -----------------------------------------------------------------------------
# function to plot the fw
show_fw <- function(mat, title = NULL) {
par(mar = c(.5, .5, 2, .5))
S <- nrow(mat)
mat <- mat[nrow(mat):1, ]
mat <- t(mat)
image(mat, col = c("goldenrod", "steelblue"),
frame = FALSE, axes = FALSE)
title(title)
grid(nx = S, ny = S, lty = 1, col = adjustcolor("grey20", alpha.f = .1))
}
## -----------------------------------------------------------------------------
S <- 50 # number of species
C <- 0.2 # connectance
fw <- create_niche_model(S, C)
## -----------------------------------------------------------------------------
# number of species and body masses
n_species <- 20
n_basal <- 5
# body mass of species. Here we assume two specific rules for basal and non basal species
masses <- c(sort(10^runif(n_basal, 1, 3)), sort(10^runif(n_species - n_basal, 2, 6)))
L <- create_Lmatrix(masses, n_basal, Ropt = 100, gamma = 2, th = 0.01)
## ---- fig.width=4, fig.height=4, fig.align='center'---------------------------
# boolean version
fw <- L > 0
# 0/1 version:
fw <- L
fw[fw > 0] <- 1
show_fw(fw, title = "L-matrix model food web")
## -----------------------------------------------------------------------------
set.seed(12)
# 1) define number of species, their body masses, and the structure of the
# community
n_species <- 50
n_basal <- 20
n_nut <- 2
# body mass of species
masses <- 10 ^ c(sort(runif(n_basal, 1, 3)),
sort(runif(n_species - n_basal, 2, 9)))
# 2) create the food web
# create the L matrix
L <- create_Lmatrix(masses, n_basal, Ropt = 50, gamma = 2, th = 0.01)
# create the 0/1 version of the food web
fw <- L
fw[fw > 0] <- 1
# 3) create the model
model <- create_model_Unscaled_nuts(n_species, n_basal, n_nut, masses, fw)
# 4) define the temperature gradient and initial conditions
temperatures <- seq(4, 22, by = 2)
extinctions <- rep(NA, length(temperatures))
# defining biomasses
biomasses <- runif(n_species + n_nut, 2, 3)
# 5) define the desired integration time.
times <- seq(0, 100000, 100)
# 6) and loop over temperature to run the population dynamics
i <- 0
for (t in temperatures){
# initialise the model parameters for the specific temperature
# Here, no key parameters (numbers of species or species' body masses) are modified
  # Therefore, there is no need to create a new model object;
  # reinitialising the different parameters is enough
model <- initialise_default_Unscaled_nuts(model, L, temperature = t)
# updating the value of q, same for all consumers
model$q = rep(1.4, n_species - n_basal)
model$S <- rep(10, n_nut)
# running simulations for the Schneider model:
sol <- lsoda_wrapper(times, biomasses, model, verbose = FALSE)
  # retrieve the number of species that went extinct before the end of the
  # simulation, excluding the first 3 columns (the first is time, the 2nd and
  # 3rd are nutrients)
i <- i + 1
extinctions[i] <- sum(sol[nrow(sol), 4:ncol(sol)] < 1e-6)
}
## ---- fig.width=4, fig.height=3, fig.align='center'---------------------------
plot(temperatures, extinctions,
pch = 20, cex = 0.5, ylim = c(0,50), frame = FALSE,
ylab = "Number of Extinctions", xlab = "Temperature (°C)")
lines(temperatures, extinctions, col = 'blue')
## ----binzer example-----------------------------------------------------------
# set.seed(142)
# number of species
S <- 30
# vector containing the predator prey body mass ratios to test
scaling <- 10 ^ seq(-1, 4, by = .5)
# vectors to store the results
persistence0 <- c()
persistence40 <- c()
# create the studied food web
fw <- create_niche_model(S = S, C = 0.1)
# calculating trophic levels
TL = TroLev(fw)
biomasses <- runif(S, 2, 3)
# run a loop over the different pred-prey body mass ratios
for (scal in scaling) {
# update species body masses following the specific body mass ratio
masses <- 0.01 * scal ^ (TL - 1)
  # create the models with parameters corresponding to 0 and 40 degrees Celsius
mod0 <- create_model_Unscaled(nb_s = S,
nb_b = sum(colSums(fw) == 0),
BM = masses,
fw = fw)
mod0 <- initialise_default_Unscaled(mod0, temperature = 0)
mod0$c <- rep(0, mod0$nb_s - mod0$nb_b)
mod0$alpha <- diag(mod0$nb_b)
mod40 <- create_model_Unscaled(nb_s = S,
nb_b = sum(colSums(fw) == 0),
BM = masses,
fw = fw)
mod40 <- initialise_default_Unscaled(mod40, temperature = 40)
mod40$c <- rep(0, mod40$nb_s - mod40$nb_b)
mod40$alpha <- diag(mod40$nb_b)
times <- seq(1, 1e9, by = 1e7)
# run the model corresponding to the 0 degree conditions
sol <- lsoda_wrapper(times, biomasses, mod0, verbose = FALSE)
persistence0 <- append(persistence0, sum(sol[nrow(sol), -1] > mod0$ext) / S)
# run the model corresponding to the 40 degrees conditions
sol <- lsoda_wrapper(times, biomasses, mod40, verbose = FALSE)
persistence40 <- append(persistence40, sum(sol[nrow(sol), -1] > mod40$ext) / S)
}
## ----binzer example plot, fig.width=6, fig.height=4, fig.align='center'-------
plot(log10(scaling), persistence40,
xlab = expression("Body mass ratio between TL"[i + 1]* " and TL"[i]),
ylab = "Persistence",
ylim = c(0, 1),
frame = FALSE, axes = FALSE, type = 'l', col = "red")
lines(log10(scaling), persistence0, col = "blue")
axis(2, at = seq(0, 1, by = .1), labels = seq(0, 1, by = .1))
axis(1, at = seq(-1, 4, by = 1), labels = 10 ^ seq(-1, 4, by = 1))
legend(0.1, 0.9, legend = c("40 \u00B0C", "0 \u00B0C"), fill = c("red", "blue"))
## ----delmas 1-----------------------------------------------------------------
set.seed(1234)
S <- 10
fw <- NULL
TL <- NULL
fw <- create_niche_model(S, C = .15)
TL <- TroLev(fw)
masses <- 0.01 * 100 ^ (TL - 1) #body mass of species
mod <- create_model_Scaled(nb_s = S,
nb_b = sum(colSums(fw) == 0),
BM = masses,
fw = fw)
mod <- initialise_default_Scaled(mod)
times <- seq(0, 300, by = 2)
biomasses <- runif(S, 2, 3) # starting biomasses
## ----delmas 2-----------------------------------------------------------------
mod$K <- 1
sol1 <- lsoda_wrapper(times, biomasses, mod, verbose = FALSE)
mod$K <- 10
sol10 <- lsoda_wrapper(times, biomasses, mod, verbose = FALSE)
## ----delmas 3, fig.width=6, fig.height=6, fig.align='center'------------------
par(mfrow = c(2, 1))
plot_odeweb(sol1, S)
title("Carrying capacity = 1")
plot_odeweb(sol10, S)
title("Carrying capacity = 10")
## ----mistake 1----------------------------------------------------------------
set.seed(1234)
nb_s <- 20
nb_b <- 5
nb_n <- 2
masses <- sort(10 ^ runif(nb_s, 2, 6)) #body mass of species
biomasses = runif(nb_s + nb_n, 2, 3)
L <- create_Lmatrix(masses, nb_b, Ropt = 50)
L[, 1:nb_b] <- 0
fw <- L
fw[fw > 0] <- 1
model_unscaled_nuts <- create_model_Unscaled_nuts(nb_s, nb_b, nb_n, masses, fw)
model_unscaled_nuts <- initialise_default_Unscaled_nuts(model_unscaled_nuts, L)
nb_s <- 30 #this does not change the model parameter
model_unscaled_nuts$nb_s #this is the model parameter
## -----------------------------------------------------------------------------
times <- seq(0, 15000, 150)
model_unscaled_nuts$nb_s = 40
# this will return an error:
# sol <- lsoda_wrapper(times, biomasses, model_unscaled_nuts)
## ----mistake 2, fig.width=6---------------------------------------------------
set.seed(1234)
nb_s <- 20
nb_b <- 5
nb_n <- 2
masses <- sort(10 ^ runif(nb_s, 2, 6)) #body mass of species
biomasses = runif(nb_s + nb_n, 2, 3)
L <- create_Lmatrix(masses, nb_b, Ropt = 50)
L[, 1:nb_b] <- 0
fw <- L
fw[fw > 0] <- 1
model_unscaled_nuts <- create_model_Unscaled_nuts(nb_s, nb_b, nb_n, masses, fw)
model_unscaled_nuts <- initialise_default_Unscaled_nuts(model_unscaled_nuts, L)
model_unscaled_nuts$BM <- sqrt(model_unscaled_nuts$BM) # we change body masses within the model
sol <- lsoda_wrapper(seq(1, 5000, 50), biomasses, model_unscaled_nuts)
par(mar = c(4, 4, 1, 1))
plot_odeweb(sol, model_unscaled_nuts$nb_s)
## -----------------------------------------------------------------------------
nb_s <- 30
nb_n <- 2
masses <- sort(10 ^ runif(nb_s, 2, 6)) #body mass of species
biomasses <- runif(nb_s + nb_n, 2, 3)
L <- create_Lmatrix(masses, nb_b, Ropt = 50)
L[, 1:nb_b] <- 0
fw <- L
fw[fw > 0] <- 1
# create a new object:
model_unscaled_nuts <- create_model_Unscaled_nuts(nb_s, nb_b, nb_n, masses, fw)
model_unscaled_nuts <- initialise_default_Unscaled_nuts(model_unscaled_nuts, L)
# safely run the integration:
sol <- lsoda_wrapper(times, biomasses, model_unscaled_nuts)
## ----mistake 4----------------------------------------------------------------
nb_s <- 30
nb_n <- 2
masses <- sort(10 ^ runif(nb_s, 2, 6)) #body mass of species
biomasses <- runif(nb_s + nb_n, 2, 3)
L <- create_Lmatrix(masses, nb_b, Ropt = 50)
L[, 1:nb_b] <- 0
fw <- L
fw[fw > 0] <- 1
# create a new object:
model_1 <- create_model_Unscaled_nuts(nb_s, nb_b, nb_n, masses, fw)
model_1 <- initialise_default_Unscaled_nuts(model_1, L)
# trying to create a new model that is similar to model_1
model_2 = model_1
## -----------------------------------------------------------------------------
model_1$q = 1.8
# this also updated the value in model_2:
model_2$q
## -----------------------------------------------------------------------------
plus.3 = function(x, useless) {
y = x+3
useless = useless + 1
return(y)
}
useless = 4:10
useless2 = useless
x = sapply(1:5, plus.3, useless)
# the useless variable was not modified:
useless == useless2
## -----------------------------------------------------------------------------
n_species <- 20
n_basal <- 5
n_cons = n_species - n_basal
n_nut <- 2
masses <- 10 ^ c(sort(runif(n_basal, 0, 3)),
sort(runif(n_species - n_basal, 2, 5)))
L <- create_Lmatrix(masses, n_basal, Ropt = 100, gamma = 2, th = 0.01)
fw <- L
fw[fw > 0] <- 1
model <- create_model_Unscaled_nuts(n_species, n_basal, n_nut, masses, fw)
model <- initialise_default_Unscaled_nuts(model, L, temperature = 20)
## -----------------------------------------------------------------------------
# a function that sets all elements of model$b to 0
a.fun <- function(x, model){
model$b = model$b*0
return(x+1)
}
## ---- eval = FALSE------------------------------------------------------------
# x = c(1,2)
# sum(model$b)
# y = lapply(x, a.fun, model)
# sum(model$b)
## ---- eval = FALSE------------------------------------------------------------
# library(parallel)
# sum(model$b)
# model <- initialise_default_Unscaled_nuts(model, L, temperature = 20)
# y = mclapply(x, a.fun, model = model, mc.cores=5)
# sum(model$b)
## ----restore par, include=FALSE, echo=FALSE-----------------------------------
par(oldpar)
| /scratch/gouwar.j/cran-all/cranData/ATNr/inst/doc/ATNr.R |
---
title: "ATNr"
output: rmarkdown::html_vignette
bibliography: vignette.bib
vignette: >
%\VignetteIndexEntry{ATNr}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r, include=FALSE, echo=FALSE}
oldpar <- par(no.readonly = TRUE)
# if (!nzchar(Sys.getenv("_R_CHECK_LIMIT_CORES_", ""))) {
# ## Possible values: 'TRUE' 'false', 'warn', 'error'
# Sys.setenv("_R_CHECK_LIMIT_CORES_" = "TRUE")
# }
Sys.setenv("OMP_NUM_THREADS" = 1)
```
The package *ATNr* defines the differential equations and parametrisation of different versions of the Allometric Trophic Network (ATN) model. It is structured around a model object that contains a function implementing the ordinary differential equations (ODEs) of the model and various attributes defining the different parameters to run the ODEs.
Three different versions of the model are implemented:
* Scaled version: @delmas2017simulations
* Unscaled version incorporating nutrient dynamics: @schneider2016animal
* Unscaled version without nutrients: @binzer2016interactive
The version without nutrients from @delmas2017simulations is scaled, meaning that the biological rates controlling the growth rate of the species are normalised by the growth rate of the smallest basal species. For more details on the three models, see the specific vignette: `vignette("model_descriptions", package = "ATNr")`.
# A quick go through
## Creating a model
The definition of an ATN model is based on a model object (formally an S4 class in R). The model object is initialised by specifying a fixed set of parameters: *the number of species*, *the number of basal species*, *species body masses*, *a matrix defining the trophic interactions* and, for the version including the nutrient dynamics, *the number of nutrients*.
The first thing to do is therefore to create the corresponding R variables. While one can use an empirical food web for this, it is also possible to generate synthetic food webs using the niche model from @williams2000simple or using allometric scaling as defined in @schneider2016animal.
### Generating synthetic food webs (if needed)
*ATNr* has two functions to generate synthetic food webs, `create_niche_model()` for the niche model (@williams2000simple) and `create_Lmatrix()` for the allometric scaling model (@schneider2016animal).
The niche model requires information on the number of species and connectance of the desired food web:
```{r}
library(ATNr)
set.seed(123)
n_species <- 20 # number of species
conn <- 0.3 # connectance
fw <- create_niche_model(n_species, conn)
# The number of basal species can be calculated:
n_basal <- sum(colSums(fw) == 0)
```
As the niche model does not rely on allometry, it is possible to estimate species body masses from their trophic levels, which can be calculated with the `TroLev` function of the package. For instance:
```{r}
TL = TroLev(fw) #trophic levels
masses <- 1e-2 * 10 ^ (TL - 1)
```
The allometric scaling model generates links based on species body masses. Therefore, it requires as input a vector containing the body mass of each species, as well as a parameter setting the desired number of basal species. It produces a so-called L matrix, which formally quantifies the probability for a consumer to successfully attack and consume an encountered resource:
```{r}
n_species <- 20
n_basal <- 5
masses <- sort(10^runif(n_species, 2, 6)) #body mass of species
L <- create_Lmatrix(masses, n_basal)
```
This L matrix can then be transformed into a binary food web:
```{r}
fw <- L
fw[fw > 0] <- 1
```
More details about the generative models and the usage precautions around them can be found in the section "[The food web generative functions]".
### Creating a specific ATN model
As soon as a food web is stored in a matrix, it is possible to create a model object corresponding to the desired specific model:
```{r}
# initialisation of the model object. It is possible to create an ODE corresponding to
# Schneider et al. 2016, Delmas et al. 2017 or Binzer et al. 2016:
# 1) Schneider et al. 2016
n_nutrients <- 3
model_unscaled_nuts <- create_model_Unscaled_nuts(n_species, n_basal, n_nutrients, masses, fw)
# 2) Delmas et al. 2017:
model_scaled <- create_model_Scaled(n_species, n_basal, masses, fw)
# 3) Binzer et al. 2016
model_unscaled <- create_model_Unscaled(n_species, n_basal, masses, fw)
```
Once created, it is possible to access to the methods and attributes of the object to initialise or update them:
```{r}
# updating the hill coefficient of consumers in the Unscaled_nuts model:
model_unscaled_nuts$q <- rep(1.4, model_unscaled_nuts$nb_s - model_unscaled_nuts$nb_b)
# Changing the assimilation efficiencies of all species to 0.5 in the Scaled model:
model_scaled$e = rep(0.5, model_scaled$nb_s)
# print the different fields that can be updated and their values:
# str(model_unscaled_nuts)
```
It is important to keep in mind that some rules apply here:
* The order of the species in the different fields must be consistent: the first species in the `$BM` object corresponds to the first species in the `$fw` object and in the `$e` object.
* The objects that are specific to a species type (i.e. basal species or consumers) are dimensioned accordingly: the handling time (`$h`) sets the handling time of consumers on resources. Therefore, the `h` matrix has a number of rows equal to the number of species and a number of columns equal to the number of consumers (as non-consumer species do not have a handling time by definition). In that case, the first row corresponds to the first species and the first column to the first consumer (a quick dimension check is sketched after the initialisation block below).
* The objects describing the interactions between plants and nutrients (`$K` or `$V`) are matrices whose number of rows equals the number of nutrients and whose number of columns matches the number of basal species (this point is specific to the Schneider model, which is the only one including explicit nutrient dynamics).
To run the population dynamics, all the parameters must be defined. It is possible to automatically load a default parametrisation using the dedicated functions:
```{r}
# for a model created by create_model_Unscaled_nuts():
model_unscaled_nuts <- initialise_default_Unscaled_nuts(model_unscaled_nuts, L)
# for a model created by create_model_Scaled():
model_scaled <- initialise_default_Scaled(model_scaled)
# for a model created by create_model_Unscaled():
model_unscaled <- initialise_default_Unscaled(model_unscaled)
```
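As a quick illustration of the dimension rules listed above, the fields of the initialised model can be inspected directly (a sketch, not run here; field names follow the descriptions above and may differ between model types):
```{r, eval = FALSE}
length(model_unscaled_nuts$BM) # one body mass per species
dim(model_unscaled_nuts$h) # handling times: rows = species, columns = consumers
dim(model_unscaled_nuts$K) # nutrient parameters: rows = nutrients, columns = basal species
```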
Importantly, for the unscaled with nutrients model of @schneider2016animal, the calculation of consumption rates relies on the L matrix created above or, in the case of empirical networks, on a matrix that defines the probability of a consumer to successfully attack and consume an encountered prey. The default initialisation of the `Unscaled_nuts` and `Unscaled` models can also include temperature effects (20 °C by default).
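For instance, a different temperature (in degrees Celsius) can be passed at initialisation; a minimal sketch:
```{r, eval = FALSE}
# re-initialise the default parametrisation at 15 °C instead of the default 20 °C
model_unscaled_nuts <- initialise_default_Unscaled_nuts(model_unscaled_nuts, L, temperature = 15)
model_unscaled <- initialise_default_Unscaled(model_unscaled, temperature = 15)
```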
## Running the population dynamics
Once all the parameters are properly defined, the ODEs can be integrated by any solver. We present here a solution based on `lsoda` from the `deSolve` library (@DeSolve), but other solutions exist (`sundialr` is also a possibility). The package provides a direct wrapper to `lsoda` with the function `lsoda_wrapper`:
```{r wrappers}
biomasses <- runif(n_species, 2, 3) # starting biomasses
biomasses <- append(runif(3, 20, 30), biomasses) # nutrient concentration
# defining the desired integration time
times <- seq(0, 1500, 5)
sol <- lsoda_wrapper(times, biomasses, model_unscaled_nuts)
```
To have more control of the integration, it is however possible to not use the wrapper proposed in the package and directly work with the `lsoda` function. Here is an example:
```{r deSolve}
# running simulations for the Schneider model
model_unscaled_nuts$initialisations()
sol <- deSolve::lsoda(
biomasses,
times,
function(t, y, params) {
return(list(params$ODE(y, t)))
},
model_unscaled_nuts
)
```
Note that the call to `model_unscaled_nuts$initialisations()` is important here, as it pre-computes some variables to optimise code execution. This function is normally called internally by `lsoda_wrapper`. If the integration does not rely on this wrapper function, the call to `$initialisations()` is needed for ALL the model types.
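The same pattern applies to the other model types; for example, for the unscaled model (a sketch, not run here, using starting biomasses without nutrient concentrations):
```{r, eval = FALSE}
model_unscaled$initialisations()
sol <- deSolve::lsoda(
  runif(n_species, 2, 3),
  times,
  function(t, y, params) {
    return(list(params$ODE(y, t)))
  },
  model_unscaled
)
```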
The package also contains a simple function to plot the time series obtained: `plot_odeweb`. The colours only differentiate the species by their ranks in the food web matrix (from blue to red).
```{r plot_odeweb, fig.width=4, fig.height=3, fig.align='center'}
par(mar = c(4, 4, 1, 1))
plot_odeweb(sol, model_unscaled_nuts$nb_s)
```
# The food web generative functions
It is possible to create a model object using empirical food webs, however synthetic ones can be valuable tools to explore different theoretical questions. To allow this possibility, two different models are available in the package: the niche model (@williams2000simple) or the allometric scaling model (@schneider2016animal). Thereafter, we use the following function to visualise the adjacency matrices (where rows correspond to resources and columns to consumers) of the food webs:
```{r}
# function to plot the fw
show_fw <- function(mat, title = NULL) {
par(mar = c(.5, .5, 2, .5))
S <- nrow(mat)
mat <- mat[nrow(mat):1, ]
mat <- t(mat)
image(mat, col = c("goldenrod", "steelblue"),
frame = FALSE, axes = FALSE)
title(title)
grid(nx = S, ny = S, lty = 1, col = adjustcolor("grey20", alpha.f = .1))
}
```
The niche model orders species based on their trophic niche, randomly sampled from a uniform distribution. For each species $i$, a diet range ($r_i$) is then drawn from a Beta distribution and a diet center $c_i$ from a uniform distribution. For each species $i$, all species that have trophic niche within the interval $[c_i - r_i / 2, c_i + r_i / 2]$ are considered to be prey of species $i$. In this package, we followed the modification to the niche model of @williams2000simple as specified in @allesina2008general.
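To make the sampling scheme explicit, here is a minimal base-R sketch of the original niche model (for illustration only; the package implementation additionally applies the corrections of @allesina2008general and ensures the returned web is connected):
```{r}
S <- 10
C <- 0.15
niche <- sort(runif(S)) # trophic niche values
ranges <- niche * rbeta(S, 1, 1 / (2 * C) - 1) # diet ranges, so that E(ranges) = 2C * niche
centers <- runif(S, ranges / 2, niche) # diet centers
# adjacency matrix: rows are resources, columns are consumers
adj <- outer(niche, centers - ranges / 2, ">=") & outer(niche, centers + ranges / 2, "<=")
```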
Generating a food web from the niche model is made by a simple call to the corresponding functions:
```{r}
S <- 50 # number of species
C <- 0.2 # connectance
fw <- create_niche_model(S, C)
```
The function ensures that the returned food web is not composed of disconnected sub-networks (i.e., several connected components).
The allometric scaling model assumes an optimal consumer/resource body mass ratio (_Ropt_, default = 100) for attack rates, i.e. the probability that a consumer, when encountering a species, will prey on it. In particular, each attack rate is calculated using a Ricker function:
$$
a_{ij} = \left( \frac{m_i}{m_j \cdot Ropt} \cdot e^{(1 - \frac{m_i}{m_j \cdot Ropt})} \right) ^\gamma
$$
where $m_i$ is the body mass of species $i$ and $\gamma$ sets the width of the trophic niche.
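Written as a function, this Ricker niche reads (a direct transcription of the equation above, for illustration):
```{r}
# probability of successful attack of a consumer of mass m_i on a resource of mass m_j
ricker <- function(m_i, m_j, Ropt = 100, gamma = 2) {
  x <- m_i / (m_j * Ropt)
  (x * exp(1 - x))^gamma
}
ricker(1000, 10) # consumer exactly Ropt times heavier than its resource: probability 1
ricker(1000, 500) # far from the optimal ratio: much lower probability
```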
Generating a food web with the allometric scaling model requires a few more steps. The trophic niche of a species is defined by a body mass interval and is quantified (see figs. 2 and 3 from Schneider et al., 2016). This quantified version returns the probabilities of a successful attack event occurring when a consumer encounters a prey. These probabilities are estimated with a Ricker function of four parameters: the body mass of the resource, the body mass of the consumer, the optimal predator-prey body mass ratio `Ropt`, and the width of the trophic niche `gamma`. A threshold (`th`) filters out links with very low probabilities of attack success. The probabilities are stored in a matrix obtained from:
```{r}
# number of species and body masses
n_species <- 20
n_basal <- 5
# body mass of species. Here we assume two specific rules for basal and non-basal species
masses <- c(sort(10^runif(n_basal, 1, 3)), sort(10^runif(n_species - n_basal, 2, 6)))
L <- create_Lmatrix(masses, n_basal, Ropt = 100, gamma = 2, th = 0.01)
```
Then, a food web is a binary version of the L matrix that can be stored either using booleans (FALSE/TRUE) or numeric values (0/1):
```{r, fig.width=4, fig.height=4, fig.align='center'}
# boolean version
fw <- L > 0
# 0/1 version:
fw <- L
fw[fw > 0] <- 1
show_fw(fw, title = "L-matrix model food web")
```
# Examples
## effect of temperature on species persistence
_ATNr_ makes it relatively easy to vary one parameter to assess its effect on the population dynamics. For example, we can study how changes in temperature from 4 to 22 °C affect the number of species that go extinct.
```{r}
set.seed(12)
# 1) define number of species, their body masses, and the structure of the
# community
n_species <- 50
n_basal <- 20
n_nut <- 2
# body mass of species
masses <- 10 ^ c(sort(runif(n_basal, 1, 3)),
sort(runif(n_species - n_basal, 2, 9)))
# 2) create the food web
# create the L matrix
L <- create_Lmatrix(masses, n_basal, Ropt = 50, gamma = 2, th = 0.01)
# create the 0/1 version of the food web
fw <- L
fw[fw > 0] <- 1
# 3) create the model
model <- create_model_Unscaled_nuts(n_species, n_basal, n_nut, masses, fw)
# 4) define the temperature gradient and initial conditions
temperatures <- seq(4, 22, by = 2)
extinctions <- rep(NA, length(temperatures))
# defining biomasses
biomasses <- runif(n_species + n_nut, 2, 3)
# 5) define the desired integration time.
times <- seq(0, 100000, 100)
# 6) and loop over temperature to run the population dynamics
i <- 0
for (t in temperatures){
# initialise the model parameters for the specific temperature
# Here, no key parameters (numbers of species or species' body masses) are modified
# Therefore, there is no need to create a new model object;
# reinitialising the different parameters is enough
model <- initialise_default_Unscaled_nuts(model, L, temperature = t)
# updating the value of q, same for all consumers
model$q = rep(1.4, n_species - n_basal)
model$S <- rep(10, n_nut)
# running simulations for the Schneider model:
sol <- lsoda_wrapper(times, biomasses, model, verbose = FALSE)
# retrieve the number of species that went extinct before the end of the
# simulation excluding here the 3 first columns: first is time, 2nd and 3rd
# are nutrients
i <- i + 1
extinctions[i] <- sum(sol[nrow(sol), 4:ncol(sol)] < 1e-6)
}
```
```{r, fig.width=4, fig.height=3, fig.align='center'}
plot(temperatures, extinctions,
pch = 20, cex = 0.5, ylim = c(0,50), frame = FALSE,
ylab = "Number of Extinctions", xlab = "Temperature (°C)")
lines(temperatures, extinctions, col = 'blue')
```
## Effect of predator-prey body mass ratio and temperature on species persistence
Predator-prey body mass ratio and environment temperature have been shown to affect the persistence of species in local communities, e.g. @binzer2016interactive. Here, we use the unscaled model without nutrients (`create_model_Unscaled`) to replicate the results from @binzer2016interactive. In particular, we compute the fraction of species that persist for predator-prey body mass ratio values in $\left[ 10^{-1}, 10^4 \right]$ and temperature values in $\{0, 40\}$ °C.
First, we create a food web with 30 species and, within a for loop, initialise the model with given values of body mass ratio and temperature. Species persistence is calculated as the fraction of species that are not extinct at the end of the simulations.
```{r binzer example}
# set.seed(142)
# number of species
S <- 30
# vector containing the predator prey body mass ratios to test
scaling <- 10 ^ seq(-1, 4, by = .5)
# vectors to store the results
persistence0 <- c()
persistence40 <- c()
# create the studied food web
fw <- create_niche_model(S = S, C = 0.1)
# calculating trophic levels
TL = TroLev(fw)
biomasses <- runif(S, 2, 3)
# run a loop over the different pred-prey body mass ratios
for (scal in scaling) {
# update species body masses following the specific body mass ratio
masses <- 0.01 * scal ^ (TL - 1)
# create the models with parameters corresponding to 0 and 40 degrees Celsius
mod0 <- create_model_Unscaled(nb_s = S,
nb_b = sum(colSums(fw) == 0),
BM = masses,
fw = fw)
mod0 <- initialise_default_Unscaled(mod0, temperature = 0)
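# no interference competition (c = 0); plants compete only with themselves (alpha = identity)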
mod0$c <- rep(0, mod0$nb_s - mod0$nb_b)
mod0$alpha <- diag(mod0$nb_b)
mod40 <- create_model_Unscaled(nb_s = S,
nb_b = sum(colSums(fw) == 0),
BM = masses,
fw = fw)
mod40 <- initialise_default_Unscaled(mod40, temperature = 40)
mod40$c <- rep(0, mod40$nb_s - mod40$nb_b)
mod40$alpha <- diag(mod40$nb_b)
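# long integration window so the dynamics settle before persistence is measured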
times <- seq(1, 1e9, by = 1e7)
# run the model corresponding to the 0 degree conditions
sol <- lsoda_wrapper(times, biomasses, mod0, verbose = FALSE)
persistence0 <- append(persistence0, sum(sol[nrow(sol), -1] > mod0$ext) / S)
# run the model corresponding to the 40 degrees conditions
sol <- lsoda_wrapper(times, biomasses, mod40, verbose = FALSE)
persistence40 <- append(persistence40, sum(sol[nrow(sol), -1] > mod40$ext) / S)
}
```
Similarly to @binzer2016interactive, species persistence increases with increasing values of predator-prey body mass ratios, but the effect of temperature differs depending on this ratio: when the predator-prey body mass ratio is low, high temperature leads to more persistence, while increasing the predator-prey body mass ratio tends to reduce the effects of temperature.
```{r binzer example plot, fig.width=6, fig.height=4, fig.align='center'}
plot(log10(scaling), persistence40,
xlab = expression("Body mass ratio between TL"[i + 1]* " and TL"[i]),
ylab = "Persistence",
ylim = c(0, 1),
frame = FALSE, axes = FALSE, type = 'l', col = "red")
lines(log10(scaling), persistence0, col = "blue")
axis(2, at = seq(0, 1, by = .1), labels = seq(0, 1, by = .1))
axis(1, at = seq(-1, 4, by = 1), labels = 10 ^ seq(-1, 4, by = 1))
legend(0.1, 0.9, legend = c("40 \u00B0C", "0 \u00B0C"), fill = c("red", "blue"))
```
## Paradox of enrichment
The paradox of enrichment states that increasing the carrying capacity of basal species may destabilise the population dynamics (@Rosenzweig). Here, we show how this can be studied with _ATNr_; we use the model from @delmas2017simulations, but similar results can be obtained using the other two models in the package.
First, we create a food web with 10 species and initialize the model
```{r delmas 1}
set.seed(1234)
S <- 10
fw <- NULL
TL <- NULL
fw <- create_niche_model(S, C = .15)
TL <- TroLev(fw)
masses <- 0.01 * 100 ^ (TL - 1) #body mass of species
mod <- create_model_Scaled(nb_s = S,
nb_b = sum(colSums(fw) == 0),
BM = masses,
fw = fw)
mod <- initialise_default_Scaled(mod)
times <- seq(0, 300, by = 2)
biomasses <- runif(S, 2, 3) # starting biomasses
```
Then, we solve the system setting the carrying capacity of basal species equal to one (`mod$K <- 1`) and then increasing it to ten (`mod$K <- 10`):
```{r delmas 2}
mod$K <- 1
sol1 <- lsoda_wrapper(times, biomasses, mod, verbose = FALSE)
mod$K <- 10
sol10 <- lsoda_wrapper(times, biomasses, mod, verbose = FALSE)
```
As shown in the plot below, for _K = 1_ the system reaches a stable equilibrium, whereas when we increase the carrying capacity (_K = 10_) the system departs from this stable equilibrium and periodic oscillations appear.
```{r delmas 3, fig.width=6, fig.height=6, fig.align='center'}
par(mfrow = c(2, 1))
plot_odeweb(sol1, S)
title("Carrying capacity = 1")
plot_odeweb(sol10, S)
title("Carrying capacity = 10")
```
# Common mistakes, things to not do
## Not updating model parameters properly in the model object
The building blocks of this package are the C++ classes that solve the ODEs of the ATN models. Model parameters are stored in these classes and can be changed only by addressing them within the respective objects. For instance:
```{r mistake 1}
set.seed(1234)
nb_s <- 20
nb_b <- 5
nb_n <- 2
masses <- sort(10 ^ runif(nb_s, 2, 6)) #body mass of species
biomasses = runif(nb_s + nb_n, 2, 3)
L <- create_Lmatrix(masses, nb_b, Ropt = 50)
L[, 1:nb_b] <- 0
fw <- L
fw[fw > 0] <- 1
model_unscaled_nuts <- create_model_Unscaled_nuts(nb_s, nb_b, nb_n, masses, fw)
model_unscaled_nuts <- initialise_default_Unscaled_nuts(model_unscaled_nuts, L)
nb_s <- 30 #this does not change the model parameter
model_unscaled_nuts$nb_s #this is the model parameter
```
## Updating key parameters without creating a new model object
Changing parameters that are used in the `create_model` functions without creating a new model object is almost always a bad idea. Those are important structural parameters, and changing one of them implies changes in most of the other variables contained in the model object. For instance, in the example above, changing the number of species in the model object will lead to inconsistencies among the different variables: the dimensions of the objects storing attack rates, body masses and so on won't match the updated number of species. Some basic checks are made before starting the integration in the `lsoda_wrapper` function, based on the `run_checks` procedure also available in the package.
```{r}
times <- seq(0, 15000, 150)
model_unscaled_nuts$nb_s = 40
# this will return an error:
# sol <- lsoda_wrapper(times, biomasses, model_unscaled_nuts)
```
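The checks can also be called manually before an integration; a sketch, assuming `run_checks` takes the model object (see `?run_checks` for the exact interface):
```{r, eval = FALSE}
# assumed interface: run_checks() verifies the consistency of the model parameters
run_checks(model_unscaled_nuts)
```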
However, some modifications can remain undetected. For instance, modifying only species' body masses won't raise any errors, even though a change in body mass should be associated with a change in all the related biological rates. The following code runs without errors, but relies on a model with an incoherent set of parameters and therefore produces wrong results:
```{r mistake 2, fig.width=6}
set.seed(1234)
nb_s <- 20
nb_b <- 5
nb_n <- 2
masses <- sort(10 ^ runif(nb_s, 2, 6)) #body mass of species
biomasses = runif(nb_s + nb_n, 2, 3)
L <- create_Lmatrix(masses, nb_b, Ropt = 50)
L[, 1:nb_b] <- 0
fw <- L
fw[fw > 0] <- 1
model_unscaled_nuts <- create_model_Unscaled_nuts(nb_s, nb_b, nb_n, masses, fw)
model_unscaled_nuts <- initialise_default_Unscaled_nuts(model_unscaled_nuts, L)
model_unscaled_nuts$BM <- sqrt(model_unscaled_nuts$BM) # we change body masses within the model
sol <- lsoda_wrapper(seq(1, 5000, 50), biomasses, model_unscaled_nuts)
par(mar = c(4, 4, 1, 1))
plot_odeweb(sol, model_unscaled_nuts$nb_s)
```
In general, each time one of these key parameters is modified (`nb_s`, `nb_b`, `BM`, `fw`, and `nb_n` for the Schneider model), it is strongly recommended to create a new model object with the updated parameters:
```{r}
nb_s <- 30
nb_n <- 2
masses <- sort(10 ^ runif(nb_s, 2, 6)) #body mass of species
biomasses <- runif(nb_s + nb_n, 2, 3)
L <- create_Lmatrix(masses, nb_b, Ropt = 50)
L[, 1:nb_b] <- 0
fw <- L
fw[fw > 0] <- 1
# create a new object:
model_unscaled_nuts <- create_model_Unscaled_nuts(nb_s, nb_b, nb_n, masses, fw)
model_unscaled_nuts <- initialise_default_Unscaled_nuts(model_unscaled_nuts, L)
# safely run the integration:
sol <- lsoda_wrapper(times, biomasses, model_unscaled_nuts)
```
Specifically for the Schneider model, changing the L matrix requires updating the feeding rates (`$b`).
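A simple way to keep the parameters consistent after modifying the L matrix is to re-run the default initialisation; a minimal sketch:
```{r, eval = FALSE}
# regenerate feeding rates and related parameters from the updated L matrix
model_unscaled_nuts <- initialise_default_Unscaled_nuts(model_unscaled_nuts, L)
```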
## Changing the dimensions of vectors and matrix fields in a model object without doing it consistently.
Changing the dimensions of a vector or matrix field in a model object implicitly implies a change in the number of species (see the section above). For instance, decreasing the length of the assimilation efficiencies vector `$e` should imply a consistent update of all the other parameters (handling times, attack rates, body masses, etc., depending on the model). The function `remove_species` is made to properly remove species from model objects without having to manually regenerate all parameters.
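For instance (a hedged sketch: we assume here that the species indices come first and the model second; check `?remove_species` for the exact argument order):
```{r, eval = FALSE}
# assumed interface: remove species 1 and 3 and return a consistent model object
smaller_model <- remove_species(c(1, 3), model_unscaled_nuts)
```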
## Shallow copying models
As the different models are built on Rcpp, the model objects are only pointers to C++ objects. It means that this script:
```{r mistake 4}
nb_s <- 30
nb_n <- 2
masses <- sort(10 ^ runif(nb_s, 2, 6)) #body mass of species
biomasses <- runif(nb_s + nb_n, 2, 3)
L <- create_Lmatrix(masses, nb_b, Ropt = 50)
L[, 1:nb_b] <- 0
fw <- L
fw[fw > 0] <- 1
# create a new object:
model_1 <- create_model_Unscaled_nuts(nb_s, nb_b, nb_n, masses, fw)
model_1 <- initialise_default_Unscaled_nuts(model_1, L)
# trying to create a new model that is similar to model_1
model_2 = model_1
```
will not create a new model object. Formally, it creates a new pointer to the same address, which means that `model_1` and `model_2` are in reality the same variable (shallow copy). Therefore, modifying one modifies the other:
```{r}
model_1$q = 1.8
# this also updated the value in model_2:
model_2$q
```
Therefore, to create a new model object based on another one, it is important to formally create one (either with one of the `create_model_` functions, or using `new`). More information on the difference between deep and shallow copies can be found here: https://stackoverflow.com/questions/184710/what-is-the-difference-between-a-deep-copy-and-a-shallow-copy
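An independent copy can instead be obtained by constructing a fresh object from the same inputs:
```{r}
# a genuinely new object: modifying it does not affect model_1
model_3 <- create_model_Unscaled_nuts(nb_s, nb_b, nb_n, masses, fw)
model_3 <- initialise_default_Unscaled_nuts(model_3, L)
model_3$q <- 0.2
model_1$q # still 1.8
```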
## Modifying a model object in a *apply function
Modifying an R variable inside a `*apply` function does not modify it in the calling environment:
```{r}
plus.3 = function(x, useless) {
y = x+3
useless = useless + 1
return(y)
}
useless = 4:10
useless2 = useless
x = sapply(1:5, plus.3, useless)
# the useless variable was not modified:
useless == useless2
```
However, this is not the case anymore with a model object. If we consider a model object:
```{r}
n_species <- 20
n_basal <- 5
n_cons = n_species - n_basal
n_nut <- 2
masses <- 10 ^ c(sort(runif(n_basal, 0, 3)),
sort(runif(n_species - n_basal, 2, 5)))
L <- create_Lmatrix(masses, n_basal, Ropt = 100, gamma = 2, th = 0.01)
fw <- L
fw[fw > 0] <- 1
model <- create_model_Unscaled_nuts(n_species, n_basal, n_nut, masses, fw)
model <- initialise_default_Unscaled_nuts(model, L, temperature = 20)
```
and a function that takes this model object as an argument, setting the b matrix to 0:
```{r}
# a function that sets all elements of model$b to 0
a.fun <- function(x, model){
model$b = model$b*0
return(x+1)
}
```
Then, we can see that the global model object is indeed modified when the function is called by `lapply`:
```{r, eval = FALSE}
x = c(1,2)
sum(model$b)
y = lapply(x, a.fun, model)
sum(model$b)
```
This behaviour is due to the fact that in a `*apply` function the model is shallow-copied, so each iteration in fact points to the same object in memory.
However, this behaviour is not present when using a parallel version of a `*apply` function, as in parallel computations the object is automatically deep-copied and passed to each task separately:
```{r, eval = FALSE}
library(parallel)
sum(model$b)
model <- initialise_default_Unscaled_nuts(model, L, temperature = 20)
y = mclapply(x, a.fun, model = model, mc.cores=5)
sum(model$b)
```
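If a worker function must not mutate the shared model at all, a defensive option is to build a private model object inside the function; a sketch:
```{r, eval = FALSE}
a.fun.safe <- function(x, n_species, n_basal, n_nut, masses, fw, L) {
  # construct a private model object so the caller's model is never touched
  m <- create_model_Unscaled_nuts(n_species, n_basal, n_nut, masses, fw)
  m <- initialise_default_Unscaled_nuts(m, L, temperature = 20)
  m$b <- m$b * 0 # modifies only this private copy
  x + 1
}
y <- lapply(x, a.fun.safe, n_species, n_basal, n_nut, masses, fw, L)
```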
```{r restore par, include=FALSE, echo=FALSE}
par(oldpar)
```
| /scratch/gouwar.j/cran-all/cranData/ATNr/inst/doc/ATNr.Rmd |