g_acc <- 9.81 # gravitational acceleration (m/s^2)
# Wave energy constant rho * g^2 / (64 * pi), with rho = 998 kg/m^3 (water at
# 20 C) and a factor of 1/1000 so the flux is expressed in kW/m
we_const <- 0.998 * g_acc^2 / (64 * pi)
#' Calculate the wave energy flux
#'
#' Calculates the wave energy flux (power per meter of wave crest) given
#' either (1) the significant wave height and peak period or (2) the wind speed
#' at 10m, fetch length and (optionally) water depth.
#'
#' Given the significant height (\emph{H}) and peak period (\emph{T}),
#' the wave energy flux is calculated as: \deqn{\frac{\rho g^2}{64 \pi} H^2 T},
#' where \eqn{\rho} is the density of water (998 kg/m^3) and \emph{g} is the
#' acceleration of gravity (9.81 m/s^2).
#'
#' If both \code{height} and \code{period} are missing, they are estimated from
#' the wind speed at 10m (\eqn{U_{10}}) and the fetch length (\emph{F}) as
#' described in Resio et al. (2003):
#' \deqn{{U_f}^2 = 0.001 (1.1 + 0.035 U_{10}) {U_{10}}^2} (friction velocity)
#' \deqn{\frac{g H}{{U_f}^2} = \min (0.0413 \sqrt{\frac{g F}{{U_f}^2}}, 211.5)}
#' \deqn{\frac{g T}{U_f} = \min (0.651 (\frac{g F}{{U_f}^2})^{1/3}, 239.8)}
#' If the depth (\emph{d}) is specified, it imposes a limit on the peak period:
#' \deqn{T_{max} = 9.78 \sqrt{\frac{d}{g}}} (in seconds)
#'
#' @param height Significant wave height, in meters.
#' @param period Peak wave period, in seconds.
#' @param wind Wind speed at 10m, in m/s.
#' @param fetch Fetch length, in meters.
#' @param depth Water depth, in meters.
#' @return Wave energy flux, in kW/m.
#' @references Resio, D.T., Bratos, S.M., and Thompson, E.F. (2003). Meteorology
#' and Wave Climate, Chapter II-2. Coastal Engineering Manual.
#' US Army Corps of Engineers, Washington DC, 72pp.
#' @examples
#' # With height and period arguments
#' wave_energy(8, 1)
#'
#' # With wind, fetch and depth arguments (must be named)
#' wave_energy(wind = 12, fetch = 15000, depth = 10)
#' @export
wave_energy <- function(height = NA, period = NA,
wind = NA, fetch = NA, depth = NA) {
if (all(is.na(height)) && all(is.na(period))) {
# Friction velocity squared
uf2 <- 0.001 * (1.1 + 0.035 * wind) * wind^2
# Non-dimensional fetch, height and period
fetch_nd <- fetch * g_acc / uf2
height_nd <- pmin(0.0413 * sqrt(fetch_nd), 211.5)
period_nd <- pmin(0.651 * fetch_nd^(1/3), 239.8)
# Calculate height and period (latter with depth limitation, if available)
# then wave energy
height <- height_nd * uf2 / g_acc
period <- period_nd * sqrt(uf2) / g_acc
if (any(!is.na(depth)))
period <- pmin(period, 9.78 * sqrt(depth / g_acc))
} else {
if (any(!is.na(wind)) || any(!is.na(fetch)) || any(!is.na(depth)))
warning("height or period provided; ignoring other arguments to wave_energy.")
}
we_const * height^2 * period
}
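
# Illustrative sketch (not part of the package): the wind/fetch path above can
# be traced by hand with the constants defined at the top of this file, e.g.
# for wind = 12 m/s and fetch = 15000 m:
# uf2 <- 0.001 * (1.1 + 0.035 * 12) * 12^2   # friction velocity squared
# fetch_nd <- 15000 * g_acc / uf2            # non-dimensional fetch
# height <- pmin(0.0413 * sqrt(fetch_nd), 211.5) * uf2 / g_acc
# period <- pmin(0.651 * fetch_nd^(1/3), 239.8) * sqrt(uf2) / g_acc
# we_const * height^2 * period               # flux in kW/m
# This should match wave_energy(wind = 12, fetch = 15000).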
# ==== end of file: /scratch/gouwar.j/cran-all/cranData/waver/R/wave_energy.R ====
#' @import sf
#' @importFrom methods as is
#' @importFrom stats setNames
NULL
# ==== end of file: /scratch/gouwar.j/cran-all/cranData/waver/R/waver.R ====
#' @title Aggregate data based on grouping variables and a user-provided
#' function
#' @name aggregate_spectra
#' @description Use grouping variables to collapse a spectral \code{data.frame}
#' by mean or median. Recommended for use after \code{\link{filter_spectra}}.
#' @author Jenna Hershberger \email{jmh579@@cornell.edu}
#' @importFrom stats aggregate median
#' @importFrom dplyr select group_by_at summarize across
#' @importFrom magrittr %>%
#' @importFrom tidyselect starts_with all_of everything
#' @usage aggregate_spectra(df, grouping.colnames, reference.value.colname,
#' agg.function)
#'
#' @param df \code{data.frame} object containing one or multiple columns of
#' grouping variables (must be consistent within each group), column of
#' reference values (optional), and columns of spectra. Spectral column names
#' must start with "X".
#' @param grouping.colnames Names of columns to be used as grouping variables.
#' Default is c("unique.id").
#' @param reference.value.colname Name of reference column to be aggregated
#' along with spectra. Default is "reference".
#' @param agg.function Name of function (string format) to be used for sample
#' aggregation. Must be either "mean" or "median". Default is "mean".
#'
#' @return \code{data.frame} object \code{df} aggregated based on grouping
#' column by \code{agg.function}
#' @export
#'
#' @examples
#' library(magrittr)
#' aggregated.test <- ikeogu.2017 %>%
#' dplyr::select(-TCC) %>%
#' na.omit() %>%
#' aggregate_spectra(
#' grouping.colnames = c("study.name"),
#' reference.value.colname = "DMC.oven",
#' agg.function = "mean"
#' )
#' aggregated.test[1:5, 1:5]
aggregate_spectra <- function(df,
grouping.colnames = c("unique.id"),
reference.value.colname = "reference",
agg.function = "mean") {
# Error handling
if (!(agg.function %in% c("mean", "median"))) {
rlang::abort('agg.function must be either "mean" or "median"')
}
if (nrow(df) != nrow(na.omit(df))) {
rlang::abort("df cannot contain missing values. Omit rows with missing values and try again.")
}
# Set aggregation function to match input
if (agg.function == "median") {
agg.function <-
function(x) {
suppressWarnings(median(as.numeric(as.character(x))))
}
} else {
agg.function <-
function(x) {
suppressWarnings(mean(as.numeric(as.character(x)), na.rm = TRUE))
}
}
# Aggregate data.frame
df.aggregated <- df %>%
dplyr::select(
tidyselect::all_of(grouping.colnames),
tidyselect::all_of(reference.value.colname),
tidyselect::starts_with("X")
) %>%
dplyr::group_by_at(grouping.colnames) %>%
dplyr::summarize(dplyr::across(
.cols = tidyselect::everything(),
.fns = agg.function
))
return(df.aggregated)
}
# ==== end of file: /scratch/gouwar.j/cran-all/cranData/waves/R/aggregate_spectra.R ====
#' @title Example vis-NIRS and reference dataset
#' @name ikeogu.2017
#' @description The `ikeogu.2017` data set contains raw vis-NIRS scans, total
#' carotenoid content, and cassava root dry matter content (using the oven
#' method) from the 2017 PLOS One paper by Ikeogu et al. This dataset contains
#' a subset of the original scans and reference values from the supplementary
#' files of the paper.
#' `ikeogu.2017` is a `data.frame` that contains the following columns:
#' \itemize{
#' \item study.name = Name of the study as described in Ikeogu et al. (2017).
#' \item sample.id = Unique identifier for each individual root sample
#' \item DMC.oven = Cassava root dry matter content, the percentage of dry
#' weight relative to fresh weight of a sample after oven drying.
#' \item TCC = Total carotenoid content (\eqn{\mu g/g}, unknown whether on a
#' fresh or dry weight basis) as measured by
#' high performance liquid chromatography
#' \item X350:X2500 = spectral reflectance measured with the QualitySpec Trek:
#' S-10016 vis-NIR spectrometer.
#' Each cell represents the mean of 150 scans on a single root at a single wavelength.
#' }
#' @author Original authors: Ikeogu, U.N., F. Davrieux, D. Dufour, H. Ceballos,
#' C.N. Egesi, and J. Jannink. Reformatted by Jenna Hershberger.
#' @references Ikeogu, U.N., F. Davrieux, D. Dufour, H. Ceballos, C.N. Egesi, et
#' al. 2017. Rapid analyses of dry matter content and carotenoids in fresh
#' cassava roots using a portable visible and near infrared spectrometer
#' (Vis/NIRS). PLOS One 12(12): 1–17. doi: 10.1371/journal.pone.0188918.
#'
#' @examples
#' library(magrittr)
#' library(ggplot2)
#' data(ikeogu.2017)
#' ikeogu.2017[1:10, 1:10]
#' ikeogu.2017 %>%
#' dplyr::select(-starts_with("X")) %>%
#' dplyr::group_by(study.name) %>%
#' tidyr::gather(trait, value, c(DMC.oven:TCC), na.rm = TRUE) %>%
#' ggplot2::ggplot(aes(x = study.name, y = value, fill = study.name)) +
#' facet_wrap(~trait, scales = "free_y", nrow = 2) +
#' geom_boxplot()
"ikeogu.2017"
# ==== end of file: /scratch/gouwar.j/cran-all/cranData/waves/R/data.R ====
#' @title Filter spectral data frame based on Mahalanobis distance
#' @name filter_spectra
#' @description Determine Mahalanobis distances of observations (rows) within a
#' given \code{data.frame} with spectral data. Option to filter out
#' observations based on these distances.
#' @details This function uses a chi-square distribution with 95\% cutoff where
#' degrees of freedom = number of wavelengths (columns) in the input
#' \code{data.frame}.
#' @references Johnson, R.A., and D.W. Wichern. 2007. Applied Multivariate
#' Statistical Analysis (6th Edition). pg 189
#' @author Jenna Hershberger \email{jmh579@@cornell.edu}
#' @usage filter_spectra(df, filter, return.distances, num.col.before.spectra,
#' window.size, verbose)
#'
#' @importFrom stats cov mahalanobis na.omit qchisq
#' @importFrom rlang abort
#'
#' @param df \code{data.frame} object containing columns of spectra and rows of
#' observations. Spectral columns must be labeled with an "X" and then the
#' wavelength (example: "X740" = 740nm). Left-most column must be unique ID.
#' May also contain columns of metadata between the unique ID and spectral
#' columns. Cannot contain any missing values. Metadata column names may not
#' start with "X".
#' @param filter boolean that determines whether or not the input
#' \code{data.frame} will be filtered. If \code{TRUE}, \code{df} will be
#' filtered according to squared Mahalanobis distance with a 95\% cutoff from
#' a chi-square distribution with degrees of freedom = number of spectral
#' columns. If \code{FALSE}, a column of squared Mahalanobis distances
#' \code{h.distances} will be added to the right side of \code{df} and all
#' rows will be returned. Default is \code{TRUE}.
#' @param return.distances boolean that determines whether a column of squared
#' Mahalanobis distances will be included in output \code{data.frame}. If
#' \code{TRUE}, a column of Mahalanobis distances for each row will be added
#' to the right side of \code{df}. Default is \code{FALSE}.
#' @param num.col.before.spectra number of columns to the left of the spectral
#' matrix in \code{df}. Default is 4.
#' @param window.size number defining the size of window to use when calculating
#' the covariance of the spectra (required to calculate Mahalanobis distance).
#' Default is 10.
#' @param verbose If \code{TRUE}, the number of rows removed through filtering
#' will be printed to the console. Default is \code{TRUE}.
#'
#' @return If \code{filter} is \code{TRUE}, returns filtered data frame
#' \code{df} and reports the number of rows removed. The Mahalanobis distance
#' with a cutoff of 95\% of chi-square distribution (degrees of freedom =
#' number of wavelengths) is used as filtering criteria. If \code{filter} is
#' \code{FALSE}, returns full input df with column \code{h.distances}
#' containing the Mahalanobis distance for each row.
#' @export
#'
#' @examples
#' library(magrittr)
#' filtered.test <- ikeogu.2017 %>%
#' dplyr::select(-TCC) %>%
#' na.omit() %>%
#' filter_spectra(
#' df = .,
#' filter = TRUE,
#' return.distances = TRUE,
#' num.col.before.spectra = 5,
#' window.size = 15
#' )
#' filtered.test[1:5, c(1:5, (ncol(filtered.test) - 5):ncol(filtered.test))]
filter_spectra <- function(df,
filter = TRUE,
return.distances = FALSE,
num.col.before.spectra = 4,
window.size = 10,
verbose = TRUE) {
# Strip off non-spectral columns
spectra <- df[, (num.col.before.spectra + 1):ncol(df)]
# Error handling
# mahalanobis function does not allow missing values or non-numeric data
if (nrow(spectra) != nrow(na.omit(spectra))) {
rlang::abort("Input data frame cannot contain missing values!
Remove them and try again.")
}
# Make subset of spectra using provided window size
# (otherwise the system is computationally singular)
spectra.subset <- spectra[, seq(1, ncol(spectra), window.size)]
# Calculate covariance of spectral matrix
spectra.cov <- tryCatch(
expr = cov(as.matrix(spectra.subset)),
error = function(err) {
message("Error in covariance matrix calculation.
Please increase 'window.size' and try again.")
print(err)
}
)
# Create list of Mahalanobis distances for each sample and bind to input df
h.distances <- mahalanobis(
x = spectra.subset, center = colMeans(spectra.subset),
cov = spectra.cov, tol = 1e-22
)
if (sum(h.distances <= 0) > 0) {
rlang::abort("Please increase window size.")
}
df.distances <- cbind(df, h.distances)
if (filter) {
# Filter input data based on square of Mahalanobis distance
chisq95 <- qchisq(p = .95, df = ncol(spectra))
df.filtered <- df.distances[which(h.distances < chisq95), ]
# How many samples were removed?
if (verbose) {
if (nrow(df) - nrow(df.filtered) != 1) {
cat(paste("\nRemoved", nrow(df) - nrow(df.filtered), "rows.\n"))
} else {
cat(paste("\nRemoved 1 row.\n"))
}
}
if (return.distances) {
return(df.filtered)
} else {
return(dplyr::select(df.filtered, -h.distances))
}
} else {
# Don't filter.
# Just return input data.frame with or without distances as rightmost column
if (return.distances) {
return(df.distances)
} else {
return(dplyr::select(df.distances, -h.distances))
}
}
}
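
# Illustrative sketch (not part of the package): for a data.frame whose
# spectral columns span X350:X2500 in 1 nm steps (2151 wavelengths, as in
# ikeogu.2017), the filtering cutoff on squared Mahalanobis distance would be
# cutoff <- qchisq(p = 0.95, df = 2151)
# and rows with h.distances at or above this cutoff are dropped when
# filter = TRUE.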
# ==== end of file: /scratch/gouwar.j/cran-all/cranData/waves/R/filter_spectra.R ====
#' @title Format multiple trials with or without overlapping genotypes into
#' training and test sets according to user-provided cross validation scheme
#' @name format_cv
#' @author Jenna Hershberger \email{jmh579@@cornell.edu}
#' @description Standalone function that is also used within
#' \code{\link{train_spectra}} to divide trials or studies into training and
#' test sets based on overlap in trial environments and genotype entries
#' @details Use of a cross-validation scheme requires a column in the input
#' \code{data.frame} named "genotype" to ensure proper sorting of training and
#' test sets. Variables \code{trial1} and \code{trial2} are required, while
#' \code{trial3} is optional.
#'
#' @param trial1 \code{data.frame} object that is for use only when
#' \code{cv.scheme} is provided. Contains the trial to be tested in subsequent
#' model training functions. The first column contains unique identifiers,
#' second contains genotypes, third contains reference values, followed by
#' spectral columns. Include no other columns to right of spectra! Column
#' names of spectra must start with "X", reference column must be named
#' "reference", and genotype column must be named "genotype".
#' @param trial2 \code{data.frame} object that is for use only when
#' \code{cv.scheme} is provided. This data.frame contains a trial that has
#' overlapping genotypes with \code{trial1} but that were grown in a different
#' site/year (different environment). Formatting must be consistent with
#' \code{trial1}.
#' @param trial3 \code{data.frame} object that is for use only when
#' \code{cv.scheme} is provided. This data.frame contains a trial that may or
#' may not contain genotypes that overlap with \code{trial1}. Formatting must
#' be consistent with \code{trial1}.
#' @param cv.scheme A cross validation (CV) scheme from Jarquín et al., 2017.
#' Options for \code{cv.scheme} include:
#' \itemize{
#' \item "CV1": untested lines in tested environments
#' \item "CV2": tested lines in tested environments
#' \item "CV0": tested lines in untested environments
#' \item "CV00": untested lines in untested environments
#' }
#' @param stratified.sampling If \code{TRUE}, training and test
#' sets will be selected using stratified random sampling. Default is
#' \code{TRUE}.
#' @param proportion.train Fraction of samples to include in the training set.
#' Default is 0.7.
#' @param seed Number used in the function \code{set.seed()} for reproducible
#' randomization. If \code{NULL}, no seed is set. Default is \code{NULL}.
#' @param remove.genotype boolean that, if \code{TRUE}, removes the "genotype"
#' column from the output \code{data.frame}. Default is
#' \code{FALSE}.
#'
#' @references Jarquín, D., C. Lemes da Silva, R. C. Gaynor, J. Poland, A.
#' Fritz, R. Howard, S. Battenfield, and J. Crossa. 2017. Increasing
#' genomic-enabled prediction accuracy by modeling genotype × environment
#' interactions in Kansas wheat. Plant Genome 10(2):1-15.
#' <doi:10.3835/plantgenome2016.12.0130>
#'
#' @importFrom dplyr group_by ungroup filter mutate rename select summarize
#' @importFrom tidyr nest unnest drop_na
#' @importFrom magrittr %>%
#' @importFrom rlang .data abort
#' @importFrom caret createDataPartition createResample
#' @importFrom tidyselect starts_with
#'
#' @return List of data.frames ($train.set, $test.set) compiled according to
#' user-provided cross validation scheme.
#' @export
#'
#' @examples
#' # Must have a column called "genotype", so we'll create a fake one for now
#' # We will use CV00, which does not require any overlap in genotypes
#' # In real scenarios, CV schemes that rely on genotypes should not be applied
#' # when genotypes are unknown, as in this case.
#' library(magrittr)
#' trials <- ikeogu.2017 %>%
#' dplyr::mutate(genotype = 1:nrow(ikeogu.2017)) %>% # fake for this example
#' dplyr::rename(reference = DMC.oven) %>%
#' dplyr::select(
#' study.name, sample.id, genotype, reference,
#' tidyselect::starts_with("X")
#' )
#' trial1 <- trials %>%
#' dplyr::filter(study.name == "C16Mcal") %>%
#' dplyr::select(-study.name)
#' trial2 <- trials %>%
#' dplyr::filter(study.name == "C16Mval") %>%
#' dplyr::select(-study.name)
#' cv.list <- format_cv(
#' trial1 = trial1, trial2 = trial2, cv.scheme = "CV00",
#' stratified.sampling = FALSE, remove.genotype = TRUE
#' )
#' cv.list$train.set[1:5, 1:5]
#' cv.list$test.set[1:5, 1:5]
format_cv <- function(trial1,
trial2,
trial3 = NULL,
cv.scheme,
stratified.sampling = TRUE,
proportion.train = 0.7,
seed = NULL,
remove.genotype = FALSE) {
# Error handling
if (!cv.scheme %in% c("CV0", "CV00", "CV1", "CV2")) {
rlang::abort("cv.scheme must be 'CV0', 'CV00', 'CV1', or 'CV2'")
}
if (!"genotype" %in% colnames(trial1) || !"genotype" %in% colnames(trial2)) {
rlang::abort("trial1 and trial2 must each have a column named 'genotype'")
}
if (proportion.train > 1 || proportion.train < 0) {
rlang::abort("'proportion.train' must be a number between 0 and 1")
}
overlapping.genos <- trial1[which(trial1$genotype %in%
rbind(trial2, trial3)[, "genotype"]), ]
if ((cv.scheme %in% c("CV2", "CV0")) && (nrow(overlapping.genos) < 1)) {
rlang::abort("There are no overlapping genotypes between the trials
provided, so the CV scheme you have chosen cannot be used.
Please choose another, more appropriate CV scheme (CV1 or CV00).")
}
if (!is.null(seed)) {
set.seed(seed)
}
t1 <- trial1 %>%
dplyr::group_by(.data$genotype) %>%
tidyr::nest(data = c(-.data$genotype))
# Random sampling
if (!stratified.sampling) {
train.index <- sort(sample(
x = seq_len(nrow(t1)),
size = proportion.train * nrow(t1),
replace = FALSE, prob = NULL
))
} else if (stratified.sampling) {
# Stratified sampling based on genotype mean of reference values
t1_summary <- trial1 %>%
dplyr::group_by(.data$genotype) %>%
dplyr::summarize(reference.mean = mean(.data$reference))
train.index <- caret::createDataPartition(
y = t1_summary$reference.mean,
p = proportion.train
) %>%
unlist()
}
# t1.a is always the test set
t1.a <- t1[-train.index, ] %>%
tidyr::unnest(c(-.data$genotype)) %>%
dplyr::ungroup()
t1.b <- t1[train.index, ] %>%
tidyr::unnest(c(-.data$genotype)) %>%
dplyr::ungroup()
# we want t2.a to be the same genotypes as in t1.a
# and t2.b to be the same genotypes as t1.b
t2.a <- trial2[which(trial2$genotype %in% t1.a$genotype), ]
t2.b <- trial2[which(trial2$genotype %in% t1.b$genotype), ]
if (cv.scheme == "CV0") {
# Tested lines in untested environment
test.set <- t1.a
train.set <- rbind(trial2, trial3)
}
if (cv.scheme == "CV00") {
# Untested lines in untested environment
# check for overlapping genotypes and remove
# from either training or test set
trial2.no.overlap <- trial2 %>%
dplyr::filter(!.data$genotype %in% t1.a$genotype)
if (!is.null(trial3)) {
trial3.no.overlap <- trial3 %>%
dplyr::filter(!.data$genotype %in% t1.a$genotype)
} else {
trial3.no.overlap <- NULL
}
test.set <- t1.a
train.set <- rbind(trial2.no.overlap, trial3.no.overlap)
}
if (cv.scheme == "CV1") {
# Untested lines in tested environment
test.set <- t1.a
train.set <- rbind(t1.b, t2.b)
}
if (cv.scheme == "CV2") {
# Tested lines in tested environment
test.set <- t1.a
train.set <- rbind(t1.b, trial2)
}
if (remove.genotype) {
train.set <- train.set %>%
dplyr::select(-.data$genotype) %>%
tidyr::drop_na()
test.set <- test.set %>%
dplyr::select(-.data$genotype) %>%
tidyr::drop_na()
}
return(list(
train.set = train.set,
test.set = test.set
))
}
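
# Summary of set composition by cv.scheme, derived from the code above
# (t1.a and t1.b are the held-out and retained portions of trial1; t2.b is the
# subset of trial2 whose genotypes also appear in t1.b):
#   CV2  (tested lines, tested envs):     train = t1.b + trial2,   test = t1.a
#   CV1  (untested lines, tested envs):   train = t1.b + t2.b,     test = t1.a
#   CV0  (tested lines, untested envs):   train = trial2 + trial3, test = t1.a
#   CV00 (untested lines, untested envs): train = trial2 + trial3 minus any
#                                         genotypes in t1.a,       test = t1.a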
# ==== end of file: /scratch/gouwar.j/cran-all/cranData/waves/R/format_cv.R ====
#' @title Plot spectral data, highlighting outliers as identified using
#' Mahalanobis distance
#' @description Generates a \code{\link[ggplot2]{ggplot}} object of given
#' spectra, with wavelength on the x axis and given spectral values on the y.
#' Mahalanobis distance is used to calculate outliers, which are both
#' identified on the plot. Rows from the original dataframe are printed to the
#' console for each outlier that is identified.
#' @author Jenna Hershberger \email{jmh579@@cornell.edu}
#'
#' @inheritParams filter_spectra
#' @param df \code{data.frame} object containing columns of spectra.
#' Spectral columns must be labeled with an "X" and then the wavelength
#' (example: "X740" = 740nm). Left-most column must be unique ID. May also
#' contain columns of metadata between the unique ID and spectral columns.
#' Cannot contain any missing values. Metadata column names may not start
#' with "X".
#' @param num.col.before.spectra Number of columns to the left of the spectral
#' matrix (including unique ID). Default is 1.
#' @param detect.outliers Boolean indicating whether spectra should be filtered
#' before plotting. If \code{TRUE}, outliers are indicated by color in the
#' resulting plot. If \code{verbose} is also set to \code{TRUE}, outlier
#' metadata will be printed to the console. Default is \code{TRUE}.
#' @param color String or vector of strings indicating colors to be passed to
#' \code{\link[ggplot2]{ggplot}}. Defaults to the standard
#' \code{\link[ggplot2]{ggplot}} color palette.
#' @param alternate.title String to be used as plot title. If
#' \code{detect.outliers} is \code{TRUE}, a descriptive title is supplied by
#' default. If \code{detect.outliers} is \code{FALSE}, no title is used by
#' default.
#' @param wavelengths DEPRECATED \code{wavelengths} is no
#' longer supported; this information is now inferred from
#' \code{df} column names
#'
#' @importFrom dplyr select mutate distinct
#' @importFrom tidyselect starts_with
#' @importFrom readr parse_number
#' @importFrom ggplot2 ggplot aes geom_line theme_minimal labs scale_color_manual
#' @importFrom scales hue_pal
#' @importFrom tidyr gather
#' @importFrom stringr str_extract
#' @importFrom rlang .data abort
#' @importFrom lifecycle deprecated
#'
#' @return If verbose, prints unique ID and metadata for rows identified as
#' outliers. Returns plot of spectral data with non-outliers in blue and
#' outliers in red. X-axis is wavelengths and y-axis is spectral values.
#' @export
#'
#' @examples
#' \donttest{
#' library(magrittr)
#' ikeogu.2017 %>%
#' dplyr::rename(unique.id = sample.id) %>%
#' dplyr::select(unique.id, dplyr::everything(), -TCC) %>%
#' na.omit() %>%
#' plot_spectra(
#' df = .,
#' num.col.before.spectra = 5,
#' window.size = 15,
#' detect.outliers = TRUE,
#' color = NULL,
#' alternate.title = NULL,
#' verbose = TRUE
#' )
#' }
plot_spectra <- function(df,
num.col.before.spectra = 1,
window.size = 10,
detect.outliers = TRUE,
color = NULL,
alternate.title = NULL,
verbose = TRUE,
wavelengths = deprecated()) {
# Strip off non-spectral columns
spectra <- df %>%
dplyr::select(tidyselect::starts_with("X"))
# Error handling ---------------------------
# mahalanobis function does not allow missing values or non-numeric data
if (nrow(spectra) != nrow(na.omit(spectra))) {
rlang::abort("Input data frame cannot contain missing values!
Remove them and try again.")
}
if (detect.outliers) { # Outlier detection
# Color-related handling ---------------------------
if (!is.null(color) && length(color) == 1) {
rlang::abort("Two colors are required but only one was supplied.
Please add another value or set 'color' to 'NULL'")
}
if (!is.null(color) && length(color) > 2) {
warning("Two colors are required but more than two were supplied.
Only the first two will be used.")
color <- color[1:2]
}
if (is.null(color)) {
color <- scales::hue_pal()(2)
}
if (lifecycle::is_present(wavelengths)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "plot_spectra(wavelengths)",
details = "Wavelength specification is now inferred from column names."
)
}
wavelengths <- spectra %>%
colnames() %>%
readr::parse_number()
# Plot title ---------------------------
if (is.null(alternate.title)) {
plot.title <- paste0("Chi-Square (", length(wavelengths),
" df) 95% cutoff for Mahalanobis distance")
} else {
plot.title <- alternate.title
}
# Calculate outlier cutoff ---------------------------
chisq95 <- qchisq(.95, df = length(wavelengths))
# Calculate Mahalanobis distribution for each scan and identify outliers ---
filtered.df <- filter_spectra(
df = df, filter = FALSE, return.distances = TRUE,
num.col.before.spectra = num.col.before.spectra,
window.size = window.size, verbose = FALSE
) %>%
mutate(Outlier = ifelse(.data$h.distances > chisq95, TRUE, FALSE))
# Prepare data frame for plotting ---------------------------
hdists.df <- filtered.df %>%
tibble::rownames_to_column(var = "rownames") %>%
dplyr::select(.data$rownames, .data$h.distances, .data$Outlier,
tidyselect::starts_with("X")) %>%
tidyr::gather(key = "wl", value = "s.value",
tidyselect::starts_with("X")) %>%
dplyr::mutate(wl = as.numeric(readr::parse_number(.data$wl)))
# Create plot ---------------------------
spectral.plot <- ggplot2::ggplot(
data = hdists.df,
aes(
x = .data$wl,
y = .data$s.value,
group = .data$rownames,
color = .data$Outlier
)
) +
geom_line(alpha = .5) +
geom_line(data = subset(hdists.df, hdists.df$Outlier == TRUE),
alpha = .7) +
scale_color_manual(values = color, name = "Outlier?") +
theme_minimal() +
labs(
title = plot.title,
x = "Wavelength",
y = "Spectral Value"
)
# Print metadata for each outlier ---------------------------
if (verbose) {
if (sum(hdists.df$Outlier) > 0) {
cat("Outliers:\n")
print(
filtered.df %>%
dplyr::filter(.data$h.distances > chisq95) %>%
dplyr::select(-tidyselect::starts_with("X")) %>%
distinct()
)
} else {
cat("No outliers detected.\n")
}
}
} else { # No outlier detection
# Color handling ---------------------------
if (!is.null(color) && length(color) > 1) {
warning("Only one color is required but more than one were supplied.
Only the first value will be used")
color <- color[1]
}
if (is.null(color)) {
color <- scales::hue_pal()(1)
}
# Plot title (no title is shown by default when outliers are not detected)
if (is.null(alternate.title)) {
plot.title <- NULL
} else {
plot.title <- alternate.title
}
# Prepare data frame for plotting ---------------------------
prepped.df <- df %>%
tibble::rownames_to_column(var = "rownames") %>%
dplyr::select(.data$rownames, tidyselect::starts_with("X")) %>%
tidyr::gather(key = "wl", value = "s.value",
tidyselect::starts_with("X")) %>%
dplyr::mutate(wl = as.numeric(stringr::str_extract(.data$wl,
"\\-*\\d+\\.*\\d*")))
spectral.plot <- ggplot(
data = prepped.df,
aes(
x = .data$wl,
y = .data$s.value,
group = .data$rownames
)
) +
geom_line(alpha = .5, color = color) +
theme_minimal() +
labs(title = plot.title, x = "Wavelength", y = "Spectral Value")
}
return(spectral.plot)
}
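
# The function returns a ggplot object, so the plot can be stored and written
# to a file; a minimal sketch (my.spectra is a hypothetical input data.frame):
# p <- plot_spectra(df = my.spectra, num.col.before.spectra = 1)
# ggplot2::ggsave("spectra_plot.png", plot = p, width = 8, height = 5)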
# ==== end of file: /scratch/gouwar.j/cran-all/cranData/waves/R/plot_spectra.R ====
#' @title Use provided model object to predict trait values with input dataset
#' @name predict_spectra
#' @description Loads an existing model and cross-validation performance
#' statistics (created with \code{\link{save_model}}) and makes predictions
#' based on new spectra.
#' @author Jenna Hershberger \email{jmh579@@cornell.edu}
#' @inheritParams test_spectra
#' @param input.data \code{data.frame} object of spectral data for input into a
#' spectral prediction model. First column contains unique identifiers
#' followed by spectral columns. Include no other columns to right of spectra!
#' Column names of spectra must start with "X".
#' @param model.stats.location String containing file path (including file name)
#' to save location of "(model.name)_stats.csv" as output from the
#' \code{\link{save_model}} function.
#' @param model.location String containing file path (including file name) to
#' location where the trained model ("(model.name).Rds") was saved as output
#' by the \code{\link{save_model}} function.
#' @param wavelengths DEPRECATED \code{wavelengths} is no
#' longer supported; this information is now inferred from \code{input.data}
#' column names
#'
#' @importFrom stats predict
#' @importFrom utils read.csv
#' @importFrom lifecycle deprecated
#'
#' @return \code{data.frame} object of predictions for each sample (row). First
#' column is unique identifier supplied by \code{input.data} and second is
#' predicted values
#' @export
#'
#' @examples
#' \dontrun{
#' ikeogu.2017 %>%
#' dplyr::select(sample.id, dplyr::starts_with("X")) %>%
#' predict_spectra(
#' input.data = .,
#' model.stats.location = paste0(
#' getwd(),
#' "/my_model_stats.csv"
#' ),
#' model.location = paste0(getwd(), "/my_model.Rds")
#' )
#' }
#'
predict_spectra <- function(input.data,
model.stats.location,
model.location,
model.method = "pls",
wavelengths = deprecated()) {
if (lifecycle::is_present(wavelengths)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "predict_spectra(wavelengths)",
details = "Wavelength specification is now inferred from column names."
)
}
# Load model and model statistics ---------------------------
model.stats <- read.csv(model.stats.location)
model.object <- readRDS(model.location)
final.model <- model.object
# Match best pretreatment method with index number ---------------------------
best.pretreatment.num <- match(
model.stats$Pretreatment[1],
c(
"Raw_data", "SNV", "SNV1D", "SNV2D", "D1",
"D2", "SG", "SNVSG", "SGD1", "SG.D1W5",
"SG.D1W11", "SG.D2W5", "SG.D2W11"
)
)
# Use pretreat_spectra() function to format input.data and preprocess
# if needed ------------------------------------------------------------------
pretreated <- pretreat_spectra(
df = input.data, test.data = NULL,
pretreatment = best.pretreatment.num
)
# Predict values using imported model, pretreated/formatted input data,
# and method of choice -------------------------------------------------------
if (model.method == "pls") {
# Extract best number of components
best.ncomp <- model.stats$best.ncomp_mode[1]
# Get predictions
predicted.values <- as.numeric(predict(final.model,
newdata = as.matrix(pretreated[2:ncol(pretreated)]),
ncomp = best.ncomp
))
} else if (model.method == "svmLinear") {
predicted.values <- as.numeric(predict(final.model, newdata = pretreated))
} else if (model.method == "svmRadial") {
predicted.values <- as.numeric(predict(final.model, newdata = pretreated))
} else if (model.method == "rf") {
best.ntree <- final.model$ntree
best.mtry <- final.model$mtry
predicted.values <- as.numeric(predict(final.model,
newdata = pretreated,
ntree = best.ntree,
mtry = best.mtry
))
}
# Bind unique identifiers from the input data to the predicted values --------
predicted.df <- cbind(input.data[, 1], data.frame(predicted.values))
colnames(predicted.df) <- c(colnames(input.data)[1], "predicted.values")
return(predicted.df)
}
# ==== end of file: /scratch/gouwar.j/cran-all/cranData/waves/R/predict_spectra.R ====
#' @title Pretreat spectral data according to user-designated method
#' @name pretreat_spectra
#' @description Pretreatment, also known as preprocessing, is often used to
#' increase the signal to noise ratio in vis-NIR datasets. The \emph{waves}
#' function \code{pretreat_spectra} applies common spectral pretreatment
#' methods such as standard normal variate and the Savitzky-Golay filter.
#' @author Jenna Hershberger \email{jmh579@@cornell.edu}
#' @param df \code{data.frame} object containing spectral data. First column(s)
#' (optional) include metadata (with or without reference value column)
#' followed by spectral columns. Spectral column names must be formatted as
#' "X" followed by the wavelength. Include no other columns to right of
#' spectra! No missing values permitted.
#' @param test.data \code{data.frame} object with same format as train.data.
#' Will be appended to \code{df} during pretreatment so that the same
#' transformations are applied to each row. Default is \code{NULL}.
#' @param pretreatment Number or list of numbers 1:13 corresponding to
#' desired pretreatment method(s):
#' \enumerate{
#' \item Raw data (default)
#' \item Standard normal variate (SNV)
#' \item SNV and first derivative
#' \item SNV and second derivative
#' \item First derivative
#' \item Second derivative
#' \item Savitzky–Golay filter (SG)
#' \item SNV and SG
#' \item Gap-segment derivative (window size = 11)
#' \item SG and first derivative (window size = 5)
#' \item SG and first derivative (window size = 11)
#' \item SG and second derivative (window size = 5)
#' \item SG and second derivative (window size = 11)
#' }
#' @param wavelengths DEPRECATED \code{wavelengths} is no
#' longer supported; this information is now inferred from \code{df}
#' column names
#' @param preprocessing.method DEPRECATED \code{preprocessing.method}
#' has been renamed "pretreatment"
#'
#' @importFrom prospectr standardNormalVariate savitzkyGolay gapDer
#' @importFrom tidyselect starts_with
#' @importFrom dplyr select
#' @importFrom tidyr drop_na
#' @importFrom magrittr %>%
#' @importFrom lifecycle is_present deprecate_warn deprecated
#'
#' @return Pretreated \code{df} (or list of \code{data.frame}s) with
#' reference column intact.
#' @export
#'
#' @examples
#' pretreat_spectra(df = ikeogu.2017, pretreatment = 3)[1:5, 1:5]
pretreat_spectra <- function(df,
test.data = NULL,
pretreatment = 1,
preprocessing.method = deprecated(),
wavelengths = deprecated()) {
# Format input data frames for processing. Combine training.data and test.data
# so that the same transformations are applied to all samples
if (!is.null(test.data)) {
# bind training to test for pretreatment
df <- rbind(df, test.data)
}
if (lifecycle::is_present(preprocessing.method)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "pretreat_spectra(preprocessing.method)",
with = "pretreat_spectra(pretreatment)"
)
pretreatment <- preprocessing.method
}
if (lifecycle::is_present(wavelengths)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "pretreat_spectra(wavelengths)",
details = "Wavelength specification is now inferred from column names."
)
}
# Remove rows with missing spectral data
# (shouldn't be any, but this is just in case)
df <- df %>% tidyr::drop_na(tidyselect::starts_with("X"))
# Split spectra from metadata turn spectra into matrix (spc)
spc <- df %>%
dplyr::select(tidyselect::starts_with("X")) %>%
data.matrix()
metadata <- df %>% dplyr::select(-tidyselect::starts_with("X"))
# Add preprocessed spectral data to list
processed.list <- list(
# 1. Raw data
Raw_data = spc,
# Scatter correction (to remove noise)
# 2. Standard normal variate (SNV)
SNV = standardNormalVariate(spc),
# 3. SNV + 1st derivative
SNV1D = t(diff(t(standardNormalVariate(spc)), differences = 1)),
# 4. SNV + 2nd derivative
SNV2D = t(diff(t(standardNormalVariate(spc)), differences = 2)),
# 5. First derivative
D1 = t(diff(t(spc), differences = 1)),
# 6. Second derivative
D2 = t(diff(t(spc), differences = 2)),
# Smoothing filters
# 7. Savitzky-Golay
SG = savitzkyGolay(spc, p = 2, w = 11, m = 0),
# 8. SNV + Savitzky-Golay
SNVSG = savitzkyGolay(standardNormalVariate(spc), p = 2, w = 11, m = 0),
# 9. Savitzky-Golay + Gap-segment derivative (gapDer) algorithms
SGD1 = gapDer(spc, m = 1, w = 11, s = 9),
# 10. Savitzky-Golay filter + 1st derivative - window size = 5
SG.D1W5 = savitzkyGolay(spc, p = 2, w = 5, m = 1),
# 11. Savitzky-Golay filter + 1st derivative - window size = 11
SG.D1W11 = savitzkyGolay(spc, p = 2, w = 11, m = 1),
# 12. Savitzky-Golay filter + 2nd derivative - window size = 5
SG.D2W5 = savitzkyGolay(spc, p = 2, w = 5, m = 2),
# 13. Savitzky-Golay filter + 2nd derivative - window size = 11
SG.D2W11 = savitzkyGolay(spc, p = 2, w = 11, m = 2)
)
# Make each matrix in list into a data frame and add back metadata and
# reference column if present
processed.meta.list <- lapply(
seq_along(processed.list),
function(x) cbind(metadata, processed.list[[x]])
)
names(processed.meta.list) <- names(processed.list)
# Choose which to return
if (length(pretreatment) == 1) {
# Return a single processed data frame if only one pretreatment was chosen.
processed <- processed.meta.list[[pretreatment]]
} else {
# Return a list of processed data frames if more
# than one pretreatment was chosen.
processed <- processed.meta.list[c(pretreatment)]
}
return(processed)
}
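
# A minimal sketch (not part of the package; assumes the ikeogu.2017 example
# data is loaded): requesting several pretreatments returns a named list of
# data.frames, one per method, named in the order enumerated in the
# documentation above.
# pretreated.list <- pretreat_spectra(df = ikeogu.2017, pretreatment = c(1, 2, 7))
# names(pretreated.list)        # "Raw_data" "SNV" "SG"
# lapply(pretreated.list, dim)  # dimensions of each pretreated data.frame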
# ==== end of file: /scratch/gouwar.j/cran-all/cranData/waves/R/pretreat_spectra.R ====
#' Functions renamed in waves 0.2.0
#'
#' @description
#' `r lifecycle::badge('deprecated')`
#'
#' waves 0.2.0 renamed a number of functions to ensure that every function
#' name adheres to the tidyverse style guide.
#'
#' * `AggregateSpectra()` -> `aggregate_spectra()`
#' * `DoPreprocessing()` -> `pretreat_spectra()`
#' * `FilterSpectra()` -> `filter_spectra()`
#' * `FormatCV()` -> `format_cv()`
#' * `PlotSpectra()` -> `plot_spectra()`
#' * `PredictFromSavedModel()` -> `predict_spectra()`
#' * `SaveModel()` -> `save_model()`
#' * `TestModelPerformance()` -> `test_spectra()`
#' * `TrainSpectralModel()` -> `train_spectra()`
#'
#'
#' @keywords internal
#' @name rename
#' @aliases NULL
NULL
#' @rdname rename
#' @export
AggregateSpectra <- function(
df,
grouping.colnames = c("unique.id"),
reference.value.colname = "reference",
agg.function = "mean") {
lifecycle::deprecate_warn("0.2.0", "AggregateSpectra()",
"aggregate_spectra()")
aggregate_spectra(
df,
grouping.colnames = grouping.colnames,
reference.value.colname = reference.value.colname,
agg.function = agg.function
)
}
#' @export
#' @rdname rename
DoPreprocessing <- function(
df,
test.data = NULL,
pretreatment = 1) {
lifecycle::deprecate_warn("0.2.0", "DoPreprocessing()", "pretreat_spectra()")
pretreat_spectra(
df,
test.data = test.data,
pretreatment = pretreatment)
}
#' @rdname rename
#' @export
FilterSpectra <- function(
df,
filter = TRUE,
return.distances = FALSE,
num.col.before.spectra = 4,
window.size = 10,
verbose = TRUE) {
lifecycle::deprecate_warn("0.2.0", "FilterSpectra()", "filter_spectra()")
filter_spectra(
df,
filter = filter,
return.distances = return.distances,
num.col.before.spectra = num.col.before.spectra,
window.size = window.size,
verbose = verbose
)
}
#' @export
#' @rdname rename
FormatCV <- function(
trial1,
trial2,
trial3 = NULL,
cv.scheme,
stratified.sampling = TRUE,
proportion.train = 0.7,
seed = NULL,
remove.genotype = FALSE) {
lifecycle::deprecate_warn("0.2.0", "FormatCV()", "format_cv()")
format_cv(
trial1,
trial2,
trial3 = trial3,
cv.scheme = cv.scheme,
stratified.sampling = stratified.sampling,
proportion.train = proportion.train,
seed = seed,
remove.genotype = remove.genotype
)
}
#' @export
#' @rdname rename
PlotSpectra <- function(
df,
num.col.before.spectra = 1,
window.size = 10,
detect.outliers = TRUE,
color = NULL,
alternate.title = NULL,
verbose = TRUE) {
lifecycle::deprecate_warn("0.2.0", "PlotSpectra()", "plot_spectra()")
plot_spectra(
df,
num.col.before.spectra = num.col.before.spectra,
window.size = window.size,
detect.outliers = detect.outliers,
color = color,
alternate.title = alternate.title,
verbose = verbose
)
}
#' @export
#' @rdname rename
PredictFromSavedModel <- function(
input.data,
model.stats.location,
model.location,
model.method = "pls") {
lifecycle::deprecate_warn("0.2.0", "PredictFromSavedModel()",
"predict_spectra()")
predict_spectra(
input.data,
model.stats.location,
model.location,
model.method = model.method
)
}
#' @export
#' @rdname rename
SaveModel <- function(
df,
save.model = TRUE,
pretreatment = 1,
model.save.folder = NULL,
model.name = "PredictionModel",
best.model.metric = "RMSE",
k.folds = 5,
proportion.train = 0.7,
tune.length = 50,
model.method = "pls",
num.iterations = 10,
stratified.sampling = TRUE,
cv.scheme = NULL,
trial1 = NULL,
trial2 = NULL,
trial3 = NULL,
verbose = TRUE) {
lifecycle::deprecate_warn("0.2.0", "SaveModel()", "save_model()")
save_model(df,
write.model = save.model,
pretreatment = pretreatment,
model.save.folder = model.save.folder,
model.name = model.name,
best.model.metric = best.model.metric,
k.folds = k.folds,
proportion.train = proportion.train,
tune.length = tune.length,
model.method = model.method,
num.iterations = num.iterations,
stratified.sampling = stratified.sampling,
cv.scheme = cv.scheme,
trial1 = trial1,
trial2 = trial2,
trial3 = trial3,
verbose = verbose
)
}
#' @export
#' @rdname rename
TestModelPerformance <- function(
train.data,
num.iterations,
test.data = NULL,
pretreatment = 1,
k.folds = 5,
proportion.train = 0.7,
tune.length = 50,
model.method = "pls",
best.model.metric = "RMSE",
stratified.sampling = TRUE,
cv.scheme = NULL,
trial1 = NULL,
trial2 = NULL,
trial3 = NULL,
split.test = FALSE,
verbose = TRUE) {
lifecycle::deprecate_warn("0.2.0", "TestModelPerformance()", "test_spectra()")
test_spectra(
train.data,
num.iterations,
test.data = test.data,
pretreatment = pretreatment,
k.folds = k.folds,
proportion.train = proportion.train,
tune.length = tune.length,
model.method = model.method,
best.model.metric = best.model.metric,
stratified.sampling = stratified.sampling,
cv.scheme = cv.scheme,
trial1 = trial1,
trial2 = trial2,
trial3 = trial3,
split.test = split.test,
verbose = verbose
)
}
#' @export
#' @rdname rename
TrainSpectralModel <- function(
df,
num.iterations,
test.data = NULL,
k.folds = 5,
proportion.train = 0.7,
tune.length = 50,
model.method = "pls",
best.model.metric = "RMSE",
stratified.sampling = TRUE,
cv.scheme = NULL,
trial1 = NULL,
trial2 = NULL,
trial3 = NULL,
split.test = FALSE,
verbose = TRUE) {
lifecycle::deprecate_warn("0.2.0", "TrainSpectralModel()", "train_spectra()")
train_spectra(
df,
num.iterations,
test.data = test.data,
k.folds = k.folds,
proportion.train = proportion.train,
tune.length = tune.length,
model.method = model.method,
best.model.metric = best.model.metric,
stratified.sampling = stratified.sampling,
cv.scheme = cv.scheme,
trial1 = trial1,
trial2 = trial2,
trial3 = trial3,
split.test = split.test,
verbose = verbose
)
}
# ==== end of file: /scratch/gouwar.j/cran-all/cranData/waves/R/rename.R ====
#' @title Save spectral prediction model and model performance statistics
#' @name save_model
#' @description Given a set of pretreatment methods, saves the best spectral
#' prediction model and model statistics to \code{model.save.folder} as
#' \code{model.name.Rds} and \code{model.name_stats.csv} respectively. If only
#' one pretreatment method is supplied, results from that method are stored.
#' @details Wrapper that uses \code{\link{pretreat_spectra}},
#' \code{\link{format_cv}}, and \code{\link{train_spectra}} functions.
#' @author Jenna Hershberger \email{jmh579@@cornell.edu}
#'
#' @inheritParams test_spectra
#' @inheritParams train_spectra
#' @inheritParams pretreat_spectra
#' @param write.model If \code{TRUE}, the trained model will be saved in .Rds
#' format to the location specified by \code{model.save.folder}. If
#' \code{FALSE}, the best model will be output by the function but will not
#' save to a file. Default is \code{TRUE}.
#' @param model.save.folder Path to folder where model will be saved. If not
#' provided, will save to working directory.
#' @param model.name Name that model will be saved as in
#' \code{model.save.folder}. Default is "PredictionModel".
#' @param autoselect.preprocessing DEPRECATED
#' \code{autoselect.preprocessing = FALSE} is no longer supported. If
#' multiple pretreatment methods are supplied, the best will be automatically
#' selected as the model to be saved.
#'
#' @importFrom utils write.csv
#' @importFrom rlang abort
#' @importFrom lifecycle deprecated
#' @importFrom tibble add_column
#' @importFrom magrittr %>%
#'
#' @return List of model stats (in \code{data.frame}) and trained model object.
#' If the parameter \code{write.model} is TRUE, both objects are saved to
#' \code{model.save.folder}. To use the optimally trained model for
#' predictions, use tuned parameters from \code{$bestTune}.
#' @export
#'
#' @examples
#' \donttest{
#' library(magrittr)
#' test.model <- ikeogu.2017 %>%
#' dplyr::filter(study.name == "C16Mcal") %>%
#' dplyr::rename(reference = DMC.oven,
#' unique.id = sample.id) %>%
#' dplyr::select(unique.id, reference, dplyr::starts_with("X")) %>%
#' na.omit() %>%
#' save_model(
#' df = .,
#' write.model = FALSE,
#' pretreatment = 1:13,
#' model.name = "my_prediction_model",
#' tune.length = 3,
#' num.iterations = 3
#' )
#' summary(test.model$best.model)
#' test.model$best.model.stats
#' }
save_model <- function(df,
write.model = TRUE,
pretreatment = 1,
model.save.folder = NULL,
model.name = "PredictionModel",
best.model.metric = "RMSE",
k.folds = 5,
proportion.train = 0.7,
tune.length = 50,
model.method = "pls",
num.iterations = 10,
stratified.sampling = TRUE,
cv.scheme = NULL,
trial1 = NULL,
trial2 = NULL,
trial3 = NULL,
seed = 1,
verbose = TRUE,
save.model = deprecated(),
wavelengths = deprecated(),
autoselect.preprocessing = deprecated(),
preprocessing.method = deprecated()) {
# Deprecate warnings
if (lifecycle::is_present(save.model)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "save_model(save.model)",
with = "save_model(write.model)"
)
write.model <- save.model
}
if (lifecycle::is_present(wavelengths)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "save_model(wavelengths)",
details = "Wavelength specification is now inferred from column names."
)
}
if (lifecycle::is_present(autoselect.preprocessing)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "save_model(autoselect.preprocessing)",
details = "If multiple pretreatment methods are supplied,
the best will be selected automatically."
)
}
if (lifecycle::is_present(preprocessing.method)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "save_model(preprocessing.method)",
with = "save_model(pretreatment)"
)
}
# Error handling
if (!(best.model.metric %in% c("RMSE", "Rsquared"))) {
rlang::abort('best.model.metric must be either "RMSE" or "Rsquared"')
}
if (nrow(df) != nrow(na.omit(df))) {
rlang::abort("Training data cannot contain missing values.")
}
if (!is.character(model.name)) {
rlang::abort("model.name must be a string!")
}
if (is.null(model.save.folder)) {
model.save.folder <- getwd()
}
# Choose best pretreatment method and set up training set
methods.list <- c(
"Raw_data", "SNV", "SNV1D", "SNV2D", "D1", "D2", "SG",
"SNVSG", "SGD1", "SG.D1W5", "SG.D1W11", "SG.D2W5",
"SG.D2W11"
)
training.results <- test_spectra(
train.data = df,
num.iterations = num.iterations,
test.data = NULL,
pretreatment = pretreatment,
k.folds = k.folds,
proportion.train = proportion.train,
tune.length = tune.length,
model.method = model.method,
stratified.sampling = stratified.sampling,
best.model.metric = best.model.metric,
cv.scheme = cv.scheme,
trial1 = trial1,
trial2 = trial2,
trial3 = trial3,
split.test = FALSE,
verbose = verbose
)
if (length(pretreatment) == 1) {
best.model <- training.results$model
best.model.stats <- training.results$summary.model.performance %>%
tibble::add_column(Pretreatment = methods.list[pretreatment],
.before = "SummaryType")
if (verbose) print(best.model.stats)
}
if (length(pretreatment) != 1) {
# Use results data frame to determine best pretreatment technique
results.df <- training.results$summary.model.performance
best.type.num <- ifelse(best.model.metric == "RMSE",
which.min(results.df$RMSEp_mean),
which.max(results.df$R2p_mean)
)
# Set chosen model as best.model for export
best.model <- training.results$model[[best.type.num]]
best.model.stats <- results.df[best.type.num, ]
if (verbose) {
cat("\nTraining Summary:\n")
print(results.df)
cat(paste0(
"\nBest pretreatment technique: ",
results.df$Pretreatment[best.type.num], "\n"
))
}
} # End multiple pretreatments if statement
if (write.model) {
if (verbose) {
cat(paste0(
"\nSaving model and model statistics to ",
model.save.folder, ".\n"
))
}
# Output stats to model.save.folder as 'model.name_stats.csv'
write.csv(best.model.stats,
file = paste0(
model.save.folder, "/", model.name,
"_stats.csv"
), row.names = FALSE
)
# Save model in save location as 'model.name.Rds'
saveRDS(best.model, file = paste0(
model.save.folder, "/",
model.name, ".Rds"
))
}
# Output list of model stats data frame and model
output.list <- list(
best.model = best.model,
best.model.stats = best.model.stats
)
return(output.list)
}
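
# A minimal sketch (comments only, not part of the package) of reloading a
# saved model for prediction. The paths assume save_model() was run with
# write.model = TRUE, model.name = "my_prediction_model", and the default
# model.save.folder (the working directory); new.spectra is a hypothetical
# data.frame of unique IDs followed by X-prefixed spectral columns:
# predictions <- predict_spectra(
#   input.data = new.spectra,
#   model.stats.location = "my_prediction_model_stats.csv",
#   model.location = "my_prediction_model.Rds"
# )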
# ==== end of file: /scratch/gouwar.j/cran-all/cranData/waves/R/save_model.R ====
#' @title Test the performance of spectral models
#' @name test_spectra
#' @description Wrapper that trains models based on spectral data to predict
#' reference values and reports model performance statistics.
#'
#' @details Calls \code{\link{pretreat_spectra}}, \code{\link{format_cv}},
#' and \code{\link{train_spectra}} functions.
#'
#' @author Jenna Hershberger \email{jmh579@@cornell.edu}
#'
#' @inheritParams train_spectra
#' @inheritParams format_cv
#' @inheritParams pretreat_spectra
#' @param train.data \code{data.frame} object of spectral data for input into a
#' spectral prediction model. First column contains unique identifiers, second
#' contains reference values, followed by spectral columns. Include no other
#' columns to right of spectra! Column names of spectra must start with "X"
#' and reference column must be named "reference".
#' @param preprocessing DEPRECATED please use
#' \code{pretreatment} to specify the specific pretreatment(s) to test.
#' For behavior identical to that of \code{preprocessing = TRUE}, set
#' \code{pretreatment = 1:13}.
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr select
#' @importFrom tidyr pivot_longer pivot_wider
#' @importFrom rlang abort
#' @importFrom lifecycle deprecated
#'
#' @return \code{list} of 5 objects:
#' \enumerate{
#' \item `model.list` is a \code{list} of trained model objects, one for each
#' pretreatment method specified by the \code{pretreatment} argument.
#' Each model is trained with all rows of \code{df}.
#' \item `summary.model.performance` is a \code{data.frame} containing summary
#' statistics across all model training iterations and pretreatments.
#' See below for a description of the summary statistics provided.
#' \item `model.performance` is a \code{data.frame} containing performance
#' statistics for each iteration of model training separately (see below).
#' \item `predictions` is a \code{data.frame} containing both reference and
#' predicted values for each test set entry in each iteration of
#' model training.
#' \item `importance` is a \code{data.frame} containing variable importance
#' results for each wavelength at each iteration of model training.
#' If \code{model.method} is not "pls" or "rf", this list item is \code{NULL}.
#' }
#'
#' Summary statistics reported in the `summary.model.performance` and
#' `model.performance` \code{data.frame}s include:
#' \itemize{
#' \item Tuned parameters depending on the model algorithm:
#' \itemize{
#' \item \strong{Best.n.comp}, the best number of components
#' \item \strong{Best.ntree}, the best number of trees in an RF model
#' \item \strong{Best.mtry}, the best number of variables to include at
#' every decision point in an RF model
#' }
#' \item \strong{RMSECV}, the root mean squared error of cross-validation
#' \item \strong{R2cv}, the coefficient of multiple determination of
#' cross-validation for PLSR models
#' \item \strong{RMSEP}, the root mean squared error of prediction
#' \item \strong{R2p}, the squared Pearson’s correlation between predicted and
#' observed test set values
#' \item \strong{RPD}, the ratio of standard deviation of observed test set
#' values to RMSEP
#' \item \strong{RPIQ}, the ratio of performance to interquartile difference
#' \item \strong{CCC}, the concordance correlation coefficient
#' \item \strong{Bias}, the average difference between the predicted and
#' observed values
#' \item \strong{SEP}, the standard error of prediction
#' \item \strong{R2sp}, the squared Spearman’s rank correlation between
#' predicted and observed test set values
#' }
#'
#' @export
#'
#' @examples
#' \donttest{
#' library(magrittr)
#' ikeogu.2017 %>%
#' dplyr::rename(reference = DMC.oven,
#' unique.id = sample.id) %>%
#' dplyr::select(unique.id, reference, dplyr::starts_with("X")) %>%
#' na.omit() %>%
#' test_spectra(
#' train.data = .,
#' tune.length = 3,
#' num.iterations = 3,
#' pretreatment = 1
#' )
#' }
test_spectra <- function(train.data,
num.iterations,
test.data = NULL,
pretreatment = 1,
k.folds = 5,
proportion.train = 0.7,
tune.length = 50,
model.method = "pls",
best.model.metric = "RMSE",
stratified.sampling = TRUE,
cv.scheme = NULL,
trial1 = NULL,
trial2 = NULL,
trial3 = NULL,
split.test = FALSE,
seed = 1,
verbose = TRUE,
wavelengths = deprecated(),
preprocessing = deprecated(),
output.summary = deprecated(),
rf.variable.importance = deprecated()) {
# Deprecate warnings ---------------------------
if (lifecycle::is_present(wavelengths)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "test_spectra(wavelengths)",
details = "Wavelength specification is now inferred from column names."
)
}
if (lifecycle::is_present(preprocessing)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "test_spectra(preprocessing)",
details = "To test all pretreatment methods, use 'pretreatment = 1:13'.
To test only raw data, use 'pretreatment = 1'."
)
}
if (lifecycle::is_present(rf.variable.importance)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "test_spectra(rf.variable importance)",
details = "Variable importance is now output by default when
`model.method` is set to `pls` or `rf`."
)
}
if (lifecycle::is_present(output.summary)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "test_spectra(output.summary)",
details = "Summary is now default output alongside full results."
)
}
# Error handling ---------------------------
if (!is.null(cv.scheme)) {
if (is.null(trial1)) {
rlang::abort("trial1 must be provided if using cv.scheme")
}
if (is.null(trial2)) {
rlang::abort("trial2 must be provided if using cv.scheme")
}
if (sum(colnames(trial1) != colnames(trial2)) > 0) {
rlang::abort("Column names must match for trial1 and trial2
if using cv.scheme")
}
if (!is.null(trial3) && sum(colnames(trial1) != colnames(trial3)) > 0) {
rlang::abort("Column names must match for trial1, trial2, and trial3
if using cv.scheme and including trial3")
}
train.data <- trial1
}
if (nrow(train.data) != nrow(na.omit(train.data))) {
rlang::abort("Training data cannot contain missing values.")
}
if (!is.null(test.data) && (nrow(test.data) != nrow(na.omit(test.data)))) {
rlang::abort("Test data cannot contain missing values.
Either omit missing values or exclude training data
(set as NULL).")
}
if (model.method == "rf" && tune.length > 5) {
rlang::abort("The waves implementation of the random forest algorithm uses
oob cross-validation for model training
and requires a tune length of 5.")
}
# End error handling ---------------------------
n.train <- nrow(train.data)
n.test <- ifelse(is.null(test.data), 0, nrow(test.data))
# Perform pretreatments on everything ---------------------------
# Returns a list of data frames,
# one for each transformation specified by pretreatment argument
if (verbose) {
cat("Pretreatment initiated.\n")
}
if (!is.null(cv.scheme)) {
train.data <- rbind(trial1, trial2, trial3)
}
# Pretreat spectra ---------------------------
methods.list <- c(
"Raw_data", "SNV", "SNV1D", "SNV2D", "D1", "D2", "SG",
"SNVSG", "SGD1", "SG.D1W5", "SG.D1W11", "SG.D2W5", "SG.D2W11"
)
df.list <- pretreat_spectra(
df = train.data,
test.data = test.data,
pretreatment = pretreatment
)
# If only one pretreatment, pretreat_spectra() outputs a data.frame,
# not a list.
# To simplify downstream use of pretreated spectra,
# make this data.frame into a list with one item.
if (length(pretreatment) == 1) {
df.list <- list(df.list)
names(df.list) <- methods.list[pretreatment]
}
# Training loop ---------------------------
if (verbose) {
cat("Training models...\n")
}
counter <- 0
for (i in pretreatment) {
# This implementation allows for any combination of pretreatments
# ex// pretreatment = c(1,4,8)
counter <- counter + 1
if (verbose) {
cat(paste("Working on", methods.list[i], "\n", sep = " "))
}
# Extract preprocessed data ---------------------------
# df.list contains named data.frames transformed by the requested methods.
# To access a specific method, use df.list[[methods.list[i]]].
# This will call the preprocessed data.frame by
# name from the transformed list.
# Then extract the test dataset from full processed data frame
processed.train.data <- df.list[[methods.list[i]]][1:n.train, ]
if (n.test == 0) {
processed.test.data <- NULL
} else {
processed.test.data <- df.list[[methods.list[i]]][(
n.train + 1):(n.train + n.test), ]
}
if (!is.null(cv.scheme)) {
processed.trial1 <- df.list[[methods.list[i]]][seq_len(nrow(trial1)), ]
processed.trial2 <- df.list[[methods.list[i]]][(
nrow(trial1) + 1):(nrow(trial1) + nrow(trial2)), ]
processed.trial3 <- df.list[[methods.list[i]]][(
nrow(trial1) + nrow(trial2) + 1):nrow(train.data), ]
}
# Fit models for each pretreatment and output results ----------------------
training.results.i <- train_spectra(
df = processed.train.data,
num.iterations = num.iterations,
test.data = processed.test.data,
k.folds = k.folds,
proportion.train = proportion.train,
tune.length = tune.length,
model.method = model.method,
cv.scheme = cv.scheme,
stratified.sampling = stratified.sampling,
trial1 = processed.trial1,
trial2 = processed.trial2,
trial3 = processed.trial3,
split.test = split.test,
verbose = verbose
)
if (length(pretreatment) != 1) {
# Add Pretreatment column to each data.frame in the training results list
for (j in 2:length(training.results.i)) {
training.results.i[[j]] <- cbind("Pretreatment" = methods.list[i],
training.results.i[[j]])
rownames(training.results.i[[j]]) <- NULL
}
# 1. Reformat summary statistics data.frame so
# multiple pretreatments can be stacked
# 2. Put pretreatment name in first column followed by
# performance statistics
# 3. Append SummaryType (mean, sd, mode) to statistic name
# to flatten into a single row
summary.i <- training.results.i$summary.model.performance %>%
tidyr::pivot_longer(cols = .data$RMSEp:.data$best.mtry) %>%
pivot_wider(
id_cols = c(.data$Pretreatment),
names_from = c(.data$name, .data$SummaryType),
names_sep = "_"
)
} else {
summary.i <- training.results.i$summary.model.performance
}
if (counter == 1) { # Counter indicates pretreatment number
# Set up results compilations in first iteration
if (length(pretreatment) != 1) {
model.list <- list(training.results.i$model)
} else { # If only one pretreatment, don't make a list.
model.list <- training.results.i$model
}
summary.df <- summary.i
results.df <- training.results.i$model.performance
predictions.df <- training.results.i$predictions
importance.df <- training.results.i$importance
} else { # Not the first pretreatment
# Add new results to existing objects
model.list <- append(model.list, list(training.results.i$model))
summary.df <- rbind(summary.df, summary.i)
results.df <- rbind(results.df, training.results.i$model.performance)
predictions.df <- rbind(predictions.df, training.results.i$predictions)
importance.df <- rbind(importance.df, training.results.i$importance)
}
} # End of pretreatment loop ---------------------------
rownames(summary.df) <- NULL
rownames(results.df) <- NULL
if (length(pretreatment) != 1) {
names(model.list) <- methods.list[pretreatment]
}
if (model.method %in% c("pls", "rf")) {
# Reformat importance.df
# Some pretreatments trim the wavelengths,
# so they do not return the full set of importance values.
# If pivot_wider is used with each pretreatment separately,
# the number of columns will not match.
importance.df <- tidyr::pivot_wider(importance.df,
names_from = .data$wavelength,
values_from = .data$Overall)
}
results.list <- list(
model = model.list,
summary.model.performance = summary.df,
model.performance = results.df,
predictions = predictions.df,
importance = importance.df
)
return(results.list)
}
|
/scratch/gouwar.j/cran-all/cranData/waves/R/test_spectra.R
|
#' @title Train a model to predict reference values with spectral data
#' @name train_spectra
#' @description Trains spectral prediction models using one of several
#' algorithms and sampling procedures.
#' @author Jenna Hershberger \email{jmh579@@cornell.edu}
#'
#' @inheritParams format_cv
#' @param df \code{data.frame} object. First column contains unique identifiers,
#' second contains reference values, followed by spectral columns. Include no
#' other columns to right of spectra! Column names of spectra must start with
#' "X" and reference column must be named "reference"
#' @param num.iterations Number of training iterations to perform
#' @param test.data \code{data.frame} with same specifications as \code{df}. Use
#' if specific test set is desired for hyperparameter tuning. If \code{NULL},
#' function will automatically train with a stratified sample of 70\%. Default
#' is \code{NULL}.
#' @param k.folds Number indicating the number of folds for k-fold
#' cross-validation during model training. Default is 5.
#' @param tune.length Number delineating search space for tuning of the PLSR
#' hyperparameter \code{ncomp}. Must be set to 5 when using the random forest
#' algorithm (\code{model.method == rf}). Default is 50.
#' @param model.method Model type to use for training. Valid options include:
#' \itemize{ \item "pls": Partial least squares regression (Default) \item
#' "rf": Random forest \item "svmLinear": Support vector machine with linear
#' kernel \item "svmRadial": Support vector machine with radial kernel }
#' @param best.model.metric Metric used to decide which model is best. Must be
#' either "RMSE" or "Rsquared"
#' @param stratified.sampling If \code{TRUE}, training and test sets will be
#' selected using stratified random sampling. This term is only used if
#' \code{test.data == NULL}. Default is \code{TRUE}.
#' @param split.test boolean that allows for a fixed training set and a split
#' test set. Example// train model on data from two breeding programs and a
#' stratified subset (70\%) of a third and test on the remaining samples
#' (30\%) of the third. If \code{FALSE}, the entire provided test set
#' \code{test.data} will remain as a testing set or if none is provided, 30\%
#' of the provided \code{train.data} will be used for testing. Default is
#' \code{FALSE}.
#' @param seed Integer to be used internally as input for \code{set.seed()}.
#' Only used if \code{stratified.sampling = TRUE}. In all other cases, seed
#' is set to the current iteration number. Default is 1.
#' @param verbose If \code{TRUE}, the number of rows removed through filtering
#' will be printed to the console. Default is \code{TRUE}.
#' @param save.model DEPRECATED \code{save.model = FALSE} is no
#' longer supported; this function will always return a saved model.
#' @param rf.variable.importance DEPRECATED
#' \code{rf.variable.importance = FALSE} is no longer supported; variable
#' importance results are always returned if the \code{model.method} is
#' set to `pls` or `rf`.
#' @param output.summary DEPRECATED \code{output.summary = FALSE}
#' is no longer supported; a summary of output is always returned alongside
#' the full performance statistics.
#' @param return.model DEPRECATED \code{return.model = FALSE}
#' is no longer supported; a trained model object is always returned
#' alongside the full performance statistics and summary.
#'
#' @return list of the following:
#' \enumerate{
#' \item \code{model} is a model object trained with all rows of \code{df}.
#' \item \code{summary.model.performance} is a \code{data.frame} with model
#' performance statistics in summary format (2 rows, one with mean and one
#' with standard deviation of all training iterations).
#'   \item \code{model.performance} is a \code{data.frame} with model
#' performance statistics in long format
#' (number of rows = \code{num.iterations})
#' \item \code{predictions} is a \code{data.frame} containing predicted values
#' for each test set entry at each iteration of model training.
#' \item \code{importance} is a \code{data.frame} that contains variable
#' importance for each wavelength. Only available for \code{model.method}
#' options "rf" and "pls".
#' }
#' Included summary statistics:
#' \itemize{
#' \item Tuned parameters depending on the model algorithm:
#' \itemize{
#' \item \strong{Best.n.comp}, the best number of components
#' \item \strong{Best.ntree}, the best number of trees in an RF model
#' \item \strong{Best.mtry}, the best number of variables to include at
#' every decision point in an RF model
#' }
#' \item \strong{RMSECV}, the root mean squared error of cross-validation
#' \item \strong{R2cv}, the coefficient of multiple determination of
#' cross-validation for PLSR models
#' \item \strong{RMSEP}, the root mean squared error of prediction
#' \item \strong{R2p}, the squared Pearson’s correlation between predicted and
#' observed test set values
#' \item \strong{RPD}, the ratio of standard deviation of observed test set
#' values to RMSEP
#' \item \strong{RPIQ}, the ratio of performance to interquartile difference
#' \item \strong{CCC}, the concordance correlation coefficient
#' \item \strong{Bias}, the average difference between the predicted and
#' observed values
#' \item \strong{SEP}, the standard error of prediction
#' \item \strong{R2sp}, the squared Spearman’s rank correlation between
#' predicted and observed test set values
#' }
#'
#' @importFrom caret createDataPartition trainControl train createResample varImp
#' @importFrom dplyr select mutate summarize_all
#' @importFrom tidyselect starts_with everything all_of
#' @importFrom magrittr %>% %<>%
#' @importFrom tibble rownames_to_column
#' @importFrom stats cor predict sd
#' @importFrom spectacles postResampleSpectro
#' @importFrom randomForest importance
#' @importFrom rlang .data abort has_name
#' @importFrom pls R2 RMSEP mvrValstats MSEP
#' @importFrom lifecycle deprecated
#'
#' @export train_spectra
#'
#' @examples
#' \donttest{
#' library(magrittr)
#' ikeogu.2017 %>%
#' dplyr::filter(study.name == "C16Mcal") %>%
#' dplyr::rename(reference = DMC.oven,
#' unique.id = sample.id) %>%
#' dplyr::select(unique.id, reference, dplyr::starts_with("X")) %>%
#' na.omit() %>%
#' train_spectra(
#' df = .,
#' tune.length = 3,
#' num.iterations = 3,
#' best.model.metric = "RMSE",
#' stratified.sampling = TRUE
#' ) %>%
#' summary()
#' }
train_spectra <- function(df,
num.iterations,
test.data = NULL,
k.folds = 5,
proportion.train = 0.7,
tune.length = 50,
model.method = "pls",
best.model.metric = "RMSE",
stratified.sampling = TRUE,
cv.scheme = NULL,
trial1 = NULL,
trial2 = NULL,
trial3 = NULL,
split.test = FALSE,
seed = 1,
verbose = TRUE,
save.model = deprecated(),
rf.variable.importance = deprecated(),
output.summary = deprecated(),
return.model = deprecated()) {
# Deprecate warnings ---------------------------
if (lifecycle::is_present(rf.variable.importance)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "train_spectra(rf.variable importance)",
details = "Variable importance is now output by default when
`model.method` is set to `pls` or `rf`."
)
}
if (lifecycle::is_present(output.summary)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "train_spectra(output.summary)",
details = "Summary is now default output alongside full results."
)
}
if (lifecycle::is_present(return.model)) {
lifecycle::deprecate_warn(
when = "0.2.0",
what = "train_spectra(return.model)",
details = "Trained models are now default output alongside full results."
)
}
# Error handling ---------------------------
if (!(best.model.metric %in% c("RMSE", "Rsquared"))) {
rlang::abort('best.model.metric must be either "RMSE" or "Rsquared"')
}
if (!(model.method %in% c("pls", "rf", "svmLinear", "svmRadial"))) {
rlang::abort('model.method must be "pls", "rf", "svmLinear",
or "svmRadial"')
}
if (!rlang::has_name(df, "reference")) {
rlang::abort('The training dataset must include a column named "reference"')
}
if (!is.null(test.data) && !(rlang::has_name(test.data, "reference"))) {
rlang::abort('The test dataset must include a column named "reference"')
}
if (!(rlang::has_name(df, "unique.id"))) {
rlang::abort('The training dataset must include a column named "unique.id"')
}
if (!is.null(test.data) && !(rlang::has_name(test.data, "unique.id"))) {
rlang::abort('The test dataset must include a column named "unique.id"')
}
if (!is.null(cv.scheme)) {
if (!(cv.scheme %in% c("CV1", "CV2", "CV0", "CV00"))) {
rlang::abort('cv.scheme must be NULL, "CV0", "CV00", "CV1", or "CV2"')
}
# Set num.iterations based on cv.scheme
if (cv.scheme == "CV0" || cv.scheme == "CV00") {
num.iterations <- 1
} # else use provided number of iterations
}
if (proportion.train > 1 || proportion.train < 0) {
rlang::abort("'proportion.train' must be a number between 0 and 1")
}
df.colnames <- c(
"Iteration", "ModelType", "RMSEp", "R2p", "RPD", "RPIQ", "CCC", "Bias",
"SEP", "RMSEcv", "R2cv", "R2sp", "best.ncomp", "best.ntree", "best.mtry"
)
df.colnames.notype <- c(
"Iteration", "RMSEp", "R2p", "RPD", "RPIQ", "CCC", "Bias",
"SEP", "RMSEcv", "R2cv", "R2sp", "best.ncomp", "best.ntree", "best.mtry"
)
# Set seed ------------------------------
set.seed(seed = seed)
# Train model ---------------------------
# Partition training and test sets
# Random sampling occurs in the loop below
if (is.null(test.data)) {
partition.input.df <- df
} else {
partition.input.df <- test.data
}
if (!is.null(test.data) && !split.test) {
# If fixed training and test sets provided but split.test = F
# One iteration is the only option because there is only one
# possible combination when the sets are fixed
num.iterations <- 1
data.train <- df
data.test <- test.data
}
if (stratified.sampling && is.null(cv.scheme)) {
# Stratified sampling to get representative sample of ground
# truth (reference column) values
# Outputs list with n = num.iterations
train.index <- caret::createDataPartition(
y = partition.input.df$reference,
p = proportion.train, times = num.iterations
)
}
for (i in 1:num.iterations) {
# set seed, different for each iteration for random samples
set.seed(i)
if (!stratified.sampling && is.null(cv.scheme)) {
# Random sample (not stratified)
train.index <- sort(sample(
x = seq_len(nrow(partition.input.df)),
size = proportion.train * nrow(partition.input.df),
replace = FALSE, prob = NULL
))
if (is.null(test.data)) {
# No test set provided
data.train <- df[train.index, ]
data.test <- df[-train.index, ]
} else if (!is.null(test.data) && split.test) {
# Test set provided and split randomly
# Fixed training set + add proportion.train from test set pool
# to training set
data.train <- rbind(df, test.data[train.index, ])
data.test <- test.data[-train.index, ]
}
} else if (stratified.sampling && is.null(cv.scheme)) {
# Stratified random sampling
if (is.null(test.data)) {
# No test set provided
data.train <- df[train.index[[i]], ]
data.test <- df[-train.index[[i]], ]
} else if (!is.null(test.data) && split.test) {
# Test set provided and split in a stratified random manner
# Fixed training set + add proportion.train from test set pool to
# training set
data.train <- rbind(df, test.data[train.index[[i]], ])
data.test <- test.data[-train.index[[i]], ]
}
} else if (!is.null(cv.scheme)) {
# cv.scheme present
# Use selected cross-validation scheme
formatted.lists <- format_cv(
trial1 = trial1,
trial2 = trial2,
trial3 = trial3,
cv.scheme = cv.scheme,
stratified.sampling = stratified.sampling,
proportion.train = proportion.train,
seed = i,
remove.genotype = TRUE
)
data.train <- formatted.lists$train.set
data.test <- formatted.lists$test.set
}
train.ref.spectra <- data.train %>% dplyr::select(
.data$reference,
starts_with("X")
)
# Exclude reference column from test set
test.spectra <- data.test %>% dplyr::select(starts_with("X"))
if (num.iterations > 9) {
cv.seeds <- c(1:num.iterations)
} else {
cv.seeds <- c(1:10)
}
# Tune hyperparameters with training data
# Example// for 'pls', train hyperparameter "ncomps", where tune.length is
# number of ncomps tried
if (model.method != "rf") {
# 5-fold cross validation on training set
cv.kfold <- caret::trainControl(
method = "repeatedcv",
number = k.folds,
savePredictions = TRUE,
seeds = cv.seeds
)
data.trained <- caret::train(reference ~ .,
data = train.ref.spectra,
method = model.method,
tuneLength = tune.length,
trControl = cv.kfold,
metric = best.model.metric
)
}
if (model.method == "pls") {
# Extract best number of components
best.ncomp <- data.trained$bestTune$ncomp
best.ntree <- NA
best.mtry <- NA
# Put results as row in data frame
predicted.values <- as.numeric(predict(data.trained$finalModel,
newdata = as.matrix(test.spectra), # exclude reference column
ncomp = best.ncomp
))
R2cv <- pls::R2(data.trained$finalModel, ncomp = best.ncomp)[["val"]][2]
RMSEcv <- pls::RMSEP(data.trained$finalModel,
ncomp = best.ncomp
)[["val"]][2]
} else if (model.method == "svmLinear") {
predicted.values <- as.numeric(predict(data.trained,
newdata = as.matrix(test.spectra)
))
best.ncomp <- NA
best.ntree <- NA
best.mtry <- NA
R2cv <- NA
RMSEcv <- NA
} else if (model.method == "svmRadial") {
predicted.values <- as.numeric(predict(data.trained,
newdata = as.matrix(test.spectra)
))
best.ncomp <- NA
best.ntree <- NA
best.mtry <- NA
R2cv <- NA
RMSEcv <- NA
} else if (model.method == "rf") {
cv.oob <- caret::trainControl(
method = "oob",
number = 5,
savePredictions = TRUE,
seeds = list(cv.seeds, cv.seeds)
)
data.trained <- caret::train(reference ~ .,
data = train.ref.spectra,
method = model.method,
tuneLength = tune.length,
trControl = cv.oob,
metric = best.model.metric,
importance = TRUE
)
# Extract best ntree and mtry
best.ncomp <- NA
best.ntree <- data.trained$finalModel$ntree
best.mtry <- data.trained$finalModel$mtry
predicted.values <- as.numeric(predict(data.trained$finalModel,
newdata = as.matrix(test.spectra),
ntree = best.ntree,
mtry = best.mtry
))
R2cv <- NA
RMSEcv <- NA
}
# Variable importance ---------------------------
# Can only be performed for pls and rf model types
# Each row contains iteration number followed by model type and importance
# value of each wavelength.
if (model.method %in% c("pls", "rf")) {
importance.df.i <- cbind(
"Iteration" = i, "ModelType" = model.method,
caret::varImp(data.trained$finalModel)
) %>%
tibble::rownames_to_column(var = "wavelength")
rownames(importance.df.i) <- NULL
} else {
importance.df.i <- NULL
}
# Get model performance statistics ---------------------------
reference.values <- data.test$reference
# Squared Spearman's rank correlation
R2sp <- cor(predicted.values, reference.values, method = "spearman")**2
spectacles.df.i <- as.data.frame(t(spectacles::postResampleSpectro(
pred = predicted.values,
obs = reference.values
)))
results.df.i <- cbind(
i, spectacles.df.i,
RMSEcv, R2cv, R2sp,
best.ncomp, best.ntree, best.mtry
)
colnames(results.df.i) <- df.colnames.notype
# Compile predictions ---------------------------
predictions.df.i <- cbind(i, model.method, data.test$unique.id,
reference.values, predicted.values)
colnames(predictions.df.i) <- c("Iteration", "ModelType", "unique.id",
"reference", "predicted")
if (i == 1) {
predictions.df <- predictions.df.i
results.df <- results.df.i
importance.df <- importance.df.i
} else {
predictions.df <- rbind(predictions.df, predictions.df.i)
results.df <- rbind(results.df, results.df.i)
importance.df <- rbind(importance.df, importance.df.i)
}
} # End of loop
# Create summary data.frame ---------------------------
summary.df <- rbind(
summarize_all(results.df, .funs = mean),
summarize_all(results.df, .funs = sd, na.rm = TRUE),
summarize_all(results.df, .funs = get_mode)
) %>%
mutate(
ModelType = model.method,
SummaryType = c("mean", "sd", "mode")
) %>%
# Get rid of iteration column and reorder remaining
dplyr::select(
.data$SummaryType, .data$ModelType, .data$RMSEp:.data$R2sp,
.data$best.ncomp, .data$best.ntree, .data$best.mtry
)
# Stitch on ModelType column later so doesn't interfere with
# mean calculations for summary
results.df$ModelType <- model.method
results.df <- results.df %>%
dplyr::select(all_of(df.colnames))
# Create model with all input data (not just 70%). Results will give an idea
# of this model's performance, but they will have been generated with
# only subsets of the data.
if (verbose) cat("Returning model...\n")
if (model.method == "pls") {
# Format df for plsr() function
df.plsr <- df %>%
dplyr::select(-starts_with("X")) %>%
as.data.frame()
df.plsr$spectra <- df %>%
dplyr::select(starts_with("X")) %>%
as.matrix()
full.model <- pls::plsr(reference ~ spectra,
ncomp = tune.length,
data = df.plsr
)
}
if (model.method == "rf") {
df.rf <- df %>% dplyr::select(.data$reference, starts_with("X"))
full.model <- randomForest::randomForest(reference ~ .,
data = df.rf,
importance = FALSE,
ntree = tune.length
)
}
if (model.method == "svmLinear" || model.method == "svmRadial") {
full.model <- caret::train(reference ~ .,
data = df,
method = model.method,
tuneLength = tune.length,
trControl = cv.kfold,
metric = best.model.metric
)
}
return(list(
model = full.model,
summary.model.performance = summary.df,
model.performance = results.df,
predictions = as.data.frame(predictions.df),
importance = importance.df
))
}
|
/scratch/gouwar.j/cran-all/cranData/waves/R/train_spectra.R
|
#' @title Internal utility functions
#' @name get_mode
#' @description Get the mode of a set of numbers. Used in getting summary of
#' results within [train_spectra()]
#'
#' @param vector.input The mode of this vector of numbers will be calculated
#' by this function
#' @return mode of the numbers in `vector.input`
#' @keywords internal
get_mode <- function(vector.input) {
  unique.vector <- unique(vector.input)
  # Most frequent value; ties are broken by the first value encountered
  return(unique.vector[which.max(tabulate(match(vector.input, unique.vector)))])
}
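# Illustrative usage (not run):
#   get_mode(c(3, 3, 7, 7, 7))        # returns 7
#   get_mode(c("pls", "pls", "rf"))   # returns "pls"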
|
/scratch/gouwar.j/cran-all/cranData/waves/R/utils.R
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 6,
fig.align = "center"
)
## ----setup, message=FALSE, warning=FALSE--------------------------------------
library(waves)
library(magrittr)
library(dplyr)
library(tidyr)
library(ggplot2)
library(tibble)
## ----install_cran, eval=FALSE-------------------------------------------------
# install.packages("waves")
# library(waves)
## ----install_dev, eval=FALSE--------------------------------------------------
# install.packages("devtools")
# devtools::install_github("GoreLab/waves")
# library(waves)
## ----format-------------------------------------------------------------------
ikeogu.2017[1:7, 1:7]
ikeogu.2017.prepped <- ikeogu.2017 %>%
dplyr::rename(unique.id = sample.id,
reference = DMC.oven) %>%
dplyr::select(unique.id, dplyr::everything(), -TCC) %>%
na.omit()
ikeogu.2017.prepped[1:7, 1:7]
## ----plot_raw, fig.height=5, fig.width=7--------------------------------------
ikeogu.2017.prepped %>%
plot_spectra(
df = .,
num.col.before.spectra = 5,
detect.outliers = FALSE,
alternate.title = "Example spectra"
)
## ----filter-------------------------------------------------------------------
filtered.df <- ikeogu.2017.prepped %>%
filter_spectra(
df = .,
filter = TRUE,
return.distances = TRUE,
num.col.before.spectra = 5,
window.size = 15
)
filtered.df[1:5, c(1:5, (ncol(filtered.df) - 3):ncol(filtered.df))]
## ----aggregate----------------------------------------------------------------
aggregated.test <- ikeogu.2017.prepped %>%
aggregate_spectra(
grouping.colnames = c("study.name"),
reference.value.colname = "reference",
agg.function = "mean"
)
aggregated.test[, 1:5]
## ----run_test_spectra---------------------------------------------------------
results.list <- ikeogu.2017.prepped %>%
dplyr::select(unique.id, reference, dplyr::starts_with("X")) %>%
na.omit() %>%
test_spectra(
train.data = .,
tune.length = 3,
num.iterations = 3,
pretreatment = 1
)
## ----plot_pretreatments, fig.height=6, fig.width=7----------------------------
ikeogu.2017.prepped[1:10, ] %>% # subset the first 10 scans for speed
pretreat_spectra(pretreatment = 2:13) %>% # exclude pretreatment 1 (raw data)
bind_rows(.id = "pretreatment") %>%
gather(key = "wl",
value = "s.value",
tidyselect::starts_with("X")) %>%
mutate(wl = as.numeric(readr::parse_number(.data$wl)),
pretreatment = as.factor(pretreatment)) %>%
drop_na(s.value) %>%
ggplot(data = ., aes(x = wl, y = s.value, group = unique.id)) +
geom_line(alpha = .5) +
theme(axis.text.x = element_text(angle = 45)) +
labs(title = "Pretreated spectra",
x = "Wavelength",
y = "Spectral Value") +
facet_wrap(~ pretreatment, scales = "free")
## ----view_model---------------------------------------------------------------
summary(results.list$model)
## ----view_summary-------------------------------------------------------------
results.list$summary.model.performance
## ----view_performance---------------------------------------------------------
results.list$model.performance
## ----view_predictions---------------------------------------------------------
head(results.list$predictions)
## ----view_importance----------------------------------------------------------
results.list$importance[, 1:7]
## ----run_save_model-----------------------------------------------------------
model.to.save <- ikeogu.2017.prepped %>%
dplyr::filter(study.name == "C16Mcal") %>%
dplyr::select(unique.id, reference, dplyr::starts_with("X")) %>%
na.omit() %>%
save_model(
df = .,
write.model = FALSE,
pretreatment = 1:13,
tune.length = 5,
num.iterations = 3,
verbose = FALSE
)
## ----summarize_saved_model----------------------------------------------------
summary(model.to.save$best.model)
## ----format_saved_output------------------------------------------------------
model.to.save$best.model.stats %>%
gather(key = "statistic", value = "value", RMSEp_mean:best.mtry_mode) %>%
separate(statistic, into = c("statistic", "summary_type"), sep = "_") %>%
pivot_wider(id_cols = c(Pretreatment, summary_type),
names_from = statistic, values_from = value)
## ----prep_for_predictions-----------------------------------------------------
pretreated.val <- ikeogu.2017.prepped %>%
filter(study.name == "C16Mval") %>%
pretreat_spectra(pretreatment = 8)
pretreated.val.mx <- pretreated.val %>%
dplyr::select(starts_with("X")) %>%
as.matrix()
best.ncomp <- model.to.save$best.model.stats$best.ncomp_mode
## ----predict------------------------------------------------------------------
predicted.values <- as.numeric(predict(model.to.save$best.model,
newdata = pretreated.val.mx,
ncomp = best.ncomp))
## ----calculate_statistics-----------------------------------------------------
spectacles::postResampleSpectro(pred = predicted.values,
obs = pretreated.val$reference)
## ----plot_predictions, fig.height=6-------------------------------------------
overall.range <- c(min(c(pretreated.val$reference, predicted.values)),
max(c(pretreated.val$reference, predicted.values)))
cbind(unique.id = pretreated.val$unique.id,
observed = pretreated.val$reference,
predicted = predicted.values) %>%
as_tibble() %>%
mutate(observed = as.numeric(observed),
predicted = as.numeric(predicted)) %>%
ggplot(aes(x = observed, y = predicted)) +
geom_abline(intercept = 0,
slope = 1,
color = "gray80") +
geom_point() +
coord_fixed(xlim = overall.range,
ylim = overall.range) +
labs(title = "Example dry matter content predictions",
x = "Observed",
y = "Predicted") +
theme_bw()
|
/scratch/gouwar.j/cran-all/cranData/waves/inst/doc/waves.R
|
---
title: "waves"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{waves}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
resource_files:
- save_model.R
- test_spectra.R
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 6,
fig.align = "center"
)
```
```{r setup, message=FALSE, warning=FALSE}
library(waves)
library(magrittr)
library(dplyr)
library(tidyr)
library(ggplot2)
library(tibble)
```
## Introduction
Originally designed for application in the context of resource-limited plant research and breeding programs, `waves` provides an open-source solution to spectral data processing and model development by bringing useful packages together into a streamlined pipeline. This package is a wrapper for functions related to the analysis of point visible and near-infrared reflectance measurements. It includes visualization, filtering, aggregation, pretreatment, cross-validation set formation, model training, and prediction functions to enable open-source association of spectral and reference data.
## Use
Follow the installation instructions below, and then go wild! Use `waves` to analyze your own data. Please report any bugs or feature requests by opening issues in the [`waves` repository](https://github.com/GoreLab/waves).
## Installation
Install the latest `waves` release directly from CRAN:
```{r install_cran, eval=FALSE}
install.packages("waves")
library(waves)
```
Alternatively, install the development version to get the most up-to-date (but not necessarily thoroughly tested) version:
```{r install_dev, eval=FALSE}
install.packages("devtools")
devtools::install_github("GoreLab/waves")
library(waves)
```
## 1. Format your data
Match spectra with reference values so that you have a `data.frame` with unique identifiers, reference values, and other metadata as columns to the left of spectral values. Spectral column names should start with "X". Remove rows with missing values.
```{r format}
ikeogu.2017[1:7, 1:7]
ikeogu.2017.prepped <- ikeogu.2017 %>%
dplyr::rename(unique.id = sample.id,
reference = DMC.oven) %>%
dplyr::select(unique.id, dplyr::everything(), -TCC) %>%
na.omit()
ikeogu.2017.prepped[1:7, 1:7]
```
## 2. Visualize spectra with `plot_spectra()`
To display outliers in a different color, set `detect.outliers` to `TRUE`.
```{r plot_raw, fig.height=5, fig.width=7}
ikeogu.2017.prepped %>%
plot_spectra(
df = .,
num.col.before.spectra = 5,
detect.outliers = FALSE,
alternate.title = "Example spectra"
)
```
## 3. Perform outlier removal with `filter_spectra()`
`waves` uses Mahalanobis distance to identify outliers. Mahalanobis distance is a common metric used to identify multivariate outliers. The larger the value of Mahalanobis distance, the more unusual the data point (i.e., the more likely it is to be a multivariate outlier).
The distance tells us how far an observation is from the center of the cloud, taking into account the shape (covariance) of the cloud as well.
To detect outliers, the calculated Mahalanobis distance is compared against a $\chi^2$ distribution with degrees of freedom equal to the number of spectral data columns and an alpha level of 0.05.
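As a rough illustration of that comparison (a sketch only, not the internal code of `filter_spectra()`), the cutoff for a 0.05 alpha level could be computed as:
```{r chisq_cutoff_sketch, eval=FALSE}
# Sketch only: approximate chi-squared cutoff used to flag outliers.
# Spectral columns are assumed to be those whose names start with "X".
n.spectral.cols <- sum(startsWith(colnames(ikeogu.2017.prepped), "X"))
qchisq(p = 1 - 0.05, df = n.spectral.cols) # distances above this value are flagged
```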
```{r filter}
filtered.df <- ikeogu.2017.prepped %>%
filter_spectra(
df = .,
filter = TRUE,
return.distances = TRUE,
num.col.before.spectra = 5,
window.size = 15
)
filtered.df[1:5, c(1:5, (ncol(filtered.df) - 3):ncol(filtered.df))]
```
No outliers were identified in the example dataset. Note that if `return.distances` is set to `TRUE`, the rightmost column contains Mahalanobis distances (`h.distances`).
## 4. Aggregate scans
If you have more than one scan per unique identifier, aggregate the scans by mean or median with `aggregate_spectra()`.
In this example, we will aggregate by `study.name`.
```{r aggregate}
aggregated.test <- ikeogu.2017.prepped %>%
aggregate_spectra(
grouping.colnames = c("study.name"),
reference.value.colname = "reference",
agg.function = "mean"
)
aggregated.test[, 1:5]
```
## 5. Evaluate the predictive ability of your spectra
`test_spectra()` is a wrapper that performs spectral pretreatment ([5.1](#5.1. Pretreat spectra)), cross-validation set formation ([5.2](#5.2. Specify a cross-validation scheme)), and model training functions over multiple iterations ([5.3](#5.3. Train spectral prediction models)).
Note that the following subsections describe functions that are called within `test_spectra()`. They do not need to be used separately for model pretreatment, cross-validation set formation, or model training.
Some of the arguments for this function are detailed below. A description of output is below under section [5.4](#5.4. Output). See `?test_spectra()` for more information on the arguments and output for this function.
```{r run_test_spectra}
results.list <- ikeogu.2017.prepped %>%
dplyr::select(unique.id, reference, dplyr::starts_with("X")) %>%
na.omit() %>%
test_spectra(
train.data = .,
tune.length = 3,
num.iterations = 3,
pretreatment = 1
)
```
### 5.1. Pretreat spectra
Specify which spectral pretreatments (1-13) to apply with the parameter `pretreatment`. `pretreat_spectra()` can also be used on its own to transform a data.frame using any or all of these 13 pretreatment options (the first of which leaves the data untransformed):
1. Raw data (no pretreatment is applied)
2. Standard normal variate (SNV)
3. SNV and first derivative
4. SNV and second derivative
5. First derivative
6. Second derivative
7. Savitzky–Golay filter (SG)
8. SNV and SG
9. Gap segment derivative (window size = 11)
10. SG and first derivative (window size = 5)
11. SG and first derivative (window size = 11)
12. SG and second derivative (window size = 5)
13. SG and second derivative (window size = 11)
```{r plot_pretreatments, fig.height=6, fig.width=7}
ikeogu.2017.prepped[1:10, ] %>% # subset the first 10 scans for speed
pretreat_spectra(pretreatment = 2:13) %>% # exclude pretreatment 1 (raw data)
bind_rows(.id = "pretreatment") %>%
gather(key = "wl",
value = "s.value",
tidyselect::starts_with("X")) %>%
mutate(wl = as.numeric(readr::parse_number(.data$wl)),
pretreatment = as.factor(pretreatment)) %>%
drop_na(s.value) %>%
ggplot(data = ., aes(x = wl, y = s.value, group = unique.id)) +
geom_line(alpha = .5) +
theme(axis.text.x = element_text(angle = 45)) +
labs(title = "Pretreated spectra",
x = "Wavelength",
y = "Spectral Value") +
facet_wrap(~ pretreatment, scales = "free")
```
Note that the scales in this plot are "free". Without free scales, any derivative-based treatment (D1 or D2) looks like a constant zero in comparison to the treatments without derivatives (SNV, SG).
### 5.2. Specify a cross-validation scheme
Choose from random, stratified random, or a plant breeding-specific scheme from [Jarquín et *al.*, 2017. *The Plant Genome*](https://doi.org/10.3835/plantgenome2016.12.0130). Options include:
| `cv.scheme` | Description |
|-------------|-------------------------------------------------------------------------------------------|
| `NULL` | Random or stratified random sampling (does not take genotype or environment into account) |
| "CV1" | Untested lines in tested environments |
| "CV2" | Tested lines in tested environments |
| "CV0" | Tested lines in untested environments |
| "CV00" | Untested lines in untested environments |
If `cv.scheme` is set to `NULL`, the argument `stratified.sampling` is used to determine whether stratified random sampling should be performed. If `TRUE`, the reference values from the input `data.frame` (`train.data`) will be used to create a balanced split of data between the training and test sets in each training iteration.
When using one of the four specialized cross-validation schemes ("CV1", "CV2", "CV0", or "CV00"), additional arguments are required:
- `trial1` contains the trial to be tested in subsequent model training functions. The first column contains unique identifiers, second contains genotypes, third contains reference values, followed by spectral columns. Include no other columns to right of spectra! Column names of spectra must start with "X", reference column must be named "reference", and genotype column must be named "genotype".
- `trial2` contains a trial with genotypes that overlap those in `trial1` but that was grown in a different site/year (a different environment). Formatting must be consistent with `trial1`.
- `trial3` contains a trial that may or may not contain genotypes that overlap with `trial1`. Formatting must be consistent with `trial1`.
Cross-validation schemes can also be formatted outside of `test_spectra()` using the function `format_cv()`.
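As a sketch (not evaluated here), the splits for one of these schemes could be generated directly with `format_cv()`. The `trial1.df`, `trial2.df`, and `trial3.df` objects below are hypothetical placeholders formatted as described above; the argument names match how `test_spectra()` calls `format_cv()` internally.
```{r cv_scheme_sketch, eval=FALSE}
# Hypothetical sketch: trial1.df, trial2.df, and trial3.df are placeholder
# data.frames with unique.id, genotype, and reference columns followed by spectra.
cv1.splits <- format_cv(
  trial1 = trial1.df,
  trial2 = trial2.df,
  trial3 = trial3.df,
  cv.scheme = "CV1",
  stratified.sampling = FALSE,
  proportion.train = 0.7,
  seed = 1,
  remove.genotype = TRUE
)
# cv1.splits$train.set and cv1.splits$test.set hold the formatted partitions
```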
### 5.3. Train spectral prediction models
Many of the arguments for `test_spectra()` are related to model training:
- `model.method` is the algorithm type to use for training. See the table below for more information
- `tune.length` is the number of PLS components to test. This argument is ignored if other algorithms are used
- `best.model.metric` indicates the metric used to decide which model is best ("RMSE" or "Rsquared")
- `k.folds` specifies the number of folds used for cross-validation to tune model hyperparameters within the training set
- `num.iterations` sets the number of training iterations
- `proportion.train` is the fraction of samples to be included in the training set (default is 0.7)
Models can also be trained with the standalone function `train_spectra()`. Model training is implemented with [`caret`](https://topepo.github.io/caret/).
| Algorithm | `model.method` | R package source | Tuning parameters (hyperparameters) |
|-------------------------------------------------|----------------|-------------------------------------------------------------------|-------------------------------------|
| Partial least squares (PLS) | "pls" | [`pls`](https://CRAN.R-project.org/package=pls) | ncomp |
| Random forest (RF) | "rf" | [`randomForest`](https://CRAN.R-project.org/package=randomForest) | mtry |
| Support vector machine (SVM) with linear kernel | "svmLinear" | [`kernlab`](https://CRAN.R-project.org/package=kernlab) | C |
| Support vector machine (SVM) with radial kernel | "svmRadial"    | [`kernlab`](https://CRAN.R-project.org/package=kernlab)           | sigma, C                            |
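For example, a random forest model could be trained with the standalone function as sketched below (not evaluated here). Note that the `waves` random forest implementation expects `tune.length = 5` because it tunes with out-of-bag sampling.
```{r rf_training_sketch, eval=FALSE}
# Sketch: train a random forest model instead of the default PLS
rf.results <- ikeogu.2017.prepped %>%
  dplyr::select(unique.id, reference, dplyr::starts_with("X")) %>%
  na.omit() %>%
  train_spectra(
    df = .,
    num.iterations = 3,
    model.method = "rf",
    tune.length = 5
  )
```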
### 5.4. Output
`test_spectra()` outputs a list with five objects:
1. `model` is a trained model object (or, when multiple pretreatments are tested, a list of model objects, one for each pretreatment method specified by the `pretreatment` argument). Each model is trained with all rows of the input `data.frame` (`df`)
```{r view_model}
summary(results.list$model)
```
2. `summary.model.performance` is a `data.frame` containing summary statistics across all model training iterations and pretreatments. See below for a description of the summary statistics provided.
```{r view_summary}
results.list$summary.model.performance
```
3. `model.performance` is a `data.frame` containing performance statistics for each iteration of model training separately (see below).
```{r view_performance}
results.list$model.performance
```
4. `predictions` is a `data.frame` containing both reference and predicted values for each test set entry in each iteration of model training.
```{r view_predictions}
head(results.list$predictions)
```
5. `importance` is a `data.frame` containing variable importance results for each wavelength at each iteration of model training. If `model.method` is not "pls" or "rf", this list item is `NULL`.
```{r view_importance}
results.list$importance[, 1:7]
```
| Statistic* | Description |
|----------------------------|------------------------------------------------------------------------------------|
| RMSE<sub>p</sub> | Root mean squared error of prediction |
| R<sup>2</sup><sub>p</sub> | Squared Pearson’s correlation between predicted and observed test set values |
| RPD | Ratio of standard deviation of observed test set values to RMSE<sub>p</sub> |
| RPIQ | Ratio of performance to interquartile difference |
| CCC | Concordance correlation coefficient |
| Bias | Average difference between the predicted and observed values |
| SEP | Standard error of prediction |
| RMSE<sub>cv</sub> | Root mean squared error of cross-validation |
| R<sup>2</sup><sub>cv</sub> | Coefficient of multiple determination of cross-validation for PLS models |
| R<sup>2</sup><sub>sp</sub> | Squared Spearman’s rank correlation between predicted and observed test set values |
| best.ncomp | Best number of components in a PLS model |
| best.ntree | Best number of trees in an RF model |
| best.mtry | Best number of variables to include at every decision point in an RF model |
*Many of the spectral model performance statistics are calculated using the function `postResampleSpectro()` from the `spectacles` package.
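For reference, RPD and RPIQ relate to the test-set RMSE as sketched below (`observed` and `predicted` are placeholder vectors of test-set values):
```{r rpd_rpiq_sketch, eval=FALSE}
# Sketch only: observed and predicted are placeholder numeric vectors
rmse.p <- sqrt(mean((observed - predicted)^2))
rpd  <- sd(observed) / rmse.p                        # RPD
rpiq <- unname(quantile(observed, 0.75) -
                 quantile(observed, 0.25)) / rmse.p  # RPIQ
```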
## 6. Save trained prediction models with `save_model()`
- Intended for a production environment
- Can evaluate spectral pretreatment methods using the input dataset
- Selects best model using the metric provided with `best.model.metric` ("RMSE" or "Rsquared")
- Returns trained model with option to save as .Rds object
- The `$model` output from `test_spectra()` can also be saved and used for prediction, but `save_model()` will take the extra step of saving an .Rds file for you if `write.model` is set to `TRUE`.
In the example below, we'll use one subset of the example dataset ("C16Mcal") to create the model and then we'll predict the other subset ("C16Mval") in section [7](#7. Predict phenotypic values with new spectra).
```{r run_save_model}
model.to.save <- ikeogu.2017.prepped %>%
dplyr::filter(study.name == "C16Mcal") %>%
dplyr::select(unique.id, reference, dplyr::starts_with("X")) %>%
na.omit() %>%
save_model(
df = .,
write.model = FALSE,
pretreatment = 1:13,
tune.length = 5,
num.iterations = 3,
verbose = FALSE
)
```
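If you prefer to manage files yourself, the best model returned by `save_model()` can also be written and reloaded with base R (the file path below is a placeholder):
```{r save_rds_sketch, eval=FALSE}
# Placeholder path; similar in spirit to setting write.model = TRUE above
saveRDS(model.to.save$best.model, "my_prediction_model.Rds")
reloaded.model <- readRDS("my_prediction_model.Rds")
```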
Now let's take a look at our trained model:
```{r summarize_saved_model}
summary(model.to.save$best.model)
```
```{r format_saved_output}
model.to.save$best.model.stats %>%
gather(key = "statistic", value = "value", RMSEp_mean:best.mtry_mode) %>%
separate(statistic, into = c("statistic", "summary_type"), sep = "_") %>%
pivot_wider(id_cols = c(Pretreatment, summary_type),
names_from = statistic, values_from = value)
```
## 7. Predict phenotypic values with new spectra
If generating predictions from a saved model file in .Rds format, use `predict_spectra()`. If the model object is already in your R environment, the function `stats::predict()` can be used to generate predictions. `predict_spectra()` pulls the best model hyperparameters from your saved model object, but if using `stats::predict()`, these must be supplied separately.
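If the model had been saved to disk, prediction might look roughly like the sketch below. The file paths are placeholders, and the argument names reflect our reading of the `waves` documentation, so they should be verified with `?predict_spectra`.
```{r predict_spectra_sketch, eval=FALSE}
# Hypothetical sketch: paths are placeholders; confirm argument names with ?predict_spectra
new.predictions <- predict_spectra(
  input.data = new.spectra.df, # unique.id column followed by spectral columns
  model.stats.location = "saved_model_stats.csv",
  model.location = "saved_model.Rds"
)
```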
Using the model we trained in section [6](#6. Save trained prediction models with `save_model()`), we can predict cassava root dry matter content for our held out validation set:
First, determine which pretreatment generated the best model. In this case, it's "SNVSG", which is pretreatment #8. Pretreat the new spectral dataset with this method.
```{r prep_for_predictions}
pretreated.val <- ikeogu.2017.prepped %>%
filter(study.name == "C16Mval") %>%
pretreat_spectra(pretreatment = 8)
pretreated.val.mx <- pretreated.val %>%
dplyr::select(starts_with("X")) %>%
as.matrix()
best.ncomp <- model.to.save$best.model.stats$best.ncomp_mode
```
#### Perform predictions!
```{r predict}
predicted.values <- as.numeric(predict(model.to.save$best.model,
newdata = pretreated.val.mx,
ncomp = best.ncomp))
```
#### How did we do?
```{r calculate_statistics}
spectacles::postResampleSpectro(pred = predicted.values,
obs = pretreated.val$reference)
```
#### Plot predictions
```{r plot_predictions, fig.height=6}
overall.range <- c(min(c(pretreated.val$reference, predicted.values)),
max(c(pretreated.val$reference, predicted.values)))
cbind(unique.id = pretreated.val$unique.id,
observed = pretreated.val$reference,
predicted = predicted.values) %>%
as_tibble() %>%
mutate(observed = as.numeric(observed),
predicted = as.numeric(predicted)) %>%
ggplot(aes(x = observed, y = predicted)) +
geom_abline(intercept = 0,
slope = 1,
color = "gray80") +
geom_point() +
coord_fixed(xlim = overall.range,
ylim = overall.range) +
labs(title = "Example dry matter content predictions",
x = "Observed",
y = "Predicted") +
theme_bw()
```
|
/scratch/gouwar.j/cran-all/cranData/waves/inst/doc/waves.Rmd
|
---
title: "waves"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{waves}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
resource_files:
- save_model.R
- test_spectra.R
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width = 6,
fig.align = "center"
)
```
```{r setup, message=FALSE, warning=FALSE}
library(waves)
library(magrittr)
library(dplyr)
library(tidyr)
library(ggplot2)
library(tibble)
```
## Introduction
Originally designed application in the context of resource-limited plant research and breeding programs, `waves` provides an open-source solution to spectral data processing and model development by bringing useful packages together into a streamlined pipeline. This package is wrapper for functions related to the analysis of point visible and near-infrared reflectance measurements. It includes visualization, filtering, aggregation, pretreatment, cross-validation set formation, model training, and prediction functions to enable open-source association of spectral and reference data.
## Use
Follow the installation instructions below, and then go wild! Use `waves` to analyze your own data. Please report any bugs or feature requests by opening issues in the [`waves` repository](https://github.com/GoreLab/waves).
## Installation
Install the latest `waves` release directly from CRAN:
```{r install_cran, eval=FALSE}
install.packages("waves")
library(waves)
```
Alternatively, install the development version to get the most up-to-date (but not necessarily thoroughly tested) version:
```{r install_dev, eval=FALSE}
install.packages("devtools")
devtools::install_github("GoreLab/waves")
library(waves)
```
## 1. Format your data
Match spectra with reference values so that you have a `data.frame` with unique identifiers, reference values, and other metadata as columns to the left of spectral values. Spectral column names should start with "X". Remove rows with missing values.
```{r format}
ikeogu.2017[1:7, 1:7]
ikeogu.2017.prepped <- ikeogu.2017 %>%
dplyr::rename(unique.id = sample.id,
reference = DMC.oven) %>%
dplyr::select(unique.id, dplyr::everything(), -TCC) %>%
na.omit()
ikeogu.2017.prepped[1:7, 1:7]
```
## 2. Visualize spectra with `plot_spectra()`
To display outliers in a different color, set `detect.outliers` to `TRUE`.
```{r plot_raw, fig.height=5, fig.width=7}
ikeogu.2017.prepped %>%
plot_spectra(
df = .,
num.col.before.spectra = 5,
detect.outliers = FALSE,
alternate.title = "Example spectra"
)
```
## 3. Perform outlier removal with `filter_spectra()`.
`waves` uses Mahalanobis distance to identify outliers. Mahalanobis distance is a common metric used to identify multivariate outliers. The larger the value of Mahalanobis distance, the more unusual the data point (i.e., the more likely it is to be a multivariate outlier).
The distance tells us how far an observation is from the center of the cloud, taking into account the shape (covariance) of the cloud as well.
To detect outliers, the calculated Mahalanobis distance is compared against a $\chi^2$ distribution with degrees of freedom equal to the number of spectral data columns and an alpha level of 0.05.
```{r filter}
filtered.df <- ikeogu.2017.prepped %>%
filter_spectra(
df = .,
filter = TRUE,
return.distances = TRUE,
num.col.before.spectra = 5,
window.size = 15
)
filtered.df[1:5, c(1:5, (ncol(filtered.df) - 3):ncol(filtered.df))]
```
No outliers were identified in the example dataset. Note the if `return.distances` is set to `TRUE`, the rightmost column contains Mahalanobis distances (`h.distances`).
## 4. Aggregate scans
If you have more than one scan per unique identifier, aggregate the scans by mean or median with `aggregate_spectra()`.
In this example, we will aggregate by `study.name`.
```{r aggregate}
aggregated.test <- ikeogu.2017.prepped %>%
aggregate_spectra(
grouping.colnames = c("study.name"),
reference.value.colname = "reference",
agg.function = "mean"
)
aggregated.test[, 1:5]
```
## 5. Evaluate the predictive ability of your spectra
`test_spectra()` is a wrapper that performs spectral pretreatment ([5.1](#5.1. Pretreat spectra)), cross-validation set formation ([5.2](#5.2. Specify a cross-validation scheme)), and model training functions over multiple iterations ([5.3](#5.3. Train spectral prediction models)).
Note that the following subsections describe functions that are called within `test_spectra()`. They do not need to be used separately for model pretreatment, cross-validation set formation, or model training.
Some of the arguments for this function are detailed below. A description of output is below under section [5.4](#5.4. Output). See `?test_spectra()` for more information on the arguments and output for this function.
```{r run_test_spectra}
results.list <- ikeogu.2017.prepped %>%
dplyr::select(unique.id, reference, dplyr::starts_with("X")) %>%
na.omit() %>%
test_spectra(
train.data = .,
tune.length = 3,
num.iterations = 3,
pretreatment = 1
)
```
### 5.1. Pretreat spectra
Specify which spectral pretreatments (1-13) to apply with the parameter `pretreatment`. `pretreat_spectra()` can also be used on its own to transform a data.frame using any/all of 12 available pretreatments:
1. Raw data (no pretreatment is applied)
2. Standard normal variate (SNV)
3. SNV and first derivative
4. SNV and second derivative
5. First derivative
6. Second derivative
7. Savitzky–Golay filter (SG)
8. SNV and SG
9. Gap segment derivative (window size = 11)
10. SG and first derivative (window size = 5)
11. SG and first derivative (window size = 11)
12. SG and second derivative (window size = 5)
13. SG and second derivative (window size = 11)
```{r plot_pretreatments, fig.height=6, fig.width=7}
ikeogu.2017.prepped[1:10, ] %>% # subset the first 10 scans for speed
pretreat_spectra(pretreatment = 2:13) %>% # exclude pretreatment 1 (raw data)
bind_rows(.id = "pretreatment") %>%
gather(key = "wl",
value = "s.value",
tidyselect::starts_with("X")) %>%
mutate(wl = as.numeric(readr::parse_number(.data$wl)),
pretreatment = as.factor(pretreatment)) %>%
drop_na(s.value) %>%
ggplot(data = ., aes(x = wl, y = s.value, group = unique.id)) +
geom_line(alpha = .5) +
theme(axis.text.x = element_text(angle = 45)) +
labs(title = "Pretreated spectra",
x = "Wavelength",
y = "Spectral Value") +
facet_wrap(~ pretreatment, scales = "free")
```
Note that the scales in this plot are "free". Without free scales, anything derivative-based treatment (D1 or D2) looks like it's a constant zero in comparison to those without derivative-based treatments (SNV, SG).
### 5.2. Specify a cross-validation scheme
Choose from random, stratified random, or a plant breeding-specific scheme from [Jarquín et *al.*, 2017. *The Plant Genome*](https://doi.org/10.3835/plantgenome2016.12.0130). Options include:
| `cv.scheme` | Description |
|-------------|-------------------------------------------------------------------------------------------|
| `NULL` | Random or stratified random sampling (does not take genotype or environment into account) |
| "CV1" | Untested lines in tested environments |
| "CV2" | Tested lines in tested environments |
| "CV0" | Tested lines in untested environments |
| "CV00" | Untested lines in untested environments |
If `cv.scheme` is set to `NULL`, the argument `stratified.sampling` is used to determine whether stratified random sampling should be performed. If `TRUE`, the reference values from the input `data.frame` (`train.data`) will be used to create a balanced split of data between the training and test sets in each training iteration.
When using one of the four specialized cross-validation schemes ("CV1", "CV2", "CV0", or "CV00"), additional arguments are required:
- `trial1` contains the trial to be tested in subsequent model training functions. The first column contains unique identifiers, second contains genotypes, third contains reference values, followed by spectral columns. Include no other columns to right of spectra! Column names of spectra must start with "X", reference column must be named "reference", and genotype column must be named "genotype".
-`trial2` contains a trial that has overlapping genotypes with `trial1` but that were grown in a different site/year (different environment). Formatting must be consistent with `trial1`.
- `trial3` contains a trial that may or may not contain genotypes that overlap with `trial1`. Formatting must be consistent with `trial1`.
Cross-validation schemes can also be formatted outside of `test_spectra()` using the function `format_cv()`.
### 5.3. Train spectral prediction models
Many of the arguments for `test_spectra()` are related to model training:
- `model.method` is the algorithm type to use for training. See the table below for more information
- `tune.length` is the number of PLS components to test. This argument is ignored if other algorithms are used
- `best.model.metric` indicates the metric used to decide which model is best ("RMSE" or "R-squared")
- `k-fold` specifies the number of folds used for cross-validation to tune model hyperparameters within the training set
- `num.iterations` sets the number of training iterations
- `proportion.train` is the fraction of samples to be included in the training set (default is 0.7)
Models can also be trained with the standalone function `train_spectra()`. Model training is implemented with [`caret`](https://topepo.github.io/caret/).
| Algorithm | `model.method` | R package source | Tuning parameters (hyperparameters) |
|-------------------------------------------------|----------------|-------------------------------------------------------------------|-------------------------------------|
| Partial least squares (PLS) | "pls" | [`pls`](https://CRAN.R-project.org/package=pls) | ncomp |
| Random forest (RF) | "rf" | [`randomForest`](https://CRAN.R-project.org/package=randomForest) | mtry |
| Support vector machine (SVM) with linear kernel | "svmLinear" | [`kernlab`](https://CRAN.R-project.org/package=kernlab) | C |
| Support vector machine (SVM) with radial kernel | "svmRadial | [`kernlab`](https://CRAN.R-project.org/package=kernlab) | sigma, C |
### 5.4. Output
`test_spectra()` outputs a list with four objects:
1. `model.list` is a list of trained model objects, one for each pretreatment method specified by the `pretreatment` argument. Each model is trained with all rows of the input `data.frame` (`df`)
```{r view_model}
summary(results.list$model)
```
2. `summary.model.performance` is a `data.frame` containing summary statistics across all model training iterations and pretreatments. See below for a description of the summary statistics provided.
```{r view_summary}
results.list$summary.model.performance
```
3. `model.performance` is a `data.frame` containing performance statistics for each iteration of model training separately (see below).
```{r view_performance}
results.list$model.performance
```
4. `predictions` is a `data.frame` containing both reference and predicted values for each test set entry in each iteration of model training.
```{r view_predictions}
head(results.list$predictions)
```
5. `importance` is a `data.frame` containing variable importance results for each wavelength at each iteration of model training. If `model.method` is not "pls" or "rf", this list item is `NULL`.
```{r view_importance}
results.list$importance[, 1:7]
```
| Statistic* | Description |
|----------------------------|------------------------------------------------------------------------------------|
| RMSE<sub>p</sub> | Root mean squared error of prediction |
| R<sup>2</sup><sub>p</sub> | Squared Pearson’s correlation between predicted and observed test set values |
| RPD | Ratio of standard deviation of observed test set values to RMSE<sub>p</sub> |
| RPIQ | Ratio of performance to interquartile difference |
| CCC | Concordance correlation coefficient |
| Bias | Average difference between the predicted and observed values |
| SEP | Standard error of prediction |
| RMSE<sub>cv</sub> | Root mean squared error of cross-validation |
| R<sup>2</sup><sub>cv</sub> | Coefficient of multiple determination of cross-validation for PLS models |
| R<sup>2</sup><sub>sp</sub> | Squared Spearman’s rank correlation between predicted and observed test set values |
| best.ncomp | Best number of components in a PLS model |
| best.ntree | Best number of trees in an RF model |
| best.mtry | Best number of variables to include at every decision point in an RF model |
*Many of the spectral model performance statistics are calculated using the function `postResampleSpectro()` from the `spectacles` package.
## 6. Save trained prediction models with `save_model()`
- Intended for a production environment
- Can evaluate spectral pretreatment methods using the input dataset
- Selects best model using the metric provided with `best.model.metric` ("RMSE" or "Rsquared")
- Returns trained model with option to save as .Rds object
- The `$model` output from `test_spectra()` can also be saved and used for prediction, but `save_model()` will take the extra step of saving an .Rds file for you if `write.model` is set to `TRUE`.
In the example below, we'll use one subset of the example dataset ("C16Mcal") to create the model and then we'll predict the other subset ("C16Mval") in section [7](#7. Predict phenotypic values with new spectra).
```{r run_save_model}
model.to.save <- ikeogu.2017.prepped %>%
dplyr::filter(study.name == "C16Mcal") %>%
dplyr::select(unique.id, reference, dplyr::starts_with("X")) %>%
na.omit() %>%
save_model(
df = .,
write.model = FALSE,
pretreatment = 1:13,
tune.length = 5,
num.iterations = 3,
verbose = FALSE
)
```
Now let's take a look at our trained model:
```{r summarize_saved_model}
summary(model.to.save$best.model)
```
```{r format_saved_output}
model.to.save$best.model.stats %>%
gather(key = "statistic", value = "value", RMSEp_mean:best.mtry_mode) %>%
separate(statistic, into = c("statistic", "summary_type"), sep = "_") %>%
pivot_wider(id_cols = c(Pretreatment, summary_type),
names_from = statistic, values_from = value)
```
## 7. Predict phenotypic values with new spectra
If generating predictions from a saved model file in .Rds format, use `predict_spectra()`. If the model object is already in your R environment, the function `stats::predict()` can be used to generate predictions. `predict_spectra()` pulls the best model hyperparameters from your saved model object, but if using `stats::predict()`, these must be supplied separately.
Using the model we trained in section [6](#6. Save trained prediction models with `save_model()`), we can predict cassava root dry matter content for our held out validation set:
First, determine which pretreatment generated the best model. In this case, it's "SNVSG", which is pretreatment #8. Apply that same pretreatment to the new spectral dataset.
```{r prep_for_predictions}
pretreated.val <- ikeogu.2017.prepped %>%
filter(study.name == "C16Mval") %>%
pretreat_spectra(pretreatment = 8)
pretreated.val.mx <- pretreated.val %>%
dplyr::select(starts_with("X")) %>%
as.matrix()
best.ncomp <- model.to.save$best.model.stats$best.ncomp_mode
```
#### Perform predictions!
```{r predict}
predicted.values <- as.numeric(predict(model.to.save$best.model,
newdata = pretreated.val.mx,
ncomp = best.ncomp))
```
#### How did we do?
```{r calculate_statistics}
spectacles::postResampleSpectro(pred = predicted.values,
obs = pretreated.val$reference)
```
#### Plot predictions
```{r plot_predictions, fig.height=6}
overall.range <- c(min(c(pretreated.val$reference, predicted.values)),
max(c(pretreated.val$reference, predicted.values)))
cbind(unique.id = pretreated.val$unique.id,
observed = pretreated.val$reference,
predicted = predicted.values) %>%
as_tibble() %>%
mutate(observed = as.numeric(observed),
predicted = as.numeric(predicted)) %>%
ggplot(aes(x = observed, y = predicted)) +
geom_abline(intercept = 0,
slope = 1,
color = "gray80") +
geom_point() +
coord_fixed(xlim = overall.range,
ylim = overall.range) +
labs(title = "Example dry matter content predictions",
x = "Observed",
y = "Predicted") +
theme_bw()
```
|
/scratch/gouwar.j/cran-all/cranData/waves/vignettes/waves.Rmd
|
AntonB <- function() {
a0 <- c(0,
0.02674875741081,
-0.01686411844287,
-0.07822326652899,
0.26686411844288,
0.60294901823636,
0.26686411844287,
-0.07822326652899,
-0.01686411844287,
0.02674875741081,
0,
0)
a1 <- c(0,
0,
0,
0.04563588155712,
-0.02877176311425,
-0.29563588155712,
0.55754352622850,
-0.29563588155713,
-0.02877176311425,
0.04563588155712,
0,
0)
s0 <- c(0,
0,
0,
-0.04563588155712,
-0.02877176311425,
0.29563588155712,
0.55754352622850,
0.29563588155713,
-0.02877176311425,
-0.04563588155712,
0,
0)
s1 <- c(0,
0.02674875741081,
0.01686411844287,
-0.07822326652899,
-0.26686411844288,
0.60294901823636,
-0.26686411844287,
-0.07822326652899,
0.01686411844287,
0.02674875741081,
0,
0)
s0 <- 2 * s0
s1 <- 2 * s1
aa0 <- c(0,
0,
0.02674875741081,
-0.01686411844287,
-0.07822326652899,
0.26686411844288,
0.60294901823636,
0.26686411844287,
-0.07822326652899,
-0.01686411844287,
0.02674875741081,
0)
aa1 <- c(0,
0,
0,
0,
0.04563588155712,
-0.02877176311425,
-0.29563588155712,
0.55754352622850,
-0.29563588155713,
-0.02877176311425,
0.04563588155712,
0)
ss0 <- c(0,
0,
-0.04563588155712,
-0.02877176311425,
0.29563588155712,
0.55754352622850,
0.29563588155713,
-0.02877176311425,
-0.04563588155712,
0,
0,
0)
ss1 <- c(0.02674875741081,
0.01686411844287,
-0.07822326652899,
-0.26686411844288,
0.60294901823636,
-0.26686411844287,
-0.07822326652899,
0.01686411844287,
0.02674875741081,
0,
0,
0)
ss0 <- 2 * ss0
ss1 <- 2 * ss1
list(af = list(cbind(a0, a1), cbind(aa0, aa1)),
sf = list(cbind(s0, s1), cbind(ss0, ss1)))
}
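## Example (sketch, not run): AntonB() returns the biorthogonal analysis and
## synthesis filter pairs as two-column (lowpass, highpass) matrices, two per
## tree; a quick structural check:
if (FALSE) {
  flt <- AntonB()
  sapply(flt$af, dim)   # each analysis filter matrix is 12 x 2
  sapply(flt$sf, dim)   # the synthesis filters have the same layout
}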
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/Anton.R
|
bishrink <- function(y1, y2, T) {
## Bivariate Shrinkage Function
## Usage :
## [w1] = bishrink(y1,y2,T)
## INPUT :
## y1 - a noisy coefficient value
## y2 - the corresponding parent value
## T - threshold value
## OUTPUT :
## w1 - the denoised coefficient
R <- sqrt(abs(y1)^2 + abs(y2)^2)
R <- R - T
R <- R * as.numeric(R > 0)
return(y1 * R/(R+T))
}
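## Example (sketch, not run): shrink a synthetic set of "child" coefficients
## using their "parent" coefficients and an arbitrary threshold; the shrunken
## magnitude never exceeds the input magnitude.
if (FALSE) {
  y1 <- rnorm(256)
  y2 <- rnorm(256)
  w1 <- bishrink(y1, y2, T = 1)
  all(abs(w1) <= abs(y1))
}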
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/bishrink.R
|
#' Higher-Order Wavelet Filters
#'
#' Create a wavelet filter at arbitrary scale.
#'
#' Uses \code{cascade} subroutine to compute higher-order wavelet coefficient
#' vector from a given filtering sequence.
#'
#' @param wf.name Character string of wavelet filter.
#' @param filter.seq Character string of filter sequence. \code{H} means
#' high-pass filtering and \code{L} means low-pass filtering. Sequence is read
#' from right to left.
#' @param n Length of zero-padded filter. Frequency resolution will be
#' \code{n}/2+1.
#' @return Vector of wavelet coefficients.
#' @author B. Whitcher
#' @seealso \code{\link{squared.gain}}, \code{\link{wave.filter}}.
#' @references Bruce, A. and H.-Y. Gao (1996). \emph{Applied Wavelet Analysis
#' with S-PLUS}, Springer: New York.
#'
#' Doroslovacki, M. L. (1998) On the least asymmetric wavelets, \emph{IEEE
#' Transactions on Signal Processing}, \bold{46}, No. 4, 1125-1130.
#'
#' Daubechies, I. (1992) \emph{Ten Lectures on Wavelets}, CBMS-NSF Regional
#' Conference Series in Applied Mathematics, SIAM: Philadelphia.
#'
#' Morris and Peravali (1999) Minimum-bandwidth discrete-time wavelets,
#' \emph{Signal Processing}, \bold{76}, No. 2, 181-193.
#'
#' Nielsen, M. (2001) On the Construction and Frequency Localization of Finite
#' Orthogonal Quadrature Filters, \emph{Journal of Approximation Theory},
#' \bold{108}, No. 1, 36-52.
#' @keywords ts
#' @examples
#'
#' ## Figure 4.14 in Gencay, Selcuk and Whitcher (2001)
#' par(mfrow=c(3,1), mar=c(5-2,4,4-1,2))
#' f.seq <- "HLLLLL"
#' plot(c(rep(0,33), wavelet.filter("mb4", f.seq), rep(0,33)), type="l",
#' xlab="", ylab="", main="D(4) in black, MB(4) in red")
#' lines(c(rep(0,33), wavelet.filter("d4", f.seq), rep(0,33)), col=2)
#' plot(c(rep(0,35), -wavelet.filter("mb8", f.seq), rep(0,35)), type="l",
#' xlab="", ylab="", main="D(8) in black, -MB(8) in red")
#' lines(c(rep(0,35), wavelet.filter("d8", f.seq), rep(0,35)), col=2)
#' plot(c(rep(0,39), wavelet.filter("mb16", f.seq), rep(0,39)), type="l",
#' xlab="", ylab="", main="D(16) in black, MB(16) in red")
#' lines(c(rep(0,39), wavelet.filter("d16", f.seq), rep(0,39)), col=2)
#'
#' @export wavelet.filter
wavelet.filter <- function(wf.name, filter.seq = "L", n = 512)
{
cascade <- function(f, x, j)
{
L <- length(f)
N <- length(x)
M <- (L - 1) * 2^j
M1 <- M - L + 2
M2 <- 2 * M - L + 2
if(N > M1)
stop("x is too long\n")
else x <- c(x, rep(0, M1 - N))
xj <- c(rep(0, M), x, rep(0, M))
yj <- rep(0, M2)
for(i in 1:L)
yj <- yj + f[L - i + 1] * xj[1:M2 + (i - 1) * 2^j]
yj
}
if(is.character(wf.name))
wf <- wave.filter(wf.name)
else
wf <- wf.name
J <- nchar(filter.seq)
key <- rev(substring(filter.seq, 1:J, 1:J))
f <- 1
fl <- wf$lpf
fh <- wf$hpf
for(k in 1:J) {
if(key[k] == "H")
f <- cascade(fh, f, k - 1)
else if(key[k] == "L")
f <- cascade(fl, f, k - 1)
else stop("Invalid filter.seq\n")
}
f
}
#' Squared Gain Function of a Filter
#'
#' Produces the modulus squared of the Fourier transform for a given filtering
#' sequence.
#'
#' Uses \code{cascade} subroutine to compute the squared gain function from a
#' given filtering sequence.
#'
#' @param wf.name Character string of wavelet filter.
#' @param filter.seq Character string of filter sequence. \code{H} means
#' high-pass filtering and \code{L} means low-pass filtering. Sequence is read
#' from right to left.
#' @param n Length of zero-padded filter. Frequency resolution will be
#' \code{n}/2+1.
#' @return Squared gain function.
#' @author B. Whitcher
#' @seealso \code{\link{wave.filter}}, \code{\link{wavelet.filter}}.
#' @keywords ts
#' @examples
#'
#' par(mfrow=c(2,2))
#' f.seq <- "H"
#' plot(0:256/512, squared.gain("d4", f.seq), type="l", ylim=c(0,2),
#' xlab="frequency", ylab="L = 4", main="Level 1")
#' lines(0:256/512, squared.gain("fk4", f.seq), col=2)
#' lines(0:256/512, squared.gain("mb4", f.seq), col=3)
#' abline(v=c(1,2)/4, lty=2)
#' legend(-.02, 2, c("Daubechies", "Fejer-Korovkin", "Minimum-Bandwidth"),
#' lty=1, col=1:3, bty="n", cex=1)
#' f.seq <- "HL"
#' plot(0:256/512, squared.gain("d4", f.seq), type="l", ylim=c(0,4),
#' xlab="frequency", ylab="", main="Level 2")
#' lines(0:256/512, squared.gain("fk4", f.seq), col=2)
#' lines(0:256/512, squared.gain("mb4", f.seq), col=3)
#' abline(v=c(1,2)/8, lty=2)
#' f.seq <- "H"
#' plot(0:256/512, squared.gain("d8", f.seq), type="l", ylim=c(0,2),
#' xlab="frequency", ylab="L = 8", main="")
#' lines(0:256/512, squared.gain("fk8", f.seq), col=2)
#' lines(0:256/512, squared.gain("mb8", f.seq), col=3)
#' abline(v=c(1,2)/4, lty=2)
#' f.seq <- "HL"
#' plot(0:256/512, squared.gain("d8", f.seq), type="l", ylim=c(0,4),
#' xlab="frequency", ylab="", main="")
#' lines(0:256/512, squared.gain("fk8", f.seq), col=2)
#' lines(0:256/512, squared.gain("mb8", f.seq), col=3)
#' abline(v=c(1,2)/8, lty=2)
#'
#' @export squared.gain
squared.gain <- function(wf.name, filter.seq = "L", n = 512)
{
cascade <- function(f, x, j)
{
L <- length(f)
N <- length(x)
M <- (L - 1) * 2^j
M1 <- M - L + 2
M2 <- 2 * M - L + 2
if(N > M1)
stop("x is too long\n")
else x <- c(x, rep(0, M1 - N))
xj <- c(rep(0, M), x, rep(0, M))
yj <- rep(0, M2)
for(i in 1:L)
yj <- yj + f[L - i + 1] * xj[1:M2 + (i - 1) * 2^j]
yj
}
if(is.character(wf.name))
wf <- wave.filter(wf.name)
else
wf <- wf.name
J <- nchar(filter.seq)
key <- rev(substring(filter.seq, 1:J, 1:J))
f <- 1
fl <- wf$lpf
fh <- wf$hpf
for(k in 1:J) {
if(key[k] == "H")
f <- cascade(fh, f, k - 1)
else if(key[k] == "L")
f <- cascade(fl, f, k - 1)
else stop("Invalid filter.seq\n")
}
Mod(fft(c(f, rep(0, n - length(f))))[1:(n/2 + 1)])^2
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/cascade.R
|
#' Wavelet Analysis of Univariate/Bivariate Time Series
#'
#' Produces an estimate of the multiscale variance, covariance or correlation
#' along with approximate confidence intervals.
#'
#' The time-independent wavelet variance is basically the average of the
#' squared wavelet coefficients across each scale. As shown in Percival
#' (1995), the wavelet variance is a scale-by-scale decomposition of the
#' variance for a stationary process, and certain non-stationary processes.
#'
#' @usage wave.variance(x, type = "eta3", p = 0.025)
#' @usage wave.covariance(x, y)
#' @usage wave.correlation(x, y, N, p = 0.975)
#' @aliases wave.variance wave.covariance wave.correlation
#' @param x first time series
#' @param y second time series
#' @param type character string describing confidence interval calculation;
#' valid methods are \code{gaussian}, \code{eta1}, \code{eta2}, \code{eta3},
#' \code{nongaussian}
#' @param p (one minus the) two-sided p-value for the confidence interval
#' @param N length of time series
#' @return Matrix with as many rows as levels in the wavelet transform object.
#' The first column provides the point estimate for the wavelet variance,
#' covariance, or correlation followed by the lower and upper bounds from the
#' confidence interval.
#' @author B. Whitcher
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#'
#' Percival, D. B. (1995) \emph{Biometrika}, \bold{82}, No. 3, 619-631.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#'
#' Whitcher, B., P. Guttorp and D. B. Percival (2000) Wavelet Analysis of
#' Covariance with Application to Atmospheric Time Series, \emph{Journal of
#' Geophysical Research}, \bold{105}, No. D11, 14,941-14,962.
#' @keywords ts
#' @examples
#'
#' ## Figure 7.3 from Gencay, Selcuk and Whitcher (2001)
#' data(ar1)
#' ar1.modwt <- modwt(ar1, "haar", 6)
#' ar1.modwt.bw <- brick.wall(ar1.modwt, "haar")
#' ar1.modwt.var2 <- wave.variance(ar1.modwt.bw, type="gaussian")
#' ar1.modwt.var <- wave.variance(ar1.modwt.bw, type="nongaussian")
#' par(mfrow=c(1,1), las=1, mar=c(5,4,4,2)+.1)
#' matplot(2^(0:5), ar1.modwt.var2[-7,], type="b", log="xy",
#' xaxt="n", ylim=c(.025, 6), pch="*LU", lty=1, col=c(1,4,4),
#' xlab="Wavelet Scale", ylab="")
#' matlines(2^(0:5), as.matrix(ar1.modwt.var)[-7,2:3], type="b",
#' pch="LU", lty=1, col=3)
#' axis(side=1, at=2^(0:5))
#' legend(1, 6, c("Wavelet variance", "Gaussian CI", "Non-Gaussian CI"),
#' lty=1, col=c(1,4,3), bty="n")
#'
#' ## Figure 7.8 from Gencay, Selcuk and Whitcher (2001)
#' data(exchange)
#' returns <- diff(log(as.matrix(exchange)))
#' returns <- ts(returns, start=1970, freq=12)
#' wf <- "d4"
#' J <- 6
#' demusd.modwt <- modwt(returns[,"DEM.USD"], wf, J)
#' demusd.modwt.bw <- brick.wall(demusd.modwt, wf)
#' jpyusd.modwt <- modwt(returns[,"JPY.USD"], wf, J)
#' jpyusd.modwt.bw <- brick.wall(jpyusd.modwt, wf)
#' returns.modwt.cov <- wave.covariance(demusd.modwt.bw, jpyusd.modwt.bw)
#' par(mfrow=c(1,1), las=0, mar=c(5,4,4,2)+.1)
#' matplot(2^(0:(J-1)), returns.modwt.cov[-(J+1),], type="b", log="x",
#' pch="*LU", xaxt="n", lty=1, col=c(1,4,4), xlab="Wavelet Scale",
#' ylab="Wavelet Covariance")
#' axis(side=1, at=2^(0:7))
#' abline(h=0)
#'
#' returns.modwt.cor <- wave.correlation(demusd.modwt.bw, jpyusd.modwt.bw,
#' N = dim(returns)[1])
#' par(mfrow=c(1,1), las=0, mar=c(5,4,4,2)+.1)
#' matplot(2^(0:(J-1)), returns.modwt.cor[-(J+1),], type="b", log="x",
#' pch="*LU", xaxt="n", lty=1, col=c(1,4,4), xlab="Wavelet Scale",
#' ylab="Wavelet Correlation")
#' axis(side=1, at=2^(0:7))
#' abline(h=0)
#'
#' @export wave.variance
wave.variance <- function(x, type = "eta3", p = 0.025) {
ci.gaussian <- function(x, y, p) {
find.first <- function(v) {
na.length <- sum(is.na(v))
v[na.length + 1]
}
x.acf <- lapply(x, FUN = my.acf)
Aj <- unlist(lapply(x.acf, FUN = function(v) sum(v * v, na.rm = TRUE))) -
unlist(lapply(x.acf, FUN = find.first))^2 / 2
wv.var <- 2 * Aj / unlist(lapply(x, FUN = function(v) sum(!is.na(v))))
return(data.frame(wavevar = y, lower = y - qnorm(1-p) * sqrt(wv.var),
upper = y + qnorm(1 - p) * sqrt(wv.var)))
}
  ci.eta1 <- function(x, y, p) {
    ## Placeholder: this interval type is not yet implemented and returns 0.
    ## x4 <- lapply(x, FUN = function(v) sum(v^4, na.rm = TRUE))
    ## eta1 <- x4.ss * unlist(lapply(x, FUN = function(v) sum(!is.na(v))))
    return(0)
  }
  ci.eta2 <- function(x, y, p) {
    ## Placeholder: this interval type is not yet implemented and returns 0.
    return(0)
  }
ci.eta3 <- function(x, y, p) {
x.length <- unlist(lapply(x, FUN=function(v)sum(!is.na(v))))
eta3 <- pmax(x.length / 2^(1:length(x)), 1)
return(data.frame(wavevar = y, lower = eta3 * y / qchisq(1-p, eta3),
upper = eta3 * y / qchisq(p, eta3)))
}
  ci.nongaussian <- function(x, y, p) {
    K <- 5
    J <- length(x)
    x.length <- unlist(lapply(x, FUN = function(v) sum(!is.na(v))))
    ## keep the squared coefficients as a list, one vector per scale
    x.ss <- lapply(x, FUN = function(v) v[!is.na(v)]^2)
    mt.var <- numeric(J)
    for(j in 1:J) {
      x.dpss <- dpss.taper(x.length[j], K, 4)
      V <- apply(x.dpss, 2, sum)
      ## eigencoefficients; use a local name so the loop bound J is not clobbered
      Jhat <- apply(x.dpss * x.ss[[j]], 2, sum)
      mt.var[j] <- sum((Jhat - y[j] * V)^2) / K / x.length[j]
    }
    return(data.frame(wavevar = y, lower = y - qnorm(1-p) * sqrt(mt.var),
                      upper = y + qnorm(1-p) * sqrt(mt.var)))
  }
x.ss <- unlist(lapply(x, FUN = function(v) sum(v*v, na.rm=TRUE)))
x.length <- unlist(lapply(x, FUN = function(v) sum(!is.na(v))))
y <- x.ss / x.length
switch(type,
gaussian = ci.gaussian(x, y, p),
eta1 = ci.eta1(x, y, p),
eta2 = ci.eta2(x, y, p),
eta3 = ci.eta3(x, y, p),
nongaussian = ci.nongaussian(x, y, p),
stop("Invalid selection of \"type\" for the confidence interval"))
}
##plot.var <- function(x, y=NA, ylim=range(x, y, na.rm=TRUE))
##{
## n <- dim(x)[1]
## plot(2^(0:(n-1)), x[,1], axes=FALSE, type="n", log="xy", ylim=ylim)
## axis(1, at=2^(0:(n-1)))
## axis(2)
## polyci(x[,1], x[,-1], -1)
## if(any(!is.na(y))) { polyci(y[,1], y[,-1], 1, color=5) }
## abline(h=0, lty=2)
##}
wave.covariance <- function(x, y) {
my.acf.na <- function(v) {
v <- v[!is.na(v)]
my.acf(v)
}
my.ccf.na <- function(u, v) {
u <- u[!is.na(u)]
v <- v[!is.na(v)]
n <- length(u)
u <- c(u, rep(0, n))
v <- c(v, rep(0, n))
n <- length(u)
x <- Re(fft(fft(u) * Conj(fft(v)), inverse=TRUE)) / 2 / n^2
x[c((n %/% 2):n, 1:(n %/% 2 - 1))]
}
compute.sum.xy.ccvs <- function(x, y) {
l <- length(x)
xy <- numeric(l)
for(i in 1:l)
xy[i] <- sum(my.ccf.na(x[[i]], y[[i]])^2)
xy
}
compute.xy.acvs <- function(x, y) {
l <- length(x)
xy <- vector("list", l)
for(i in 1:l) {
z <- x[[i]] * y[[i]]
xy[[i]] <- c(rev(z), z[-1])
}
xy
}
per <- function (z) {
n <- length(z)
(Mod(fft(z))^2/n)[1:(n%/%2 + 1)]
}
per2 <- function(x, y) {
n <- length(x)
fft.x <- fft(x)
fft.y <- fft(y)
((Conj(fft.x) * fft.y)/n)[1:(n %/% 2 + 1)]
}
l <- length(x)
xy <- vector("list", l)
for(i in 1:l)
xy[[i]] <- as.vector(x[[i]] * y[[i]])
z.ss <- unlist(lapply(xy, sum, na.rm=TRUE))
x.na <- lapply(x, is.na)
for(i in 1:l)
x.na[[i]] <- !x.na[[i]]
z.length <- unlist(lapply(x.na, sum))
zz <- z.ss / z.length
names(zz) <- names(x)
x.acvs <- lapply(x, my.acf.na)
y.acvs <- lapply(y, my.acf.na)
sum.xy.acvs <- unlist(lapply(compute.xy.acvs(x.acvs, y.acvs), sum))
sum.squared.xy.ccvs <- compute.sum.xy.ccvs(x, y)
var.gamma <- (sum.xy.acvs + sum.squared.xy.ccvs) / 2 / z.length
out <- data.frame(wavecov = zz, lower = zz - qnorm(.975) * sqrt(var.gamma),
upper = zz + qnorm(.975) * sqrt(var.gamma))
return(as.matrix(out))
}
##polyci <- function(x, xci, sp, color=2)
##{
## n <- length(x)
## y <- 2^(0:(n-1)+sp*.05)
## delta <- y - 2^(0:(n-1))
## for(i in 1:n){
## polygon(c(y[i] + .6*delta[i], y[i] + .6*delta[i], y[i] - .6*delta[i],
## y[i] - .6*delta[i]), c(xci[i,], xci[i,2:1]), border=FALSE,
## col=color, lty=1)
## }
## points(y, x, pch="-")
##}
##plot.cov <- function(x, ylim=range(x,0))
##{
## n <- dim(x)[1]
## plot(2^(0:(n-1)), x[,1], axes=FALSE, type="n", log="x", ylim=ylim)
## axis(1, at=2^(0:(n-1)))
## axis(2)
## polyci(x[,1], x[,-1], 1)
## abline(h=0, lty=2)
##}
wave.correlation <- function(x, y, N, p = .975) {
sum.of.squares <- function(x) { sum(x^2, na.rm=TRUE) / sum(!is.na(x)) }
sum.of.not.squares <- function(x) { sum(x, na.rm=TRUE) / sum(!is.na(x)) }
l <- length(x)
xy <- vector("list", l); xy.abs <- vector("list", l)
for(i in 1:l) {
xy[[i]] <- as.vector(x[[i]] * y[[i]])
xy.abs[[i]] <- as.vector(abs(x[[i]] * y[[i]]))
}
xy.cov <- unlist(lapply(xy, sum.of.not.squares))
x.var <- unlist(lapply(x, sum.of.squares))
y.var <- unlist(lapply(y, sum.of.squares))
xy.cor <- xy.cov / sqrt(x.var * y.var)
n <- trunc(N/2^(1:l))
out <- data.frame(wavecor=xy.cor,
lower=tanh(atanh(xy.cor)-qnorm(p)/sqrt(n-3)),
upper=tanh(atanh(xy.cor)+qnorm(p)/sqrt(n-3)))
return(as.matrix(out))
}
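## Example (sketch, not run): the confidence limits above are Fisher z
## intervals; for a single correlation of 0.6 based on 64 coefficients the
## same construction gives
if (FALSE) {
  tanh(atanh(0.6) + c(-1, 1) * qnorm(0.975) / sqrt(64 - 3))
}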
##plot.cor <- function(x, ylim=c(-1,1), cex=NULL)
##{
## n <- dim(x)[1]
## plot(2^(0:(n-1)), x[,1], axes=FALSE, type="n", log="x", ylim=ylim, cex=cex)
## axis(1, at=2^(0:(n-1)), cex=cex)
## axis(2, cex=cex)
## polyci(x[,1], x[,-1], 1)
## abline(h=0, lty=2)
##}
#' Compute Wavelet Cross-Covariance Between Two Time Series
#'
#' Computes wavelet cross-covariance or cross-correlation between two time
#' series.
#'
#' See references.
#'
#' @usage spin.covariance(x, y, lag.max = NA)
#' @usage spin.correlation(x, y, lag.max = NA)
#' @aliases spin.covariance spin.correlation
#' @param x first time series
#' @param y second time series, same length as \code{x}
#' @param lag.max maximum lag to compute cross-covariance (correlation)
#' @return List structure holding the wavelet cross-covariances (correlations)
#' according to scale.
#' @author B. Whitcher
#' @seealso \code{\link{wave.covariance}}, \code{\link{wave.correlation}}.
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#'
#' Whitcher, B., P. Guttorp and D. B. Percival (2000) Wavelet analysis of
#' covariance with application to atmospheric time series, \emph{Journal of
#' Geophysical Research}, \bold{105}, No. D11, 14,941-14,962.
#' @keywords ts
#' @examples
#'
#' ## Figure 7.9 from Gencay, Selcuk and Whitcher (2001)
#' data(exchange)
#' returns <- diff(log(exchange))
#' returns <- ts(returns, start=1970, freq=12)
#' wf <- "d4"
#' demusd.modwt <- modwt(returns[,"DEM.USD"], wf, 8)
#' demusd.modwt.bw <- brick.wall(demusd.modwt, wf)
#' jpyusd.modwt <- modwt(returns[,"JPY.USD"], wf, 8)
#' jpyusd.modwt.bw <- brick.wall(jpyusd.modwt, wf)
#' n <- dim(returns)[1]
#' J <- 6
#' lmax <- 36
#' returns.cross.cor <- NULL
#' for(i in 1:J) {
#' blah <- spin.correlation(demusd.modwt.bw[[i]], jpyusd.modwt.bw[[i]], lmax)
#' returns.cross.cor <- cbind(returns.cross.cor, blah)
#' }
#' returns.cross.cor <- ts(as.matrix(returns.cross.cor), start=-36, freq=1)
#' dimnames(returns.cross.cor) <- list(NULL, paste("Level", 1:J))
#' lags <- length(-lmax:lmax)
#' lower.ci <- tanh(atanh(returns.cross.cor) - qnorm(0.975) /
#' sqrt(matrix(trunc(n/2^(1:J)), nrow=lags, ncol=J, byrow=TRUE)
#' - 3))
#' upper.ci <- tanh(atanh(returns.cross.cor) + qnorm(0.975) /
#' sqrt(matrix(trunc(n/2^(1:J)), nrow=lags, ncol=J, byrow=TRUE)
#' - 3))
#' par(mfrow=c(3,2), las=1, pty="m", mar=c(5,4,4,2)+.1)
#' for(i in J:1) {
#' plot(returns.cross.cor[,i], ylim=c(-1,1), xaxt="n", xlab="Lag (months)",
#' ylab="", main=dimnames(returns.cross.cor)[[2]][i])
#' axis(side=1, at=seq(-36, 36, by=12))
#' lines(lower.ci[,i], lty=1, col=2)
#' lines(upper.ci[,i], lty=1, col=2)
#' abline(h=0,v=0)
#' }
#'
#' @export spin.covariance
spin.covariance <- function(x, y, lag.max = NA) {
xx <- zz <- x[!is.na(x)]
yy <- y[!is.na(y)]
n.length <- length(xx)
xx.length <- min(length(xx)-1, lag.max, na.rm=TRUE)
lag1 <- numeric(xx.length + 1)
lag2 <- numeric(xx.length + 1)
for(i in 1:(xx.length+1)) {
lag1[i] <- sum(xx * yy, na.rm=TRUE) / n.length
lag2[i] <- sum(zz * yy, na.rm=TRUE) / n.length
xx <- c(xx[2:n.length], NA)
zz <- c(NA, zz[1:(n.length-1)])
}
c(rev(lag2[-1]), lag1)
}
spin.correlation <- function(x, y, lag.max = NA) {
xx <- zz <- x[!is.na(x)]
yy <- y[!is.na(y)]
n.length <- length(xx)
xx.length <- min(length(xx)-1, lag.max, na.rm=TRUE)
xx.var <- mean(xx^2)
yy.var <- mean(yy^2)
lag1 <- numeric(xx.length + 1)
lag2 <- numeric(xx.length + 1)
for(i in 1:(xx.length+1)) {
lag1[i] <- sum(xx * yy, na.rm=TRUE) / sqrt(xx.var * yy.var) / n.length
lag2[i] <- sum(zz * yy, na.rm=TRUE) / sqrt(xx.var * yy.var) / n.length
xx <- c(xx[2:n.length], NA)
zz <- c(NA, zz[1:(n.length-1)])
}
c(rev(lag2[-1]), lag1)
}
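## Example (sketch, not run): with lag.max = 4, spin.covariance() returns a
## vector of 2 * 4 + 1 values whose middle entry is the lag-zero value, i.e.
## the mean cross-product of the two series (when there are no missing values).
if (FALSE) {
  x <- rnorm(128)
  y <- rnorm(128)
  sc <- spin.covariance(x, y, lag.max = 4)
  all.equal(sc[5], mean(x * y))
}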
##edof <- function(x) {
## x <- x[!is.na(x)]
## n <- length(x)
## x.acf <- my.acf(x)
## n * x.acf[1]^2 /
## sum((1 - abs(seq(-n+1,n-1))/n) * c(rev(x.acf[-1]), x.acf)^2)
##}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/cov.R
|
#' Dual-tree Complex 2D Discrete Wavelet Transform
#'
#' Dual-tree complex 2D discrete wavelet transform (DWT).
#'
#'
#' @usage cplxdual2D(x, J, Faf, af)
#' @usage icplxdual2D(w, J, Fsf, sf)
#' @aliases cplxdual2D icplxdual2D
#' @param x 2D array.
#' @param w wavelet coefficients.
#' @param J number of stages.
#' @param Faf first stage analysis filters for tree i.
#' @param af analysis filters for the remaining stages on tree i.
#' @param Fsf last stage synthesis filters for tree i.
#' @param sf synthesis filters for the preceding stages.
#' @return For the analysis of \code{x}, the output is \item{w}{wavelet
#' coefficients indexed by \code{[[j]][[i]][[d1]][[d2]]}, where
#' \eqn{j=1,\ldots,J} (scale), \eqn{i=1} (real part) or \eqn{i=2} (imag part),
#' \eqn{d1=1,2} and \eqn{d2=1,2,3} (orientations).} For the synthesis of
#' \code{w}, the output is \item{y}{output signal.}
#' @author Matlab: S. Cai, K. Li and I. Selesnick; R port: B. Whitcher
#' @seealso \code{\link{FSfarras}}, \code{\link{farras}}, \code{\link{afb2D}},
#' \code{\link{sfb2D}}.
#' @keywords ts
#' @examples
#'
#' \dontrun{
#' ## EXAMPLE: cplxdual2D
#' x = matrix(rnorm(32*32), 32, 32)
#' J = 5
#' Faf = FSfarras()$af
#' Fsf = FSfarras()$sf
#' af = dualfilt1()$af
#' sf = dualfilt1()$sf
#' w = cplxdual2D(x, J, Faf, af)
#' y = icplxdual2D(w, J, Fsf, sf)
#' err = x - y
#' max(abs(err))
#' }
#'
cplxdual2D <- function(x, J, Faf, af) {
## Dual-Tree Complex 2D Discrete Wavelet Transform
##
## USAGE:
## w = cplxdual2D(x, J, Faf, af)
## INPUT:
## x - 2-D array
## J - number of stages
## Faf{i}: first stage filters for tree i
## af{i}: filters for remaining stages on tree i
## OUTPUT:
## w{j}{i}{d1}{d2} - wavelet coefficients
## j = 1..J (scale)
## i = 1 (real part); i = 2 (imag part)
## d1 = 1,2; d2 = 1,2,3 (orientations)
## w{J+1}{m}{n} - lowpass coefficients
## d1 = 1,2; d2 = 1,2
## EXAMPLE:
## x = rand(256);
## J = 5;
## [Faf, Fsf] = FSfarras;
## [af, sf] = dualfilt1;
## w = cplxdual2D(x, J, Faf, af);
## y = icplxdual2D(w, J, Fsf, sf);
## err = x - y;
## max(max(abs(err)))
##
## WAVELET SOFTWARE AT POLYTECHNIC UNIVERSITY, BROOKLYN, NY
## http://eeweb.poly.edu/iselesni/WaveletSoftware/
## normalization
x <- x/2
w <- vector("list", J+1)
for (m in 1:2) {
w[[1]][[m]] <- vector("list", 2)
for (n in 1:2) {
w[[1]][[m]][[n]] <- vector("list", 2)
temp <- afb2D(x, Faf[[m]], Faf[[n]])
lo <- temp$lo
w[[1]][[m]][[n]] <- temp$hi
      if (J > 1) {
        for (j in 2:J) {
          temp <- afb2D(lo, af[[m]], af[[n]])
          lo <- temp$lo
          w[[j]][[m]][[n]] <- temp$hi
        }
      }
      ## store the lowpass subband (also needed when J == 1)
      w[[J+1]][[m]][[n]] <- lo
}
}
for (j in 1:J) {
for (m in 1:3) {
w[[j]][[1]][[1]][[m]] <- pm(w[[j]][[1]][[1]][[m]])
w[[j]][[2]][[2]][[m]] <- pm(w[[j]][[2]][[2]][[m]])
w[[j]][[1]][[2]][[m]] <- pm(w[[j]][[1]][[2]][[m]])
w[[j]][[2]][[1]][[m]] <- pm(w[[j]][[2]][[1]][[m]])
}
}
return(w)
}
icplxdual2D <- function(w, J, Fsf, sf) {
## Inverse Dual-Tree Complex 2D Discrete Wavelet Transform
##
## USAGE:
## y = icplxdual2D(w, J, Fsf, sf)
## INPUT:
## w - wavelet coefficients
## J - number of stages
## Fsf - synthesis filters for final stage
## sf - synthesis filters for preceding stages
## OUTPUT:
## y - output array
## See cplxdual2D
##
## WAVELET SOFTWARE AT POLYTECHNIC UNIVERSITY, BROOKLYN, NY
## http://eeweb.poly.edu/iselesni/WaveletSoftware/
for (j in 1:J) {
for (m in 1:3) {
w[[j]][[1]][[1]][[m]] <- pm(w[[j]][[1]][[1]][[m]])
w[[j]][[2]][[2]][[m]] <- pm(w[[j]][[2]][[2]][[m]])
w[[j]][[1]][[2]][[m]] <- pm(w[[j]][[1]][[2]][[m]])
w[[j]][[2]][[1]][[m]] <- pm(w[[j]][[2]][[1]][[m]])
}
}
y <- matrix(0, 2*nrow(w[[1]][[1]][[1]][[1]]), 2*ncol(w[[1]][[1]][[1]][[1]]))
for (m in 1:2) {
for (n in 1:2) {
lo <- w[[J+1]][[m]][[n]]
      if (J > 1) {
        for (j in J:2) {
          lo <- sfb2D(lo, w[[j]][[m]][[n]], sf[[m]], sf[[n]])
        }
      }
      ## final synthesis stage and accumulation (also needed when J == 1)
      lo <- sfb2D(lo, w[[1]][[m]][[n]], Fsf[[m]], Fsf[[n]])
      y <- y + lo
}
}
## normalization
return(y/2)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/cplxdual2D.R
|
#' Autocovariance and Autocorrelation Sequences for a Seasonal Persistent
#' Process
#'
#' The autocovariance and autocorrelation sequences from the time series model
#' in Figures 8, 9, 10, and 11 of Andel (1986). They were obtained through
#' numeric integration of the spectral density function.
#'
#' @usage data(acvs.andel8)
#' @usage data(acvs.andel9)
#' @usage data(acvs.andel10)
#' @usage data(acvs.andel11)
#' @name acvs.andel8
#' @docType data
#' @aliases acvs.andel9 acvs.andel10 acvs.andel11
#' @format A data frame with 4096 rows and three columns: lag, autocovariance
#' sequence, autocorrelation sequence.
#' @references Andel, J. (1986) Long memory time series models,
#' \emph{Kypernetika}, \bold{22}, No. 2, 105-123.
#' @keywords datasets
NULL
#' Simulated AR(1) Series
#'
#' Simulated AR(1) series used in Gencay, Selcuk and Whitcher (2001).
#'
#' @usage data(ar1)
#' @name ar1
#' @docType data
#' @format A vector containing 200 observations.
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#' @keywords datasets
NULL
#' Barbara Test Image
#'
#' The Barbara image comes from Allen Gersho's lab at the University of
#' California, Santa Barbara.
#'
#' @usage data(barbara)
#' @name barbara
#' @docType data
#' @format A 256 \eqn{\times}{x} 256 matrix.
#' @source Internet.
#' @keywords datasets
NULL
#' A Piecewise-Constant Function
#'
#' \deqn{blocks(x) = \sum_{j=1}^{11}(1 + {\rm sign}(x-p_j)) h_j / 2}{%
#' blocks(x) = sum[j=1,11] (1 + sign(x - p_j)) h_j/2}
#'
#' @usage data(blocks)
#' @name blocks
#' @docType data
#' @format A vector containing 512 observations.
#' @references Bruce, A., and H.-Y. Gao (1996) \emph{Applied Wavelet Analysis
#' with S-PLUS}, Springer: New York.
#' @source S+WAVELETS.
#' @keywords datasets
NULL
#' U.S. Consumer Price Index
#'
#' Monthly U.S. consumer price index from 1948:1 to 1999:12.
#'
#' @usage data(cpi)
#' @name cpi
#' @docType data
#' @format A vector containing 624 observations.
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#' @source Unknown.
#' @keywords datasets
NULL
#' Digital Photograph of Ingrid Daubechies
#'
#' A digital photograph of Ingrid Daubechies taken at the 1993 AMS winter
#' meetings in San Antonio, Texas. The photograph was taken by David Donoho
#' with a Canon XapShot video still frame camera.
#'
#' @usage data(dau)
#' @name dau
#' @docType data
#' @format A 256 \eqn{\times}{x} 256 matrix.
#' @references Bruce, A., and H.-Y. Gao (1996) \emph{Applied Wavelet Analysis
#' with S-PLUS}, Springer: New York.
#' @source S+WAVELETS.
#' @keywords datasets
NULL
#' Sinusoid with Changing Amplitude and Frequency
#'
#' \deqn{doppler(x) = \sqrt{x(1 - x)}
#'   \sin\left(\frac{2.1\pi}{x+0.05}\right)}{%
#' doppler(x) = sqrt{x(1-x)} sin[(2.1*pi)/(x+0.05)]}
#'
#' @usage data(doppler)
#' @name doppler
#' @docType data
#' @format A vector containing 512 observations.
#' @references Bruce, A., and H.-Y. Gao (1996) \emph{Applied Wavelet Analysis
#' with S-PLUS}, Springer: New York.
#' @source S+WAVELETS.
#' @keywords datasets
NULL
#' Exchange Rates Between the Deutsche Mark, Japanese Yen and U.S. Dollar
#'
#' Monthly foreign exchange rates for the Deutsche Mark - U.S. Dollar (DEM-USD)
#' and Japanese Yen - U.S. Dollar (JPY-USD) starting in 1970.
#'
#' @usage data(exchange)
#' @name exchange
#' @docType data
#' @format A bivariate time series containing 348 observations.
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#' @source Unknown.
#' @keywords datasets
NULL
#' Sine with Jumps at 0.3 and 0.72
#'
#' \deqn{heavisine(x) = 4\sin(4{\pi}x) - \mathrm{sign}(x-0.3) -
#'   \mathrm{sign}(0.72-x)}{%
#' heavisine(x) = 4*sin(4*pi*x) - sign(x-0.3) - sign(0.72-x)}
#'
#' @usage data(heavisine)
#' @name heavisine
#' @docType data
#' @format A vector containing 512 observations.
#' @references Bruce, A., and H.-Y. Gao (1996) \emph{Applied Wavelet Analysis
#' with S-PLUS}, Springer: New York.
#' @source S+WAVELETS.
#' @keywords datasets
NULL
#' Daily IBM Stock Prices
#'
#' Daily IBM stock prices spanning May~17, 1961 to November~2, 1962.
#'
#' @usage data(ibm)
#' @name ibm
#' @docType data
#' @format A vector containing 369 observations.
#' @source Box, G. E.P. and Jenkins, G. M. (1976) \emph{Time Series Analysis:
#' Forecasting and Control}, Holden Day, San Francisco, 2nd edition.
#' @keywords datasets
NULL
#' Japanese Gross National Product
#'
#' Quarterly Japanese gross national product from 1955:1 to 1996:4.
#'
#' @usage data(japan)
#' @name japan
#' @docType data
#' @format A vector containing 169 observations.
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#'
#' Hecq, A. (1998) Does seasonal adjustment induce common cycles?,
#' \emph{Empirical Economics}, \bold{59}, 289-297.
#' @source Unknown.
#' @keywords datasets
NULL
#' Sine with Jumps at 0.625 and 0.875
#'
#' \deqn{jumpsine(x) = 10\left( \sin(4{\pi}x) +
#' I_{[0.625 < x \leq 0.875]}\right)}{%
#' jumpsine(x) = 10*(sin(4*pi*x) + I_[0.625 < x <= 0.875])}
#'
#' @usage data(jumpsine)
#' @name jumpsine
#' @docType data
#' @format A vector containing 512 observations.
#' @references Bruce, A., and H.-Y. Gao (1996) \emph{Applied Wavelet Analysis
#' with S-PLUS}, Springer: New York.
#' @source S+WAVELETS.
#' @keywords datasets
NULL
#' 1995 Kobe Earthquake Data
#'
#' Seismograph (vertical acceleration, nm/sq.sec) of the Kobe earthquake,
#' recorded at Tasmania University, Hobart, Australia on 16 January 1995
#' beginning at 20:56:51 (GMT) and continuing for 51 minutes at 1 second
#' intervals.
#'
#' @usage data(kobe)
#' @name kobe
#' @docType data
#' @format A vector containing 3048 observations.
#' @source Data management centre, Washington University.
#' @keywords datasets
NULL
#' Linear Chirp
#'
#' \deqn{linchirp(x) = \sin(0.125 \pi n x^2)}{%
#' linchirp(x) = sin(0.125*pi*n*x^2)}
#'
#' @usage data(linchirp)
#' @name linchirp
#' @docType data
#' @format A vector containing 512 observations.
#' @references Bruce, A., and H.-Y. Gao (1996) \emph{Applied Wavelet Analysis
#' with S-PLUS}, Springer: New York.
#' @source S+WAVELETS.
#' @keywords datasets
NULL
#' Mexican Money Supply
#'
#' Percentage changes in monthly Mexican money supply.
#'
#' @usage data(mexm)
#' @name mexm
#' @docType data
#' @format A vector containing 516 observations.
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#' @source Unknown.
#' @keywords datasets
NULL
#' Nile River Minima
#'
#' Yearly minimal water levels of the Nile river for the years 622 to 1281,
#' measured at the Roda gauge near Cairo (Tousson, 1925, p. 366-385). The data
#' are listed in chronological sequence by row.
#'
#' The original Nile river data supplied by Beran contained only 500
#' observations (622 to 1121). However, the book claimed to have 660
#' observations (622 to 1281). The remaining observations from the book were
#' added, by hand, but the series still only contained 653 observations (622 to
#' 1264).
#'
#' Note that the data now consists of 663 observations (spanning the years
#' 622-1284), as in the original source (Toussoun, 1925).
#'
#' @usage data(nile)
#' @name nile
#' @docType data
#' @format A length 663 vector.
#' @references Beran, J. (1994) \emph{Statistics for Long-Memory Processes},
#' Chapman Hall: Englewood, NJ.
#' @source Toussoun, O. (1925) M\'emoire sur l'Histoire du Nil, Volume 18 in
#' \emph{M\'emoires a l'Institut d'Egypte}, pp. 366-404.
#' @keywords datasets
NULL
#' U.S. Tourism
#'
#' Quarterly U.S. tourism figures from 1960:1 to 1999:4.
#'
#' @usage data(tourism)
#' @name tourism
#' @docType data
#' @format A vector containing 160 observations.
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#' @source Unknown.
#' @keywords datasets
NULL
#' U.S. Unemployment
#'
#' Monthly U.S. unemployment figures from 1948:1 to 1999:12.
#'
#' @usage data(unemploy)
#' @name unemploy
#' @docType data
#' @format A vector containing 624 observations.
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#' @source Unknown.
#' @keywords datasets
NULL
#' Image with Box and X
#'
#' \deqn{xbox(i,j) = I_{[i=n/4,\;3n/4,\;j;~ n/4 \leq j \leq 3n/4]} +
#'   I_{[n/4 \leq i \leq 3n/4;~ j=n/4,\;3n/4,\;i]}}{%
#' xbox(i,j) = I_[i = n/4, 3n/4, j; n/4 \leq j \leq 3n/4] +
#' I_[n/4 \leq i \leq 3n/4; j = n/4, 3n/4, i]}
#'
#' @usage data(xbox)
#' @name xbox
#' @docType data
#' @format A 128 \eqn{\times}{x} 128 matrix.
#' @references Bruce, A., and H.-Y. Gao (1996) \emph{Applied Wavelet Analysis
#' with S-PLUS}, Springer: New York.
#' @source S+WAVELETS.
#' @keywords datasets
NULL
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/data.R
|
#' Wavelet Shrinkage via Thresholding
#'
#' Perform wavelet shrinkage using data-analytic, hybrid SURE, manual, SURE, or
#' universal thresholding.
#'
#' An extensive amount of literature has been written on wavelet shrinkage.
#' The functions here represent the most basic approaches to the problem of
#' nonparametric function estimation. See the references for further
#' information.
#'
#' @usage da.thresh(wc, alpha = .05, max.level = 4, verbose = FALSE, return.thresh = FALSE)
#' @usage hybrid.thresh(wc, max.level = 4, verbose = FALSE, seed = 0)
#' @usage manual.thresh(wc, max.level = 4, value, hard = TRUE)
#' @usage sure.thresh(wc, max.level = 4, hard = TRUE)
#' @usage universal.thresh(wc, max.level = 4, hard = TRUE)
#' @usage universal.thresh.modwt(wc, max.level = 4, hard = TRUE)
#' @aliases Thresholding da.thresh hybrid.thresh manual.thresh sure.thresh
#' universal.thresh universal.thresh.modwt bishrink soft
#' @param wc wavelet coefficients
#' @param alpha level of the hypothesis tests
#' @param max.level maximum level of coefficients to be affected by threshold
#' @param verbose if \code{verbose=TRUE} then information is printed to the
#' screen
#' @param value threshold value (only utilized in \code{manual.thresh})
#' @param hard Boolean value; if \code{hard=FALSE} then soft thresholding is used
#' @param seed sets random seed (only utilized in \code{hybrid.thresh})
#' @param return.thresh if \code{return.thresh=TRUE} then the vector of
#' threshold values is returned, otherwise the surviving wavelet coefficients
#' are returned
#' @return The default output is a list structure, the same length as was
#' input, containing only those wavelet coefficients surviving the threshold.
#' @author B. Whitcher (some code taken from R. Todd Ogden)
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#'
#' Ogden, R. T. (1996) \emph{Essential Wavelets for Statistical Applications
#' and Data Analysis}, Birkhauser.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#'
#' Vidakovic, B. (1999) \emph{Statistical Modeling by Wavelets}, John Wiley and
#' Sons.
#' @keywords ts
manual.thresh <- function(wc, max.level=4, value, hard=TRUE)
{
wc.fine <- wc[["d1"]]
factor <- median(abs(wc.fine)) / .6745
wc.shrink <- wc
if(hard) {
# Hard thresholding
for(i in names(wc)[1:max.level]) {
wci <- wc[[i]]
unithresh <- factor * value
wc.shrink[[i]] <- wci * (abs(wci) > unithresh)
}
}
else {
# Soft thresholding
for(i in names(wc)[1:max.level]) {
wci <- wc[[i]]
unithresh <- factor * value
wc.shrink[[i]] <- sign(wci) * (abs(wci) - unithresh) *
(abs(wci) > unithresh)
}
}
wc.shrink
}
universal.thresh <- function(wc, max.level=4, hard=TRUE)
{
n <- length(idwt(wc))
wc.fine <- wc[["d1"]]
factor <- median(abs(wc.fine)) / .6745
wc.shrink <- wc
if(hard) {
# Hard thresholding
for(i in names(wc)[1:max.level]) {
wci <- wc[[i]]
unithresh <- factor * sqrt(2 * log(n))
wc.shrink[[i]] <- wci * (abs(wci) > unithresh)
}
}
else {
# Soft thresholding
for(i in names(wc)[1:max.level]) {
wci <- wc[[i]]
unithresh <- factor * sqrt(2 * log(n))
wc.shrink[[i]] <- sign(wci) * (abs(wci) - unithresh) *
(abs(wci) > unithresh)
}
}
wc.shrink
}
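## Example (sketch, not run): universal hard thresholding of a noisy version
## of the built-in "blocks" signal, using the dwt/idwt pair from this package.
## The noise level and number of levels are arbitrary choices.
if (FALSE) {
  data(blocks)
  noisy <- blocks + rnorm(length(blocks), sd = 0.25)
  noisy.dwt <- dwt(noisy, wf = "d4", n.levels = 4)
  denoised <- idwt(universal.thresh(noisy.dwt, max.level = 4, hard = TRUE))
  plot(noisy, type = "l", col = "gray")
  lines(denoised, col = "red")
}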
universal.thresh.modwt <- function(wc, max.level=4, hard=TRUE)
{
n <- length(wc[[1]])
wc.fine <- wc[["d1"]]
factor <- sqrt(2) * median(abs(wc.fine)) / .6745
wc.shrink <- wc
j <- 1
if(hard) {
## Hard thresholding
for(i in names(wc)[1:max.level]) {
wci <- wc[[i]]
unithresh <- factor * sqrt(2 * log(n)) / 2^(j/2)
wc.shrink[[i]] <- wci * (abs(wci) > unithresh)
j <- j+1
}
}
else {
## Soft thresholding
for(i in names(wc)[1:max.level]) {
wci <- wc[[i]]
unithresh <- factor * sqrt(2 * log(n)) / 2^(j/2)
wc.shrink[[i]] <- sign(wci) * (abs(wci) - unithresh) *
(abs(wci) > unithresh)
j <- j+1
}
}
wc.shrink
}
sure.thresh <- function(wc, max.level=4, hard=TRUE)
{
wc.shrink <- wc
sure <- function(t, x) {
ax <- sort(abs(x))
num <- match(FALSE, ax <= t, nomatch = length(ax) + 1) - 1
length(ax) - 2 * num + sum(pmin(ax, t)^2)
}
for(i in names(wc)[1:max.level]) {
wci <- wc[[i]]
ni <- length(wci)
factor <- median(abs(wci)) / .6745
xi <- wci / factor
sxi <- sort(abs(xi))^2
s <- cumsum(sxi) + ((ni - 1):0) * sxi
risk <- (ni - (2 * (1:ni)) + s) / ni
surethresh <- sqrt(sxi[order(risk)[1]])
if(hard) {
## Hard thresholding
wc.shrink[[i]] <- wci * (abs(xi) > surethresh)
}
else {
## Soft thresholding
wc.shrink[[i]] <- sign(wci) * (abs(wci) - factor*surethresh) *
(abs(xi) > surethresh)
}
}
return(wc.shrink)
}
hybrid.thresh <- function(wc, max.level = 4, verbose = FALSE, seed = 0)
{
shrinkit <- function(coeffs, thresh)
sign(coeffs) * pmax(abs(coeffs) - thresh, 0)
sure <- function(t, x) {
ax <- sort(abs(x))
num <- match(FALSE, ax <= t, nomatch = length(ax) + 1) - 1
length(ax) - 2 * num + sum(pmin(ax, t)^2)
}
wc.shrink <- wc
n <- length(unlist(wc))
nlev <- log(n + 1, 2) - 1
i <- 1
iloc <- 1
while(i <= max.level) {
## Extract current level coefficients from all wavelet coefficients
raw <- wc[[names(wc)[i]]]
d <- length(raw)
## Test: if the variance is small enough, just use threshold sqrt(2logd)
if((sum(raw^2) - d)/d <= sqrt(i^3/2^i)) {
if(verbose)
cat(paste("At level ", i, " the threshhold is sqrt(2log(d)): ",
sqrt(2 * log(d)), "\n", sep = ""))
wc.shrink[[names(wc)[i]]] <- shrinkit(wc[[names(wc)[i]]], sqrt(2*log(d)))
}
else {
## Generate random subset
if(length(seed) != 1)
.Random.seed <- seed
Iset <- sort(sample(d, d/2))
rawI <- raw[Iset] / (median(abs(raw[Iset])) / .6745)
rawIp <- raw[ - Iset] / (median(abs(raw[ - Iset])) / .6745)
ggI <- sort(abs(rawI))
ggIp <- sort(abs(rawIp))
## Calculate SURE for all possible thresholds
surevecI <- sapply(c(ggI[ggI < sqrt(2 * log(d))], 0,
sqrt(2 * log(d))), sure, ggI)
surevecIp <- sapply(c(ggIp[ggI < sqrt(2 * log(d))], 0,
sqrt(2 * log(d))), sure, ggIp)
## Threshold that minimizes risk
llI <- length(surevecI)
llIp <- length(surevecIp)
      ## The minimum occurs either at sqrt(2logd), at 0, or at one of the
      ## observed coefficient magnitudes
      if(min(surevecI) == surevecI[llI])
        threshI <- sqrt(2 * log(d))
      else if(min(surevecI) == surevecI[llI - 1])
        threshI <- 0
      else threshI <- ggI[match(min(surevecI), surevecI)]
      ## Repeat for the complementary subset
      if(min(surevecIp) == surevecIp[llIp])
        threshIp <- sqrt(2 * log(d))
      else if(min(surevecIp) == surevecIp[llIp - 1])
        threshIp <- 0
      else
        threshIp <- ggIp[match(min(surevecIp), surevecIp)]
if(verbose) {
cat(paste("At level ", i, ", threshold1 is ", threshI, "\n",
sep = ""))
cat(paste("At level ", i, ", threshold2 is ", threshIp,
"\n", sep = ""))
}
## Perform shrinking
newI <- shrinkit(rawI, threshIp)
newIp <- shrinkit(rawIp, threshI)
new <- rep(0, d)
new[Iset] <- newI
new[ - Iset] <- newIp
wc.shrink[[names(wc)[i]]] <- new
}
## Otherwise, go through all this stuff
iloc <- iloc + 2^i
i <- i + 1
}
wc.shrink
}
da.thresh <- function(wc, alpha=.05, max.level=4, verbose=FALSE,
return.thresh=FALSE) {
onebyone2 <- function(dat, alpha) {
kolsmi.chi2 <- function(dat) {
n <- length(dat)
return(max(abs(cumsum(dat)-(1:n)*sum(dat)/n))/sqrt(2*n))
}
crit <- c(seq(0.28,1.49,by=.01), seq(1.50,2.48,by=.02))
alph <- c(.999999,.999996,.999991,.999979,.999954,.999909,.999829,
.999697,.999489,.999174,.998715,.998071,.997192,.996028,
.994524,.992623,.990270,.987410,.983995,.979978,.975318,
.969983,.963945,.957186,.949694,.941466,.932503,.922817,
.912423,.901344,.889605,.877240,.864282,.850771,.836775,
.822247,.807323,.792013,.776363,.760418,.744220,.727811,
.711235,.694529,.677735,.660887,.644019,.627167,.610360,
.593628,.576998,.560495,.544143,.527959,.511970,.496192,
.480634,.465318,.450256,.435454,.420930,.406684,.392730,
.379072,.365714,.352662,.339918,.327484,.315364,.303556,
.292060,.280874,.270000,.259434,.249174,.239220,.229566,
.220206,.211140,.202364,.193872,.185658,.177718,.170050,
.162644,.155498,.148606,.141962,.135558,.129388,.123452,
.117742,.112250,.106970,.101896,.097028,.092352,.087868,
.083568,.079444,.075495,.071712,.068092,.064630,.061318,
.058152,.055128,.052244,.049488,.046858,.044350,.041960,
.039682,.037514,.035448,.033484,.031618,.029842,.028154,
.026552,.025030,.023588,.022218,.019690,.017422,.015390,
.013574,.011952,.010508,.009223,.008083,.007072,.006177,
.005388,.004691,.004078,.003540,.003068,.002654,.002293,
.001977,.001703,.001464,.001256,.001076,.000921,.000787,
.000671,.000572,.000484,.000412,.000350,.000295,.000250,
.000210,.000178,.000148,.000126,.000104,.000088,.000074,
.000060,.000051,.000042,.000035,.000030,.000024,.000020,
.000016,.000013,.000011,.000009)
if(alpha < min(alph) || alpha > max(alph))
stop("alpha =",alpha,"is out of range")
ind <- match(TRUE, alpha > alph)
critval <- crit[ind-1]+(alph[ind-1]-alpha)*(crit[ind]-crit[ind-1]) /
(alph[ind-1]-alph[ind])
i <- length(dat)
cc <- kolsmi.chi2(dat)
while(cc[length(cc)] > critval && i > 1) {
i <- i-1
cc <- c(cc,kolsmi.chi2(dat[sort(order(dat)[1:i])]))
}
return(cc)
}
getthrda2 <- function(dat, alpha) {
a <- onebyone2(dat, alpha)
if(length(a) == length(dat))
if(1 - pchisq(min(dat),1) < alpha)
ggg <- 0
else
ggg <- sqrt(min(dat))
else
ggg <- sqrt(max(dat[sort(order(dat)[1:(length(dat)-length(a)+1)])]))
return(ggg)
}
shrinkit <- function(coeffs, thresh)
sign(coeffs) * pmax(abs(coeffs) - thresh, 0)
if(alpha <= .000009 || alpha >= .999999)
stop("alpha out of range")
ans <- wc
n <- length(unlist(wc))
nlev <- log(n+1, 2)-1
i <- 1
iloc <- 1
while(i <= max.level) {
gg <- wc[[names(wc)[i]]]
thresh <- getthrda2(gg^2,alpha)
if(verbose)
cat(paste("At level ",i,", the threshold is ",thresh, "\n",sep=""))
if(return.thresh)
if(i == nlev)
rt <- thresh
else
rt <- c(thresh, rt)
else
ans[[names(wc)[i]]] <- shrinkit(wc[[names(wc)[i]]], thresh)
iloc <- iloc + 2^i
i <- i+1
}
if(return.thresh)
return(rt)
else
return(ans)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/denoise.R
|
#' Kingsbury's Q-filters for the Dual-Tree Complex DWT
#'
#' Kingsbury's Q-filters for the dual-tree complex DWT.
#'
#' These cofficients are rounded to 8 decimal places.
#'
#' @aliases dualfilt1 AntonB
#' @return \item{af}{List (i=1,2) - analysis filters for tree i} \item{sf}{List
#' (i=1,2) - synthesis filters for tree i} Note: \code{af[[2]]} is the reverse
#' of \code{af[[1]]}.
#' @author Matlab: S. Cai, K. Li and I. Selesnick; R port: B. Whitcher
#' @seealso \code{\link{dualtree}}
#' @references Kingsbury, N.G. (2000). A dual-tree complex wavelet transform
#' with improved orthogonality and symmetry properties, \emph{Proceedings of
#' the IEEE Int. Conf. on Image Proc.} (ICIP).
#' @keywords ts
#' @export dualfilt1
dualfilt1 <- function() {
af1 <- c(0.03516384000000, 0,
0, 0,
-0.08832942000000, -0.11430184000000,
0.23389032000000, 0,
0.76027237000000, 0.58751830000000,
0.58751830000000, -0.76027237000000,
0, 0.23389032000000,
-0.11430184000000, 0.08832942000000,
0, 0,
0, -0.03516384000000)
af1 <- matrix(af1, ncol=2, byrow=TRUE)
af2 <- c(0, -0.03516384000000,
0, 0,
-0.11430184000000, 0.08832942000000,
0, 0.23389032000000,
0.58751830000000, -0.76027237000000,
0.76027237000000, 0.58751830000000,
0.23389032000000, 0,
-0.08832942000000, -0.11430184000000,
0, 0,
0.03516384000000, 0)
af2 <- matrix(af2, ncol=2, byrow=TRUE)
sf1 <- af1[nrow(af1):1, ]
sf2 <- af2[nrow(af2):1, ]
list(af = list(af1, af2), sf = list(sf1, sf2))
}
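## Example (sketch, not run): verify the relationship noted above, namely that
## the tree-2 analysis filters are the (row-)reverse of the tree-1 filters.
if (FALSE) {
  qf <- dualfilt1()
  all.equal(qf$af[[2]], qf$af[[1]][nrow(qf$af[[1]]):1, ])
}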
#' Farras nearly symmetric filters
#'
#' Farras nearly symmetric filters for orthogonal 2-channel perfect
#' reconstruction filter bank and Farras filters organized for the dual-tree
#' complex DWT.
#'
#'
#' @aliases farras FSfarras
#' @return \item{af}{List (i=1,2) - analysis filters for tree i} \item{sf}{List
#' (i=1,2) - synthesis filters for tree i}
#' @author Matlab: S. Cai, K. Li and I. Selesnick; R port: B. Whitcher
#' @seealso \code{\link{afb}}, \code{\link{dualtree}}, \code{\link{dualfilt1}}.
#' @references A. F. Abdelnour and I. W. Selesnick. \dQuote{Nearly symmetric
#' orthogonal wavelet bases}, Proc. IEEE Int. Conf. Acoust., Speech, Signal
#' Processing (ICASSP), May 2001.
#' @keywords ts
FSfarras <- function() {
af1 <- c(0, 0,
-0.08838834764832, -0.01122679215254,
0.08838834764832, 0.01122679215254,
0.69587998903400, 0.08838834764832,
0.69587998903400, 0.08838834764832,
0.08838834764832, -0.69587998903400,
-0.08838834764832, 0.69587998903400,
0.01122679215254, -0.08838834764832,
0.01122679215254, -0.08838834764832,
0, 0)
af1 <- matrix(af1, ncol=2, byrow=TRUE)
sf1 <- af1[nrow(af1):1, ]
af2 <- c(0.01122679215254, 0,
0.01122679215254, 0,
-0.08838834764832, -0.08838834764832,
0.08838834764832, -0.08838834764832,
0.69587998903400, 0.69587998903400,
0.69587998903400, -0.69587998903400,
0.08838834764832, 0.08838834764832,
-0.08838834764832, 0.08838834764832,
0, 0.01122679215254,
0, -0.01122679215254)
af2 <- matrix(af2, ncol=2, byrow=TRUE)
sf2 <- af2[nrow(af2):1, ]
list(af = list(af1, af2), sf = list(sf1, sf2))
}
farras <- function() {
af <- c(0, -0.01122679215254, 0, 0.01122679215254,
-0.08838834764832, 0.08838834764832,
0.08838834764832, 0.08838834764832,
0.69587998903400, -0.69587998903400,
0.69587998903400, 0.69587998903400,
0.08838834764832, -0.08838834764832,
-0.08838834764832, -0.08838834764832,
0.01122679215254, 0, 0.01122679215254, 0)
af <- matrix(af, nrow=10, byrow=TRUE)
sf <- af[nrow(af):1, ]
list(af = af, sf = sf)
}
#' Miscellaneous Functions for Dual-Tree Wavelet Software
#'
#' Miscellaneous functions for dual-tree wavelet software.
#'
#'
#' @usage cshift(x, m)
#' @usage cshift2D(x, m)
#' @usage pm(a, b)
#' @aliases cshift cshift2D pm
#' @param x N-point vector
#' @param m amount of shift
#' @param a,b input parameters
#' @return \item{y}{vector \code{x} will be shifed by \code{m} samples to the
#' left or matrix \code{x} will be shifed by \code{m} samples down.}
#' \item{u}{(a + b) / sqrt(2)} \item{v}{(a - b) / sqrt(2)}
#' @author Matlab: S. Cai, K. Li and I. Selesnick; R port: B. Whitcher
#' @keywords ts
cshift <- function(x, m) {
N <- length(x)
n <- 0:(N-1)
n <- (n-m) %% N
y <- x[n+1]
y
}
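## Example (sketch, not run): cshift() performs a circular shift, with
## y[k] = x[(k - m) mod N]; shifting 1:6 by m = 2 gives c(5, 6, 1, 2, 3, 4).
if (FALSE) {
  cshift(1:6, 2)
}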
#' Filter Banks for Dual-Tree Wavelet Transforms
#'
#' Analysis and synthesis filter banks used in dual-tree wavelet algorithms.
#'
#' The functions \code{afb2D.A} and \code{sfb2D.A} implement the convolutions,
#' either for analysis or synthesis, in one dimension only. Thus, they are the
#' workhorses of \code{afb2D} and \code{sfb2D}. The output for the analysis
#' filter bank along one dimension (\code{afb2D.A}) is a list with two elements
#' \describe{ \item{lo}{low-pass subband} \item{hi}{high-pass subband} } where
#' the dimension of analysis will be half its original length. The output for
#' the synthesis filter bank along one dimension (\code{sfb2D.A}) will be the
#' output array, where the dimension of synthesis will be twice its original
#' length.
#'
#' @usage afb(x, af)
#' @usage afb2D(x, af1, af2 = NULL)
#' @usage afb2D.A(x, af, d)
#' @usage sfb(lo, hi, sf)
#' @usage sfb2D(lo, hi, sf1, sf2 = NULL)
#' @usage sfb2D.A(lo, hi, sf, d)
#' @aliases afb afb2D afb2D.A sfb sfb2D sfb2D.A
#' @param x vector or matrix of observations
#' @param af analysis filters. First element of the list is the low-pass
#' filter, second element is the high-pass filter.
#' @param af1,af2 analysis filters for the first and second dimension of a 2D
#' array.
#' @param sf synthesis filters. First element of the list is the low-pass
#' filter, second element is the high-pass filter.
#' @param sf1,sf2 synthesis filters for the first and second dimension of a 2D
#' array.
#' @param d dimension of filtering (d = 1 or 2)
#' @param lo low-frequency coefficients
#' @param hi high-frequency coefficients
#' @return In one dimension the output for the analysis filter bank
#' (\code{afb}) is a list with two elements \item{lo}{Low frequency output}
#' \item{hi}{High frequency output} and the output for the synthesis filter
#' bank (\code{sfb}) is the output signal.
#'
#' In two dimensions the output for the analysis filter bank (\code{afb2D}) is
#' a list with four elements \item{lo}{low-pass subband} \item{hi[[1]]}{'lohi'
#' subband} \item{hi[[2]]}{'hilo' subband} \item{hi[[3]]}{'hihi' subband} and
#' the output for the synthesis filter bank (\code{sfb2D}) is the output array.
#' @author Matlab: S. Cai, K. Li and I. Selesnick; R port: B. Whitcher
#' @keywords ts
#' @examples
#'
#' ## EXAMPLE: afb, sfb
#' af = farras()$af
#' sf = farras()$sf
#' x = rnorm(64)
#' x.afb = afb(x, af)
#' lo = x.afb$lo
#' hi = x.afb$hi
#' y = sfb(lo, hi, sf)
#' err = x - y
#' max(abs(err))
#'
#' ## EXAMPLE: afb2D, sfb2D
#' x = matrix(rnorm(32*64), 32, 64)
#' af = farras()$af
#' sf = farras()$sf
#' x.afb2D = afb2D(x, af, af)
#' lo = x.afb2D$lo
#' hi = x.afb2D$hi
#' y = sfb2D(lo, hi, sf, sf)
#' err = x - y
#' max(abs(err))
#'
#' ## Example: afb2D.A, sfb2D.A
#' x = matrix(rnorm(32*64), 32, 64)
#' af = farras()$af
#' sf = farras()$sf
#' x.afb2D.A = afb2D.A(x, af, 1)
#' lo = x.afb2D.A$lo
#' hi = x.afb2D.A$hi
#' y = sfb2D.A(lo, hi, sf, 1)
#' err = x - y
#' max(abs(err))
afb <- function(x, af) {
N <- length(x)
L <- nrow(af)/2
x <- cshift(x,-L)
## lowpass filter
lo <- convolve(x, af[,1], conj=FALSE, type="open")
lo <- cshift(lo,-(2*L-1))
lo <- lo[seq(1, length(lo), by=2)]
lo[1:L] <- lo[N/2+(1:L)] + lo[1:L]
lo <- lo[1:(N/2)]
## highpass filter
hi <- convolve(x, af[,2], conj=FALSE, type="open")
hi <- cshift(hi,-(2*L-1))
hi <- hi[seq(1, length(hi), by=2)]
hi[1:L] <- hi[N/2+(1:L)] + hi[1:L]
hi <- hi[1:(N/2)]
list(lo = lo, hi = hi)
}
#' Dual-tree Complex Discrete Wavelet Transform
#'
#' One- and two-dimensional dual-tree complex discrete wavelet transforms
#' developed by Kingsbury and Selesnick \emph{et al.}
#'
#' In one dimension \eqn{N} is divisible by \eqn{2^J} and
#' \eqn{N\ge2^{J-1}\cdot\mbox{length}(\mbox{\code{af}})}.
#'
#' In two dimensions, these two conditions must hold for both \eqn{M} and
#' \eqn{N}.
#'
#' @usage dualtree(x, J, Faf, af)
#' @usage idualtree(w, J, Fsf, sf)
#' @usage dualtree2D(x, J, Faf, af)
#' @usage idualtree2D(w, J, Fsf, sf)
#' @aliases dualtree idualtree dualtree2D idualtree2D
#' @param x N-point vector or MxN matrix.
#' @param w DWT coefficients.
#' @param J number of stages.
#' @param Faf analysis filters for the first stage.
#' @param af analysis filters for the remaining stages.
#' @param Fsf synthesis filters for the last stage.
#' @param sf synthesis filters for the preceding stages.
#' @return For the analysis of \code{x}, the output is \item{w}{DWT
#' coefficients. Each wavelet scale is a list containing the real and
#' imaginary parts. The final scale (J+1) contains the low-pass filter
#' coefficients.} For the synthesis of \code{w}, the output is \item{y}{output
#' signal}
#' @author Matlab: S. Cai, K. Li and I. Selesnick; R port: B. Whitcher
#' @seealso \code{\link{FSfarras}}, \code{\link{farras}},
#' \code{\link{convolve}}, \code{\link{cshift}}, \code{\link{afb}},
#' \code{\link{sfb}}.
#' @keywords ts
#' @examples
#'
#' ## EXAMPLE: dualtree
#' x = rnorm(512)
#' J = 4
#' Faf = FSfarras()$af
#' Fsf = FSfarras()$sf
#' af = dualfilt1()$af
#' sf = dualfilt1()$sf
#' w = dualtree(x, J, Faf, af)
#' y = idualtree(w, J, Fsf, sf)
#' err = x - y
#' max(abs(err))
#'
#' ## Example: dualtree2D
#' x = matrix(rnorm(64*64), 64, 64)
#' J = 3
#' Faf = FSfarras()$af
#' Fsf = FSfarras()$sf
#' af = dualfilt1()$af
#' sf = dualfilt1()$sf
#' w = dualtree2D(x, J, Faf, af)
#' y = idualtree2D(w, J, Fsf, sf)
#' err = x - y
#' max(abs(err))
#'
#' ## Display 2D wavelets of dualtree2D.m
#'
#' J <- 4
#' L <- 3 * 2^(J+1)
#' N <- L / 2^J
#' Faf <- FSfarras()$af
#' Fsf <- FSfarras()$sf
#' af <- dualfilt1()$af
#' sf <- dualfilt1()$sf
#' x <- matrix(0, 2*L, 3*L)
#' w <- dualtree2D(x, J, Faf, af)
#' w[[J]][[1]][[1]][N/2, N/2+0*N] <- 1
#' w[[J]][[1]][[2]][N/2, N/2+1*N] <- 1
#' w[[J]][[1]][[3]][N/2, N/2+2*N] <- 1
#' w[[J]][[2]][[1]][N/2+N, N/2+0*N] <- 1
#' w[[J]][[2]][[2]][N/2+N, N/2+1*N] <- 1
#' w[[J]][[2]][[3]][N/2+N, N/2+2*N] <- 1
#' y <- idualtree2D(w, J, Fsf, sf)
#' image(t(y), col=grey(0:64/64), axes=FALSE)
#'
dualtree <- function(x, J, Faf, af) {
## normalization
x <- x/sqrt(2)
w <- vector("list", J+1)
## Tree 1
w[[1]] <- vector("list", 2)
temp <- afb(x, Faf[[1]])
x1 <- temp$lo
w[[1]][[1]] <- temp$hi
if(J > 1) {
for(j in 2:J) {
w[[j]] <- vector("list", 2)
temp <- afb(x1, af[[1]])
x1 <- temp$lo
w[[j]][[1]] <- temp$hi
}
}
w[[J+1]] <- vector("list", 2)
w[[J+1]][[1]] <- x1
## Tree 2
temp <- afb(x, Faf[[2]])
x2 <- temp$lo
w[[1]][[2]] <- temp$hi
if(J > 1) {
for(j in 2:J) {
temp <- afb(x2, af[[2]])
x2 <- temp$lo
w[[j]][[2]] <- temp$hi
}
}
w[[J+1]][[2]] <- x2
w
}
sfb <- function(lo, hi, sf) {
N <- 2*length(lo)
L <- nrow(sf)
## lo <- upfirdn(lo, sf[,1], 2, 1)
lo <- c(matrix(c(rep(0, N/2), lo), nrow=2, byrow=TRUE))
lo <- convolve(lo, sf[,1], conj=FALSE, type="open")
lo <- cshift(lo, -L)
## hi <- upfirdn(hi, sf[,2], 2, 1)
hi <- c(matrix(c(rep(0, N/2), hi), nrow=2, byrow=TRUE))
hi <- convolve(hi, sf[,2], conj=FALSE, type="open")
hi <- cshift(hi, -L)
y <- lo + hi
y[1:(L-2)] <- y[1:(L-2)] + y[N+1:(L-2)]
y <- y[1:N]
## y = cshift(y, 1-L/2);
y <- cshift(y, 1-L/2)
y
}
idualtree <- function(w, J, Fsf, sf) {
## Tree 1
y1 <- w[[J+1]][[1]]
if(J > 1) {
for(j in J:2) {
y1 <- sfb(y1, w[[j]][[1]], sf[[1]])
}
}
y1 <- sfb(y1, w[[1]][[1]], Fsf[[1]])
## Tree 2
y2 <- w[[J+1]][[2]]
if(J > 1) {
for(j in J:2) {
y2 <- sfb(y2, w[[j]][[2]], sf[[2]])
}
}
y2 <- sfb(y2, w[[1]][[2]], Fsf[[2]])
## normalization
y <- (y1 + y2)/sqrt(2)
y
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/dualtree.R
|
dualtree2D <- function(x, J, Faf, af) {
## normalization
x <- x/sqrt(2)
w <- vector("list", J+1)
## Tree 1
w[[1]] <- vector("list", 2)
temp <- afb2D(x, Faf[[1]]) # stage 1
x1 <- temp$lo
w[[1]][[1]] <- temp$hi
if (J > 1) {
for (j in 2:J) {
w[[j]] <- vector("list", 2)
temp <- afb2D(x1, af[[1]]) # remaining stages
x1 <- temp$lo
w[[j]][[1]] <- temp$hi
}
}
w[[J+1]] <- vector("list", 2)
w[[J+1]][[1]] <- x1 # lowpass subband
## Tree 2
temp <- afb2D(x, Faf[[2]]) # stage 1
x2 <- temp$lo
w[[1]][[2]] <- temp$hi
if (J > 1) {
for (j in 2:J) {
temp <- afb2D(x2, af[[2]]) # remaining stages
x2 <- temp$lo
w[[j]][[2]] <- temp$hi
}
}
w[[J+1]][[2]] <- x2 # lowpass subband
## sum and difference
for (j in 1:J) {
for (m in 1:3) {
A <- w[[j]][[1]][[m]]
B <- w[[j]][[2]][[m]]
w[[j]][[1]][[m]] <- (A + B) / sqrt(2)
w[[j]][[2]][[m]] <- (A - B) / sqrt(2)
}
}
return(w)
}
afb2D <- function(x, af1, af2=NULL) {
if (is.null(af2)) {
af2 <- af1
}
## filter along columns
temp <- afb2D.A(x, af1, 1)
L <- temp$lo
H <- temp$hi
## filter along rows
hi <- vector("list", 3)
temp <- afb2D.A(L, af2, 2)
lo <- temp$lo
hi[[1]] <- temp$hi
temp <- afb2D.A(H, af2, 2)
hi[[2]] <- temp$lo
hi[[3]] <- temp$hi
list(lo = lo, hi = hi)
}
afb2D.A <- function(x, af, d) {
lpf <- af[,1] # lowpass filter
hpf <- af[,2] # highpass filter
if (d == 2) {
x <- t(x)
}
## x <- matrix(1:32, 32, 64)
N <- nrow(x)
L <- nrow(af) / 2
x <- cshift2D(x, -L)
## image(x, col=rainbow(16))
## lo <- upfirdn(x, lpf, 1, 2)
lo <- convolve2D(x, lpf, conj=FALSE, type="open")
lo <- cshift2D(lo, -(2 * L - 1))
lo <- lo[seq(1, nrow(lo), by=2),]
lo[1:L,] <- lo[1:L,] + lo[1:L + N/2,]
lo <- lo[1:(N/2),]
## hi <- upfirdn(x, hpf, 1, 2)
hi <- convolve2D(x, hpf, conj=FALSE, type="open")
hi <- cshift2D(hi, -(2 * L - 1))
hi <- hi[seq(1, nrow(hi), by=2),]
hi[1:L,] <- hi[1:L,] + hi[1:L + N/2,]
hi <- hi[1:(N/2),]
if (d == 2) {
lo <- t(lo)
hi <- t(hi)
}
list(lo = lo, hi = hi)
}
cshift2D <- function(x, m) {
N <- nrow(x)
n <- 0:(N-1)
n <- (n-m) %% N
y <- x[n+1,]
return(y)
}
#' Fast Column-wise Convolution of a Matrix
#'
#' Use the Fast Fourier Transform to perform convolutions between a sequence
#' and each column of a matrix.
#'
#' This is a corrupted version of \code{convolve} made by replacing \code{fft}
#' with \code{mvfft} in a few places. It would be nice to submit this to the R
#' Developers for inclusion.
#'
#' @param x MxN matrix.
#' @param y numeric sequence of length M (matching the number of rows of \code{x}).
#' @param conj logical; if \code{TRUE}, take the complex \emph{conjugate}
#' before back-transforming (default, and used for usual convolution).
#' @param type character; one of \code{circular}, \code{open} (beginning of
#' word is ok). For \code{circular}, the two sequences are treated as
#' \emph{circular}, i.e., periodic.
#'
#' For \code{open}, the sequences are padded with zeros (from left and right)
#' before the convolution is applied.
#' @author B. Whitcher
#' @seealso \code{\link{convolve}}
#' @keywords ts
#' @export convolve2D
convolve2D <- function(x, y, conj=TRUE, type=c("circular", "open")) {
## Generalize convolve to handle vector arrays by calling mvfft()
type <- match.arg(type)
n <- nrow(x)
ny <- length(y)
Real <- is.numeric(x) && is.numeric(y)
if (type == "circular") {
if (ny != n) {
stop("length mismatch in convolution")
}
} else {
n1 <- ny - 1
x <- rbind(matrix(0, n1, ncol(x)), x)
n <- length(y <- c(y, rep.int(0, n - 1)))
}
x <- mvfft(mvfft(x) * (if (conj) Conj(fft(y)) else fft(y)), inverse=TRUE)
(if (Real) Re(x) else x) / n
}
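## Illustrative sketch (added for exposition, not part of the original
## source): column-wise circular convolution via convolve2D() should agree
## with applying stats::convolve() to each column separately.  The objects
## x and y below are hypothetical test inputs.
x <- matrix(rnorm(16 * 3), 16, 3)
y <- rnorm(16)
out.mat <- convolve2D(x, y, type = "circular")
out.col <- apply(x, 2, convolve, y = y, type = "circular")
max(abs(out.mat - out.col))  # expected to be at machine precision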
idualtree2D <- function(w, J, Fsf, sf) {
## sum and difference
for (k in 1:J) {
for (m in 1:3) {
A <- w[[k]][[1]][[m]]
B <- w[[k]][[2]][[m]]
w[[k]][[1]][[m]] <- (A+B)/sqrt(2)
w[[k]][[2]][[m]] <- (A-B)/sqrt(2)
}
}
## Tree 1
y1 <- w[[J+1]][[1]]
if (J > 1) {
for (j in J:2) {
y1 <- sfb2D(y1, w[[j]][[1]], sf[[1]])
}
}
y1 <- sfb2D(y1, w[[1]][[1]], Fsf[[1]])
## Tree 2
y2 <- w[[J+1]][[2]]
if (J > 1) {
for (j in J:2) {
y2 <- sfb2D(y2, w[[j]][[2]], sf[[2]])
}
  }
  y2 <- sfb2D(y2, w[[1]][[2]], Fsf[[2]])
## normalization
y <- (y1 + y2)/sqrt(2)
return(y)
}
sfb2D <- function(lo, hi, sf1, sf2=NULL) {
if (is.null(sf2)) {
sf2 <- sf1
}
## filter along rows
lo <- sfb2D.A(lo, hi[[1]], sf2, 2)
hi <- sfb2D.A(hi[[2]], hi[[3]], sf2, 2)
## filter along columns
y <- sfb2D.A(lo, hi, sf1, 1)
return(y)
}
sfb2D.A <- function(lo, hi, sf, d) {
lpf <- sf[,1] # lowpass filter
hpf <- sf[,2] # highpass filter
if (d == 2) {
lo <- t(lo)
hi <- t(hi)
}
N <- 2 * nrow(lo)
M <- ncol(lo)
L <- nrow(sf)
## y = upfirdn(lo, lpf, 2, 1) + upfirdn(hi, hpf, 2, 1);
lo <- c(matrix(c(rep(0, length(lo)), c(lo)), nrow=2, byrow=TRUE))
lo <- matrix(lo, N, M)
lo <- convolve2D(lo, lpf, conj=FALSE, type="open")
lo <- cshift2D(lo, -L)
hi <- c(matrix(c(rep(0, length(hi)), c(hi)), nrow=2, byrow=TRUE))
hi <- matrix(hi, N, M)
hi <- convolve2D(hi, hpf, conj=FALSE, type="open")
hi <- cshift2D(hi, -L)
y <- lo + hi
y[1:(L-2),] <- y[1:(L-2),] + y[N+1:(L-2),]
y <- y[1:N,]
y <- cshift2D(y, 1 - L/2)
if (d == 2) {
y <- t(y)
}
return(y)
}
pm <- function(a, b) {
u <- (a + b) / sqrt(2)
v <- (a - b) / sqrt(2)
list(u=u, v=v)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/dualtree2D.R
|
#' (Inverse) Discrete Wavelet Packet Transforms
#'
#' All possible filtering combinations (low- and high-pass) are performed to
#' decompose a vector or time series. The resulting coefficients are
#' associated with a binary tree structure corresponding to a partitioning of
#' the frequency axis.
#'
#' The code implements the one-dimensional DWPT using the pyramid algorithm
#' (Mallat, 1989).
#'
#' @usage dwpt(x, wf = "la8", n.levels = 4, boundary = "periodic")
#' @usage idwpt(y, y.basis)
#' @aliases dwpt idwpt modwpt
#' @param x a vector or time series containing the data to be decomposed. This
#' must be a dyadic length vector (power of 2).
#' @param wf Name of the wavelet filter to use in the decomposition. By
#' default this is set to \code{"la8"}, the Daubechies orthonormal compactly
#' supported wavelet of length L=8 (Daubechies, 1992), least asymmetric family.
#' @param n.levels Specifies the depth of the decomposition. This must be a
#' number less than or equal to
#' \eqn{\log(\mbox{length}(x),2)}{log2[length(x)]}.
#' @param boundary Character string specifying the boundary condition. If
#' \code{boundary=="periodic"} the default, then the vector you decompose is
#' assumed to be periodic on its defined interval,\cr if
#' \code{boundary=="reflection"}, the vector beyond its boundaries is assumed
#' to be a symmetric reflection of itself.
#' @param y Object of S3 class \code{dwpt}.
#' @param y.basis Boolean (0/1) vector, one element per node of the DWPT,
#' indicating which nodes form the orthonormal basis used in the
#' reconstruction (e.g., the output of \code{basis} or \code{ortho.basis}).
#' @return Basically, a list with the following components
#' \item{w?.?}{Wavelet coefficient vectors. The first index is associated with
#' the scale of the decomposition while the second is associated with the
#' frequency partition within that level.}
#' \item{wavelet}{Name of the wavelet filter used.}
#' \item{boundary}{How the boundaries were handled.}
#' @author B. Whitcher
#' @seealso \code{\link{dwt}}, \code{\link{modwpt}}, \code{\link{wave.filter}}.
#' @references Mallat, S. G. (1989) A theory for multiresolution signal
#' decomposition: the wavelet representation, \emph{IEEE Transactions on
#' Pattern Analysis and Machine Intelligence}, \bold{11}(7), 674--693.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#'
#' Wickerhauser, M. V. (1994) \emph{Adapted Wavelet Analysis from Theory to
#' Software}, A K Peters.
#' @keywords ts
#' @examples
#'
#' data(mexm)
#' J <- 4
#' mexm.mra <- mra(log(mexm), "mb8", J, "modwt", "reflection")
#' mexm.nomean <- ts(
#' apply(matrix(unlist(mexm.mra), ncol=J+1, byrow=FALSE)[,-(J+1)], 1, sum),
#' start=1957, freq=12)
#' mexm.dwpt <- dwpt(mexm.nomean[-c(1:4)], "mb8", 7, "reflection")
#'
#' @export dwpt
dwpt <- function(x, wf="la8", n.levels=4, boundary="periodic") {
N <- length(x)
J <- n.levels
  if(N/2^J != trunc(N/2^J))
    stop("Sample size is not divisible by 2^J")
  if(2^J > N)
    stop("wavelet transform exceeds sample size in dwpt")
dict <- wave.filter(wf)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf
storage.mode(h) <- "double"
g <- dict$lpf
storage.mode(g) <- "double"
y <- vector("list", sum(2^(1:J)))
crystals1 <- rep(1:J, 2^(1:J))
crystals2 <- unlist(apply(as.matrix(2^(1:J) - 1), 1, seq, from=0))
names(y) <- paste("w", crystals1, ".", crystals2, sep="")
for(j in 1:J) {
jj <- min((1:length(crystals1))[crystals1 == j])
for(n in 0:(2^j/2-1)) {
if(j > 1)
x <- y[[(1:length(crystals1))[crystals1 == j-1][n+1]]]
W <- V <- numeric(N/2^j)
if(n %% 2 == 0) {
z <- .C(C_dwt, as.double(x), as.integer(N/2^(j-1)), L, h, g,
W=as.double(W), V=as.double(V))
y[[jj + 2*n + 1]] <- z$W
y[[jj + 2*n]] <- z$V
}
else {
z <- .C(C_dwt, as.double(x), as.integer(N/2^(j-1)), L, h, g,
W=as.double(W), V=as.double(V))
y[[jj + 2*n]] <- z$W
y[[jj + 2*n + 1 ]] <- z$V
}
}
}
attr(y, "wavelet") <- wf
return(y)
}
idwpt <- function(y, y.basis)
{
J <- trunc(log(length(y), 2))
dict <- wave.filter(attributes(y)$wavelet)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf
storage.mode(h) <- "double"
g <- dict$lpf
storage.mode(g) <- "double"
for(j in J:1) {
a <- min((1:length(rep(1:J, 2^(1:J))))[rep(1:J, 2^(1:J)) == j])
b <- max((1:length(rep(1:J, 2^(1:J))))[rep(1:J, 2^(1:J)) == j])
n <- a
while(n <= b) {
if(y.basis[n]) {
m <- length(y[[n]])
XX <- numeric(2 * m)
if(floor((n-a)/2) %% 2 == 0)
X <- .C(C_idwt, as.double(y[[n+1]]), as.double(y[[n]]),
as.integer(m), L, h, g, out=as.double(XX))$out
else
X <- .C(C_idwt, as.double(y[[n]]), as.double(y[[n+1]]),
as.integer(m), L, h, g, out=as.double(XX))$out
if(j != 1) {
y[[a-(b-a+1)/2 + (n-a)/2]] <- X
y.basis[[a-(b-a+1)/2 + (n-a)/2]] <- 1
}
n <- n + 2
}
else { n <- n + 1 }
}
}
return(X)
}
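## Illustrative sketch (added for exposition, not part of the original
## source): reconstructing a series from the deepest level of its DWPT.
## All nodes at level J form a valid orthonormal basis, so the
## reconstruction should match the input to numerical precision.
x <- rnorm(64)
x.dwpt <- dwpt(x, "la8", n.levels = 3)
x.basis <- as.numeric(names(x.dwpt) %in% paste("w3.", 0:7, sep = ""))
max(abs(x - idwpt(x.dwpt, x.basis)))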
##plot.dwpt <- function(x, n.levels, pgrid=TRUE)
##{
## J <- n.levels
## scales <- rep(1:J, 2^(1:J))
## y <- matrix(NA, 2*length(x[[1]]), J)
## for(j in 1:J) {
## a <- min((1:length(scales))[scales == j])
## b <- max((1:length(scales))[scales == j])
## y[, j] <- unlist(x[a:b])
## x.length <- length(y[, j])
## }
## plot(ts(y), ylim=c(-.45,.45))
## if(pgrid) {
## lines(x.length * c(0,1), c(0,0), lty=2)
## for(j in 1:J) {
## lines(x.length * c(0,1), c(-j,-j), lty=2)
## for(n in 0:2^j) lines(x.length * c(n/2^j, n/2^j), c(-j,-(j-1)), lty=2)
## }
## }
## title(ylab="Level")
##}
#' Produce Boolean Vector from Wavelet Basis Names
#'
#' Produce a vector of zeros and ones from a vector of basis names.
#'
#' None.
#'
#' @param x Output from the discrete wavelet packet transform (DWPT).
#' @param basis.names Vector of character strings that describe leaves on the
#' DWPT basis tree. See the examples below for appropriate syntax.
#' @return Vector of zeros and ones.
#' @seealso \code{\link{dwpt}}.
#' @keywords ts
#' @examples
#'
#' data(acvs.andel8)
#' \dontrun{
#' x <- hosking.sim(1024, acvs.andel8[,2])
#' x.dwpt <- dwpt(x, "la8", 7)
#' ## Select orthonormal basis from wavelet packet tree
#' x.basis <- basis(x.dwpt, c("w1.1","w2.1","w3.0","w4.3","w5.4","w6.10",
#' "w7.22","w7.23"))
#' for(i in 1:length(x.dwpt))
#' x.dwpt[[i]] <- x.basis[i] * x.dwpt[[i]]
#' ## Reconstruct original series using selected orthonormal basis
#' y <- idwpt(x.dwpt, x.basis)
#' par(mfrow=c(2,1), mar=c(5-1,4,4-1,2))
#' plot.ts(x, xlab="", ylab="", main="Original Series")
#' plot.ts(y, xlab="", ylab="", main="Reconstructed Series")
#' }
#'
#' @export basis
basis <- function(x, basis.names)
{
m <- length(x)
n <- length(basis.names)
y <- numeric(m)
for(i in 1:n) { y <- y + as.integer(names(x) == basis.names[i]) }
return(y)
}
#' Derive Orthonormal Basis from Wavelet Packet Tree
#'
#' An orthonormal basis for the discrete wavelet transform may be characterized
#' via a disjoint partitioning of the frequency axis that covers
#' \eqn{[0,\frac{1}{2})}{[0,1/2)}. This subroutine produces an orthonormal
#' basis from a full wavelet packet tree.
#'
#' A wavelet packet tree is a binary tree of Boolean variables. Parent nodes
#' are removed if any of their children exist.
#'
#' @param xtree is a vector whose entries are associated with a wavelet packet
#' tree.
#' @return Boolean vector describing the orthonormal basis for the DWPT.
#' @author B. Whitcher
#' @keywords ts
#' @examples
#'
#' data(japan)
#' J <- 4
#' wf <- "mb8"
#' japan.mra <- mra(log(japan), wf, J, boundary="reflection")
#' japan.nomean <-
#' ts(apply(matrix(unlist(japan.mra[-(J+1)]), ncol=J, byrow=FALSE), 1, sum),
#' start=1955, freq=4)
#' japan.nomean2 <- ts(japan.nomean[42:169], start=1965.25, freq=4)
#' plot(japan.nomean2, type="l")
#' japan.dwpt <- dwpt(japan.nomean2, wf, 6)
#' japan.basis <-
#' ortho.basis(portmanteau.test(japan.dwpt, p=0.01, type="other"))
#' # Not implemented yet
#' # par(mfrow=c(1,1))
#' # plot.basis(japan.basis)
#'
#' @export ortho.basis
ortho.basis <- function(xtree) {
J <- trunc(log(length(xtree), 2))
X <- vector("list", J)
X[[1]] <- xtree[rep(1:J, 2^(1:J)) == 1]
for(i in 2:J) {
for(j in i:J) {
if(i == 2) X[[j]] <- xtree[rep(1:J, 2^(1:J)) == j]
X[[j]] <- X[[j]] + 2 * c(apply(matrix(xtree[rep(1:J, 2^(1:J)) == i-1]),
1, rep, 2^(j-i+1)))
}
}
X[[J]][X[[J]] == 0] <- 1
ifelse(unlist(X) == 1, 1, 0)
}
##plot.basis <- function(xtree)
##{
## J <- trunc(log(length(xtree), base=2))
## j <- rep(1:J, 2^(1:J))
## n <- unlist(apply(matrix(2^(1:J)-1), 1, seq, from=0))
## basis <- ifelse(xtree, paste("w", j, ".", n, sep=""), NA)
## pgrid.plot(basis[basis != "NA"])
## invisible()
##}
phase.shift.packet <- function(z, wf, inv=FALSE)
{
## Center of energy
coe <- function(g)
sum(0:(length(g)-1) * g^2) / sum(g^2)
J <- length(z) - 1
g <- wave.filter(wf)$lpf
h <- wave.filter(wf)$hpf
if(!inv) {
for(j in 1:J) {
ph <- round(2^(j-1) * (coe(g) + coe(h)) - coe(g), 0)
Nj <- length(z[[j]])
z[[j]] <- c(z[[j]][(ph+1):Nj], z[[j]][1:ph])
}
ph <- round((2^J-1) * coe(g), 0)
J <- J + 1
z[[J]] <- c(z[[J]][(ph+1):Nj], z[[J]][1:ph])
} else {
for(j in 1:J) {
ph <- round(2^(j-1) * (coe(g) + coe(h)) - coe(g), 0)
Nj <- length(z[[j]])
z[[j]] <- c(z[[j]][(Nj-ph+1):Nj], z[[j]][1:(Nj-ph)])
}
ph <- round((2^J-1) * coe(g), 0)
J <- J + 1
    z[[J]] <- c(z[[J]][(Nj-ph+1):Nj], z[[J]][1:(Nj-ph)])
}
return(z)
}
modwpt <- function(x, wf="la8", n.levels=4, boundary="periodic")
{
N <- length(x); storage.mode(N) <- "integer"
J <- n.levels
if(2^J > N) stop("wavelet transform exceeds sample size in modwt")
dict <- wave.filter(wf)
L <- dict$length
storage.mode(L) <- "integer"
ht <- dict$hpf/sqrt(2)
storage.mode(ht) <- "double"
gt <- dict$lpf/sqrt(2)
storage.mode(gt) <- "double"
y <- vector("list", sum(2^(1:J)))
yn <- length(y)
crystals1 <- rep(1:J, 2^(1:J))
crystals2 <- unlist(apply(as.matrix(2^(1:J) - 1), 1, seq, from=0))
names(y) <- paste("w", crystals1, ".", crystals2, sep="")
W <- V <- numeric(N)
storage.mode(W) <- storage.mode(V) <- "double"
for(j in 1:J) {
index <- 0
jj <- min((1:yn)[crystals1 == j])
for(n in 0:(2^j / 2 - 1)) {
index <- index + 1
if(j > 1)
x <- y[[(1:yn)[crystals1 == j-1][index]]]
if(n %% 2 == 0) {
z <- .C(C_modwt, as.double(x), N, as.integer(j), L, ht, gt,
W = W, V = V)[7:8]
y[[jj + 2*n + 1]] <- z$W
y[[jj + 2*n]] <- z$V
}
else {
z <- .C(C_modwt, as.double(x), N, as.integer(j), L, ht, gt,
W = W, V = V)[7:8]
y[[jj + 2*n]] <- z$W
y[[jj + 2*n + 1 ]] <- z$V
}
}
}
attr(y, "wavelet") <- wf
return(y)
}
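## Illustrative sketch (added for exposition, not part of the original
## source): a depth-3 MODWPT of a white noise vector.  Every node keeps the
## full sample size and, with the periodic boundary, the squared coefficients
## at any single level should (approximately) sum to the energy of the input.
x <- rnorm(128)
x.modwpt <- modwpt(x, "la8", n.levels = 3)
sapply(x.modwpt, length)                       # all nodes have length 128
level3 <- paste("w3.", 0:7, sep = "")
c(sum(sapply(x.modwpt[level3], function(w) sum(w^2))), sum(x^2))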
dwpt.brick.wall <- function(x, wf, n.levels, method="modwpt")
{
N <- length(x[[1]])
m <- wave.filter(wf)$length
J <- n.levels
crystals1 <- rep(1:J, 2^(1:J))
crystals2 <- unlist(apply(as.matrix(2^(1:J) - 1), 1, seq, from=0))
if(method=="dwpt") {
## for DWPT
for(j in 1:J) {
jj <- min((1:length(crystals1))[crystals1 == j])
L <- switch(j,
(m-2)/2,
((m-2)/2 + floor(m/4)),
((m-2)/2 + floor((m/2 + floor(m/4))/2)))
if(is.null(L)) L <- (m-2)
for(n in 0:(2^j-1))
x[[jj+n]][1:L] <- NA
}
}
else {
## for MODWPT
for(j in 1:J) {
jj <- min((1:length(crystals1))[crystals1 == j])
L <- min((2^j - 1) * (m - 1), N)
for(n in 0:(2^j-1))
x[[jj+n]][1:L] <- NA
}
}
return(x)
}
#' Testing the Wavelet Packet Tree for White Noise
#'
#' A wavelet packet tree, from the discrete wavelet packet transform (DWPT), is
#' tested node-by-node for white noise. This is the first step in selecting an
#' orthonormal basis for the DWPT.
#'
#' Top-down recursive testing of the wavelet packet tree is performed by
#' applying the chosen test to every node; the resulting vector of indicators
#' may then be passed to \code{\link{ortho.basis}} to select an orthonormal
#' basis.
#'
#' @usage cpgram.test(y, p = 0.05, taper = 0.1)
#' @usage css.test(y)
#' @usage entropy.test(y)
#' @usage portmanteau.test(y, p = 0.05, type = "Box-Pierce")
#' @aliases cpgram.test css.test entropy.test portmanteau.test
#' @param y wavelet packet tree (from the DWPT)
#' @param p significance level
#' @param taper weight of cosine bell taper (\code{cpgram.test} only)
#' @param type \code{"Box-Pierce"} and \code{other} recognized
#' (\code{portmanteau.test} only)
#' @return Boolean vector of the same length as the number of nodes in the
#' wavelet packet tree.
#' @author B. Whitcher
#' @seealso \code{\link{ortho.basis}}.
#' @references Brockwell and Davis (1991) \emph{Time Series: Theory and
#' Methods}, (2nd. edition), Springer-Verlag.
#'
#' Brown, Durbin and Evans (1975) Techniques for testing the constancy of
#' regression relationships over time, \emph{Journal of the Royal Statistical
#' Society B}, \bold{37}, 149-163.
#'
#' Percival, D. B., and A. T. Walden (1993) \emph{Spectral Analysis for
#' Physical Applications: Multitaper and Conventional Univariate Techniques},
#' Cambridge University Press.
#' @keywords ts
#' @examples
#'
#' data(mexm)
#' J <- 6
#' wf <- "la8"
#' mexm.dwpt <- dwpt(mexm[-c(1:4)], wf, J)
#' ## Not implemented yet
#' ## plot.dwpt(x.dwpt, J)
#' mexm.dwpt.bw <- dwpt.brick.wall(mexm.dwpt, wf, 6, method="dwpt")
#' mexm.tree <- ortho.basis(portmanteau.test(mexm.dwpt.bw, p=0.025))
#' ## Not implemented yet
#' ## plot.basis(mexm.tree)
#'
css.test <- function(y)
{
K <- length(y)
test <- numeric(K)
for(k in 1:K) {
x <- y[[k]]
x <- x[!is.na(x)]
n <- length(x)
plus <- 1:n/(n - 1) - cumsum(x^2)/sum(x^2)
minus <- cumsum(x^2)/sum(x^2) - 0:(n - 1)/(n - 1)
D <- max(abs(plus), abs(minus))
if(D < 1.224/(sqrt(n) + 0.12 + 0.11/sqrt(n))) test[k] <- 1
}
return(test)
}
entropy.test <- function(y)
{
K <- length(y)
test <- numeric(K)
for(k in 1:K) {
x <- y[[k]]
test[k] <- sum(x^2 * log(x^2), na.rm=TRUE)
}
return(test)
}
cpgram.test <- function(y, p=0.05, taper=0.1)
{
K <- length(y)
test <- numeric(K)
for(k in 1:K) {
x <- y[[k]]
x <- x[!is.na(x)]
x <- spec.taper(scale(x, center=TRUE, scale=FALSE), p=taper)
y <- Mod(fft(x))^2/length(x)
y[1] <- 0
n <- length(x)
x <- (0:(n/2))/n
if(length(x) %% 2 == 0) {
n <- length(x) - 1
y <- y[1:n]
x <- x[1:n]
}
else y <- y[1:length(x)]
mp <- length(x) - 1
if(p == 0.05)
crit <- 1.358/(sqrt(mp) + 0.12 + 0.11/sqrt(mp))
else {
if(p == 0.01) crit <- 1.628/(sqrt(mp) + 0.12 + 0.11/sqrt(mp))
else stop("critical value is not known")
}
D <- abs(cumsum(y)/sum(y) - 0:mp/mp)
if(max(D) < crit) test[k] <- 1
}
return(test)
}
portmanteau.test <- function(y, p = 0.05, type = "Box-Pierce")
{
K <- length(y)
test <- numeric(K)
for(k in 1:K) {
x <- y[[k]]
x <- x[!is.na(x)]
n <- length(x)
h <- trunc(n/2)
x.acf <- my.acf(x)[1:(h+1)]
x.acf <- x.acf / x.acf[1];
if(type == "Box-Pierce")
test[k] <- ifelse(n * sum((x.acf[-1])^2) > qchisq(1-p, h), 0, 1)
else
test[k] <- ifelse(n*(n+2) * sum((x.acf[-1])^2 / (n - h:1)) >
qchisq(1-p, h), 0, 1)
}
return(test)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/dwpt.R
|
#' Bootstrap Time Series Using the DWPT
#'
#' An adaptive orthonormal basis is selected in order to perform the naive
#' bootstrap within nodes of the wavelet packet tree. A bootstrap realization
#' of the time series is produced by applying the inverse DWPT.
#'
#' A subroutine is used to select an adaptive orthonormal basis for the
#' piecewise-constant approximation to the underlying spectral density function
#' (SDF). Once selected, sampling with replacement is performed within each
#' wavelet packet coefficient vector and the new collection of wavelet packet
#' coefficients is reconstructed into a bootstrap realization of the original
#' time series.
#'
#' @param y Not necessarily dyadic length time series.
#' @param wf Name of the wavelet filter to use in the decomposition. See
#' \code{\link{wave.filter}} for those wavelet filters available.
#' @param J Depth of the discrete wavelet packet transform.
#' @param p Level of significance for the white noise testing procedure.
#' @param frac Fraction of the time series that should be used in constructing
#' the likelihood function.
#' @return Time series of length \eqn{N}, where \eqn{N} is the length of \code{y}.
#' @author B. Whitcher
#' @seealso \code{\link{dwpt.sim}}, \code{\link{spp.mle}}
#' @references Percival, D.B., S. Sardy and A. Davison (2000) Wavestrapping Time
#' Series: Adaptive Wavelet-Based Bootstrapping, in B.J. Fitzgerald, R.L.
#' Smith, A.T. Walden, P.C. Young (Eds.) \emph{Nonlinear and Nonstationary
#' Signal Processing}, pp. 442-471.
#'
#' Whitcher, B. (2001) Simulating Gaussian Stationary Time Series with
#' Unbounded Spectra, \emph{Journal of Computational and Graphical Statistics},
#' \bold{10}, No. 1, 112-134.
#'
#' Whitcher, B. (2004) Wavelet-Based Estimation for Seasonal Long-Memory
#' Processes, \emph{Technometrics}, \bold{46}, No. 2, 225-238.
#' @keywords ts
#' @export dwpt.boot
dwpt.boot <- function(y, wf, J=log(length(y),2)-1, p=1e-04, frac=1) {
N <- length(y)
if(N/2^J != trunc(N/2^J))
stop("Sample size is not divisible by 2^J")
## Perform discrete wavelet packet transform (DWPT) on Y
y.dwpt <- dwpt(y, wf, n.levels=J)
n <- length(y)
if(frac < 1) {
for(i in 1:length(y.dwpt)) {
vec <- y.dwpt[[i]]
ni <- length(vec)
j <- rep(1:J, 2^(1:J))[i]
vec[trunc(frac * n/2^j):ni] <- NA
y.dwpt[[i]] <- vec
}
}
y.basis <- as.logical(ortho.basis(portmanteau.test(y.dwpt, p, type="other")))
## Taken from my 2D bootstrapping methodology
resample.dwpt <- y.dwpt
for(i in 1:length(y.basis)) {
m <- length(y.dwpt[[i]])
if(y.basis[i])
resample.dwpt[[i]] <- sample(y.dwpt[[i]], replace=TRUE)
else
resample.dwpt[[i]] <- rep(NA, m)
}
idwpt(resample.dwpt, y.basis)
}
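## Illustrative sketch (added for exposition, not part of the original
## source): one wavestrapped realization of an AR(1) series.  The sample
## size must be divisible by 2^J; the parameter values below are arbitrary.
set.seed(42)
x <- as.numeric(arima.sim(list(ar = 0.6), n = 256))
x.boot <- dwpt.boot(x, "la8", J = 6, p = 0.01)
c(length(x), length(x.boot))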
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/dwpt_boot.R
|
#' Simulate Seasonal Persistent Processes Using the DWPT
#'
#' A seasonal persistent process may be characterized by a spectral density
#' function with an asymptote occurring at a particular frequency in
#' \eqn{[0,\frac{1}{2})}{[0,1/2)}. Its time-domain representation was first
#' noted in passing by Hosking (1981). Although an exact time-domain approach
#' to simulation is possible, this function utilizes the discrete wavelet
#' packet transform (DWPT).
#'
#' Two subroutines are used, the first selects an adaptive orthonormal basis
#' for the true spectral density function (SDF) while the second computes the
#' bandpass variances associated with the chosen orthonormal basis and SDF.
#' Finally, when \eqn{M>N}{\code{M} > \code{N}} a uniform random variable is
#' generated in order to select a random piece of the simulated time series.
#' For more details see Whitcher (2001).
#'
#' @param N Length of time series to be generated.
#' @param wf Character string for the wavelet filter.
#' @param delta Long-memory parameter for the seasonal persistent process.
#' @param fG Gegenbauer frequency.
#' @param M Multiple of \code{N}; a series of length \code{M*N} is simulated
#' and a contiguous block of length \code{N} is extracted from it.
#' @param adaptive Logical; if \code{TRUE} the orthonormal basis used in the
#' DWPT is adapted to the ideal spectrum; otherwise the orthonormal basis
#' consists of all nodes at the deepest level of the wavelet packet tree.
#' @param epsilon Threshold for adaptive basis selection.
#' @return Time series of length \code{N}.
#' @author B. Whitcher
#' @seealso \code{\link{hosking.sim}} for an exact time-domain method and
#' \code{\link{wave.filter}} for a list of available wavelet filters.
#' @references Hosking, J. R. M. (1981) Fractional Differencing,
#' \emph{Biometrika}, \bold{68}, No. 1, 165-176.
#'
#' Whitcher, B. (2001) Simulating Gaussian Stationary Time Series with
#' Unbounded Spectra, \emph{Journal of Computational and Graphical Statistics},
#' \bold{10}, No. 1, 112-134.
#' @keywords ts
#' @examples
#'
#' ## Generate monthly time series with annual oscillation
#' ## acf() from the stats package (attached by default) is used below
#' x <- dwpt.sim(256, "mb16", .4, 1/12, M=4, epsilon=.001)
#' par(mfrow=c(2,1))
#' plot(x, type="l", xlab="Time")
#' acf(x, lag.max=128, ylim=c(-.6,1))
#' data(acvs.andel8)
#' lines(acvs.andel8$lag[1:128], acvs.andel8$acf[1:128], col=2)
#'
#' @export dwpt.sim
dwpt.sim <- function(N, wf, delta, fG, M=2, adaptive=TRUE, epsilon=0.05) {
M <- M*N
J <- log(M, 2)
jn <- rep(1:J, 2^(1:J))
jl <- length(jn)
if( adaptive ) {
Basis <- find.adaptive.basis(wf, J, fG, epsilon)
} else {
Basis <- numeric(jl)
a <- min((1:jl)[jn == J])
b <- max((1:jl)[jn == J])
Basis[a:b] <- 1
}
Index <- (1:jl)[as.logical(Basis)]
Length <- 2^jn
variance <- bandpass.var.spp(delta, fG, J, Basis, Length)
z <- vector("list", jl)
class(z) <- "dwpt"
attr(z, "wavelet") <- wf
for(i in Index)
z[[i]] <- rnorm(M/Length[i], sd=sqrt(Length[i]*variance[i]))
x <- idwpt(z, Basis)
xi <- trunc(runif(1, 1, M-N))
return(x[xi:(xi+N-1)])
}
#' Determine an Orthonormal Basis for the Discrete Wavelet Packet Transform
#'
#' Subroutine for use in simulating seasonal persistent processes using the
#' discrete wavelet packet transform.
#'
#' The squared gain functions for a Daubechies (extremal phase or least
#' asymmetric) wavelet family are used in a filter cascade to compute the value
#' of the squared gain function for the wavelet packet filter at the
#' Gegenbauer frequency. This is done for all nodes of the wavelet packet
#' table.
#'
#' The idea behind this subroutine is to approximate the relationship between
#' the discrete wavelet transform and long-memory processes, where the squared
#' gain function is zero at frequency zero for all levels of the DWT.
#'
#' @param wf Character string; name of the wavelet filter.
#' @param J Depth of the discrete wavelet packet transform.
#' @param fG Gegenbauer frequency.
#' @param eps Threshold for the squared gain function.
#' @return Boolean vector describing the orthonormal basis for the DWPT.
#' @author B. Whitcher
#' @seealso Used in \code{\link{dwpt.sim}}.
#' @keywords ts
#' @export find.adaptive.basis
find.adaptive.basis <- function(wf, J, fG, eps) {
H <- function(f, L) {
H <- 0
for(l in 0:(L/2-1))
H <- H + choose(L/2+l-1,l) * cos(pi*f)^(2*l)
H <- 2 * sin(pi*f)^L * H
return(H)
}
G <- function(f, L) {
G <- 0
for(l in 0:(L/2-1))
G <- G + choose(L/2+l-1,l) * sin(pi*f)^(2*l)
G <- 2 * cos(pi*f)^L * G
return(G)
}
L <- wave.filter(wf)$length
jn <- rep(1:J, 2^(1:J))
jl <- length(jn)
U <- numeric(jl)
U[1] <- G(fG, L)
U[2] <- H(fG, L)
for(j in 2:J) {
jj <- min((1:jl)[jn == j])
jp <- (1:jl)[jn == j-1]
for(n in 0:(2^j/2-1)) {
if (n%%2 == 0) {
U[jj + 2 * n + 1] <- U[jp[n+1]] * H(2^(j-1)*fG, L)
U[jj + 2 * n] <- U[jp[n+1]] * G(2^(j-1)*fG, L)
} else {
U[jj + 2 * n] <- U[jp[n+1]] * H(2^(j-1)*fG, L)
U[jj + 2 * n + 1] <- U[jp[n+1]] * G(2^(j-1)*fG, L)
}
}
}
return(ortho.basis(U < eps))
}
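## Illustrative sketch (added for exposition, not part of the original
## source): adaptive basis for a monthly Gegenbauer frequency using the D(4)
## filter.  The result is a 0/1 vector over all sum(2^(1:J)) = 30 nodes; the
## parameter values are arbitrary.
b <- find.adaptive.basis("d4", J = 4, fG = 1/12, eps = 0.05)
c(retained = sum(b), total = length(b))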
#' Bandpass Variance for Long-Memory Processes
#'
#' Computes the band-pass variance for fractional difference (FD) or seasonal
#' persistent (SP) processes using numeric integration of their spectral
#' density function.
#'
#' See references.
#'
#' @usage bandpass.fdp(a, b, d)
#' @usage bandpass.spp(a, b, d, fG)
#' @usage bandpass.spp2(a, b, d1, f1, d2, f2)
#' @usage bandpass.var.spp(delta, fG, J, Basis, Length)
#' @aliases bandpass.fdp bandpass.spp bandpass.spp2 bandpass.var.spp
#' @param a Left-hand boundary for the definite integral.
#' @param b Right-hand boundary for the definite integral.
#' @param d,delta,d1,d2 Fractional difference parameter.
#' @param fG,f1,f2 Gegenbauer frequency.
#' @param J Depth of the wavelet transform.
#' @param Basis Logical vector representing the adaptive basis.
#' @param Length Vector, the same length as \code{Basis}, giving \eqn{2^j} for
#' each node of the wavelet packet table.
#' @return Band-pass variance for the FD or SP process between \eqn{a} and
#' \eqn{b}.
#' @author B. Whitcher
#' @references McCoy, E. J., and A. T. Walden (1996) Wavelet analysis and
#' synthesis of stationary long-memory processes, \emph{Journal for
#' Computational and Graphical Statistics}, \bold{5}, No. 1, 26-56.
#'
#' Whitcher, B. (2001) Simulating Gaussian stationary processes with unbounded
#' spectra, \emph{Journal for Computational and Graphical Statistics},
#' \bold{10}, No. 1, 112-134.
#' @keywords ts
bandpass.var.spp <- function(delta, fG, J, Basis, Length) {
a <- unlist(sapply(2^(1:J)-1, seq, from=0, by=1)) / (2*Length)
b <- unlist(sapply(2^(1:J), seq, from=1, by=1)) / (2*Length)
bp.var <- rep(0, length(Basis))
for(jn in (1:length(Basis))[as.logical(Basis)]) {
if(fG < a[jn] | fG > b[jn])
bp.var[jn] <- 2*integrate(spp.sdf, a[jn], b[jn], d=delta, fG=fG)$value
else {
result1 <- 2*integrate(spp.sdf, a[jn], fG, d=delta, fG=fG)$value
result2 <- 2*integrate(spp.sdf, fG, b[jn], d=delta, fG=fG)$value
bp.var[jn] <- result1 + result2
}
}
return(bp.var)
}
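## Illustrative sketch (added for exposition, not part of the original
## source): band-pass variances over the deepest level only, i.e. the
## non-adaptive basis used by dwpt.sim when adaptive = FALSE.  This relies on
## spp.sdf() from elsewhere in the package; delta and fG are arbitrary.
J <- 4
jn <- rep(1:J, 2^(1:J))
Basis <- as.numeric(jn == J)
bp <- bandpass.var.spp(delta = 0.4, fG = 1/12, J = J, Basis = Basis,
                       Length = 2^jn)
round(bp[jn == J], 4)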
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/dwpt_sim.R
|
#' Discrete Wavelet Transform (DWT)
#'
#' This function performs a level \eqn{J} decomposition of the input vector or
#' time series using the pyramid algorithm (Mallat 1989).
#'
#' The code implements the one-dimensional DWT using the pyramid algorithm
#' (Mallat, 1989). The actual transform is performed in C using pseudocode
#' from Percival and Walden (2001). That means convolutions, not inner
#' products, are used to apply the wavelet filters.
#'
#' For a non-dyadic length vector or time series, \code{dwt.nondyadic} pads
#' with zeros, performs the orthonormal DWT on this dyadic length series and
#' then truncates the wavelet coefficient vectors appropriately.
#'
#' @usage dwt(x, wf = "la8", n.levels = 4, boundary = "periodic")
#' @usage dwt.nondyadic(x)
#' @usage idwt(y)
#' @aliases dwt dwt.nondyadic idwt
#' @param x a vector or time series containing the data to be decomposed. This
#' must be a dyadic length vector (power of 2).
#' @param wf Name of the wavelet filter to use in the decomposition. By
#' default this is set to \code{"la8"}, the Daubechies orthonormal compactly
#' supported wavelet of length L=8 (Daubechies, 1992), least asymmetric family.
#' @param n.levels Specifies the depth of the decomposition. This must be a
#' number less than or equal to log(length(x),2).
#' @param boundary Character string specifying the boundary condition. If
#' \code{boundary=="periodic"} the default, then the vector you decompose is
#' assumed to be periodic on its defined interval,\cr if
#' \code{boundary=="reflection"}, the vector beyond its boundaries is assumed
#' to be a symmetric reflection of itself.
#' @param y An object of S3 class \code{dwt}.
#' @return Basically, a list with the following components
#' \item{d?}{Wavelet coefficient vectors.}
#' \item{s?}{Scaling coefficient vector.}
#' \item{wavelet}{Name of the wavelet filter used.}
#' \item{boundary}{How the boundaries were handled.}
#' @author B. Whitcher
#' @seealso \code{\link{modwt}}, \code{\link{mra}}.
#' @references Daubechies, I. (1992) \emph{Ten Lectures on Wavelets}, CBMS-NSF
#' Regional Conference Series in Applied Mathematics, SIAM: Philadelphia.
#'
#' Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An Introduction to
#' Wavelets and Other Filtering Methods in Finance and Economics}, Academic
#' Press.
#'
#' Mallat, S. G. (1989) A theory for multiresolution signal decomposition: the
#' wavelet representation, \emph{IEEE Transactions on Pattern Analysis and
#' Machine Intelligence}, \bold{11}(7), 674--693.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#' @keywords ts
#' @examples
#'
#' ## Figures 4.17 and 4.18 in Gencay, Selcuk and Whitcher (2001).
#' data(ibm)
#' ibm.returns <- diff(log(ibm))
#' ## Haar
#' ibmr.haar <- dwt(ibm.returns, "haar")
#' names(ibmr.haar) <- c("w1", "w2", "w3", "w4", "v4")
#' ## plot partial Haar DWT for IBM data
#' par(mfcol=c(6,1), pty="m", mar=c(5-2,4,4-2,2))
#' plot.ts(ibm.returns, axes=FALSE, ylab="", main="(a)")
#' for(i in 1:4)
#' plot.ts(up.sample(ibmr.haar[[i]], 2^i), type="h", axes=FALSE,
#' ylab=names(ibmr.haar)[i])
#' plot.ts(up.sample(ibmr.haar$v4, 2^4), type="h", axes=FALSE,
#' ylab=names(ibmr.haar)[5])
#' axis(side=1, at=seq(0,368,by=23),
#' labels=c(0,"",46,"",92,"",138,"",184,"",230,"",276,"",322,"",368))
#' ## LA(8)
#' ibmr.la8 <- dwt(ibm.returns, "la8")
#' names(ibmr.la8) <- c("w1", "w2", "w3", "w4", "v4")
#' ## must shift LA(8) coefficients
#' ibmr.la8$w1 <- c(ibmr.la8$w1[-c(1:2)], ibmr.la8$w1[1:2])
#' ibmr.la8$w2 <- c(ibmr.la8$w2[-c(1:2)], ibmr.la8$w2[1:2])
#' for(i in names(ibmr.la8)[3:4])
#' ibmr.la8[[i]] <- c(ibmr.la8[[i]][-c(1:3)], ibmr.la8[[i]][1:3])
#' ibmr.la8$v4 <- c(ibmr.la8$v4[-c(1:2)], ibmr.la8$v4[1:2])
#' ## plot partial LA(8) DWT for IBM data
#' par(mfcol=c(6,1), pty="m", mar=c(5-2,4,4-2,2))
#' plot.ts(ibm.returns, axes=FALSE, ylab="", main="(b)")
#' for(i in 1:4)
#' plot.ts(up.sample(ibmr.la8[[i]], 2^i), type="h", axes=FALSE,
#' ylab=names(ibmr.la8)[i])
#' plot.ts(up.sample(ibmr.la8$v4, 2^4), type="h", axes=FALSE,
#' ylab=names(ibmr.la8)[5])
#' axis(side=1, at=seq(0,368,by=23),
#' labels=c(0,"",46,"",92,"",138,"",184,"",230,"",276,"",322,"",368))
#'
#' @export dwt
dwt <- function(x, wf="la8", n.levels=4, boundary="periodic")
{
switch(boundary,
"reflection" = x <- c(x, rev(x)),
"periodic" = invisible(),
stop("Invalid boundary rule in dwt"))
N <- length(x)
J <- n.levels
if(N/2^J != trunc(N/2^J))
stop("Sample size is not divisible by 2^J")
if(2^J > N)
stop("wavelet transform exceeds sample size in dwt")
dict <- wave.filter(wf)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf
storage.mode(h) <- "double"
g <- dict$lpf
storage.mode(g) <- "double"
y <- vector("list", J+1)
names(y) <- c(paste("d", 1:J, sep=""), paste("s", J, sep=""))
for(j in 1:J) {
W <- V <- numeric(N/2^j)
out <- .C(C_dwt, as.double(x), as.integer(N/2^(j-1)), L, h, g,
W=as.double(W), V=as.double(V))[6:7]
y[[j]] <- out$W
x <- out$V
}
y[[J+1]] <- x
class(y) <- "dwt"
attr(y, "wavelet") <- wf
attr(y, "boundary") <- boundary
return(y)
}
dwt.nondyadic <- function(x)
{
M <- length(x)
N <- 2^(ceiling(log(M, 2)))
xx <- c(x, rep(0, N - M))
y <- dwt(xx)
J <- length(y) - 1
for(j in 1:J)
y[[j]] <- y[[j]][1:trunc(M/2^j)]
return(y)
}
idwt <- function(y)
{
ctmp <- class(y)
if(is.null(ctmp) || all(ctmp != "dwt"))
stop("argument `y' is not of class \"dwt\"")
J <- length(y) - 1
dict <- wave.filter(attributes(y)$wavelet)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf
storage.mode(h) <- "double"
g <- dict$lpf
storage.mode(g) <- "double"
jj <- paste("s", J, sep="")
X <- y[[jj]]
for(j in J:1) {
jj <- paste("d", j, sep="")
N <- length(X)
XX <- numeric(2 * length(y[[jj]]))
X <- .C(C_idwt, as.double(y[[jj]]), as.double(X), as.integer(N), L,
h, g, out=as.double(XX))$out
}
if(attr(y, "boundary") == "reflection") return(X[1:N])
else return(X)
}
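## Illustrative sketch (added for exposition, not part of the original
## source): the DWT followed by its inverse reproduces a dyadic-length
## input to numerical precision.
x <- rnorm(64)
max(abs(x - idwt(dwt(x, "la8", n.levels = 3))))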
#' (Inverse) Maximal Overlap Discrete Wavelet Transform
#'
#' This function performs a level \eqn{J} decomposition of the input vector
#' using the non-decimated discrete wavelet transform. The inverse transform
#' performs the reconstruction of a vector or time series from its maximal
#' overlap discrete wavelet transform.
#'
#' The code implements the one-dimensional non-decimated DWT using the pyramid
#' algorithm. The actual transform is performed in C using pseudocode from
#' Percival and Walden (2001). That means convolutions, not inner products,
#' are used to apply the wavelet filters.
#'
#' The MODWT goes by several names in the statistical and engineering
#' literature, such as, the ``stationary DWT'', ``translation-invariant DWT'',
#' and ``time-invariant DWT''.
#'
#' The inverse MODWT implements the one-dimensional inverse transform using the
#' pyramid algorithm (Mallat, 1989).
#'
#' @usage modwt(x, wf = "la8", n.levels = 4, boundary = "periodic")
#' @usage imodwt(y)
#' @aliases modwt imodwt
#' @param x a vector or time series containing the data to be decomposed.
#' There is \bold{no} restriction on its length.
#' @param y Object of class \code{"modwt"}.
#' @param wf Name of the wavelet filter to use in the decomposition. By
#' default this is set to \code{"la8"}, the Daubechies orthonormal compactly
#' supported wavelet of length L=8 (Daubechies, 1992), least asymmetric family.
#' @param n.levels Specifies the depth of the decomposition. This must be a
#' number less than or equal to log(length(x),2).
#' @param boundary Character string specifying the boundary condition. If
#' \code{boundary=="periodic"} the defaulTRUE, then the vector you decompose is
#' assumed to be periodic on its defined interval,\cr if
#' \code{boundary=="reflection"}, the vector beyond its boundaries is assumed
#' to be a symmetric reflection of itself.
#' @return Basically, a list with the following components
#' \item{d?}{Wavelet coefficient vectors.}
#' \item{s?}{Scaling coefficient vector.}
#' \item{wavelet}{Name of the wavelet filter used.}
#' \item{boundary}{How the boundaries were handled.}
#' @author B. Whitcher
#' @seealso \code{\link{dwt}}, \code{\link{idwt}}, \code{\link{mra}}.
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#'
#' Percival, D. B. and P. Guttorp (1994) Long-memory processes, the Allan
#' variance and wavelets, In \emph{Wavelets and Geophysics}, pages 325-344,
#' Academic Press.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#' @keywords ts
#' @examples
#'
#' ## Figure 4.23 in Gencay, Selcuk and Whitcher (2001)
#' data(ibm)
#' ibm.returns <- diff(log(ibm))
#' # Haar
#' ibmr.haar <- modwt(ibm.returns, "haar")
#' names(ibmr.haar) <- c("w1", "w2", "w3", "w4", "v4")
#' # LA(8)
#' ibmr.la8 <- modwt(ibm.returns, "la8")
#' names(ibmr.la8) <- c("w1", "w2", "w3", "w4", "v4")
#' # shift the MODWT vectors
#' ibmr.la8 <- phase.shift(ibmr.la8, "la8")
#' ## plot partial MODWT for IBM data
#' par(mfcol=c(6,1), pty="m", mar=c(5-2,4,4-2,2))
#' plot.ts(ibm.returns, axes=FALSE, ylab="", main="(a)")
#' for(i in 1:5)
#' plot.ts(ibmr.haar[[i]], axes=FALSE, ylab=names(ibmr.haar)[i])
#' axis(side=1, at=seq(0,368,by=23),
#' labels=c(0,"",46,"",92,"",138,"",184,"",230,"",276,"",322,"",368))
#' par(mfcol=c(6,1), pty="m", mar=c(5-2,4,4-2,2))
#' plot.ts(ibm.returns, axes=FALSE, ylab="", main="(b)")
#' for(i in 1:5)
#' plot.ts(ibmr.la8[[i]], axes=FALSE, ylab=names(ibmr.la8)[i])
#' axis(side=1, at=seq(0,368,by=23),
#' labels=c(0,"",46,"",92,"",138,"",184,"",230,"",276,"",322,"",368))
#'
#' @export modwt
modwt <- function(x, wf="la8", n.levels=4, boundary="periodic")
{
switch(boundary,
"reflection" = x <- c(x, rev(x)),
"periodic" = invisible(),
stop("Invalid boundary rule in modwt"))
N <- length(x)
storage.mode(N) <- "integer"
J <- n.levels
if(2^J > N)
stop("wavelet transform exceeds sample size in modwt")
dict <- wave.filter(wf)
L <- dict$length
storage.mode(L) <- "integer"
ht <- dict$hpf / sqrt(2)
storage.mode(ht) <- "double"
gt <- dict$lpf / sqrt(2)
storage.mode(gt) <- "double"
y <- vector("list", J+1)
names(y) <- c(paste("d", 1:J, sep=""), paste("s", J, sep=""))
W <- V <- numeric(N)
storage.mode(W) <- "double"
storage.mode(V) <- "double"
for(j in 1:J) {
out <- .C(C_modwt, as.double(x), N, as.integer(j), L, ht, gt,
W=W, V=V)[7:8]
y[[j]] <- out$W
x <- out$V
}
y[[J+1]] <- x
class(y) <- "modwt"
attr(y, "wavelet") <- wf
attr(y, "boundary") <- boundary
return(y)
}
imodwt <- function(y)
{
ctmp <- class(y)
if(is.null(ctmp) || all(ctmp != "modwt"))
stop("argument `y' is not of class \"modwt\"")
J <- length(y) - 1
dict <- wave.filter(attributes(y)$wavelet)
L <- dict$length
storage.mode(L) <- "integer"
ht <- dict$hpf / sqrt(2)
storage.mode(ht) <- "double"
gt <- dict$lpf / sqrt(2)
storage.mode(gt) <- "double"
jj <- paste("s", J, sep="")
X <- y[[jj]]
N <- length(X)
storage.mode(N) <- "integer"
XX <- numeric(N)
storage.mode(XX) <- "double"
for(j in J:1) {
jj <- paste("d", j, sep="")
X <- .C(C_imodwt, as.double(y[[jj]]), as.double(X), N, as.integer(j),
L, ht, gt, out=XX)$out
}
if(attr(y, "boundary") == "reflection") return(X[1:(N/2)])
else return(X)
}
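## Illustrative sketch (added for exposition, not part of the original
## source): the MODWT has no dyadic-length restriction and, with its
## inverse, gives perfect reconstruction.
x <- rnorm(100)
max(abs(x - imodwt(modwt(x, "la8", n.levels = 4))))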
#' Replace Boundary Wavelet Coefficients with Missing Values
#'
#' Sets the first \eqn{n} wavelet coefficients to \code{NA}.
#'
#' The fact that observed time series are finite causes boundary issues. One
#' way to get around this is to simply remove any wavelet coefficient whose
#' computation involves the boundary. This is done here by replacing boundary wavelet
#' coefficients with \code{NA}.
#'
#' @usage brick.wall(x, wf, method = "modwt")
#' @usage dwpt.brick.wall(x, wf, n.levels, method = "modwpt")
#' @usage brick.wall.2d(x, method = "modwt")
#' @aliases brick.wall dwpt.brick.wall brick.wall.2d
#' @param x DWT/MODWT/DWPT/MODWPT object
#' @param wf Character string; name of wavelet filter
#' @param n.levels Specifies the depth of the decomposition. This must be a
#' number less than or equal to log(length(x),2).
#' @param method Either \code{\link{dwt}} or \code{\link{modwt}} for
#' \code{brick.wall}, or either \code{\link{dwpt}} or \code{\link{modwpt}} for
#' \code{dwpt.brick.wall}
#' @return Same object as \code{x} only with some missing values.
#' @author B. Whitcher
#' @references Lindsay, R. W., D. B. Percival and D. A. Rothrock (1996). The
#' discrete wavelet transform and the scale analysis of the surface properties
#' of sea ice, \emph{IEEE Transactions on Geoscience and Remote Sensing},
#' \bold{34}, No. 3, 771-787.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#' @keywords ts
#' @export brick.wall
brick.wall <- function(x, wf, method = "modwt")
{
m <- wave.filter(wf)$length
for (j in 1:(length(x) - 1)) {
if (method == "dwt") {
n <- ceiling((m - 2) * (1 - 1 / 2 ^ j))
} else {
n <- (2^j - 1) * (m - 1)
}
n <- min(n, length(x[[j]]))
x[[j]][1:n] <- NA
}
x[[j + 1]][1:n] <- NA
return(x)
}
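## Illustrative sketch (added for exposition, not part of the original
## source): counting the boundary coefficients set to NA for an LA(8) MODWT.
## For the wavelet levels the count is (2^j - 1)(L - 1); the scaling level
## reuses the count from the deepest wavelet level.
x <- rnorm(128)
x.bw <- brick.wall(modwt(x, "la8", n.levels = 3), "la8")
sapply(x.bw, function(v) sum(is.na(v)))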
#' Phase Shift Wavelet Coefficients
#'
#' Wavelet coefficients are circularly shifted by the amount of phase shift
#' induced by the wavelet transform.
#'
#' The center-of-energy argument of Hess-Nielsen and Wickerhauser (1996) is
#' used to provide a flexible way to circularly shift wavelet coefficients
#' regardless of the wavelet filter used. The results are not identical to
#' those used by Percival and Walden (2000), but are more flexible.
#'
#' \code{phase.shift.packet} is not yet implemented fully.
#'
#' @usage phase.shift(z, wf, inv = FALSE)
#' @usage phase.shift.packet(z, wf, inv = FALSE)
#' @aliases phase.shift phase.shift.packet
#' @param z DWT object
#' @param wf character string; wavelet filter used in DWT
#' @param inv Boolean variable; if \code{inv=TRUE} then the inverse phase shift
#' is applied
#' @return DWT (DWPT) object with coefficients circularly shifted.
#' @author B. Whitcher
#' @references Hess-Nielsen, N. and M. V. Wickerhauser (1996) Wavelets and
#' time-frequency analysis, \emph{Proceedings of the IEEE}, \bold{84}, No. 4,
#' 523-540.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#' @keywords ts
#' @export phase.shift
phase.shift <- function(z, wf, inv = FALSE)
{
coe <- function(g)
sum(0:(length(g)-1) * g^2) / sum(g^2)
J <- length(z) - 1
g <- wave.filter(wf)$lpf
h <- wave.filter(wf)$hpf
if(!inv) {
for(j in 1:J) {
ph <- round(2^(j-1) * (coe(g) + coe(h)) - coe(g), 0)
Nj <- length(z[[j]])
z[[j]] <- c(z[[j]][(ph + 1):Nj], z[[j]][1:ph])
}
ph <- round((2^J-1) * coe(g), 0)
J <- J + 1
z[[J]] <- c(z[[J]][(ph + 1):Nj], z[[J]][1:ph])
} else {
for(j in 1:J) {
ph <- round(2^(j-1) * (coe(g) + coe(h)) - coe(g), 0)
Nj <- length(z[[j]])
z[[j]] <- c(z[[j]][(Nj - ph + 1):Nj], z[[j]][1:(Nj - ph)])
}
ph <- round((2^J-1) * coe(g), 0)
J <- J + 1
z[[J]] <- c(z[[J]][(Nj - ph + 1):Nj], z[[J]][1:(Nj - ph)])
}
return(z)
}
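## Illustrative sketch (added for exposition, not part of the original
## source): the forward and inverse center-of-energy shifts are circular
## and cancel exactly.
x.modwt <- modwt(rnorm(64), "la8", n.levels = 3)
x.back <- phase.shift(phase.shift(x.modwt, "la8"), "la8", inv = TRUE)
max(abs(unlist(x.modwt) - unlist(x.back)))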
#' Multiresolution Analysis of Time Series
#'
#' This function performs a level \eqn{J} additive decomposition of the input
#' vector or time series using the pyramid algorithm (Mallat 1989).
#'
#' This code implements a one-dimensional multiresolution analysis introduced
#' by Mallat (1989). Either the DWT or MODWT may be used to compute the
#' multiresolution analysis, which is an additive decomposition of the original
#' time series.
#'
#' @param x A vector or time series containing the data to be decomposed. This
#' must be a dyadic length vector (power of 2) for \code{method="dwt"}.
#' @param wf Name of the wavelet filter to use in the decomposition. By
#' default this is set to \code{"la8"}, the Daubechies orthonormal compactly
#' supported wavelet of length L=8, least asymmetric family.
#' @param J Specifies the depth of the decomposition. This must be a number
#' less than or equal to log(length(x), 2).
#' @param method Either \code{"dwt"} or \code{"modwt"}.
#' @param boundary Character string specifying the boundary condition. If
#' \code{boundary=="periodic"} the default, then the vector you decompose is
#' assumed to be periodic on its defined interval,\cr if
#' \code{boundary=="reflection"}, the vector beyond its boundaries is assumed
#' to be a symmetric reflection of itself.
#' @return Basically, a list with the following components \item{D?}{Wavelet
#' detail vectors.} \item{S?}{Wavelet smooth vector.} \item{wavelet}{Name of
#' the wavelet filter used.} \item{boundary}{How the boundaries were handled.}
#' @author B. Whitcher
#' @seealso \code{\link{dwt}}, \code{\link{modwt}}.
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#'
#' Mallat, S. G. (1989) A theory for multiresolution signal decomposition: the
#' wavelet representation, \emph{IEEE Transactions on Pattern Analysis and
#' Machine Intelligence}, \bold{11}, No. 7, 674-693.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#' @keywords ts
#' @examples
#'
#' ## Easy check to see if it works...
#' x <- rnorm(32)
#' x.mra <- mra(x)
#' sum((x - apply(matrix(unlist(x.mra), nrow=32), 1, sum))^2)
#'
#' ## Figure 4.19 in Gencay, Selcuk and Whitcher (2001)
#' data(ibm)
#' ibm.returns <- diff(log(ibm))
#' ibm.volatility <- abs(ibm.returns)
#' ## Haar
#' ibmv.haar <- mra(ibm.volatility, "haar", 4, "dwt")
#' names(ibmv.haar) <- c("d1", "d2", "d3", "d4", "s4")
#' ## LA(8)
#' ibmv.la8 <- mra(ibm.volatility, "la8", 4, "dwt")
#' names(ibmv.la8) <- c("d1", "d2", "d3", "d4", "s4")
#' ## plot multiresolution analysis of IBM data
#' par(mfcol=c(6,1), pty="m", mar=c(5-2,4,4-2,2))
#' plot.ts(ibm.volatility, axes=FALSE, ylab="", main="(a)")
#' for(i in 1:5)
#' plot.ts(ibmv.haar[[i]], axes=FALSE, ylab=names(ibmv.haar)[i])
#' axis(side=1, at=seq(0,368,by=23),
#' labels=c(0,"",46,"",92,"",138,"",184,"",230,"",276,"",322,"",368))
#' par(mfcol=c(6,1), pty="m", mar=c(5-2,4,4-2,2))
#' plot.ts(ibm.volatility, axes=FALSE, ylab="", main="(b)")
#' for(i in 1:5)
#' plot.ts(ibmv.la8[[i]], axes=FALSE, ylab=names(ibmv.la8)[i])
#' axis(side=1, at=seq(0,368,by=23),
#' labels=c(0,"",46,"",92,"",138,"",184,"",230,"",276,"",322,"",368))
#'
#' @export mra
mra <- function(x, wf = "la8", J = 4, method = "modwt", boundary = "periodic")
{
switch(boundary,
"reflection" = x <- c(x, rev(x)),
"periodic" = invisible(),
stop("Invalid boundary rule in mra"))
n <- length(x)
if(method == "modwt")
x.wt <- modwt(x, wf, J, "periodic")
else
x.wt <- dwt(x, wf, J, "periodic")
x.mra <- vector("list", J+1)
## Smooth
zero <- vector("list", J+1)
names(zero) <- c(paste("d", 1:J, sep = ""), paste("s", J, sep = ""))
class(zero) <- method
attr(zero, "wavelet") <- wf
attr(zero, "boundary") <- boundary
zero[[J+1]] <- x.wt[[J+1]]
if(method == "modwt") {
for(k in 1:J)
zero[[k]] <- numeric(n)
x.mra[[J+1]] <- imodwt(zero)
} else {
for(k in 1:J)
zero[[k]] <- numeric(n/2^k)
x.mra[[J+1]] <- idwt(zero)
}
## Details
for(j in J:1) {
zero <- vector("list", j+1)
names(zero) <- c(paste("d", 1:j, sep = ""), paste("s", j, sep = ""))
class(zero) <- method
attr(zero, "wavelet") <- wf
attr(zero, "boundary") <- boundary
zero[[j]] <- x.wt[[j]]
if(method == "modwt") {
if(j != 1) {
for(k in c(j+1,(j-1):1))
zero[[k]] <- numeric(n)
} else {
zero[[j+1]] <- numeric(n)
}
x.mra[[j]] <- imodwt(zero)
} else {
zero[[j+1]] <- numeric(n/2^j)
if(j != 1) {
for(k in (j-1):1)
zero[[k]] <- numeric(n/2^k)
}
x.mra[[j]] <- idwt(zero)
}
}
names(x.mra) <- c(paste("D", 1:J, sep = ""), paste("S", J, sep = ""))
if(boundary == "reflection") {
for(j in (J+1):1)
x.mra[[j]] <- x.mra[[j]][1:(n/2)]
return(x.mra)
} else {
return(x.mra)
}
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/dwt.R
|
#' Wavelet-based Maximum Likelihood Estimation for a Fractional Difference
#' Process
#'
#' Parameter estimation for a fractional difference (long-memory, self-similar)
#' process is performed via maximum likelihood on the wavelet coefficients.
#'
#' The variance-covariance matrix of the original time series is approximated
#' by its wavelet-based equivalent. A Whittle-type likelihood is then
#' constructed where the sums of squared wavelet coefficients are compared to
#' bandpass filtered version of the true spectrum. Minimization occurs only
#' for the fractional difference parameter \eqn{d}, while variance is estimated
#' afterwards.
#'
#' @param y Dyadic length time series.
#' @param wf Name of the wavelet filter to use in the decomposition. See
#' \code{\link{wave.filter}} for those wavelet filters available.
#' @param J Depth of the discrete wavelet transform.
#' @return List containing the maximum likelihood estimates (MLEs) of \eqn{d}
#' and \eqn{\sigma^2}, along with the value of the likelihood for those
#' estimates.
#' @author B. Whitcher
#' @references M. J. Jensen (2000) An alternative maximum likelihood estimator
#' of long-memory processes using compactly supported wavelets, \emph{Journal
#' of Economic Dynamics and Control}, \bold{24}, No. 3, 361-387.
#'
#' McCoy, E. J., and A. T. Walden (1996) Wavelet analysis and synthesis of
#' stationary long-memory processes, \emph{Journal for Computational and
#' Graphical Statistics}, \bold{5}, No. 1, 26-56.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#' @keywords ts
#' @examples
#'
#' ## Figure 5.5 in Gencay, Selcuk and Whitcher (2001)
#' fdp.sdf <- function(freq, d, sigma2=1)
#' sigma2 / ((2*sin(pi * freq))^2)^d
#' dB <- function(x) 10 * log10(x)
#' per <- function(z) {
#' n <- length(z)
#' (Mod(fft(z))**2/(2*pi*n))[1:(n %/% 2 + 1)]
#' }
#' data(ibm)
#' ibm.returns <- diff(log(ibm))
#' ibm.volatility <- abs(ibm.returns)
#' ibm.vol.mle <- fdp.mle(ibm.volatility, "d4", 4)
#' freq <- 0:184/368
#' ibm.vol.per <- 2 * pi * per(ibm.volatility)
#' ibm.vol.resid <- ibm.vol.per/ fdp.sdf(freq, ibm.vol.mle$parameters[1])
#' par(mfrow=c(1,1), las=0, pty="m")
#' plot(freq, dB(ibm.vol.per), type="l", xlab="Frequency", ylab="Spectrum")
#' lines(freq, dB(fdp.sdf(freq, ibm.vol.mle$parameters[1],
#' ibm.vol.mle$parameters[2]/2)), col=2)
#'
#' @export fdp.mle
fdp.mle <- function(y, wf, J=log(length(y),2))
{
fdpML <- function(d, y) {
y.dwt <- y[[1]]
n <- y[[2]]
J <- y[[3]]
## Establish the limits of integration for the band-pass variances
a <- c(1/2^c(1:J+1), 0)
b <- 1/2^c(0:J+1)
## Define some useful parameters for computing the likelihood
length.j <- n / c(2^(1:J), 2^J)
scale.j <- c(2^(1:J+1), 2^(J+1))
## Initialize various parameters for computing the approximate ML
bp.var <- numeric(J+1)
## Compute the band-pass variances according to d
omega.diag <- NULL
for(j in 1:(J+1)) {
bp.var[j] <- integrate(fdp.sdf, a[j], b[j], d=d)$value
omega.diag <- c(omega.diag, scale.j[j] * rep(bp.var[j], length.j[j]))
}
## Compute approximate maximum likelihood
n * log(sum(y.dwt^2 / omega.diag) / n) +
sum(length.j * log(scale.j * bp.var))
}
n <- length(y)
y.dwt <- as.vector(unlist(dwt(y, wf, n.levels=J)))
## Compute MLE of d (limited to stationary region)
result <- optimize(fdpML, interval=c(-0.5,0.5), maximum=FALSE,
y=list(y.dwt, n, J))
## Compute MLE of sigma_epsilon^2
a <- c(1/2^c(1:J+1), 0)
b <- 1/2^c(0:J+1)
length.j <- n / c(2^(1:J), 2^J)
scale.j <- c(2^(1:J+1), 2^(J+1))
bp.var <- numeric(J+1)
omega.diag <- NULL
for(j in 1:(J+1)) {
bp.var[j] <- integrate(fdp.sdf, a[j], b[j], d=result$minimum)$value
omega.diag <- c(omega.diag, scale.j[j] * rep(bp.var[j], length.j[j]))
}
sigma2 <- sum(y.dwt^2 / omega.diag) / n
list(parameters=c(result$minimum, sigma2), objective=result$objective)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/fdp.R
|
#' Discrete Hilbert Wavelet Transforms
#'
#' The discrete Hilbert wavelet transforms (DHWTs) for seasonal and
#' time-varying time series analysis. Transforms include the usual orthogonal
#' (decimated), maximal-overlap (non-decimated) and maximal-overlap packet
#' transforms.
#'
#' @usage dwt.hilbert(x, wf, n.levels = 4, boundary = "periodic", ...)
#' @usage dwt.hilbert.nondyadic(x, ...)
#' @usage idwt.hilbert(y)
#' @usage modwt.hilbert(x, wf, n.levels = 4, boundary = "periodic", ...)
#' @usage imodwt.hilbert(y)
#' @usage modwpt.hilbert(x, wf, n.levels = 4, boundary = "periodic")
#' @aliases dwt.hilbert dwt.hilbert.nondyadic idwt.hilbert modwt.hilbert
#' imodwt.hilbert modwpt.hilbert
#' @param x Real-valued time series or vector of observations.
#' @param wf Hilbert wavelet pair
#' @param n.levels Number of levels (depth) of the wavelet transform.
#' @param boundary Boundary treatment, currently only \code{periodic} and
#' \code{reflection}.
#' @param \ldots Additional parameters to be passed on.
#' @param y An object of S3 class \code{dwt.hilbert}.
#' @return Hilbert wavelet transform object (list).
#' @author B. Whitcher
#' @seealso \code{\link{hilbert.filter}}
#' @references Selesnick, I. (200X). \emph{IEEE Signal Processing Magazine}
#'
#' Selesnick, I. (200X). \emph{IEEE Transactions in Signal Processing}
#'
#' Whitcher, B. and P.F. Craigmile (2004). Multivariate Spectral Analysis Using
#' Hilbert Wavelet Pairs, \emph{International Journal of Wavelets,
#' Multiresolution and Information Processing}, \bold{2}(4), 567--587.
#' @keywords ts
dwt.hilbert <- function(x, wf, n.levels=4, boundary="periodic", ...) {
switch(boundary,
"reflection" = x <- c(x, rev(x)),
"periodic" = invisible(),
stop("Invalid boundary rule in dwt.hilbert"))
N <- length(x)
J <- n.levels
if(N/2^J != trunc(N/2^J))
stop("Sample size is not divisible by 2^J")
if(2^J > N)
stop("Wavelet transform exceeds sample size in dwt")
dict <- hilbert.filter(wf)
L <- dict$length; storage.mode(L) <- "integer"
h0 <- dict$lpf[[1]]; storage.mode(h0) <- "double"
g0 <- dict$lpf[[2]]; storage.mode(g0) <- "double"
h1 <- dict$hpf[[1]]; storage.mode(h1) <- "double"
g1 <- dict$hpf[[2]]; storage.mode(g1) <- "double"
y <- vector("list", J+1)
names(y) <- c(paste("d", 1:J, sep=""), paste("s", J, sep=""))
x.h <- x.g <- x
for(j in 1:J) {
W <- V <- numeric(N/2^j)
out.h <- .C(C_dwt, as.double(x.h), as.integer(N/2^(j-1)), L, h1, h0,
W = W, V = V)[6:7]
out.g <- .C(C_dwt, as.double(x.g), as.integer(N/2^(j-1)), L, g1, g0,
W = W, V = V)[6:7]
y[[j]] <- complex(real = out.h$W, imaginary = out.g$W)
x.h <- out.h$V
x.g <- out.g$V
}
y[[J+1]] <- complex(real = x.h, imaginary = x.g)
attr(y, "wavelet") <- wf
attr(y, "levels") <- n.levels
attr(y, "boundary") <- boundary
return(y)
}
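## Illustrative sketch (added for exposition, not part of the original
## source): the DHWT returns complex-valued coefficients, the real and
## imaginary parts coming from the two filters of the Hilbert wavelet pair.
## The "k4l4" HWP is assumed to be available via hilbert.filter(), defined
## later in this file.
x <- rnorm(128)
x.dhwt <- dwt.hilbert(x, "k4l4", n.levels = 3)
sapply(x.dhwt, is.complex)
sapply(x.dhwt, length)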
########################################################################
dwt.hilbert.nondyadic <- function(x, ...) {
M <- length(x)
N <- 2^(ceiling(log(M, 2)))
xx <- c(x, rep(0, N - M))
y <- dwt.hilbert(xx, ...)
J <- length(y) - 1
for(j in 1:J) {
y[[j]] <- y[[j]][1:trunc(M/2^j)]
}
return(y)
}
########################################################################
idwt.hilbert <- function(y) {
switch(attributes(y)$boundary,
"reflection" = y <- c(y, rev(y)),
"periodic" = invisible(),
stop("Invalid boundary rule in dwt.dbp"))
J <- attributes(y)$levels
dict <- hilbert.filter(attributes(y)$wavelet)
L <- dict$length; storage.mode(L) <- "integer"
h <- dict$hpf; storage.mode(h) <- "double"
g <- dict$lpf; storage.mode(g) <- "double"
jj <- paste("s", J, sep="")
X <- y[[jj]]
for(j in J:1) {
jj <- paste("d", j, sep="")
XX <- numeric(2 * length(y[[jj]]))
X <- .C(C_idwt, y[[jj]], as.double(X), as.integer(length(X)),
L, h, g, XX=XX)$XX
}
return(X)
}
########################################################################
modwt.hilbert <- function(x, wf, n.levels=4, boundary="periodic", ...) {
switch(boundary,
"reflection" = x <- c(x, rev(x)),
"periodic" = invisible(),
stop("Invalid boundary rule in modwt"))
N <- length(x)
storage.mode(N) <- "integer"
J <- n.levels
if(2^J > N) stop("wavelet transform exceeds sample size in modwt")
dict <- hilbert.filter(wf)
L <- dict$length; storage.mode(L) <- "integer"
h0 <- dict$lpf[[1]] / sqrt(2); storage.mode(h0) <- "double"
g0 <- dict$lpf[[2]] / sqrt(2); storage.mode(g0) <- "double"
h1 <- dict$hpf[[1]] / sqrt(2); storage.mode(h1) <- "double"
g1 <- dict$hpf[[2]] / sqrt(2); storage.mode(g1) <- "double"
y <- vector("list", J+1)
names(y) <- c(paste("d", 1:J, sep=""), paste("s", J, sep=""))
W <- V <- numeric(N)
x.h <- x.g <- x
for(j in 1:J) {
out.h <- .C(C_modwt, as.double(x.h), N, as.integer(j), L, h1, h0,
W = W, V = V)[7:8]
out.g <- .C(C_modwt, as.double(x.g), N, as.integer(j), L, g1, g0,
W = W, V = V)[7:8]
y[[j]] <- complex(real = out.h$W, imaginary = out.g$W)
x.h <- out.h$V
x.g <- out.g$V
}
y[[J+1]] <- complex(real = x.h, imaginary = x.g)
attr(y, "wavelet") <- wf
attr(y, "boundary") <- boundary
attr(y, "levels") <- n.levels
return(y)
}
########################################################################
imodwt.hilbert <- function(y) {
if(attributes(y)$boundary != "periodic")
stop("Invalid boundary rule in imodwt")
J <- length(y) - 1
dict <- hilbert.filter(attributes(y)$wavelet)
L <- dict$length
ht <- dict$hpf / sqrt(2)
gt <- dict$lpf / sqrt(2)
jj <- paste("s", J, sep="")
X <- y[[jj]]; N <- length(X)
XX <- numeric(N)
for(j in J:1) {
jj <- paste("d", j, sep="")
X <- .C(C_imodwt, y[[jj]], X, as.integer(N), as.integer(j),
as.integer(L), ht, gt, XX)[[8]]
}
return(X)
}
########################################################################
#' Select a Hilbert Wavelet Pair
#'
#' Converts name of Hilbert wavelet pair to filter coefficients.
#'
#' Simple \code{switch} statement selects the appropriate HWP. There are two
#' parameters that define a Hilbert wavelet pair using the notation of
#' Selesnick (2001,2002), \eqn{K} and \eqn{L}. Currently, the implemented
#' combinations \eqn{(K,L)} are (3,3), (3,5), (4,2), (4,4), (5,7) and (6,6).
#'
#' @param name Character string of Hilbert wavelet pair, see acceptable names
#' below (e.g., \code{"k3l3"}).
#' @return List containing the following items: \item{length}{length of the
#' wavelet filter} \item{hpf}{list of the two high-pass filters (h1, g1)}
#' \item{lpf}{list of the two low-pass filters (h0, g0)}
#' @author B. Whitcher
#' @seealso \code{\link{wave.filter}}
#' @references Selesnick, I.W. (2001). Hilbert transform pairs of wavelet
#' bases. \emph{IEEE Signal Processing Letters} \bold{8}(6), 170--173.
#'
#' Selesnick, I.W. (2002). The design of approximate Hilbert transform pairs
#' of wavelet bases. \emph{IEEE Transactions on Signal Processing}
#' \bold{50}(5), 1144--1152.
#' @keywords ts
#' @examples
#'
#' hilbert.filter("k3l3")
#' hilbert.filter("k3l5")
#' hilbert.filter("k4l2")
#' hilbert.filter("k4l4")
#'
#' @export hilbert.filter
hilbert.filter <- function(name) {
select.K3L3 <- function() {
L <- 12
h0 <- c(1.1594353e-04, -2.2229002e-03, -2.2046914e-03, 4.3427642e-02,
-3.3189896e-02, -1.5642755e-01, 2.8678636e-01, 7.9972652e-01,
4.9827824e-01, 2.4829160e-02, -4.2679177e-02, -2.2260892e-03)
h1 <- qmf(h0)
g0 <- c(1.6563361e-05, -5.2543406e-05, -6.1909121e-03, 1.9701141e-02,
3.2369691e-02, -1.2705043e-01, -1.5506397e-02, 6.1333712e-01,
7.4585008e-01, 2.1675412e-01, -4.9432248e-02, -1.5582624e-02)
g1 <- qmf(g0)
return(list(length = L, hpf = list(h1, g1), lpf = list(h0, g0)))
}
select.K3L5 <- function() {
    L <- 16
h0 <- c(5.4258791e-06, -2.1310518e-04, -2.6140914e-03, 1.0212881e-02,
3.5747880e-02, -4.5576766e-02, 3.9810341e-03, 5.3402475e-01,
7.8757164e-01, 2.6537457e-01, -1.3008915e-01, -5.9573795e-02,
1.2733976e-02, 2.8641011e-03, -2.2992683e-04, -5.8541759e-06)
h1 <- qmf(h0)
g0 <- c(4.9326174e-07, 3.5727140e-07, -1.1664703e-03, -8.4003116e-04,
2.8601474e-02, 9.2509748e-03, -7.4562251e-02, 2.2929480e-01,
7.6509138e-01, 5.8328559e-01, -4.6218010e-03, -1.2336841e-01,
-6.2826896e-03, 9.5478911e-03, 4.6642226e-05, -6.4395935e-05)
g1 <- qmf(g0)
return(list(length = L, hpf = list(h1, g1), lpf = list(h0, g0)))
}
select.K4L2 <- function() {
L <- 12
h0 <- c(-1.7853301e-03, 1.3358873e-02, 3.6090743e-02, -3.4722190e-02,
4.1525062e-02, 5.6035837e-01, 7.7458617e-01, 2.2752075e-01,
-1.6040927e-01, -6.1694251e-02, 1.7099408e-02, 2.2852293e-03)
h1 <- qmf(h0)
g0 <- c(-3.5706603e-04, -1.8475351e-04, 3.2591486e-02, 1.3449902e-02,
-5.8466725e-02, 2.7464308e-01, 7.7956622e-01, 5.4097379e-01,
-4.0315008e-02, -1.3320138e-01, -5.9121296e-03, 1.1426146e-02)
g1 <- qmf(g0)
return(list(length = L, hpf = list(h1, g1), lpf = list(h0, g0)))
}
select.K4L4 <- function() {
L <- 16
h0 <- c(2.5734665593981519e-05, -6.6909066441298817e-04,
-5.5482443985275260e-03, 1.3203474646343588e-02,
3.8605327384848696e-02, -5.0687259299773510e-02,
8.1364447220208733e-03, 5.3021727476690994e-01,
7.8330912249663232e-01, 2.7909546754271131e-01,
-1.3372674246928601e-01, -6.9759509629953295e-02,
1.6979390952358446e-02, 5.7323570134311854e-03,
-6.7425216644469892e-04, -2.5933188060087743e-05)
h1 <- qmf(h0)
g0 <- c(2.8594072882201687e-06, 1.9074538622058143e-06,
-2.9903835439216066e-03, -1.9808995184875909e-03,
3.3554663884350758e-02, 7.7023844121478988e-03,
-7.7084571412435535e-02, 2.3298110528093252e-01,
7.5749376288995063e-01, 5.8834703992067783e-01,
5.1708789323078770e-03, -1.3520099946241465e-01,
-9.1961246067629732e-03, 1.5489641793018745e-02,
1.5569563641876791e-04, -2.3339869254078969e-04)
g1 <- qmf(g0)
return(list(length = L, hpf = list(h1, g1), lpf = list(h0, g0)))
}
select.K5L7 <- function() {
L <- 24
h0 <- c(-2.5841959496364648e-10, 6.0231243121018760e-10,
2.1451486802217960e-06, -4.9989222844980982e-06,
-2.2613489535132104e-04, 5.1967501391358343e-04,
3.4011963595840899e-03, -7.1996997688061597e-03,
-1.7721433874932836e-02, 3.5491112173858148e-02,
3.0580617312936355e-02, -1.3452365188777773e-01,
2.1741748603083836e-03, 5.8046856094922639e-01,
7.4964083145768690e-01, 2.6775497264154541e-01,
-7.9593287728224230e-02, -4.3942149960221458e-02,
1.9574969406037097e-02, 8.8554643330725387e-03,
-7.2770446614145033e-04, -3.1310992841759443e-04,
1.4045333283124608e-06, 6.0260907100656169e-07)
h1 <- qmf(h0)
g0 <- c(-3.8762939244546978e-09, 2.9846463282743695e-07,
5.6276030758515370e-06, -7.7697066311187957e-05,
-2.1442686434841905e-04, 2.1948612668324223e-03,
9.5408758453423542e-04, -1.7149735951945008e-02,
1.5212479104581677e-03, 5.6600564413983846e-02,
-4.8900162376504831e-02, -1.3993440493611778e-01,
2.7793346796113222e-01, 7.6735603850281364e-01,
5.4681951651005178e-01, 3.6275855872448776e-02,
-8.8224410289407154e-02, 3.2821708368951431e-05,
1.7994969189524142e-02, 1.8662128501760204e-03,
-7.8622878632753014e-04, -5.8077443328549205e-05,
3.0932895975646042e-06, 4.0173938067104100e-08)
g1 <- qmf(g0)
return(list(length = L, hpf = list(h1, g1), lpf = list(h0, g0)))
}
select.K6L6 <- function() {
L <- 24
    h0 <- c(1.4491207137947255e-09, -3.4673992369566253e-09,
-6.7544152844875963e-06, 1.6157040144070828e-05,
4.0416340595645441e-04, -9.4536696039781878e-04,
-4.2924086033924620e-03, 9.0688042722858742e-03,
1.8690864167884680e-02, -3.7883945370993717e-02,
-2.7337592282061701e-02, 1.3185812419468312e-01,
-2.1034481553730465e-02, -5.9035515013747486e-01,
-7.4361804647499452e-01, -2.5752016951708306e-01,
9.2725410672739983e-02, 4.9100676534870831e-02,
-2.4411085480175867e-02, -1.1190458223944993e-02,
1.7793885751382626e-03, 7.4715940333597059e-04,
-6.2392430013359510e-06, -2.6075498267775052e-06)
h1 <- qmf(h0)
g0 <- c(1.8838569279331431e-08, -1.1000360697229965e-06,
-1.4600820117782769e-05, 1.6936567299204319e-04,
2.6967189953984829e-04, -3.1633669438102655e-03,
-7.2081460313487946e-04, 1.9638595542490079e-02,
-3.0968325940269846e-03, -5.6722348677476261e-02,
5.2260784738219289e-02, 1.2763836788794369e-01,
-2.9566169882112192e-01, -7.6771793937333599e-01,
-5.3818432160802543e-01, -2.4023872575927138e-02,
9.9019132161496132e-02, -1.2059411664071501e-03,
-2.2693488886969308e-02, -1.8724943382560243e-03,
1.7270823778712107e-03, 1.5415480681200776e-04,
-1.1712464100067407e-05, -2.0058075590596196e-07)
g1 <- qmf(g0)
return(list(length = L, hpf = list(-h1, -g1), lpf = list(-h0, -g0)))
}
switch(name,
"k3l3" = select.K3L3(),
"k3l5" = select.K3L5(),
"k4l2" = select.K4L2(),
"k4l4" = select.K4L4(),
"k5l7" = select.K5L7(),
"k6l6" = select.K6L6(),
stop("Invalid selection for hilbert.filter"))
}
########################################################################
#' Phase Shift for Hilbert Wavelet Coefficients
#'
#' Wavelet coefficients are circularly shifted by the amount of phase shift
#' induced by the discrete Hilbert wavelet transform.
#'
#' The "center-of-energy" argument of Hess-Nielsen and Wickerhauser (1996) is
#' used to provide a flexible way to circularly shift wavelet coefficients
#' regardless of the wavelet filter used.
#'
#' @aliases phase.shift.hilbert phase.shift.hilbert.packet
#' @param x Discrete Hilbert wavelet transform (DHWT) object.
#' @param wf character string; Hilbert wavelet pair used in DHWT
#' @return DHWT (DHWPT) object with coefficients circularly shifted.
#' @author B. Whitcher
#' @seealso \code{\link{phase.shift}}
#' @references Hess-Nielsen, N. and M. V. Wickerhauser (1996) Wavelets and
#' time-frequency analysis, \emph{Proceedings of the IEEE}, \bold{84}, No. 4,
#' 523-540.
#' @keywords ts
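#' @examples
#' # Sketch only (not run): circularly shift Hilbert DWT coefficients so they
#' # line up with the original series (assumes dwt.hilbert from this package).
#' \dontrun{
#' x <- rnorm(256)
#' x.dhwt <- dwt.hilbert(x, "k4l4", n.levels = 4)
#' x.dhwt.shifted <- phase.shift.hilbert(x.dhwt, "k4l4")
#' }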
#' @export phase.shift.hilbert
phase.shift.hilbert <- function(x, wf) {
coe <- function(g)
sum(0:(length(g)-1) * g^2) / sum(g^2)
J <- length(x) - 1
h0 <- hilbert.filter(wf)$lpf[[1]]
h1 <- hilbert.filter(wf)$hpf[[1]]
for(j in 1:J) {
ph <- round(2^(j-1) * (coe(h0) + coe(h1)) - coe(h0), 0)
Nj <- length(x[[j]])
x[[j]] <- c(x[[j]][(ph+1):Nj], x[[j]][1:ph])
}
ph <- round((2^J-1) * coe(h0), 0)
J <- J + 1
x[[J]] <- c(x[[J]][(ph+1):Nj], x[[J]][1:ph])
return(x)
}
########################################################################
modwpt.hilbert <- function(x, wf, n.levels=4, boundary="periodic") {
N <- length(x)
storage.mode(N) <- "integer"
J <- n.levels
if(2^J > N) stop("wavelet transform exceeds sample size in modwpt")
dict <- hilbert.filter(wf)
L <- dict$length; storage.mode(L) <- "integer"
h0 <- dict$lpf[[1]] / sqrt(2); storage.mode(h0) <- "double"
g0 <- dict$lpf[[2]] / sqrt(2); storage.mode(g0) <- "double"
h1 <- dict$hpf[[1]] / sqrt(2); storage.mode(h1) <- "double"
g1 <- dict$hpf[[2]] / sqrt(2); storage.mode(g1) <- "double"
y <- vector("list", sum(2^(1:J)))
yn <- length(y)
crystals1 <- rep(1:J, 2^(1:J))
crystals2 <- unlist(apply(as.matrix(2^(1:J) - 1), 1, seq, from=0))
names(y) <- paste("w", crystals1, ".", crystals2, sep="")
W <- V <- numeric(N)
storage.mode(W) <- storage.mode(V) <- "double"
for(j in 1:J) {
## cat(paste("j =", j, fill=T))
index <- 0
jj <- min((1:yn)[crystals1 == j])
for(n in 0:(2^j / 2 - 1)) {
index <- index + 1
if(j > 1)
x <- y[[(1:yn)[crystals1 == j-1][index]]]
else
x <- complex(real=x, imaginary=x)
if(n %% 2 == 0) {
zr <- .C(C_modwt, as.double(Re(x)), N, as.integer(j), L, h1, h0,
W = W, V = V)[7:8]
zc <- .C(C_modwt, as.double(Im(x)), N, as.integer(j), L, g1, g0,
W = W, V = V)[7:8]
y[[jj + 2*n + 1]] <- complex(real=zr$W, imaginary=zc$W)
y[[jj + 2*n]] <- complex(real=zr$V, imaginary=zc$V)
}
else {
zr <- .C(C_modwt, as.double(Re(x)), N, as.integer(j), L, h1, h0,
W = W, V = V)[7:8]
zc <- .C(C_modwt, as.double(Im(x)), N, as.integer(j), L, g1, g0,
W = W, V = V)[7:8]
y[[jj + 2*n]] <- complex(real=zr$W, imaginary=zc$W)
y[[jj + 2*n + 1 ]] <- complex(real=zr$V, imaginary=zc$V)
}
}
}
attr(y, "wavelet") <- wf
return(y)
}
########################################################################
phase.shift.hilbert.packet <- function(x, wf) {
coe <- function(g)
sum(0:(length(g)-1) * g^2) / sum(g^2)
dict <- hilbert.filter(wf)
h0 <- dict$lpf[[1]]; h1 <- dict$hpf[[1]]
g0 <- dict$lpf[[2]]; g1 <- dict$hpf[[2]]
xn <- length(x)
N <- length(x[[1]])
J <- trunc(log(xn,2))
jbit <- vector("list", xn)
jbit[[1]] <- FALSE; jbit[[2]] <- TRUE
crystals1 <- rep(1:J, 2^(1:J))
for(j in 1:J) {
jj <- min((1:xn)[crystals1 == j])
for(n in 0:(2^j - 1)) {
if(j > 1) {
jp <- min((1:xn)[crystals1 == j-1])
if(n %% 4 == 0 | n %% 4 == 3)
jbit[[jj + n]] <- c(jbit[[jp + floor(n/2)]], FALSE)
else
jbit[[jj + n]] <- c(jbit[[jp + floor(n/2)]], TRUE)
}
Sjn0 <- sum((1 - jbit[[jj + n]]) * 2^(0:(j-1)))
Sjn1 <- sum(jbit[[jj + n]] * 2^(0:(j-1)))
ph <- round(Sjn0 * coe(h0) + Sjn1 * coe(h1), 0)
x[[jj + n]] <- c(x[[jj + n]][(ph+1):N], x[[jj + n]][1:ph])
}
}
return(x)
}
#' Time-varying and Seasonal Analysis Using Hilbert Wavelet Pairs
#'
#' Performs time-varying or seasonal coherence and phase analysis between two
#' time series using the maximal-overlap discrete Hilbert wavelet transform
#' (MODHWT).
#'
#' The idea of seasonally-varying spectral analysis (SVSA, Madden 1986) is
#' generalized using the MODWT and Hilbert wavelet pairs. For the seasonal
#' case, \eqn{S} seasons are used to produce a consistent estimate of the
#' coherence and phase. For the non-seasonal case, a simple rectangular
#' (moving-average) filter is applied to the MODHWT coefficients in order to
#' produce consistent estimates.
#'
#' @usage modhwt.coh(x, y, f.length = 0)
#' @usage modhwt.phase(x, y, f.length = 0)
#' @usage modhwt.coh.seasonal(x, y, S = 10, season = 365)
#' @usage modhwt.phase.seasonal(x, y, season = 365)
#' @aliases modhwt.coh modhwt.phase modhwt.coh.seasonal modhwt.phase.seasonal
#' @param x MODHWT object.
#' @param y MODHWT object.
#' @param f.length Length of the rectangular filter.
#' @param S Number of "seasons".
#' @param season Length of the "season".
#' @return Time-varying or seasonal coherence and phase between two time
#' series. The coherence estimates are between zero and one, while the phase
#' estimates are between \eqn{-\pi}{-pi} and \eqn{\pi}{pi}.
#' @author B. Whitcher
#' @seealso \code{\link{hilbert.filter}}
#' @references Madden, R.A. (1986). Seasonal variation of the 40--50 day
#' oscillation in the tropics. \emph{Journal of the Atmospheric Sciences}
#' \bold{43}(24), 3138--3158.
#'
#' Whitcher, B. and P.F. Craigmile (2004). Multivariate Spectral Analysis Using
#' Hilbert Wavelet Pairs, \emph{International Journal of Wavelets,
#' Multiresolution and Information Processing}, \bold{2}(4), 567--587.
#' @keywords ts
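#' @examples
#' # Sketch only (not run): time-varying coherence and phase between two
#' # correlated series (assumes modwt.hilbert from this package).
#' \dontrun{
#' n <- 512
#' x <- rnorm(n)
#' y <- 0.9 * x + sqrt(1 - 0.9^2) * rnorm(n)
#' x.modhwt <- modwt.hilbert(x, "k4l4", n.levels = 4)
#' y.modhwt <- modwt.hilbert(y, "k4l4", n.levels = 4)
#' coh <- modhwt.coh(x.modhwt, y.modhwt, f.length = 32)
#' phi <- modhwt.phase(x.modhwt, y.modhwt, f.length = 32)
#' }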
modhwt.coh <- function(x, y, f.length = 0) {
filt <- rep(1, f.length + 1)
filt <- filt / length(filt)
J <- length(x) - 1
coh <- vector("list", J)
for(j in 1:J) {
co.spec <- filter(Re(x[[j]] * Conj(y[[j]])), filt)
quad.spec <- filter(-Im(x[[j]] * Conj(y[[j]])), filt)
x.spec <- filter(Mod(x[[j]])^2, filt)
y.spec <- filter(Mod(y[[j]])^2, filt)
coh[[j]] <- (co.spec^2 + quad.spec^2) / x.spec / y.spec
}
coh
}
########################################################################
modhwt.phase <- function(x, y, f.length = 0) {
filt <- rep(1, f.length + 1)
filt <- filt / length(filt)
J <- length(x) - 1
phase <- vector("list", J)
for(j in 1:J) {
co.spec <- filter(Re(x[[j]] * Conj(y[[j]])), filt)
quad.spec <- filter(-Im(x[[j]] * Conj(y[[j]])), filt)
phase[[j]] <- Arg(co.spec - 1i * quad.spec)
}
phase
}
########################################################################
modhwt.coh.seasonal <- function(x, y, S=10, season=365) {
J <- length(x) - 1
coh <- shat <- vector("list", J)
for(j in 1:J) {
xj <- x[[j]]
yj <- y[[j]]
## Cospectrum
co <- matrix(Re(xj * Conj(yj)), ncol=season, byrow=TRUE)
co.spec <- c(apply(co, 2, mean, na.rm=TRUE))
gamma.c <- my.acf(as.vector(co))
omega.c <- sum(gamma.c[c(1, rep(seq(season+1, S*season, by=season),
each=2))])
## Quadrature spectrum
quad <- matrix(-Im(xj * Conj(yj)), ncol=season, byrow=TRUE)
quad.spec <- c(apply(quad, 2, mean, na.rm=TRUE))
gamma.q <- my.acf(as.vector(quad))
omega.q <- sum(gamma.q[c(1, rep(seq(season+1, S*season, by=season),
each=2))])
gamma.cq <- my.ccf(as.vector(co), as.vector(quad))
omega.cq <- sum(gamma.cq[S*season + seq(-S*season+1, S*season, by=season)])
## Autospectrum(X)
autoX <- matrix(Mod(xj)^2, ncol=season, byrow=TRUE)
x.spec <- c(apply(autoX, 2, mean, na.rm=TRUE))
## Autospectrum(Y)
autoY <- matrix(Mod(yj)^2, ncol=season, byrow=TRUE)
y.spec <- c(apply(autoY, 2, mean, na.rm=TRUE))
shat[[j]] <- 4 * (co.spec*omega.c + quad.spec * omega.q +
2*co.spec*quad.spec*omega.cq) / x.spec^2 / y.spec^2
coh[[j]] <- (co.spec^2 + quad.spec^2) / x.spec / y.spec
}
list(coh = coh, var = shat)
}
########################################################################
modhwt.phase.seasonal <- function(x, y, season=365) {
J <- length(x) - 1
phase <- vector("list", J)
for(j in 1:J) {
co.spec <- Re(x[[j]] * Conj(y[[j]]))
co.spec <- c(apply(matrix(co.spec, ncol=season, byrow=TRUE), 2,
mean, na.rm=TRUE))
quad.spec <- -Im(x[[j]] * Conj(y[[j]]))
quad.spec <- c(apply(matrix(quad.spec, ncol=season, byrow=TRUE), 2,
mean, na.rm=TRUE))
phase[[j]] <- Arg(co.spec - 1i * quad.spec)
}
phase
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/hilbert.R
|
#' Generate Stationary Gaussian Process Using Hosking's Method
#'
#' Uses exact time-domain method from Hosking (1984) to generate a simulated
#' time series from a specified autocovariance sequence.
#'
#'
#' @param n Length of series.
#' @param acvs Autocovariance sequence of series with which to generate, must
#' be of length at least \code{n}.
#' @return Length \code{n} time series from true autocovariance sequence
#' \code{acvs}.
#' @author B. Whitcher
#' @references Hosking, J. R. M. (1984) Modeling persistence in hydrological
#' time series using fractional differencing, \emph{Water Resources Research},
#' \bold{20}, No. 12, 1898-1908.
#'
#' Percival, D. B. (1992) Simulating Gaussian random processes with specified
#' spectra, \emph{Computing Science and Statistics}, \bold{22}, 534-538.
#' @keywords ts
#' @examples
#'
#' dB <- function(x) 10 * log10(x)
#' per <- function (z) {
#' n <- length(z)
#' (Mod(fft(z))^2/(2 * pi * n))[1:(n%/%2 + 1)]
#' }
#' spp.sdf <- function(freq, delta, omega)
#' abs(2 * (cos(2*pi*freq) - cos(2*pi*omega)))^(-2*delta)
#' data(acvs.andel8)
#' n <- 1024
#' \dontrun{
#' z <- hosking.sim(n, acvs.andel8[,2])
#' per.z <- 2 * pi * per(z)
#' par(mfrow=c(2,1), las=1)
#' plot.ts(z, ylab="", main="Realization of a Seasonal Long-Memory Process")
#' plot(0:(n/2)/n, dB(per.z), type="l", xlab="Frequency", ylab="dB",
#' main="Periodogram")
#' lines(0:(n/2)/n, dB(spp.sdf(0:(n/2)/n, .4, 1/12)), col=2)
#' }
#'
#' @export hosking.sim
hosking.sim <- function(n, acvs) {
.C(C_hosking, tseries=rnorm(n), as.integer(n), as.double(acvs[1:n]))$tseries
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/hosking.R
|
#' Autocovariance Functions via the Discrete Fourier Transform
#'
#' Computes the autocovariance function (ACF) for a time series or the
#' cross-covariance function (CCF) between two time series.
#'
#' The series is zero padded to twice its length before the discrete Fourier
#' transform is applied. Only the values corresponding to nonnegative lags are
#' provided (for the ACF).
#'
#' @usage my.acf(x)
#' @usage my.ccf(a, b)
#' @aliases my.acf my.ccf
#' @param x,a,b time series
#' @return The autocovariance function for all nonnegative lags or the
#' cross-covariance function for all lags.
#' @author B. Whitcher
#' @keywords ts
#' @examples
#'
#' data(ibm)
#' ibm.returns <- diff(log(ibm))
#' plot(1:length(ibm.returns) - 1, my.acf(ibm.returns), type="h",
#' xlab="lag", ylab="ACVS", main="Autocovariance Sequence for IBM Returns")
#'
#' @export my.acf
my.acf <- function(x)
{
n <- length(x)
x <- c(x, rep(0, n))
Re(fft(Mod(fft(x)) ^ 2, inverse = TRUE) / 2 / n ^ 2)[1:n]
}
my.ccf <- function(a, b) {
n <- length(a)
a <- c(a, rep(0, n))
b <- c(b, rep(0, n))
x <- Re(fft(fft(a) * Conj(fft(b)), inverse = TRUE)) / 2 / n ^ 2
x[c((n + 2):(2 * n), 1:n)]
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/misc.R
|
#' Multiresolution Analysis of an Image
#'
#' This function performs a level \eqn{J} additive decomposition of the input
#' matrix or image using the pyramid algorithm (Mallat 1989).
#'
#' This code implements a two-dimensional multiresolution analysis by
#' performing the one-dimensional pyramid algorithm (Mallat 1989) on the rows
#' and columns of the input matrix. Either the DWT or MODWT may be used to
#' compute the multiresolution analysis, which is an additive decomposition of
#' the original matrix (image).
#'
#' @param x A matrix or image containing the data to be decomposed. This must
#' have dyadic length in both dimensions (but not necessarily the same) for
#' \code{method="dwt"}.
#' @param wf Name of the wavelet filter to use in the decomposition. By
#' default this is set to \code{"la8"}, the Daubechies orthonormal compactly
#' supported wavelet of length L=8 least asymmetric family.
#' @param J Specifies the depth of the decomposition. This must be a number
#' less than or equal to log(length(x),2).
#' @param method Either \code{"dwt"} or \code{"modwt"}.
#' @param boundary Character string specifying the boundary condition. If
#' \code{boundary=="periodic"} the default, then the matrix you decompose is
#' assumed to be periodic on its defined interval,\cr if
#' \code{boundary=="reflection"}, the matrix beyond its boundaries is assumed
#' to be a symmetric reflection of itself.
#' @return A list with the following components \item{LH?}{Wavelet
#' detail image in the horizontal direction.} \item{HL?}{Wavelet detail image
#' in the vertical direction.} \item{HH?}{Wavelet detail image in the diagonal
#' direction.} \item{LLJ}{Wavelet smooth image at the coarsest resolution.}
#' \item{J}{Depth of the wavelet transform.} \item{wavelet}{Name of the wavelet
#' filter used.} \item{boundary}{How the boundaries were handled.}
#' @author B. Whitcher
#' @seealso \code{\link{dwt.2d}}, \code{\link{modwt.2d}}
#' @references Mallat, S. G. (1989) A theory for multiresolution signal
#' decomposition: the wavelet representation, \emph{IEEE Transactions on
#' Pattern Analysis and Machine Intelligence}, \bold{11}, No. 7, 674-693.
#'
#' Mallat, S. G. (1998) \emph{A Wavelet Tour of Signal Processing}, Academic
#' Press.
#' @keywords ts
#' @examples
#'
#' ## Easy check to see if it works...
#' ## --------------------------------
#'
#' x <- matrix(rnorm(32*32), 32, 32)
#' # MODWT
#' x.mra <- mra.2d(x, method="modwt")
#' x.mra.sum <- x.mra[[1]]
#' for(j in 2:length(x.mra))
#' x.mra.sum <- x.mra.sum + x.mra[[j]]
#' sum((x - x.mra.sum)^2)
#'
#' # DWT
#' x.mra <- mra.2d(x, method="dwt")
#' x.mra.sum <- x.mra[[1]]
#' for(j in 2:length(x.mra))
#' x.mra.sum <- x.mra.sum + x.mra[[j]]
#' sum((x - x.mra.sum)^2)
#'
#' @export mra.2d
mra.2d <-
function(x, wf="la8", J=4, method="modwt", boundary="periodic")
{
m <- dim(x)[1]
n <- dim(x)[2]
switch(boundary,
"periodic" = invisible(),
stop("Invalid boundary rule in mra"))
if(method == "modwt") {
x.wt <- modwt.2d(x, wf, J, "periodic")
} else {
x.wt <- dwt.2d(x, wf, J, "periodic")
}
x.mra <- vector("list", 3*J+1)
## Smooth
zero <- vector("list", 3*J+1)
names(zero) <-
c(matrix(rbind(paste("LH", 1:J, sep=""), paste("HL", 1:J, sep=""),
paste("HH", 1:J, sep="")), nrow=1), paste("LL", J, sep=""))
attr(zero, "J") <- J
attr(zero, "wavelet") <- wf
attr(zero, "boundary") <- boundary
zero[[3*J+1]] <- x.wt[[3*J+1]]
if(method == "modwt") {
for(k in 1:(3*J))
zero[[k]] <- matrix(0, m, n)
x.mra[[3*J+1]] <- imodwt.2d(zero)
} else {
for(k in 1:J)
zero[[3*(k-1)+1]] <- zero[[3*(k-1)+2]] <- zero[[3*k]] <-
matrix(0, m/2^k, n/2^k)
x.mra[[3*J+1]] <- idwt.2d(zero)
}
## Details
for(j in (3*J):1) {
Jj <- ceiling(j/3)
zero <- vector("list", 3*Jj+1)
names(zero) <-
c(matrix(rbind(paste("LH", 1:Jj, sep=""), paste("HL", 1:Jj, sep=""),
paste("HH", 1:Jj, sep="")), nrow=1),
paste("LL", Jj, sep=""))
attr(zero, "J") <- Jj
attr(zero, "wavelet") <- wf
attr(zero, "boundary") <- boundary
zero[[j]] <- x.wt[[j]]
if(method == "modwt") {
for(k in names(zero)[-charmatch(names(zero)[j], names(zero))])
zero[[k]] <- matrix(0, m, n)
x.mra[[j]] <- imodwt.2d(zero)
} else {
for(k in 1:Jj)
zero[[3*(k-1)+1]] <- zero[[3*(k-1)+2]] <- zero[[3*k]] <-
matrix(0, m/2^k, n/2^k)
zero[[3*Jj+1]] <- matrix(0, m/2^Jj, n/2^Jj)
zero[[j]] <- x.wt[[j]]
x.mra[[j]] <- idwt.2d(zero)
}
}
names(x.mra) <-
c(matrix(rbind(paste("LH", 1:J, sep=""), paste("HL", 1:J, sep=""),
paste("HH", 1:J, sep="")), nrow=1), paste("LL", Jj, sep=""))
return(x.mra)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/mra.2d.R
|
#' Rotated Cumulative Variance
#'
#' Provides the normalized cumulative sums of squares from a sequence of
#' coefficients with the diagonal line removed.
#'
#' The rotated cumulative variance, when plotted, provides a qualitative way to
#' study the time dependence of the variance of a series. If the variance is
#' stationary over time, then only small deviations from zero should be
#' present. If on the other hand the variance is non-stationary, then large
#' departures may exist. Formal hypothesis testing may be performed based on
#' boundary crossings of Brownian bridge processes.
#'
#' @param x vector of coefficients to be cumulatively summed (missing values
#' excluded)
#' @return Vector of coefficients that are the cumulative sum of squared input
#' coefficients.
#' @author B. Whitcher
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#' @keywords ts
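#' @examples
#' # Illustrative sketch: rotated cumulative variance for white noise versus a
#' # series whose standard deviation doubles halfway through.
#' par(mfrow = c(2, 1))
#' plot.ts(rotcumvar(rnorm(512)), ylab = "", main = "Homogeneous variance")
#' plot.ts(rotcumvar(c(rnorm(256), rnorm(256, sd = 2))), ylab = "",
#'         main = "Variance change point")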
#' @export rotcumvar
rotcumvar <- function(x) {
x <- x[!is.na(x)]
n <- length(x)
plus <- 1:n/(n-1) - cumsum(x^2)/sum(x^2)
minus <- cumsum(x^2)/sum(x^2) - 0:(n-1)/(n-1)
pmax(abs(plus), abs(minus))
}
#' Testing for Homogeneity of Variance
#'
#' A recursive algorithm for detecting and locating multiple variance change
#' points in a sequence of random variables with long-range dependence.
#'
#' For details see Section 9.6 of Percival and Walden (2000) or Section 7.3 in
#' Gencay, Selcuk and Whitcher (2001).
#'
#' @param x Sequence of observations from a (long memory) time series.
#' @param wf Name of the wavelet filter to use in the decomposition.
#' @param J Specifies the depth of the decomposition. This must be a number
#' less than or equal to \eqn{\log(\mbox{length}(x),2)}{log(length(x),2)}.
#' @param min.coef Minimum number of wavelet coefficients for testing purposes.
#' Empirical results suggest that 128 is a reasonable number in order to apply
#' asymptotic critical values.
#' @param debug Boolean variable: if set to \code{TRUE}, actions taken by the
#' algorithm are printed to the screen.
#' @return Matrix whose columns include (1) the level of the wavelet transform
#' where the variance change occurs, (2) the value of the test statistic, (3)
#' the DWT coefficient where the change point is located, (4) the MODWT
#' coefficient where the change point is located. Note, there is currently no
#' checking that the MODWT is contained within the associated support of the
#' DWT coefficient. This could lead to incorrect estimates of the location of
#' the variance change.
#' @author B. Whitcher
#' @seealso \code{\link{dwt}}, \code{\link{modwt}}, \code{\link{rotcumvar}},
#' \code{\link{mult.loc}}.
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#' @keywords ts
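#' @examples
#' # Sketch only (not run): locate a variance change in a simulated series
#' # whose standard deviation doubles halfway through.
#' \dontrun{
#' x <- c(rnorm(2048), rnorm(2048, sd = 2))
#' testing.hov(x, wf = "la8", J = 6)
#' }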
#' @export testing.hov
testing.hov <- function(x, wf, J, min.coef=128, debug=FALSE) {
n <- length(x)
change.points <- NULL
x.dwt <- dwt(x, wf, J)
x.dwt.bw <- brick.wall(x.dwt, wf, method="dwt")
x.modwt <- modwt(x, wf, J)
x.modwt.bw <- brick.wall(x.modwt, wf)
for(j in 1:J) {
cat("##### Level ", j, " #####", fill=TRUE)
Nj <- n/2^j
dwt.list <- list(dwt = (x.dwt.bw[[j]])[!is.na(x.dwt.bw[[j]])],
left = min((1:Nj)[!is.na(x.dwt.bw[[j]])]) + 1,
right = sum(!is.na(x.dwt.bw[[j]])))
modwt.list <- list(modwt = (x.modwt.bw[[j]])[!is.na(x.modwt.bw[[j]])],
left = min((1:n)[!is.na(x.modwt.bw[[j]])]) + 1,
right = sum(!is.na(x.modwt.bw[[j]])))
if(debug) cat("Starting recursion; using", dwt.list$left,
"to", dwt.list$right - 1, "... ")
change.points <-
rbind(change.points,
mult.loc(dwt.list, modwt.list, wf, j, min.coef, debug))
}
dimnames(change.points) <-
list(NULL, c("level", "crit.value", "loc.dwt", "loc.modwt"))
return(change.points)
}
#' Wavelet-based Testing and Locating for Variance Change Points
#'
#' This is the major subroutine for \code{\link{testing.hov}}, providing the
#' workhorse algorithm to recursively test and locate multiple variance changes
#' in so-called long memory processes.
#'
#' For details see Section 9.6 of Percival and Walden (2000) or Section 7.3 in
#' Gencay, Selcuk and Whitcher (2001).
#'
#' @param dwt.list List of wavelet vector coefficients from the \code{dwt}.
#' @param modwt.list List of wavelet vector coefficients from the \code{modwt}.
#' @param wf Name of the wavelet filter to use in the decomposition.
#' @param level Specifies the depth of the decomposition.
#' @param min.coef Minimum number of wavelet coefficients for testing purposes.
#' @param debug Boolean variable: if set to \code{TRUE}, actions taken by the
#' algorithm are printed to the screen.
#' @return Matrix.
#' @author B. Whitcher
#' @seealso \code{\link{rotcumvar}}, \code{\link{testing.hov}}.
#' @references Gencay, R., F. Selcuk and B. Whitcher (2001) \emph{An
#' Introduction to Wavelets and Other Filtering Methods in Finance and
#' Economics}, Academic Press.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#' @keywords ts
#' @export mult.loc
mult.loc <- function(dwt.list, modwt.list, wf, level, min.coef, debug)
{
Nj <- length(dwt.list$dwt)
N <- length(modwt.list$modwt)
crit <- 1.358
change.points <- NULL
if(Nj > min.coef) {
## test statistic using the DWT
P <- cumsum(dwt.list$dwt^2) / sum(dwt.list$dwt^2)
test.stat <- pmax((1:Nj) / (Nj-1) - P, P - (1:Nj - 1) / (Nj-1))
loc.dwt <- (1:Nj)[max(test.stat) == test.stat]
test.stat <- max(test.stat)
## location using the MODWT
P <- cumsum(modwt.list$modwt^2) / sum(modwt.list$modwt^2)
loc.stat <- pmax((1:N) / (N-1) - P, P - (1:N - 1) / (N-1))
loc.modwt <- (1:N)[max(loc.stat) == loc.stat]
if(test.stat > sqrt(2) * crit / sqrt(Nj)) {
if(debug) cat("Accepted!", fill=TRUE)
## Left
if(debug) cat("Going left; using", dwt.list$left,
"to", loc.dwt + dwt.list$left - 1, "... ")
temp.dwt.list <- list(dwt = dwt.list$dwt[1:(loc.dwt-1)],
left = dwt.list$left,
right = loc.dwt + dwt.list$left - 1)
temp.modwt.list <- list(modwt = modwt.list$modwt[1:(loc.modwt-1)],
left = modwt.list$left,
right = loc.modwt + modwt.list$left - 1)
change.points <-
rbind(c(level, test.stat, loc.dwt + dwt.list$left,
loc.modwt + modwt.list$left),
Recall(temp.dwt.list, temp.modwt.list, wf, level, min.coef, debug))
## Right
if(debug) cat("Going right; using", loc.dwt + dwt.list$left + 1,
"to", dwt.list$right, "... ")
temp.dwt.list <- list(dwt = dwt.list$dwt[(loc.dwt+1):Nj],
left = loc.dwt + dwt.list$left + 1,
right = dwt.list$right)
temp.modwt.list <- list(modwt = modwt.list$modwt[(loc.modwt+1):N],
left = loc.modwt + modwt.list$left + 1,
right = modwt.list$right)
change.points <-
rbind(change.points,
Recall(temp.dwt.list, temp.modwt.list, wf, level, min.coef, debug))
}
else
if(debug) cat("Rejected!", fill=TRUE)
}
else
if(debug) cat("Sample size does not exceed ", min.coef, "!",
sep="", fill=TRUE)
return(change.points)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/multiple.R
|
#' Periodogram
#'
#' Computation of the periodogram via the Fast Fourier Transform (FFT).
#'
#'
#' @param z time series
#' @author Jan Beran; modified by Martin Maechler, Sep 1995.
#' @keywords ts
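#' @examples
#' # Sketch only (not run): periodogram of white noise at the Fourier
#' # frequencies 0, 1/n, ..., 1/2.
#' \dontrun{
#' z <- rnorm(128)
#' plot(0:64/128, per(z), type = "l", xlab = "frequency", ylab = "periodogram")
#' }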
per <- function(z) {
n <- length(z)
(Mod(fft(z)) ** 2 / (2 * pi * n)) [1:(n %/% 2 + 1)]
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/periodogram.R
|
#' Spectral Density Functions for Long-Memory Processes
#'
#' Draws the spectral density functions (SDFs) for standard long-memory
#' processes including fractional difference (FD), seasonal persistent (SP),
#' and seasonal fractional difference (SFD) processes.
#'
#'
#' @usage fdp.sdf(freq, d, sigma2 = 1)
#' @usage spp.sdf(freq, d, fG, sigma2 = 1)
#' @usage spp2.sdf(freq, d1, f1, d2, f2, sigma2 = 1)
#' @usage sfd.sdf(freq, s, d, sigma2 = 1)
#' @aliases fdp.sdf spp.sdf spp2.sdf sfd.sdf
#' @param freq vector of frequencies, normally from 0 to 0.5
#' @param d,d1,d2 fractional difference parameter
#' @param fG,f1,f2 Gegenbauer frequency
#' @param s seasonal parameter
#' @param sigma2 innovations variance
#' @return The power spectrum from an FD, SP or SFD process.
#' @author B. Whitcher
#' @seealso \code{\link{fdp.mle}}, \code{\link{spp.mle}}.
#' @keywords ts
#' @examples
#'
#' dB <- function(x) 10 * log10(x)
#'
#' fdp.main <- expression(paste("FD", group("(",d==0.4,")")))
#' sfd.main <- expression(paste("SFD", group("(",list(s==12, d==0.4),")")))
#' spp.main <- expression(paste("SPP",
#' group("(",list(delta==0.4, f[G]==1/12),")")))
#'
#' freq <- 0:512/1024
#'
#' par(mfrow=c(2,2), mar=c(5-1,4,4-1,2), col.main="darkred")
#' plot(freq, dB(fdp.sdf(freq, .4)), type="l", xlab="frequency",
#' ylab="spectrum (dB)", main=fdp.main)
#' plot(freq, dB(spp.sdf(freq, .4, 1/12)), type="l", xlab="frequency",
#' ylab="spectrum (dB)", font.main=1, main=spp.main)
#' plot(freq, dB(sfd.sdf(freq, 12, .4)), type="l", xlab="frequency",
#' ylab="spectrum (dB)", main=sfd.main)
fdp.sdf <- function(freq, d, sigma2 = 1)
sigma2 / ((2 * sin(pi * freq)) ^ 2) ^ d
bandpass.fdp <- function(a, b, d)
2 * integrate(fdp.sdf, lower = a, upper = b, d = d)$value
spp.sdf <- function(freq, d, fG, sigma2 = 1)
sigma2 * abs(2 * (cos(2 * pi * freq) - cos(2 * pi * fG))) ^ (-2 * d)
spp2.sdf <- function(freq, d1, f1, d2, f2, sigma2 = 1) {
sigma2 * abs(2 * (cos(2 * pi * freq) - cos(2 * pi * f1))) ^ (-2 * d1) *
abs(2 * (cos(2 * pi * freq) - cos(2 * pi * f2))) ^ (-2 * d2)
}
sfd.sdf <- function(freq, s, d, sigma2=1)
sigma2 / (2 * (1 - cos(s * 2 * pi * freq))) ^ d
bandpass.spp <- function(a, b, d, fG) {
if (fG > a && fG < b) {
result1 <- integrate(spp.sdf, lower=a, upper=fG, d=d, fG=fG)$value
result2 <- integrate(spp.sdf, lower=fG, upper=b, d=d, fG=fG)$value
}
else {
result1 <- integrate(spp.sdf, lower=a, upper=b, d=d, fG=fG)$value
result2 <- 0
}
return(2*(result1 + result2))
}
bandpass.spp2 <- function(a, b, d1, f1, d2, f2) {
a1 <- a
b1 <- b
if(a1 < f1 && b1 > f2) {
a2 <- f1
b2 <- f2
result1 <- integrate(spp2.sdf, a1, a2, d1=d1, f1=f1, d2=d2, f2=f2)$value
    result2 <- integrate(spp2.sdf, a2, b2, d1=d1, f1=f1, d2=d2, f2=f2)$value
result3 <- integrate(spp2.sdf, b2, b1, d1=d1, f1=f1, d2=d2, f2=f2)$value
}
else {
if (a1 < f1 && b1 < f2) {
a2 <- f1
result1 <- integrate(spp2.sdf, a1, a2, d1=d1, f1=f1, d2=d2, f2=f2)$value
result2 <- integrate(spp2.sdf, a2, b1, d1=d1, f1=f1, d2=d2, f2=f2)$value
result3 <- 0
}
else {
if (a1 < f1 && b1 > f1 && b1 < f2) {
a2 <- f1
result1 <- integrate(spp2.sdf, a1, a2, d1=d1, f1=f1, d2=d2, f2=f2)$value
result2 <- integrate(spp2.sdf, a2, b1, d1=d1, f1=f1, d2=d2, f2=f2)$value
result3 <- 0
}
else {
if (a1 > f1 && a1 < f2 && b1 > f2) {
a2 <- f2
result1 <- integrate(spp2.sdf, a1, a2, d1=d1, f1=f1, d2=d2, f2=f2)$value
result2 <- integrate(spp2.sdf, a2, b1, d1=d1, f1=f1, d2=d2, f2=f2)$value
result3 <- 0
}
else {
result1 <- integrate(spp2.sdf, a1, b1, d1=d1, f1=f1, d2=d2, f2=f2)$value
result2 <- 0
result3 <- 0
}
}
}
}
return(2 * (result1 + result2 + result3))
}
#' Variance of a Seasonal Persistent Process
#'
#' Computes the variance of a seasonal persistent (SP) process using a
#' hypergeometric series expansion.
#'
#' See Lapsa (1997). The subroutine to compute a hypergeometric series was
#' taken from \emph{Numerical Recipes in C}.
#'
#' @usage spp.var(d, fG, sigma2 = 1)
#' @usage Hypergeometric(a, b, c, z)
#' @aliases spp.var Hypergeometric
#' @param d Fractional difference parameter.
#' @param fG Gegenbauer frequency.
#' @param sigma2 Innovations variance.
#' @param a,b,c,z Parameters for the hypergeometric series.
#' @return The variance of an SP process.
#' @author B. Whitcher
#' @references Lapsa, P.M. (1997) Determination of Gegenbauer-type random
#' process models. \emph{Signal Processing} \bold{63}, 73-90.
#'
#' Press, W.H., S.A. Teukolsky, W.T. Vetterling and B.P. Flannery (1992)
#' \emph{Numerical Recipes in C}, 2nd edition, Cambridge University Press.
#' @keywords ts
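#' @examples
#' # Sketch only (not run): variance of a seasonal persistent process with
#' # fractional difference parameter 0.4 and Gegenbauer frequency 1/12.
#' \dontrun{
#' spp.var(d = 0.4, fG = 1/12)
#' }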
#' @export spp.var
spp.var <- function(d, fG, sigma2 = 1) {
## Hypergeometric series representation of the variance taken from
## Lapsa (1997)
omega <- 2 * pi * fG
A <- sigma2 / 2 / sqrt(pi) * gamma(1 - 2 * d) / gamma(3 / 2 - 2 * d) * sin(omega) ^(1 - 4 * d)
P1 <- Hypergeometric(1 - 2 * d, 1 - 2 * d, 3 / 2 - 2 * d, sin(omega / 2) ^ 2)
P2 <- Hypergeometric(1 - 2 * d, 1 - 2 * d, 3 / 2 - 2 * d, cos(omega / 2) ^ 2)
return(A * (P1 + P2))
}
Hypergeometric <- function(a, b, c, z) {
## Recursive implementation taken from Numerical Recipes in C (6.12)
## Press, Teukolsky, Vetterling and Flannery (1992)
fac <- 1
temp <- fac
aa <- a
bb <- b
cc <- c
for (n in 1:1000) {
fac <- fac * (aa * bb) / cc
fac <- fac * z / n
series <- temp + fac
if (series == temp)
return(series)
temp <- series
aa <- aa + 1
bb <- bb + 1
cc <- cc + 1
}
stop("convergence failure in Hypergeometric")
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/sdf.R
|
#' Circularly Shift Matrices from a 2D MODWT
#'
#' Compute phase shifts for wavelet sub-matrices based on the ``center of
#' energy'' argument of Hess-Nielsen and Wickerhauser (1996).
#'
#' The "center of energy" technique of Wickerhauser and Hess-Nielsen (1996) is
#' employed to find circular shifts for the wavelet sub-matrices such that the
#' coefficients are aligned with the original series. This corresponds to
#' applying a (near) linear-phase filtering operation.
#'
#' @param z Two-dimensional MODWT object
#' @param inverse Boolean value on whether to perform the forward or inverse
#' operation.
#' @return Two-dimensional MODWT object with circularly shifted coefficients.
#' @author B. Whitcher
#' @seealso \code{\link{phase.shift}}, \code{\link{modwt.2d}}.
#' @references Hess-Nielsen, N. and M. V. Wickerhauser (1996) Wavelets and
#' time-frequency analysis, \emph{Proceedings of the IEEE}, \bold{84}, No. 4,
#' 523-540.
#'
#' Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
#' Series Analysis}, Cambridge University Press.
#' @keywords ts
#' @examples
#'
#' n <- 512
#' G1 <- G2 <- dnorm(seq(-n/4, n/4, length=n))
#' G <- 100 * zapsmall(outer(G1, G2))
#' G <- modwt.2d(G, wf="la8", J=6)
#' k <- 50
#' xr <- yr <- trunc(n/2) + (-k:k)
#' par(mfrow=c(3,3), mar=c(1,1,2,1), pty="s")
#' for (j in names(G)[1:9]) {
#' image(G[[j]][xr,yr], col=rainbow(64), axes=FALSE, main=j)
#' }
#' Gs <- shift.2d(G)
#' for (j in names(G)[1:9]) {
#' image(Gs[[j]][xr,yr], col=rainbow(64), axes=FALSE, main=j)
#' }
#'
#' @export shift.2d
shift.2d <- function(z, inverse=FALSE) {
## "Center of Energy"
coe <- function(g) {
sum(0:(length(g)-1) * g^2) / sum(g^2)
}
wf <- attributes(z)$wavelet
h <- wave.filter(wf)$hpf
g <- wave.filter(wf)$lpf
J <- (length(z) - 1) / 3
m <- nrow(z[[1]])
n <- ncol(z[[1]])
nu.H <- round(2^(1:J-1) * (coe(g) + coe(h)) - coe(g), 0)
nu.Hm <- ifelse(nu.H/m < 1, nu.H, nu.H - trunc(nu.H/m) * m)
nu.Hn <- ifelse(nu.H/n < 1, nu.H, nu.H - trunc(nu.H/n) * n)
nu.G <- round((2^(1:J) - 1) * coe(g), 0)
nu.Gm <- ifelse(nu.G/m < 1, nu.G, nu.G - trunc(nu.G/m) * m)
nu.Gn <- ifelse(nu.G/n < 1, nu.G, nu.G - trunc(nu.G/n) * n)
if (!inverse) {
## Apply the phase shifts
for (j in 0:(J-1)) {
Hm.order <- c((nu.H[j+1]+1):m, 1:nu.H[j+1])
Hn.order <- c((nu.H[j+1]+1):n, 1:nu.H[j+1])
Gm.order <- c((nu.G[j+1]+1):m, 1:nu.G[j+1])
Gn.order <- c((nu.G[j+1]+1):n, 1:nu.G[j+1])
z[[3*j+1]] <- z[[3*j+1]][Gm.order, Hn.order]
z[[3*j+2]] <- z[[3*j+2]][Hm.order, Gn.order]
z[[3*j+3]] <- z[[3*j+3]][Hm.order, Hn.order]
}
z[[3*J+1]] <- z[[3*J+1]][Gm.order, Gn.order]
} else {
## Apply the phase shifts "reversed"
for (j in 0:(J-1)) {
Hm.order <- c((m-nu.H[j+1]+1):m, 1:(m-nu.H[j+1]))
Hn.order <- c((n-nu.H[j+1]+1):n, 1:(n-nu.H[j+1]))
Gm.order <- c((m-nu.G[j+1]+1):m, 1:(m-nu.G[j+1]))
Gn.order <- c((n-nu.G[j+1]+1):n, 1:(n-nu.G[j+1]))
z[[3*j+1]] <- z[[3*j+1]][Gm.order, Hn.order]
z[[3*j+2]] <- z[[3*j+2]][Hm.order, Gn.order]
z[[3*j+3]] <- z[[3*j+3]][Hm.order, Hn.order]
}
z[[3*J+1]] <- z[[3*J+1]][Gm.order, Gn.order]
}
return(z)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/shift.2d.R
|
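## Soft-thresholding helper: for |x| > T the factor y/(y + T) equals
## (|x| - T)/|x|, so the result is sign(x) * (|x| - T); otherwise it is zero.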
soft <- function(x, T) {
y <- max(abs(x) - T, 0)
return(y/(y+T) * x)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/soft.R
|
#' Wavelet-based Maximum Likelihood Estimation for Seasonal Persistent
#' Processes
#'
#' Parameter estimation for a seasonal persistent (seasonal long-memory)
#' process is performed via maximum likelihood on the wavelet coefficients.
#'
#' The variance-covariance matrix of the original time series is approximated
#' by its wavelet-based equivalent. A Whittle-type likelihood is then
#' constructed where the sums of squared wavelet coefficients are compared to
#' bandpass filtered version of the true spectral density function.
#' Minimization occurs for the fractional difference parameter \eqn{d} and the
#' Gegenbauer frequency \eqn{f_G}, while the innovations variance is
#' subsequently estimated.
#'
#' @usage spp.mle(y, wf, J = log(length(y), 2) - 1, p = 0.01, frac = 1)
#' @usage spp2.mle(y, wf, J = log(length(y), 2) - 1, p = 0.01, dyadic = TRUE, frac = 1)
#' @aliases spp.mle spp2.mle
#' @param y Not necessarily dyadic length time series.
#' @param wf Name of the wavelet filter to use in the decomposition. See
#' \code{\link{wave.filter}} for those wavelet filters available.
#' @param J Depth of the discrete wavelet packet transform.
#' @param p Level of significance for the white noise testing procedure.
#' @param dyadic Logical parameter indicating whether or not the original time
#' series is dyadic in length.
#' @param frac Fraction of the time series that should be used in constructing
#' the likelihood function.
#' @return List containing the maximum likelihood estimates (MLEs) of
#' \eqn{\delta}, \eqn{f_G} and \eqn{\sigma^2}, along with the value of the
#' likelihood for those estimates.
#' @author B. Whitcher
#' @seealso \code{\link{fdp.mle}}
#' @references Whitcher, B. (2004) Wavelet-based estimation for seasonal
#' long-memory processes, \emph{Technometrics}, \bold{46}, No. 2, 225-238.
#' @keywords ts
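#' @examples
#' # Sketch only (not run): fit a seasonal persistent process to a series
#' # simulated from a known autocovariance sequence via hosking.sim().
#' \dontrun{
#' data(acvs.andel8)
#' z <- hosking.sim(1024, acvs.andel8[, 2])
#' z.mle <- spp.mle(z, wf = "la8", J = 9)
#' z.mle$par
#' }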
#' @export spp.mle
spp.mle <- function(y, wf, J=log(length(y),2)-1, p=0.01, frac=1)
{
sppLL <- function(x, y) {
delta <- x[1]
fG <- x[2]
## cat("Parameters are: d =", delta, ", and f =", fG, fill=TRUE)
y.dwpt <- y[[1]]
y.basis <- y[[2]]
n <- y[[3]]
J <- y[[4]]
## Establish the limits of integration for the band-pass variances
a <- unlist(apply(matrix(2^(1:J)-1), 1, seq, from=0, by=1)) /
2^(rep(1:J, 2^(1:J))) / 2
b <- unlist(apply(matrix(2^(1:J)), 1, seq, from=1, by=1)) /
2^(rep(1:J, 2^(1:J))) / 2
## Define some useful parameters for the wavelet packet tree
# n <- length(y)
length.jn <- n / rep(2^(1:J), 2^(1:J))
scale.jn <- rep(2^(1:J+1), 2^(1:J))
## Initialize various parameters for the reduced LL
Basis <- (1:length(y.basis))[y.basis]
bp.var <- numeric(length(Basis))
delta.n <- 100
## Compute the band-pass variances according to \delta and f_G
omega.diag <- NULL
for(i in 1:sum(y.basis)) {
jn <- Basis[i]
bp.var[i] <- bandpass.spp(a[jn], b[jn], delta, fG)
omega.diag <- c(omega.diag,
scale.jn[jn] * rep(bp.var[i], length.jn[jn]))
}
## Compute reduced log-likelihood
rLL <- n * log(1/n * sum(y.dwpt^2 / omega.diag, na.rm=TRUE)) +
sum(length.jn[y.basis] * log(scale.jn[y.basis] * bp.var))
rLL
}
n <- length(y)
x0 <- numeric(2)
## Perform discrete wavelet packet transform (DWPT) on Y
y.dwpt <- dwpt(y, wf, n.levels=J)
n <- length(y)
if(frac < 1) {
for(i in 1:length(y.dwpt)) {
vec <- y.dwpt[[i]]
ni <- length(vec)
j <- rep(1:J, 2^(1:J))[i]
vec[trunc(frac * n/2^j):ni] <- NA
y.dwpt[[i]] <- vec
}
}
y.basis <- as.logical(ortho.basis(portmanteau.test(y.dwpt, p)))
y.dwpt <- as.matrix(unlist(y.dwpt[y.basis]))
## Compute initial estimate of the Gegenbauer frequency
y.per <- per(y - mean(y))
x0[2] <- (0:(n/2)/n)[max(y.per) == y.per]
## Compute initial estimate of the fractional difference parameter
muJ <- (unlist(apply(matrix(2^(1:J)-1), 1, seq, from=0, by=1)) /
2^(rep(1:J, 2^(1:J))) +
unlist(apply(matrix(2^(1:J)), 1, seq, from=1, by=1)) /
2^(rep(1:J, 2^(1:J)))) / 4
y.modwpt <- modwpt(y, wf=wf, n.levels=J)
y.varJ <- rep(2^(1:J), 2^(1:J)) *
unlist(lapply(y.modwpt,
FUN=function(x)sum(x*x,na.rm=TRUE)/length(x[!is.na(x)])))
x0[1] <- min(-0.5 * lsfit(log(abs(muJ[y.basis] - x0[2])),
log(y.varJ[y.basis]))$coef[2], 0.49)
cat(paste("Initial parameters are: delta =", round(x0[1],4),
"freqG =", round(x0[2],4), "\n"))
result <- optim(par=x0, fn=sppLL, method="L-BFGS-B",
lower=c(0.001,0.001), upper=c(0.499,0.499),
control=list(trace=0, fnscale=2),
y=list(y.dwpt, y.basis, n, J))
return(result)
}
spp2.mle <- function(y, wf, J=log(length(y),2)-1, p=0.01,
dyadic=TRUE, frac=1)
{
spp2LL <- function(x, y) {
d1 <- x[1]
f1 <- x[2]
d2 <- x[3]
f2 <- x[4]
## cat("Parameters are: d1 =", round(d1,6), ", and f1 =", round(f1,6),
## ", d2 =", round(d2,6), ", and f2 =", round(f2,6), fill=TRUE)
y.dwpt <- y[[1]]
y.basis <- y[[2]]
n <- y[[3]]
J <- y[[4]]
## Establish the limits of integration for the band-pass variances
a <- unlist(apply(matrix(2^(1:J)-1), 1, seq, from=0, by=1)) /
2^(rep(1:J, 2^(1:J))) / 2
b <- unlist(apply(matrix(2^(1:J)), 1, seq, from=1, by=1)) /
2^(rep(1:J, 2^(1:J))) / 2
## Define some useful parameters for the wavelet packet tree
length.jn <- n / rep(2^(1:J), 2^(1:J))
scale.jn <- rep(2^(1:J+1), 2^(1:J))
## Initialize various parameters for the reduced LL
Basis <- (1:length(y.basis))[y.basis]
bp.var <- numeric(length(Basis))
delta.n <- 100
## Compute the band-pass variances according to \delta and f_G
omega.diag <- NULL
for(i in 1:sum(y.basis)) {
jn <- Basis[i]
bp.var[i] <- bandpass.spp2(a[jn], b[jn], d1, f1, d2, f2)
omega.diag <- c(omega.diag,
scale.jn[jn] * rep(bp.var[i], length.jn[jn]))
}
## Compute reduced log-likelihood
n * log(1/n * sum(y.dwpt^2 / omega.diag, na.rm=TRUE)) +
sum(length.jn[y.basis] * log(scale.jn[y.basis] * bp.var), na.rm=TRUE)
}
n <- length(y)
x0 <- numeric(4)
## Perform discrete wavelet packet transform (DWPT) on Y
y.dwpt <- dwpt(y, wf, n.levels=J)
if(!dyadic) {
for(i in 1:length(y.dwpt)) {
vec <- y.dwpt[[i]]
ni <- length(vec)
j <- rep(1:J, 2^(1:J))[i]
vec[trunc(frac * n/2^j):ni] <- NA
y.dwpt[[i]] <- vec
}
}
y.basis <- as.logical(ortho.basis(portmanteau.test(y.dwpt, p, type="other")))
y.dwpt <- as.vector(unlist(y.dwpt[y.basis]))
## Compute initial estimate of the Gegenbauer frequencies
if(dyadic)
y.per <- per(y - mean(y))
else
y.per <- per(y[1:(frac*n)] - mean(y[1:(frac*n)]))
freq.y <- (0:(frac*n %/% 2))/(frac*n)
x0[2] <- freq.y[max(y.per) == y.per]
x0[4] <- freq.y[max(y.per[freq.y > x0[2] + freq.y[10] |
freq.y < x0[2] - freq.y[10]]) == y.per]
if(x0[2] > x0[4]) {
xx <- x0[2]
x0[2] <- x0[4]
x0[4] <- xx
rm(xx)
}
## Compute initial estimate of the fractional difference parameters
muJ <- (unlist(apply(matrix(2^(1:J)-1), 1, seq, from=0, by=1)) /
2^(rep(1:J, 2^(1:J))) +
unlist(apply(matrix(2^(1:J)), 1, seq, from=1, by=1)) /
2^(rep(1:J, 2^(1:J)))) / 4
y.modwpt <- modwpt(y, wf=wf, n.levels=J)
y.varJ <- rep(2^(1:J), 2^(1:J)) *
unlist(lapply(y.modwpt,
FUN = function(x) sum(x*x,na.rm=TRUE)/length(x[!is.na(x)])))
x0.mid <- (x0[2] + x0[4]) / 2
muJ <- muJ[y.basis]
y.varJ <- y.varJ[y.basis]
x0[1] <- min(-0.5 * lsfit(log(abs(muJ[muJ < x0.mid] - x0[2])),
log(y.varJ[muJ < x0.mid]))$coef[2], 0.49)
x0[3] <- min(-0.5 * lsfit(log(abs(muJ[muJ > x0.mid] - x0[4])),
log(y.varJ[muJ > x0.mid]))$coef[2], 0.49)
cat(paste("Initial parameters: d1 = ", round(x0[1],4),
", f1 = ", round(x0[2],4), ", d2 = ", round(x0[3],4),
", f2 = ", round(x0[4],4), sep=""), fill=TRUE)
result <- optim(par=x0, fn=spp2LL, method="L-BFGS-B",
lower=rep(0.001,4), upper=rep(0.499,4),
control=list(trace=1, fnscale=2),
y=list(y.dwpt, y.basis, n, J))
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/spp.R
|
#' Stack Plot
#'
#' Stack plot of an object. This function attempts to mimic a function called
#' \code{stack.plot} in S+WAVELETS.
#'
#' Produces a set of plots, one for each element (column) of \code{x}.
#'
#' @param x \code{ts} object
#' @param layout Doublet defining the dimension of the panel. If not
#' specified, the dimensions are chosen automatically.
#' @param same.scale Vector the same length as the number of series to be
#' plotted. If not specified, all panels will have unique axes.
#' @param plot.type,panel,log,col,bg,pch,cex,lty,lwd,ann,xlab,main,oma,... See
#' \code{plot.ts}.
#' @author B. Whitcher
#' @keywords hplot
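#' @examples
#' # Illustrative sketch: stacked panels for a bivariate time series.
#' x <- ts(cbind(cumsum(rnorm(128)), rnorm(128)))
#' colnames(x) <- c("random walk", "white noise")
#' stackPlot(x)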
#' @export stackPlot
stackPlot <-
function (x, plot.type = c("multiple", "single"), panel = lines,
log = "", col = par("col"), bg = NA, pch = par("pch"), cex = par("cex"),
lty = par("lty"), lwd = par("lwd"), ann = par("ann"), xlab = "Time",
main = NULL, oma = c(6, 0, 5, 0), layout = NULL,
same.scale = 1:dim(x)[2], ...)
{
addmain <- function(main, cex.main = par("cex.main"),
font.main = par("font.main"),
col.main = par("col.main"), ...) {
mtext(main, 3, 3, cex = cex.main, font = font.main, col = col.main,
...)
}
plot.type <- match.arg(plot.type)
panel <- match.fun(panel)
nser <- NCOL(x)
if (plot.type == "single" || nser == 1) {
m <- match.call()
m[[1]] <- as.name("plot.ts")
m$plot.type <- "single"
return(eval(m, parent.frame()))
}
if (nser > 10)
stop("Can't plot more than 10 series")
if (is.null(main))
main <- deparse(substitute(x))
nm <- colnames(x)
if (is.null(nm))
nm <- paste("Series", 1:nser)
nc <- if (nser > 4)
2
else 1
oldpar <- par("mar", "oma", "mfcol")
on.exit(par(oldpar))
par(mar = c(0, 5.1, 0, 2.1), oma = oma)
nr <- ceiling(nser/nc)
## Begin added code
if(!is.null(same.scale)) {
unique.scales <- length(unique(same.scale))
ylim <- vector("list", unique.scales)
for (i in 1:unique.scales)
ylim[[i]] <- range(x[, same.scale==i])
}
  else {
    same.scale <- 1:nser
    ylim <- vector("list", nser)
    for (i in 1:nser)
      ylim[[i]] <- range(x[, i])
  }
if(is.null(layout))
par(mfcol = c(nr, nc))
else {
par(mfcol = layout)
nr <- layout[1]
}
## End added code
for (i in 1:nser) {
plot(x[, i], axes = FALSE, xlab = "", ylab = "", log = log,
col = col, bg = bg, pch = pch, ann = ann, type = "n",
ylim=ylim[[same.scale[i]]], ...)
panel(x[, i], col = col, bg = bg, pch = pch, ...)
box()
axis(2, xpd = NA)
mtext(nm[i], 2, 3)
if (i%%nr == 0 || i == nser)
axis(1, xpd = NA)
}
if (ann) {
mtext(xlab, 1, 3, ...)
if (!is.null(main)) {
par(mfcol = c(1, 1))
addmain(main, ...)
}
}
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/stack.plot.R
|
#' Calculating Thomson's Spectral Multitapers by Inverse Iteration
#'
#' The following function links the subroutines in "bell-p-w.o" to an R
#' function in order to compute discrete prolate spheroidal sequences (dpss).
#'
#' Spectral estimation using a set of orthogonal tapers is becoming widely used
#' and appreciated in scientific research. It produces direct spectral
#' estimates with more than 2 df at each Fourier frequency, resulting in
#' spectral estimators with reduced variance. Computation of the orthogonal
#' tapers from the basic defining equation is difficult, however, due to the
#' instability of the calculations -- the eigenproblem is very poorly
#' conditioned. In this article the severe numerical instability problems are
#' illustrated and then a technique for stable calculation of the tapers --
#' namely, inverse iteration -- is described. Each iteration involves the
#' solution of a matrix equation. Because the matrix has Toeplitz form, the
#' Levinson recursions are used to rapidly solve the matrix equation. FORTRAN
#' code for this method is available through the Statlib archive. An
#' alternative stable method is also briefly reviewed.
#'
#' @param n length of data taper(s)
#' @param k number of data tapers; 1, 2, 3, ... (do not use 0!)
#' @param nw product of length and half-bandwidth parameter (w)
#' @param nmax maximum possible taper length, necessary for FORTRAN code
#' @return \item{v}{matrix of data tapers (cols = tapers)}
#' \item{eigen}{eigenvalue associated with each data taper} \item{iter}{total
#' number of iterations performed} \item{n}{same as input}
#' \item{w}{half-bandwidth parameter} \item{ifault}{0 indicates success, see
#' documentation for "bell-p-w" for information on non-zero values}
#' @author B. Whitcher
#' @seealso \code{\link{sine.taper}}.
#' @references B. Bell, D. B. Percival, and A. T. Walden (1993) Calculating
#' Thomson's spectral multitapers by inverse iteration, \emph{Journal of
#' Computational and Graphical Statistics}, \bold{2}, No. 1, 119-130.
#'
#' Percival, D. B. and A. T. Walden (1993) \emph{Spectral Estimation for
#' Physical Applications: Multitaper and Conventional Univariate Techniques},
#' Cambridge University Press.
#' @keywords ts
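#' @examples
#' # Sketch only (not run): the first four Slepian tapers of length 128.
#' \dontrun{
#' v <- dpss.taper(128, k = 4, nw = 4)
#' matplot(v, type = "l", lty = 1, ylab = "", main = "Slepian data tapers")
#' }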
#' @export dpss.taper
dpss.taper <- function(n, k, nw = 4, nmax = 2^(ceiling(log(n,2)))) {
if(n > nmax)
stop("length of taper is greater than nmax")
w <- nw/n
if(w > 0.5)
stop("half-bandwidth parameter (w) is greater than 1/2")
if(k <= 0)
stop("positive dpss order (k) required")
v <- matrix(0, nrow = nmax, ncol = (k + 1))
storage.mode(v) <- "double"
out <- .Fortran(C_dpss,
nmax = as.integer(nmax),
kmax = as.integer(k),
n = as.integer(n),
w = as.double(w),
v = v,
sig = double(k + 1),
totit = integer(1),
sines = double(n),
vold = double(n),
u = double(n),
scr1 = double(n),
ifault = integer(1))
##list(v = out$v[1:n, 1:k], eigen = out$sig[-1] + 1, iter =
  ##   out$totit, n = out$n, w = out$w, ifault = out$ifault)
return(out$v[1:n, 1:k])
}
#' Computing Sinusoidal Data Tapers
#'
#' Computes sinusoidal data tapers directly from equations.
#'
#' See reference.
#'
#' @param n length of data taper(s)
#' @param k number of data tapers
#' @return A vector or matrix of data tapers (cols = tapers).
#' @author B. Whitcher
#' @seealso \code{\link{dpss.taper}}.
#' @references Riedel, K. S. and A. Sidorenko (1995) Minimum bias multiple
#' taper spectral estimation, \emph{IEEE Transactions on Signal Processing},
#' \bold{43}, 188-195.
#' @keywords ts
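#' @examples
#' # Illustrative check: the sinusoidal tapers are (approximately) orthonormal.
#' tapers <- sine.taper(128, 4)
#' round(crossprod(tapers), 4)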
#' @export sine.taper
sine.taper <- function(n, k) {
tapers <- NULL
for(i in 1:k)
tapers <- cbind(tapers, sqrt(2/(n+1)) * sin((pi*i*1:n)/(n+1)))
return(tapers)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/tapers.R
|
###########################################################################
###########################################################################
###########################################################################
#' Three Dimensional Separable Discrete Wavelet Transform
#'
#' Three-dimensional separable discrete wavelet transform (DWT).
#'
#'
#' @usage dwt.3d(x, wf, J = 4, boundary = "periodic")
#' @usage idwt.3d(y)
#' @aliases dwt.3d idwt.3d
#' @param x input array
#' @param wf name of the wavelet filter to use in the decomposition
#' @param J depth of the decomposition, must be a number less than or equal to
#' log(min(X,Y,Z), 2)
#' @param boundary only \code{"periodic"} is currently implemented
#' @param y an object of class \code{dwt.3d}
#' @author B. Whitcher
#' @keywords ts
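#' @examples
#' # Sketch only (not run): perfect-reconstruction check on a small array.
#' \dontrun{
#' x <- array(rnorm(8 * 8 * 8), dim = c(8, 8, 8))
#' x.dwt <- dwt.3d(x, "haar", J = 2)
#' max(abs(x - idwt.3d(x.dwt)))
#' }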
#' @export dwt.3d
dwt.3d <- function(x, wf, J=4, boundary="periodic")
{
nx <- dim(x)[1]
storage.mode(nx) <- "integer"
ny <- dim(x)[2]
storage.mode(ny) <- "integer"
nz <- dim(x)[3]
storage.mode(nz) <- "integer"
dict <- wave.filter(wf)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf
storage.mode(h) <- "double"
g <- dict$lpf
storage.mode(g) <- "double"
z <- array(0, dim=c(nx,ny,nz)/2)
storage.mode(z) <- "double"
x.wt <- vector("list", 7*J+1)
x.names <- NULL
for(j in 1:J) {
out <- .C(C_three_D_dwt, "cube"=as.double(x), "NX"=nx, "NY"=ny,
"NZ"=nz, "filter.length"=L, "hpf"=h, "lpf"=g, "LLL"=z,
"HLL"=z, "LHL"=z, "LLH"=z, "HHL"=z, "HLH"=z, "LHH"=z,
"HHH"=z)[8:15]
if(j < J) {
index <- (7*(j-1)+1):(7*j)
x.wt[index] <- out[-1]
x.names <- c(x.names, sapply(names(out)[-1], paste, j, sep=""))
x <- out[[1]]
nx <- dim(x)[1]
storage.mode(nx) <- "integer"
ny <- dim(x)[2]
storage.mode(ny) <- "integer"
nz <- dim(x)[3]
storage.mode(nz) <- "integer"
z <- array(0, dim=c(nx,ny,nz)/2)
storage.mode(z) <- "double"
}
else {
index <- (7*(j-1)+1):(7*j+1)
x.wt[index] <- out[c(2:8,1)]
x.names <- c(x.names, sapply(names(out)[c(2:8,1)], paste, j, sep=""))
}
}
names(x.wt) <- x.names
class(x.wt) <- "dwt.3d"
attr(x.wt, "J") <- J
attr(x.wt, "wavelet") <- wf
attr(x.wt, "boundary") <- boundary
return(x.wt)
}
###########################################################################
###########################################################################
###########################################################################
idwt.3d <- function(y)
{
J <- attributes(y)$J
LLL <- paste("LLL", J, sep="")
wf <- attributes(y)$wavelet
dict <- wave.filter(wf)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf
storage.mode(h) <- "double"
g <- dict$lpf
storage.mode(g) <- "double"
y.in <- y$LLL
for(j in J:1) {
HLL <- paste("HLL", j, sep="")
LHL <- paste("LHL", j, sep="")
LLH <- paste("LLH", j, sep="")
HHL <- paste("HHL", j, sep="")
HLH <- paste("HLH", j, sep="")
LHH <- paste("LHH", j, sep="")
HHH <- paste("HHH", j, sep="")
nx <- dim(y.in)[1]
storage.mode(nx) <- "integer"
ny <- dim(y.in)[2]
storage.mode(ny) <- "integer"
nz <- dim(y.in)[3]
storage.mode(nz) <- "integer"
z <- array(0, dim=2*c(nx, ny, nz))
storage.mode(z) <- "double"
out <- .C(C_three_D_idwt, as.double(y.in), as.double(y[[HLL]]),
as.double(y[[LHL]]), as.double(y[[LLH]]),
as.double(y[[HHL]]), as.double(y[[HLH]]),
as.double(y[[LHH]]), as.double(y[[HHH]]),
nx, ny, nz, L, h, g, "Y"=z)
y.in <- out$Y
}
zapsmall(y.in)
}
#' Three Dimensional Separable Maximal Overlap Discrete Wavelet Transform
#'
#' Three-dimensional separable maximal overlap discrete wavelet transform
#' (MODWT).
#'
#'
#' @usage modwt.3d(x, wf, J = 4, boundary = "periodic")
#' @usage imodwt.3d(y)
#' @aliases modwt.3d imodwt.3d
#' @param x input array
#' @param wf name of the wavelet filter to use in the decomposition
#' @param J depth of the decomposition
#' @param boundary only \code{"periodic"} is currently implemented
#' @param y an object of class \code{modwt.3d}
#' @author B. Whitcher
#' @keywords ts
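#' @examples
#' ## Hedged round-trip sketch: the small random array and depth below are
#' ## illustrative only, not data supplied with the package
#' x <- array(rnorm(8 * 8 * 8), dim = c(8, 8, 8))
#' x.modwt <- modwt.3d(x, "haar", J = 2)
#' max(abs(x - imodwt.3d(x.modwt)))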
#' @export modwt.3d
#' @export imodwt.3d
modwt.3d <- function(x, wf, J=4, boundary="periodic")
{
nx <- dim(x)[1]
storage.mode(nx) <- "integer"
ny <- dim(x)[2]
storage.mode(ny) <- "integer"
nz <- dim(x)[3]
storage.mode(nz) <- "integer"
dict <- wave.filter(wf)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf / sqrt(2)
storage.mode(h) <- "double"
g <- dict$lpf / sqrt(2)
storage.mode(g) <- "double"
z <- array(0, dim=c(nx,ny,nz))
storage.mode(z) <- "double"
x.wt <- vector("list", 7*J+1)
x.names <- NULL
for(j in 1:J) {
out <- .C(C_three_D_modwt, "cube"=as.double(x), "NX"=nx, "NY"=ny,
"NZ"=nz, "J"=j, "filter.length"=L, "hpf"=h, "lpf"=g,
"LLL"=z, "HLL"=z, "LHL"=z, "LLH"=z, "HHL"=z, "HLH"=z,
"LHH"=z, "HHH"=z)[9:16]
if(j < J) {
index <- (7*(j-1)+1):(7*j)
x.wt[index] <- out[-1]
x.names <- c(x.names, sapply(names(out)[-1], paste, j, sep=""))
x <- out[[1]]
nx <- dim(x)[1]
storage.mode(nx) <- "integer"
ny <- dim(x)[2]
storage.mode(ny) <- "integer"
nz <- dim(x)[3]
storage.mode(nz) <- "integer"
z <- array(0, dim=c(nx,ny,nz))
storage.mode(z) <- "double"
}
else {
index <- (7*(j-1)+1):(7*j+1)
x.wt[index] <- out[c(2:8,1)]
x.names <- c(x.names, sapply(names(out)[c(2:8,1)], paste, j, sep=""))
}
}
names(x.wt) <- x.names
class(x.wt) <- "modwt.3d"
attr(x.wt, "J") <- J
attr(x.wt, "wavelet") <- wf
attr(x.wt, "boundary") <- boundary
return(x.wt)
}
###########################################################################
###########################################################################
###########################################################################
imodwt.3d <- function(y)
{
J <- attributes(y)$J
LLL <- paste("LLL", J, sep="")
wf <- attributes(y)$wavelet
dict <- wave.filter(wf)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf / sqrt(2)
storage.mode(h) <- "double"
g <- dict$lpf / sqrt(2)
storage.mode(g) <- "double"
y.in <- y$LLL
for(j in J:1) {
HLL <- paste("HLL", j, sep="")
LHL <- paste("LHL", j, sep="")
LLH <- paste("LLH", j, sep="")
HHL <- paste("HHL", j, sep="")
HLH <- paste("HLH", j, sep="")
LHH <- paste("LHH", j, sep="")
HHH <- paste("HHH", j, sep="")
nx <- dim(y.in)[1]
storage.mode(nx) <- "integer"
ny <- dim(y.in)[2]
storage.mode(ny) <- "integer"
nz <- dim(y.in)[3]
storage.mode(nz) <- "integer"
z <- array(0, dim=c(nx, ny, nz))
storage.mode(z) <- "double"
out <- .C(C_three_D_imodwt, as.double(y.in), as.double(y[[HLL]]),
as.double(y[[LHL]]), as.double(y[[LLH]]),
as.double(y[[HHL]]), as.double(y[[HLH]]),
as.double(y[[LHH]]), as.double(y[[HHH]]),
nx, ny, nz, j, L, h, g, "Y"=z)
y.in <- out$Y
}
zapsmall(y.in)
}
###########################################################################
###########################################################################
###########################################################################
#' Three Dimensional Multiresolution Analysis
#'
#' This function performs a level \eqn{J} additive decomposition of the input
#' array using the pyramid algorithm (Mallat 1989).
#'
#' This code implements a three-dimensional multiresolution analysis by
#' performing the one-dimensional pyramid algorithm (Mallat 1989) on each
#' dimension of the input array. Either the DWT or MODWT may be used to
#' compute the multiresolution analysis, which is an additive decomposition of
#' the original array.
#'
#' @param x A three-dimensional array containing the data to be decomposed.
#' This must have dyadic length in all three dimensions (though the dimensions
#' need not be equal) for \code{method="dwt"}.
#' @param wf Name of the wavelet filter to use in the decomposition. By
#' default this is set to \code{"la8"}, the Daubechies orthonormal compactly
#' supported wavelet of length \eqn{L=8} least asymmetric family.
#' @param J Specifies the depth of the decomposition. This must be a number
#' less than or equal to \eqn{\log(\min(\mbox{dim}(x)),2)}{log(min(dim(x)),2)}.
#' @param method Either \code{"dwt"} or \code{"modwt"}.
#' @param boundary Character string specifying the boundary condition. If
#' \code{boundary=="periodic"} the default and only method implemented, then
#' the matrix you decompose is assumed to be periodic on its defined interval.
#' @return List structure containing the filter triplets associated with the
#' multiresolution analysis.
#' @author B. Whitcher
#' @seealso \code{\link{dwt.3d}}, \code{\link{modwt.3d}}
#' @references Mallat, S. G. (1989) A theory for multiresolution signal
#' decomposition: the wavelet representation, \emph{IEEE Transactions on
#' Pattern Analysis and Machine Intelligence}, \bold{11}, No. 7, 674-693.
#'
#' Mallat, S. G. (1998) \emph{A Wavelet Tour of Signal Processing}, Academic
#' Press.
#' @keywords ts
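#' @examples
#' ## Hedged sketch: the MRA is additive, so its components should sum back
#' ## to the original array (random array and depth are illustrative only)
#' x <- array(rnorm(16 * 16 * 16), dim = c(16, 16, 16))
#' x.mra <- mra.3d(x, wf = "haar", J = 2, method = "modwt")
#' max(abs(x - Reduce("+", x.mra)))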
#' @export mra.3d
mra.3d <- function(x, wf="la8", J=4, method="modwt", boundary="periodic")
{
nx <- dim(x)[1]
ny <- dim(x)[2]
nz <- dim(x)[3]
if(method == "modwt") {
x.wt <- modwt.3d(x, wf, J, "periodic")
} else {
x.wt <- dwt.3d(x, wf, J, "periodic")
}
x.mra <- vector("list", 7*J+1)
names(x.mra) <-
c(matrix(rbind(paste("HLL", 1:J, sep=""), paste("LHL", 1:J, sep=""),
paste("LLH", 1:J, sep=""), paste("HHL", 1:J, sep=""),
paste("HLH", 1:J, sep=""), paste("LHH", 1:J, sep=""),
paste("HHH", 1:J, sep="")), nrow=1),
paste("LLL", J, sep=""))
## Smooth
zero <- vector("list", 7*J+1)
names(zero) <- names(x.mra)
attr(zero, "J") <- J
attr(zero, "wavelet") <- wf
attr(zero, "boundary") <- boundary
zero[[7*J+1]] <- x.wt[[7*J+1]]
if(method == "modwt") {
class(x.wt) <- "modwt.3d"
for(k in 1:(7*J))
zero[[k]] <- array(0, dim=c(nx,ny,nz))
x.mra[[7*J+1]] <- imodwt.3d(zero)
} else {
class(x.wt) <- "dwt.3d"
for(k in 1:J)
zero[[7*(k-1)+1]] <- zero[[7*(k-1)+2]] <- zero[[7*(k-1)+3]] <-
zero[[7*(k-1)+4]] <- zero[[7*(k-1)+5]] <- zero[[7*(k-1)+6]] <-
zero[[7*k]] <- array(0, dim=c(nx,ny,nz)/2^k)
x.mra[[7*J+1]] <- idwt.3d(zero)
}
## Details
for(j in (7*J):1) {
Jj <- ceiling(j/7)
zero <- vector("list", 7*Jj+1)
names(zero) <-
c(matrix(rbind(paste("HLL", 1:Jj, sep=""), paste("LHL", 1:Jj, sep=""),
paste("LLH", 1:Jj, sep=""), paste("HHL", 1:Jj, sep=""),
paste("HLH", 1:Jj, sep=""), paste("LHH", 1:Jj, sep=""),
paste("HHH", 1:Jj, sep="")), nrow=1),
paste("LLL", Jj, sep=""))
attr(zero, "J") <- Jj
attr(zero, "wavelet") <- wf
attr(zero, "boundary") <- boundary
zero[[j]] <- x.wt[[j]]
if(method == "modwt") {
for(k in names(zero)[-charmatch(names(zero)[j], names(zero))])
zero[[k]] <- array(0, dim=c(nx,ny,nz))
x.mra[[j]] <- imodwt.3d(zero)
} else {
for(k in 1:Jj)
zero[[7*(k-1)+1]] <- zero[[7*(k-1)+2]] <- zero[[7*(k-1)+3]] <-
zero[[7*(k-1)+4]] <- zero[[7*(k-1)+5]] <- zero[[7*(k-1)+6]] <-
zero[[7*k]] <- array(0, dim=c(nx,ny,nz)/2^k)
zero[[7*Jj+1]] <- array(0, dim=c(nx,ny,nz)/2^Jj)
zero[[j]] <- x.wt[[j]]
x.mra[[j]] <- idwt.3d(zero)
}
}
return(x.mra)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/three_D.R
|
#' Two-Dimensional Discrete Wavelet Transform
#'
#' Performs a separable two-dimensional discrete wavelet transform (DWT) on a
#' matrix of dyadic dimensions.
#'
#' See references.
#'
#' @usage dwt.2d(x, wf, J = 4, boundary = "periodic")
#' @usage idwt.2d(y)
#' @aliases dwt.2d idwt.2d
#' @param x input matrix (image)
#' @param wf name of the wavelet filter to use in the decomposition
#' @param J depth of the decomposition, must be a number less than or equal to
#' log(min(M, N), 2), where M and N are the dimensions of \code{x}
#' @param boundary only \code{"periodic"} is currently implemented
#' @param y an object of class \code{dwt.2d}
#' @return List structure containing the \eqn{3J+1} sub-matrices from the
#' decomposition.
#' @author B. Whitcher
#' @seealso \code{\link{modwt.2d}}.
#' @references Mallat, S. (1998) \emph{A Wavelet Tour of Signal Processing},
#' Academic Press.
#'
#' Vetterli, M. and J. Kovacevic (1995) \emph{Wavelets and Subband Coding},
#' Prentice Hall.
#' @keywords ts
#' @examples
#'
#' ## Xbox image
#' data(xbox)
#' xbox.dwt <- dwt.2d(xbox, "haar", 3)
#' par(mfrow=c(1,1), pty="s")
#' plot.dwt.2d(xbox.dwt)
#' par(mfrow=c(2,2), pty="s")
#' image(1:dim(xbox)[1], 1:dim(xbox)[2], xbox, xlab="", ylab="",
#' main="Original Image")
#' image(1:dim(xbox)[1], 1:dim(xbox)[2], idwt.2d(xbox.dwt), xlab="", ylab="",
#' main="Wavelet Reconstruction")
#' image(1:dim(xbox)[1], 1:dim(xbox)[2], xbox - idwt.2d(xbox.dwt),
#' xlab="", ylab="", main="Difference")
#'
#' ## Daubechies image
#' data(dau)
#' par(mfrow=c(1,1), pty="s")
#' image(dau, col=rainbow(128))
#' sum(dau^2)
#' dau.dwt <- dwt.2d(dau, "d4", 3)
#' plot.dwt.2d(dau.dwt)
#' sum(plot.dwt.2d(dau.dwt, plot=FALSE)^2)
#'
#' @export dwt.2d
dwt.2d <- function(x, wf, J=4, boundary="periodic")
{
m <- dim(x)[1]
storage.mode(m) <- "integer"
n <- dim(x)[2]
storage.mode(n) <- "integer"
dict <- wave.filter(wf)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf
storage.mode(h) <- "double"
g <- dict$lpf
storage.mode(g) <- "double"
z <- matrix(0, m/2, n/2)
storage.mode(z) <- "double"
x.wt <- vector("list", 3*J+1)
x.names <- NULL
for(j in 1:J) {
out <- .C(C_two_D_dwt, "Image"=as.double(x), "Rows"=m, "Cols"=n,
"filter.length"=L, "hpf"=h, "lpf"=g, "LL"=z, "LH"=z,
"HL"=z, "HH"=z)[7:10]
if(j < J) {
index <- (3*j-2):(3*j)
x.wt[index] <- out[-1]
x.names <- c(x.names, sapply(names(out)[-1], paste, j, sep=""))
x <- out[[1]]
m <- dim(x)[1]
storage.mode(m) <- "integer"
n <- dim(x)[2]
storage.mode(n) <- "integer"
z <- matrix(0, m/2, n/2)
storage.mode(z) <- "double"
}
else {
index <- (3*j):(3*(j+1)) - 2
x.wt[index] <- out[c(2:4,1)]
x.names <- c(x.names, sapply(names(out)[c(2:4,1)], paste, j, sep=""))
}
}
names(x.wt) <- x.names
attr(x.wt, "J") <- J
attr(x.wt, "wavelet") <- wf
attr(x.wt, "boundary") <- boundary
attr(x.wt, "class") <- "dwt.2d"
x.wt
}
idwt.2d <- function(y)
{
J <- attributes(y)$J
dict <- wave.filter(attributes(y)$wavelet)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf
storage.mode(h) <- "double"
g <- dict$lpf
storage.mode(g) <- "double"
LL <- paste("LL", J, sep="")
y.in <- y[[LL]]
for(j in J:1) {
LH <- paste("LH", j, sep="")
HL <- paste("HL", j, sep="")
HH <- paste("HH", j, sep="")
m <- dim(y.in)[1]
storage.mode(m) <- "integer"
n <- dim(y.in)[2]
storage.mode(n) <- "integer"
x <- matrix(0, 2*m, 2*n)
storage.mode(x) <- "double"
out <- .C(C_two_D_idwt, as.double(y.in), as.double(y[[LH]]),
as.double(y[[HL]]), as.double(y[[HH]]), m, n, L, h, g,
"Y"=x)
y.in <- out$Y
}
zapsmall(y.in)
}
#' Two-Dimensional Maximal Overlap Discrete Wavelet Transform
#'
#' Performs a separable two-dimensional maximal overlap discrete wavelet
#' transform (MODWT) on a matrix of arbitrary dimensions.
#'
#' See references.
#'
#' @usage modwt.2d(x, wf, J = 4, boundary = "periodic")
#' @usage imodwt.2d(y)
#' @aliases modwt.2d imodwt.2d
#' @param x input matrix
#' @param wf name of the wavelet filter to use in the decomposition
#' @param J depth of the decomposition
#' @param boundary only \code{"periodic"} is currently implemented
#' @param y an object of class \code{dwt.2d}
#' @return List structure containing the \eqn{3J+1} sub-matrices from the
#' decomposition.
#' @author B. Whitcher
#' @seealso \code{\link{dwt.2d}}, \code{\link{shift.2d}}.
#' @references Liang, J. and T. W. Parks (1994) A two-dimensional translation
#' invariant wavelet representation and its applications, \emph{Proceedings
#' ICIP-94}, Vol. 1, 66-70.
#'
#' Liang, J. and T. W. Parks (1994) Image coding using translation invariant
#' wavelet transforms with symmetric extensions, \emph{IEEE Transactions on
#' Image Processing}, \bold{7}, No. 5, 762-769.
#' @keywords ts
#' @examples
#'
#' ## Xbox image
#' data(xbox)
#' xbox.modwt <- modwt.2d(xbox, "haar", 2)
#' ## Level 1 decomposition
#' par(mfrow=c(2,2), pty="s")
#' image(xbox.modwt$LH1, col=rainbow(128), axes=FALSE, main="LH1")
#' image(xbox.modwt$HH1, col=rainbow(128), axes=FALSE, main="HH1")
#' frame()
#' image(xbox.modwt$HL1, col=rainbow(128), axes=FALSE, main="HL1")
#' ## Level 2 decomposition
#' par(mfrow=c(2,2), pty="s")
#' image(xbox.modwt$LH2, col=rainbow(128), axes=FALSE, main="LH2")
#' image(xbox.modwt$HH2, col=rainbow(128), axes=FALSE, main="HH2")
#' image(xbox.modwt$LL2, col=rainbow(128), axes=FALSE, main="LL2")
#' image(xbox.modwt$HL2, col=rainbow(128), axes=FALSE, main="HL2")
#' sum((xbox - imodwt.2d(xbox.modwt))^2)
#'
#' data(dau)
#' par(mfrow=c(1,1), pty="s")
#' image(dau, col=rainbow(128), axes=FALSE, main="Ingrid Daubechies")
#' sum(dau^2)
#' dau.modwt <- modwt.2d(dau, "d4", 2)
#' ## Level 1 decomposition
#' par(mfrow=c(2,2), pty="s")
#' image(dau.modwt$LH1, col=rainbow(128), axes=FALSE, main="LH1")
#' image(dau.modwt$HH1, col=rainbow(128), axes=FALSE, main="HH1")
#' frame()
#' image(dau.modwt$HL1, col=rainbow(128), axes=FALSE, main="HL1")
#' ## Level 2 decomposition
#' par(mfrow=c(2,2), pty="s")
#' image(dau.modwt$LH2, col=rainbow(128), axes=FALSE, main="LH2")
#' image(dau.modwt$HH2, col=rainbow(128), axes=FALSE, main="HH2")
#' image(dau.modwt$LL2, col=rainbow(128), axes=FALSE, main="LL2")
#' image(dau.modwt$HL2, col=rainbow(128), axes=FALSE, main="HL2")
#' sum((dau - imodwt.2d(dau.modwt))^2)
#'
#' @export modwt.2d
modwt.2d <- function(x, wf, J=4, boundary="periodic")
{
m <- dim(x)[1]
storage.mode(m) <- "integer"
n <- dim(x)[2]
storage.mode(n) <- "integer"
dict <- wave.filter(wf)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf / sqrt(2)
storage.mode(h) <- "double"
g <- dict$lpf / sqrt(2)
storage.mode(g) <- "double"
z <- matrix(0, m, n)
storage.mode(z) <- "double"
x.wt <- vector("list", 3*J+1)
x.names <- NULL
for(j in 1:J) {
out <- .C("two_D_modwt", "Image"=as.double(x), "Rows"=m, "Cols"=n,
"Level"=j, "filter.length"=L, "hpf"=h, "lpf"=g, "LL"=z,
"LH"=z, "HL"=z, "HH"=z, PACKAGE="waveslim")[8:11]
if(j < J) {
index <- (3*j-2):(3*j)
x.wt[index] <- out[-1]
x.names <- c(x.names, sapply(names(out)[-1], paste, j, sep=""))
x <- out$LL
}
else {
index <- (3*j):(3*(j+1)) - 2
x.wt[index] <- out[c(2:4,1)]
x.names <- c(x.names, sapply(names(out)[c(2:4,1)], paste, j, sep=""))
}
}
names(x.wt) <- x.names
attr(x.wt, "J") <- J
attr(x.wt, "wavelet") <- wf
attr(x.wt, "boundary") <- boundary
x.wt
}
imodwt.2d <- function(y)
{
J <- attributes(y)$J
dict <- wave.filter(attributes(y)$wavelet)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf / sqrt(2)
storage.mode(h) <- "double"
g <- dict$lpf / sqrt(2)
storage.mode(g) <- "double"
LL <- paste("LL", J, sep="")
y.in <- y[[LL]]
for(j in J:1) {
LH <- paste("LH", j, sep="")
HL <- paste("HL", j, sep="")
HH <- paste("HH", j, sep="")
m <- dim(y.in)[1]
storage.mode(m) <- "integer"
n <- dim(y.in)[2]
storage.mode(n) <- "integer"
x <- matrix(0, m, n)
storage.mode(x) <- "double"
out <- .C(C_two_D_imodwt, as.double(y.in), as.double(y[[LH]]),
as.double(y[[HL]]), as.double(y[[HH]]), m, n, j, L,
h, g, "Y"=x)
y.in <- out$Y
}
zapsmall(y.in)
}
#' Plot Two-dimensional Discrete Wavelet Transform
#'
#' Organizes the wavelet coefficients from a 2D DWT into a single matrix and
#' plots it. The coarser resolutions are nested within the lower-lefthand
#' corner of the image.
#'
#' The wavelet coefficients from the DWT object (a list) are reorganized into a
#' single matrix of the same dimension as the original image and the result is
#' plotted.
#'
#' @param x input matrix (image)
#' @param cex.axis \code{par} plotting parameter that controls the size of the
#' axis text
#' @param plot if \code{plot = FALSE} then the matrix of wavelet coefficients
#' is returned, the default is \code{plot = TRUE}
#' @param ... additional graphical parameters if necessary
#' @return Image plot.
#' @author B. Whitcher
#' @seealso \code{\link{dwt.2d}}.
#' @keywords ts
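#' @examples
#' ## Sketch using the xbox image supplied with the package, following the
#' ## example given for dwt.2d
#' data(xbox)
#' xbox.dwt <- dwt.2d(xbox, "haar", 3)
#' par(mfrow = c(1, 1), pty = "s")
#' plot.dwt.2d(xbox.dwt)
#' ## plot = FALSE returns the re-organized coefficient matrix instead
#' W <- plot.dwt.2d(xbox.dwt, plot = FALSE)
#' sum(W^2)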
#' @export plot.dwt.2d
plot.dwt.2d <- function(x, cex.axis=1, plot=TRUE, ...)
{
J <- attributes(x)$J
X <- x[[paste("LL", J, sep="")]]
for(j in J:1) {
x.names <- sapply(c("LH","HL","HH"), paste, j, sep="")
X <- rbind(cbind(X, x[[x.names[2]]]),
cbind(x[[x.names[1]]], x[[x.names[3]]]))
}
M <- dim(X)[1]; N <- dim(X)[2]
if(plot) {
image(1:M, 1:N, X, col=rainbow(128), axes=FALSE, xlab="", ylab="", ...)
x.label <- NULL
lines(c(0,N,N,0,0) + 0.5, c(0,0,M,M,0) + 0.5)
for(j in J:1) {
lines(c(M/2^j,M/2^j) + 0.5, 2*c(0,N/2^j) + 0.5)
lines(2*c(0,M/2^j) + 0.5, c(N/2^j,N/2^j) + 0.5)
}
at <- c((3*N+2)/2^(1:J+1),(N+2)/2^(J+1))
labs <- c(paste("H",1:J,sep=""), paste("L",J,sep=""))
axis(side=1, at=at, labels=labs, tick=FALSE, cex.axis=cex.axis)
axis(side=2, at=at, labels=labs, tick=FALSE, cex.axis=cex.axis)
}
else
return(X)
invisible()
}
#' Denoise an Image via the 2D Discrete Wavelet Transform
#'
#' Perform simple de-noising of an image using the two-dimensional discrete
#' wavelet transform.
#'
#' See \code{\link{Thresholding}}.
#'
#' @aliases denoise.dwt.2d denoise.modwt.2d
#' @param x input matrix (image)
#' @param wf name of the wavelet filter to use in the decomposition
#' @param J depth of the decomposition, must be a number less than or equal to
#' log(min(M, N), 2), where M and N are the dimensions of \code{x}
#' @param method character string describing the threshold applied, only
#' \code{"universal"} and \code{"long-memory"} are currently implemented
#' @param H self-similarity or Hurst parameter to indicate spectral scaling,
#' white noise is 0.5
#' @param noise.dir number of directions to estimate background noise standard
#' deviation, the default is 3 which produces a unique estimate of the
#' background noise for each spatial direction
#' @param rule either a \code{"hard"} or \code{"soft"} thresholding rule may be
#' used
#' @return Image of the same dimension as the original but with high-frequency
#' fluctuations removed.
#' @author B. Whitcher
#' @seealso \code{\link{Thresholding}}
#' @references See \code{\link{Thresholding}} for references concerning
#' de-noising in one dimension.
#' @keywords ts
#' @examples
#'
#' ## Xbox image
#' data(xbox)
#' n <- nrow(xbox)
#' xbox.noise <- xbox + matrix(rnorm(n*n, sd=.15), n, n)
#' par(mfrow=c(2,2), cex=.8, pty="s")
#' image(xbox.noise, col=rainbow(128), main="Original Image")
#' image(denoise.dwt.2d(xbox.noise, wf="haar"), col=rainbow(128),
#' zlim=range(xbox.noise), main="Denoised image")
#' image(xbox.noise - denoise.dwt.2d(xbox.noise, wf="haar"), col=rainbow(128),
#' zlim=range(xbox.noise), main="Residual image")
#'
#' ## Daubechies image
#' data(dau)
#' n <- nrow(dau)
#' dau.noise <- dau + matrix(rnorm(n*n, sd=10), n, n)
#' par(mfrow=c(2,2), cex=.8, pty="s")
#' image(dau.noise, col=rainbow(128), main="Original Image")
#' dau.denoise <- denoise.modwt.2d(dau.noise, wf="d4", rule="soft")
#' image(dau.denoise, col=rainbow(128), zlim=range(dau.noise),
#' main="Denoised image")
#' image(dau.noise - dau.denoise, col=rainbow(128), main="Residual image")
#'
denoise.dwt.2d <- function(x, wf = "la8", J = 4, method = "universal",
H = 0.5, noise.dir = 3, rule = "hard")
{
soft <- function(x, delta) sign(x) * pmax(abs(x) - delta, 0)
hard <- function(x, delta) ifelse(abs(x) > delta, x, 0)
n <- length(x)
x.dwt <- dwt.2d(x, wf, J)
if(noise.dir == 3)
sigma.mad <- list(HH = mad(x.dwt$HH1), HL = mad(x.dwt$HL1),
LH = mad(x.dwt$LH1))
else {
noise <- x.dwt$jj
sigma.mad <- list(HH = mad(noise), HL = mad(noise), LH = mad(noise))
}
thresh <- list(HH = rep(sqrt(2 * sigma.mad$HH^2 * log(n)), J),
HL = rep(sqrt(2 * sigma.mad$HL^2 * log(n)), J),
LH = rep(sqrt(2 * sigma.mad$LH^2 * log(n)), J))
if(method == "long-memory")
thresh <- lapply(thresh, function(x,J,H) 2^(0:(J-1)*(H-1/2))*x, J=J, H=H)
for(j in 1:J) {
jj <- paste("HL", j, sep = "")
if(rule == "hard")
x.dwt[[jj]] <- hard(x.dwt[[jj]], thresh$HL[j])
else
x.dwt[[jj]] <- soft(x.dwt[[jj]], thresh$HL[j])
jj <- paste("LH", j, sep = "")
if(rule == "hard")
x.dwt[[jj]] <- hard(x.dwt[[jj]], thresh$LH[j])
else
x.dwt[[jj]] <- soft(x.dwt[[jj]], thresh$LH[j])
jj <- paste("HH", j, sep = "")
if(rule == "hard")
x.dwt[[jj]] <- hard(x.dwt[[jj]], thresh$HH[j])
else
x.dwt[[jj]] <- soft(x.dwt[[jj]], thresh$HH[j])
}
idwt.2d(x.dwt)
}
denoise.modwt.2d <- function(x, wf = "la8", J = 4, method = "universal",
H = 0.5, rule = "hard")
{
soft <- function(x, delta) sign(x) * pmax(abs(x) - delta, 0)
hard <- function(x, delta) ifelse(abs(x) > delta, x, 0)
n <- length(x)
x.modwt <- modwt.2d(x, wf, J)
sigma.mad <- list(HH = sqrt(2) * mad(x.modwt$HH1),
HL = sqrt(2) * mad(x.modwt$HL1),
LH = sqrt(2) * mad(x.modwt$LH1))
thresh <- list(HH = rep(sqrt(2 * sigma.mad$HH^2 * log(n))/2^(1:J), J),
HL = rep(sqrt(2 * sigma.mad$HL^2 * log(n))/2^(1:J), J),
LH = rep(sqrt(2 * sigma.mad$LH^2 * log(n))/2^(1:J), J))
if(method == "long-memory")
thresh <- lapply(thresh, function(x,J,H) 2^(0:(J-1)*(H-1/2))*x, J=J, H=H)
for(j in 1:J) {
jj <- paste("HL", j, sep = "")
if(rule == "hard")
x.modwt[[jj]] <- hard(x.modwt[[jj]], thresh$HL[j])
else
x.modwt[[jj]] <- soft(x.modwt[[jj]], thresh$HL[j])
jj <- paste("LH", j, sep = "")
if(rule == "hard")
x.modwt[[jj]] <- hard(x.modwt[[jj]], thresh$LH[j])
else
x.modwt[[jj]] <- soft(x.modwt[[jj]], thresh$LH[j])
jj <- paste("HH", j, sep = "")
if(rule == "hard")
x.modwt[[jj]] <- hard(x.modwt[[jj]], thresh$HH[j])
else
x.modwt[[jj]] <- soft(x.modwt[[jj]], thresh$HH[j])
}
imodwt.2d(x.modwt)
}
#' (Inverse) Discrete Wavelet Packet Transforms in Two Dimensions
#'
#' All possible filtering combinations (low- and high-pass) are performed to
#' decompose a matrix or image. The resulting coefficients are associated with
#' a quad-tree structure corresponding to a partitioning of the two-dimensional
#' frequency plane.
#'
#' The code implements the two-dimensional DWPT using the pyramid algorithm of
#' Mallat (1989).
#'
#' @usage dwpt.2d(x, wf = "la8", J = 4, boundary = "periodic")
#' @usage idwpt.2d(y, y.basis)
#' @aliases dwpt.2d idwpt.2d
#' @param x a matrix or image containing the data to be decomposed. This
#' object must have dyadic (power of 2) length in each dimension.
#' @param wf Name of the wavelet filter to use in the decomposition. By
#' default this is set to \code{"la8"}, the Daubechies orthonormal compactly
#' supported wavelet of length \eqn{L=8} (Daubechies, 1992), least asymmetric
#' family.
#' @param J Specifies the depth of the decomposition. This must be a number
#' less than or equal to \eqn{\log(\min(\mbox{dim}(x)),2)}.
#' @param boundary Character string specifying the boundary condition. If
#' \code{boundary=="periodic"} the default, then the vector you decompose is
#' assumed to be periodic on its defined interval,\cr if
#' \code{boundary=="reflection"}, the vector beyond its boundaries is assumed
#' to be a symmetric reflection of itself.
#' @param y \code{dwpt.2d} object (list-based structure of matrices)
#' @param y.basis Boolean vector, the same length as \eqn{y}, where \code{TRUE}
#' means the basis tensor should be used in the reconstruction.
#' @return Basically, a list with the following components
#' \item{w?.?-w?.?}{Wavelet coefficient matrices (images). The first index is
#' associated with the scale of the decomposition while the second is
#' associated with the frequency partition within that level. The left and
#' right strings, separated by the dash `-', correspond to the first \eqn{(x)}
#' and second \eqn{(y)} dimensions.} \item{wavelet}{Name of the wavelet filter
#' used.} \item{boundary}{How the boundaries were handled.}
#' @author B. Whitcher
#' @seealso \code{\link{dwt.2d}}, \code{\link{modwt.2d}},
#' \code{\link{wave.filter}}.
#' @references Mallat, S. G. (1989) A theory for multiresolution signal
#' decomposition: the wavelet representation, \emph{IEEE Transactions on
#' Pattern Analysis and Machine Intelligence}, \bold{11}, No. 7, 674-693.
#'
#' Wickerhauser, M. V. (1994) \emph{Adapted Wavelet Analysis from Theory to
#' Software}, A K Peters.
#' @keywords ts
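#' @examples
#' ## Hedged sketch: reconstruct the xbox image from the four level-1
#' ## packets, which form a (trivial) orthonormal basis; the depth and basis
#' ## selection below are illustrative only
#' data(xbox)
#' xbox.dwpt <- dwpt.2d(xbox, "haar", J = 2)
#' basis <- rep(FALSE, length(xbox.dwpt))
#' basis[1:4] <- TRUE
#' max(abs(xbox - idwpt.2d(xbox.dwpt, basis)))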
#' @export dwpt.2d
dwpt.2d <- function(x, wf="la8", J=4, boundary="periodic")
{
## x <- xbox
## Define image dimensions (assign mode for C) and perform simple
## diagnostics.
m <- dim(x)[1]
storage.mode(m) <- "integer"
n <- dim(x)[2]
storage.mode(n) <- "integer"
if(log(m, 2) != trunc(log(m, 2)) | log(n, 2) != trunc(log(n, 2)))
stop("One dimension is not a power of 2")
if(2^J > m | 2^J > n)
stop("Wavelet transform exceeds sample size in one dimension of DWPT")
## Extract wavelet and scaling filter coefficients, along with filter
## length, from the filter name provided.
dict <- wave.filter(wf)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf
storage.mode(h) <- "double"
g <- dict$lpf
storage.mode(g) <- "double"
## Create names for wavelet packet nodes (quad-tree structure).
N <- sum(4^(1:J))
level <- rep(1:J, 4^(1:J))
x.wpt <- vector("list", N)
c1 <- rep(1:J, 2^(1:J))
c2 <- unlist(apply(as.matrix(2^(1:J) - 1), 1, seq, from=0))
cry <- paste("w", c1, ".", c2, sep="")
x.wpt.names <- NULL
for(j in 1:J) {
xx <- matrix(cry[c1 == j], 2^j, 2^j)
yy <- matrix(cry[c1 == j], 2^j, 2^j, byrow=TRUE)
x.wpt.names <- c(x.wpt.names, as.matrix(paste(xx, "-", yy, sep="")))
}
names(x.wpt) <- x.wpt.names
rm(j,xx,yy,c1,c2,cry)
## Define initial zero matrix to store wavelet sub-images.
z <- matrix(0, m/2, n/2)
storage.mode(z) <- "double"
## Implement the 2D DWPT in a nested loop structure.
for(j in 1:J) {
## cat("j =", j, fill=TRUE)
for(k in 0:(4^j/4-1)) {
if(j > 1) {
## if j > 1, grab wavelet coefficient image and also its name.
index <- min((1:N)[level == j-1]) + k
parent <- x.wpt.names[index]
## cat("parent =", parent, fill=TRUE)
x <- x.wpt[[parent]]
tmp <- unlist(strsplit(parent, "\\-"))
}
else
tmp <- c("w0.0", "w0.0")
## Deconstruct name into nodes for the x and y dimensions.
node <- unlist(strsplit(tmp, "\\."))
node <- as.integer(node[-c(1,3)])
## Preliminary assignments in order to keep wavelet coefficient
## sub-images in sequency order.
if(node[1] %% 2 == 0) {
Xlow <- paste("w", j, ".", 2 * node[1], sep="")
Xhigh <- paste("w", j, ".", 2 * node[1] + 1, sep="")
}
else {
Xlow <- paste("w", j, ".", 2 * node[1] + 1, sep="")
Xhigh <- paste("w", j, ".", 2 * node[1], sep="")
}
if(node[2] %% 2 == 0) {
Ylow <- paste("w", j, ".", 2 * node[2], sep="")
Yhigh <- paste("w", j, ".", 2 * node[2] + 1, sep="")
}
else {
Ylow <- paste("w", j, ".", 2 * node[2] + 1, sep="")
Yhigh <- paste("w", j, ".", 2 * node[2], sep="")
}
## Create names for the new wavelet coefficient images.
LL <- paste(Xlow, "-", Ylow, sep="")
LH <- paste(Xlow, "-", Yhigh, sep="")
HL <- paste(Xhigh, "-", Ylow, sep="")
HH <- paste(Xhigh, "-", Yhigh, sep="")
## cat(matrix(c(LH,LL,HH,HL), 2, 2), fill=TRUE)
## Perform the DWPT
out <- .C(C_two_D_dwt, "Image"=as.double(x), "Rows"=m, "Cols"=n,
"filter.length"=L, "hpf"=h, "lpf"=g, "LL"=z, "LH"=z,
"HL"=z, "HH"=z)[7:10]
## Pass wavelet coefficient images into the DWPT object.
x.wpt[[LL]] <- out[["LL"]]
x.wpt[[LH]] <- out[["LH"]]
x.wpt[[HL]] <- out[["HL"]]
x.wpt[[HH]] <- out[["HH"]]
}
## Redefine zero matrix to its new (decimated) size.
m <- dim(out[["LL"]])[1]
storage.mode(m) <- "integer"
n <- dim(out[["LL"]])[2]
storage.mode(n) <- "integer"
z <- matrix(0, m/2, n/2)
storage.mode(z) <- "double"
}
attr(x.wpt, "J") <- J
attr(x.wpt, "wavelet") <- wf
attr(x.wpt, "boundary") <- boundary
return(x.wpt)
}
idwpt.2d <- function(y, y.basis)
{
## Error checking
if(length(y) != length(y.basis))
stop("DWPT object and basis selection must be the same length")
## Number of wavelet scales
J <- attributes(y)$J
## Define wavelet/scaling filter coefficients and length
dict <- wave.filter(attributes(y)$wavelet)
L <- dict$length
storage.mode(L) <- "integer"
h <- dict$hpf
storage.mode(h) <- "double"
g <- dict$lpf
storage.mode(g) <- "double"
## Nested for loops
names(y.basis) <- names(y)
for(j in J:1) {
for(nx in seq(0, 2^j - 1, by = 2)) {
for(ny in seq(0, 2^j - 1, by = 2)) {
## Name the four wavelet coefficients sub-images
LL <- paste("w", j, ".", nx, "-", "w", j, ".", ny, sep="")
LH <- paste("w", j, ".", nx, "-", "w", j, ".", ny+1, sep="")
HL <- paste("w", j, ".", nx+1, "-", "w", j, ".", ny, sep="")
HH <- paste("w", j, ".", nx+1, "-", "w", j, ".", ny+1, sep="")
if(any(y.basis[LL], y.basis[LH], y.basis[HL], y.basis[HH])) {
m <- nrow(y[[LL]])
storage.mode(m) <- "integer"
n <- ncol(y[[LL]])
storage.mode(n) <- "integer"
XX <- matrix(0, 2*m, 2*n)
storage.mode(XX) <- "double"
## parent indices to construct string
pnx <- floor(nx / 2)
pny <- floor(ny / 2)
if((pnx %% 2 != 0) & (pny %% 2 != 0))
## Upper right-hand corner
out <- .C(C_two_D_idwt, as.double(y[[HH]]),
as.double(y[[HL]]), as.double(y[[LH]]),
as.double(y[[LL]]), m, n, L, h, g, "Y"=XX)$Y
else {
## Upper left-hand corner
if((pnx %% 2 == 0) & (pny %% 2 != 0))
out <- .C(C_two_D_idwt, as.double(y[[LH]]),
as.double(y[[LL]]), as.double(y[[HH]]),
as.double(y[[HL]]), m, n, L, h, g, "Y"=XX)$Y
else {
## Lower right-hand corner
if((pnx %% 2 != 0) & (pny %% 2 == 0))
out <- .C(C_two_D_idwt, as.double(y[[HL]]),
as.double(y[[HH]]), as.double(y[[LL]]),
as.double(y[[LH]]), m, n, L, h, g, "Y"=XX)$Y
else {
## Lower left-hand corner
if((pnx %% 2 == 0) & (pny %% 2 == 0))
out <- .C(C_two_D_idwt, as.double(y[[LL]]),
as.double(y[[LH]]), as.double(y[[HL]]),
as.double(y[[HH]]), m, n, L, h, g, "Y"=XX)$Y
else
stop("Ouch!")
}
}
}
if(j > 1) {
pname <- paste("w", j-1, ".", pnx, "-", "w", j-1, ".", pny, sep="")
y[[pname]] <- out
y.basis[pname] <- 1
}
}
}
}
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/two_D.R
|
#' Upsampling of a vector
#'
#' Upsamples a given vector.
#'
#'
#' @param x vector of observations
#' @param f frequency of upsampling; e.g., 2, 4, etc.
#' @param y value to upsample with; e.g., NA, 0, etc.
#' @return A vector \code{f} times the length of the original, with the new
#' positions filled by \code{y}.
#' @author B. Whitcher
#' @references Any basic signal processing text.
#' @keywords ts
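#' @examples
#' ## Upsample by a factor of 2, filling the new positions with zeros
#' up.sample(1:4, 2, 0)
#' ## and by a factor of 3 with the default NA fill
#' up.sample(1:4, 3)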
#' @export up.sample
up.sample <- function(x, f, y=NA) {
n <- length(x)
as.vector(rbind(x, matrix(rep(y, (f-1)*n), nrow=f-1)))
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/up.sample.R
|
brick.wall.2d <- function (x, method = "modwt") {
wf <- attributes(x)$wavelet
m <- wave.filter(wf)$length
for (i in names(x)) {
j <- as.numeric(substr(i, 3, 3))
if (method == "dwt") {
n <- ceiling((m - 2) * (1 - 1/2^j))
} else {
n <- (2^j - 1) * (m - 1)
}
n <- min(n, nrow(x[[i]]))
x[[i]][1:n, ] <- NA
x[[i]][, 1:n] <- NA
}
return(x)
}
#' Wavelet Analysis of Images
#'
#' Produces an estimate of the multiscale variance with approximate
#' confidence intervals using the 2D MODWT.
#'
#' The wavelet variance is basically the average of the squared wavelet
#' coefficients across each scale and direction of an image. As shown
#' in Mondal and Percival (2012), the wavelet variance is a
#' scale-by-scale decomposition of the variance for a stationary spatial
#' process, and certain non-stationary spatial processes.
#'
#' @param x 2D MODWT object, the output of \code{\link{modwt.2d}}
#' @param p lower-tail probability used for each side of the confidence
#' interval; the default 0.025 yields an approximate 95\% interval
#' @return Data frame with 3J+1 rows.
#' @author B. Whitcher
#' @references Mondal, D. and D. B. Percival (2012). Wavelet variance
#' analysis for random fields on a regular lattice. \emph{IEEE
#' Transactions on Image Processing} \bold{21}, 537–549.
#'
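#' @examples
#' \dontrun{
#' ## Hedged sketch: scale-by-scale variance of a white-noise image (the
#' ## random image and depth below are illustrative only)
#' x <- matrix(rnorm(64 * 64), 64, 64)
#' x.modwt <- modwt.2d(x, "haar", J = 3)
#' wave.variance.2d(x.modwt)
#' }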
wave.variance.2d <- function(x, p = 0.025) {
# The unbiased estimator ignores those coefficients affected by the boundary
x_bw <- brick.wall.2d(x)
x_ss <- unlist(lapply(x_bw, FUN = function(v) sum(v * v, na.rm = TRUE)))
x_length <- unlist(lapply(x_bw, FUN = function(v) sum(! is.na(v))))
wave_var <- x_ss / x_length
edof <- rep(NA, length(x))
names(edof) <- names(x)
wf_length <- wave.filter(attributes(x)$wavelet)$length
# from Section 3.3 Confidence intervals in Geilhufe et al. (2013)
for (i in names(x)) {
j <- as.integer(substr(i, 3, 3))
Lj <- (2^j - 1) * (wf_length - 1) + 1
Nj <- nrow(x[[i]]) - Lj + 1
Mj <- ncol(x[[i]]) - Lj + 1
pad_with_zeros <- matrix(0, nrow = 2^(trunc(log2(Nj)) + 2), ncol = 2^(trunc(log2(Mj)) + 2))
pad_with_zeros[1:Nj, 1:Mj] <- x[[i]][Lj:nrow(x[[i]]), Lj:ncol(x[[i]])]
sW <- fft(fft(pad_with_zeros) * Conj(fft(pad_with_zeros)), inverse = TRUE) / prod(dim(pad_with_zeros)) / Nj / Mj
sigma_W <- sum(sW^2)
if (Nj * Mj > 128) {
edof[i] <- 2 * Nj * Mj * wave_var[i]^2 / Re(sigma_W)
} else {
edof[i] <- max((Nj * Mj) / (2^j * 2^j), 1)
}
}
data.frame(
value = wave_var,
level = as.integer(substr(names(x), 3, 3)),
direction = factor(
toupper(substr(names(x), 1, 2)),
levels = c("LH", "HL", "HH", "LL"),
labels = c("Horizontal", "Vertical", "Diagonal", "Approximation")
),
ci_lower = unlist(edof) * wave_var / qchisq(1 - p, unlist(edof)),
ci_upper = unlist(edof) * wave_var / qchisq(p, unlist(edof))
)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/var_2D.R
|
#' Select a Wavelet Filter
#'
#' Converts name of wavelet filter to filter coefficients.
#'
#' Simple \code{switch} statement selects the appropriate filter.
#'
#' @param name Character string of wavelet filter.
#' @return List containing the following items: \item{L}{Length of the wavelet
#' filter.} \item{hpf}{High-pass filter coefficients.} \item{lpf}{Low-pass
#' filter coefficients.}
#' @author B. Whitcher
#' @seealso \code{\link{wavelet.filter}}, \code{\link{squared.gain}}.
#' @references Daubechies, I. (1992) \emph{Ten Lectures on Wavelets}, CBMS-NSF
#' Regional Conference Series in Applied Mathematics, SIAM: Philadelphia.
#'
#' Doroslovacki (1998) On the least asymmetric wavelets, \emph{IEEE
#' Transactions on Signal Processing}, \bold{46}, No. 4, 1125-1130.
#'
#' Morris and Peravali (1999) Minimum-bandwidth discrete-time wavelets,
#' \emph{Signal Processing}, \bold{76}, No. 2, 181-193.
#'
#' Nielsen, M. (2000) On the Construction and Frequency Localization of
#' Orthogonal Quadrature Filters, \emph{Journal of Approximation Theory},
#' \bold{108}, No. 1, 36-52.
#' @keywords ts
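#' @examples
#' ## Haar and Daubechies D(4) filter coefficients
#' wave.filter("haar")
#' wave.filter("d4")$lpf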
#' @export wave.filter
wave.filter <- function(name)
{
select.haar <- function() {
L <- 2
g <- c(0.7071067811865475, 0.7071067811865475)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.d4 <- function() {
L <- 4
g <- c(0.4829629131445341, 0.8365163037378077, 0.2241438680420134,
-0.1294095225512603)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.mb4 <- function() {
L <- 4
g <- c(4.801755e-01, 8.372545e-01, 2.269312e-01, -1.301477e-01)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.bs3.1 <- function() {
L <- 4
g <- c(0.1767767, 0.5303301, 0.5303301, 0.1767767)
h <- qmf(g)
gd <- c(0.3535534, 1.06066, -1.06066, -0.3535534)
hd <- qmf(g)
return(list(length = L, hpf = h, lpf = g, dhpf = hd, dlpf = gd))
}
select.w4 <- function() {
L <- 4
g <- c(-1, 3, 3, -1) / 8
h <- c(-1, 3, -3, 1) / 8
return(list(length = L, hpf = h, lpf = g))
}
select.fk4 <- function() {
L <- 4
g <- c(.6539275555697651, .7532724928394872, .5317922877905981e-1,
-.4616571481521770e-1)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.d6 <- function() {
L <- 6
g <- c(0.3326705529500827, 0.8068915093110928, 0.4598775021184915,
-0.1350110200102546, -0.0854412738820267, 0.0352262918857096)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.fk6 <- function() {
L <- 6
g <- c(.4279150324223103, .8129196431369074, .3563695110701871,
-.1464386812725773, -.7717775740697006e-1, .4062581442323794e-1)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.d8 <- function() {
L <- 8
g <- c(0.2303778133074431, 0.7148465705484058, 0.6308807679358788,
-0.0279837694166834, -0.1870348117179132, 0.0308413818353661,
0.0328830116666778, -0.0105974017850021)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.fk8 <- function() {
L <- 8
g <- c(.3492381118637999, .7826836203840648, .4752651350794712,
-.9968332845057319e-1, -.1599780974340301, .4310666810651625e-1,
.4258163167758178e-1, -.1900017885373592e-1)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.la8 <- function() {
L <- 8
g <- c(-0.07576571478935668, -0.02963552764596039, 0.49761866763256290,
0.80373875180538600, 0.29785779560560505, -0.09921954357695636,
-0.01260396726226383, 0.03222310060407815)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.mb8 <- function() {
L <- 8
g <- rev(c(-1.673619e-01, 1.847751e-02, 5.725771e-01, 7.351331e-01,
2.947855e-01, -1.108673e-01, 7.106015e-03, 6.436345e-02))
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.bl14 <- function() {
L <- 14
g <- c( 0.0120154192834842, 0.0172133762994439, -0.0649080035533744,
-0.0641312898189170, 0.3602184608985549, 0.7819215932965554,
0.4836109156937821, -0.0568044768822707, -0.1010109208664125,
0.0447423494687405, 0.0204642075778225, -0.0181266051311065,
-0.0032832978473081, 0.0022918339541009)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.fk14 <- function() {
L <- 14
g <- c(.2603717692913964, .6868914772395985, .6115546539595115,
.5142165414211914e-1, -.2456139281621916, -.4857533908585527e-1,
.1242825609215128, .2222673962246313e-1, -.6399737303914167e-1,
-.5074372549972850e-2, .2977971159037902e-1, -.3297479152708717e-2,
-.9270613374448239e-2, .3514100970435962e-2)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.d16 <- function() {
L <- 16
g <- c(0.0544158422431049, 0.3128715909143031, 0.6756307362972904,
0.5853546836541907, -0.0158291052563816, -0.2840155429615702,
0.0004724845739124, 0.1287474266204837, -0.0173693010018083,
-0.0440882539307952, 0.0139810279173995, 0.0087460940474061,
-0.0048703529934518, -0.0003917403733770, 0.0006754494064506,
-0.0001174767841248)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.la16 <- function() {
L <- 16
g <- c(-0.0033824159513594, -0.0005421323316355, 0.0316950878103452,
0.0076074873252848, -0.1432942383510542, -0.0612733590679088,
0.4813596512592012, 0.7771857516997478, 0.3644418948359564,
-0.0519458381078751, -0.0272190299168137, 0.0491371796734768,
0.0038087520140601, -0.0149522583367926, -0.0003029205145516,
0.0018899503329007)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.mb16 <- function() {
L <- 16
g <- rev(c(-1.302770e-02, 2.173677e-02, 1.136116e-01, -5.776570e-02,
-2.278359e-01, 1.188725e-01, 6.349228e-01, 6.701646e-01,
2.345342e-01, -5.656657e-02, -1.987986e-02, 5.474628e-02,
-2.483876e-02, -4.984698e-02, 9.620427e-03, 5.765899e-03))
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.la20 <- function() {
L <- 20
g <- c(0.0007701598091030, 0.0000956326707837, -0.0086412992759401,
-0.0014653825833465, 0.0459272392237649, 0.0116098939129724,
-0.1594942788575307, -0.0708805358108615, 0.4716906668426588,
0.7695100370143388, 0.3838267612253823, -0.0355367403054689,
-0.0319900568281631, 0.0499949720791560, 0.0057649120455518,
-0.0203549398039460, -0.0008043589345370, 0.0045931735836703,
0.0000570360843390, -0.0004593294205481)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.bl20 <- function() {
L <- 20
g <- c(0.0008625782242896, 0.0007154205305517, -0.0070567640909701,
0.0005956827305406, 0.0496861265075979, 0.0262403647054251,
-0.1215521061578162, -0.0150192395413644, 0.5137098728334054,
0.7669548365010849, 0.3402160135110789, -0.0878787107378667,
-0.0670899071680668, 0.0338423550064691, -0.0008687519578684,
-0.0230054612862905, -0.0011404297773324, 0.0050716491945793,
0.0003401492622332, -0.0004101159165852)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.fk22 <- function() {
L <- 22
g <- c(.1938961077599566, .5894521909294277, .6700849629420265,
.2156298491347700, -.2280288557715772, -.1644657152688429,
.1115491437220700, .1101552649340661, -.6608451679377920e-1,
-.7184168192312605e-1, .4354236762555708e-1, .4477521218440976e-1,
-.2974288074927414e-1, -.2597087308902119e-1, .2028448606667798e-1,
.1296424941108978e-1, -.1288599056244363e-1, -.4838432636440189e-2,
.7173803165271690e-2, .3612855622194901e-3, -.2676991638581043e-2,
.8805773686384639e-3)
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
select.mb24 <- function() {
L <- 24
g <- rev(c(-2.132706e-05, 4.745736e-04, 7.456041e-04, -4.879053e-03,
-1.482995e-03, 4.199576e-02, -2.658282e-03, -6.559513e-03,
1.019512e-01, 1.689456e-01, 1.243531e-01, 1.949147e-01,
4.581101e-01, 6.176385e-01, 2.556731e-01, -3.091111e-01,
-3.622424e-01, -4.575448e-03, 1.479342e-01, 1.027154e-02,
-1.644859e-02, -2.062335e-03, 1.193006e-03, 5.361301e-05))
h <- qmf(g)
return(list(length = L, hpf = h, lpf = g))
}
switch(name,
"haar" = select.haar(),
"d4" = select.d4(),
"mb4" = select.mb4(),
"w4" = select.w4(),
"bs3.1" = select.bs3.1(),
"fk4" = select.fk4(),
"d6" = select.d6(),
"fk6" = select.fk6(),
"d8" = select.d8(),
"fk8" = select.fk8(),
"la8" = select.la8(),
"mb8" = select.mb8(),
"bl14" = select.bl14(),
"fk14" = select.fk14(),
"d16" = select.d16(),
"la16" = select.la16(),
"mb16" = select.mb16(),
"la20" = select.la20(),
"bl20" = select.bl20(),
"fk22" = select.fk22(),
"mb24" = select.mb24(),
stop("Invalid selection for wave.filter"))
}
#' Quadrature Mirror Filter
#'
#' Computes the quadrature mirror filter from a given filter.
#'
#' None.
#'
#' @param g Filter coefficients.
#' @param low2high Logical, default is \code{TRUE} which means a low-pass
#' filter is input and a high-pass filter is output. Setting \code{low2high=F}
#' performs the inverse.
#' @return Quadrature mirror filter.
#' @author B. Whitcher
#' @seealso \code{\link{wave.filter}}.
#' @references Any basic signal processing text.
#' @keywords ts
#' @examples
#'
#' ## Haar wavelet filter
#' g <- wave.filter("haar")$lpf
#' qmf(g)
#'
#' @export qmf
qmf <- function(g, low2high = TRUE) {
L <- length(g)
if(low2high)
h <- (-1)^(0:(L - 1)) * rev(g)
else
h <- (-1)^(1:L) * rev(g)
return(h)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/wave.filter.R
|
## .First.lib <- function(lib, pkg) library.dynam("waveslim", pkg, lib)
.onAttach <- function(lib, pkg) {
txt <- paste(
"\n",
pkg,
": Wavelet Method for 1/2/3D Signals (version = ",
utils::packageDescription(pkg, lib)[["Version"]],
")\n",
sep = ""
)
packageStartupMessage(txt)
}
|
/scratch/gouwar.j/cran-all/cranData/waveslim/R/zzz.R
|
"LSWsim"<-
function(spec){
#
#
# First check that all spectral elements are non-negative
#
if (any(spec$D < 0))
stop("All spectral elements must be non-negative")
#
#
# Now multiply by random element and factor of 2 (to undo AvBasis
# averaging)
#
nlev <- nlevelsWT(spec)
len <- 2^nlev
for(i in (nlev-1):0) {
v <- accessD(spec, level=i)
v <- sqrt(v)*2^(nlev-i)*rnorm(len)
spec <- putD(spec, level=i, v=v)
}
AvBasis(convert(spec))
}
"cns"<-
function(n, filter.number=1, family="DaubExPhase"){
if (is.na(IsPowerOfTwo(n)))
stop("n must be a power of two")
z <- rep(0, n)
zwdS <- wd(z, filter.number=filter.number, family=family, type="station")
zwdS
}
"checkmyews" <- function(spec, nsim=10){
ans <- cns(2^nlevelsWT(spec))
for(i in 1:nsim) {
cat(".")
LSWproc <- LSWsim(spec)
ews <- ewspec(LSWproc, filter.number=1, family="DaubExPhase",
WPsmooth=FALSE)
ans$D <- ans$D + ews$S$D
ans$C <- ans$C + ews$S$C
}
ans$D <- ans$D/nsim
ans$C <- ans$C/nsim
ans
}
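#
# Hedged usage sketch for the three functions above (the injected spectrum
# is illustrative only; cns, putD, LSWsim and checkmyews are used as
# defined/assumed in this file):
#
# spec <- cns(64)                                # zeroed spectrum template
# spec <- putD(spec, level = 2, v = rep(1, 64))  # put power at one scale
# x <- LSWsim(spec)                              # simulate one realisation
# avspec <- checkmyews(spec, nsim = 25)          # average estimated spectrum
#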
|
/scratch/gouwar.j/cran-all/cranData/wavethresh/R/LSWsim.r
|
"makewpstRO" <-
function(timeseries, response, filter.number = 10., family =
"DaubExPhase", trans = logabs, percentage = 10.)
{
#
#
# Using the data in timeseries (which should have length a power of two)
# and the response information, create an object
# of class wpstRO (stationary wavelet packet regression object).
#
# Given this wpstRO and another timeseries a function exists to predict
# the group membership of each timeseries element
#
#
# First build stationary wavelet packet object
#
#
# Now convert this to a matrix
#
twpst <- #
wpst(timeseries, filter.number = filter.number, family = family
)
#
# Now extract the ``best'' 1D variables.
#
tw2m <- #
wpst2m(wpstobj = twpst, trans = trans)
tbm <- #
bestm(tw2m, y = response, percentage = percentage)
#
# Now build data frame from these variables
#
#
print.w2m(tbm)
nc <- ncol(tbm$m)
nr <- nrow(tbm$m)
tdf <- data.frame(response, tbm$m)
dimnames(tdf) <- list(as.character(1.:nr), c("response", paste(
"X", 1.:nc, sep = "")))
l <- list(df = tdf, ixvec = tbm$ixvec, level = tbm$level, pktix
= tbm$pktix, nlevels = tbm$nlevels, cv = tbm$cv,
filter = twpst$filter, trans = trans)
oldClass(l) <- "wpstRO"
l
}
"wpstREGR" <-
function(newTS, wpstRO)
{
#
# Extract the "best packets"
#
newwpst <- #
wpst(newTS, filter.number = wpstRO$filter$filter.number, family
= wpstRO$filter$family)
goodlevel <- wpstRO$level
goodpkt <- wpstRO$pkt
npkts <- length(goodpkt)
ndata <- length(newTS)
m <- matrix(0., nrow = ndata, ncol = npkts)
J <- nlevelsWT(newwpst)
grot <- compgrot(J, filter.number=wpstRO$filter$filter.number,
family=wpstRO$filter$family)
for(i in 1.:npkts) {
j <- goodlevel[i]
m[, i] <- guyrot(accessD(newwpst, level = j, index =
goodpkt[i]), grot[J - j])/(sqrt(2.)^(J - j))
m[, i] <- wpstRO$trans(m[, i])
}
dimnames(m) <- list(NULL, paste("X", 1.:npkts, sep = ""))
l <- data.frame(m)
l
}
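#
# Hedged workflow sketch for makewpstRO()/wpstREGR() (the series, response
# and the lm() fit below are illustrative only):
#
# x <- rnorm(512); y <- rnorm(512)         # training series and response
# ro <- makewpstRO(x, y, percentage = 5)   # build the regression object
# fit <- lm(response ~ ., data = ro$df)    # fit any regression to ro$df
# newdf <- wpstREGR(rnorm(512), ro)        # packet matrix for a new series
# predict(fit, newdata = newdf)            # predicted responses
#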
"wpst2m" <-
function(wpstobj, trans = identity)
{
#
# Function that converts a wpstobj into a matrix
#
# Input:
#
# wpstobj: the wpstobj to convert
#
# trans: the transform to apply to the
# wpst coefficients as they come out
#
# an interesting alternative is
# trans = log( . )^2
# (you'll have to write this function)
#
#
# Returns: An object of class w2m
#
# This is a list with the following components:
#
# m - a matrix of order ndata x nbasis
#
# where ndata is the number of data points for
# the time series that constituted wpstobj
#
# and nbasis is the number of bases in the wpstobj
#
# Each column corresponds to a basis function
#
# The row ordering is the same as the time series
# that constituted wpstobj
#
# pktix - a vector of length nbasis which
# describes the packet index of the
# basis function in wpstm
#
# level - as pktix but for the level
#
# nlevels The number of levels
#
J <- nlev <- nlevelsWT(wpstobj)
grot <- compgrot(J, filter.number = wpstobj$filter$filter.number,
family = wpstobj$filter$family)
nbasis <- 2. * (2.^nlev - 1.)
ndata <- 2.^nlev
m <- matrix(0., nrow = ndata, ncol = nbasis)
level <- rep(0., nbasis)
pktix <- rep(0., nbasis)
cnt <- 1.
cat("Level: ")
for(j in 0.:(nlev - 1.)) {
cat(j, " ")
lcnt <- 0.
npkts <- 2.^(nlev - j)
prcnt <- as.integer(npkts/10.)
for(i in 0.:(npkts - 1.)) {
pkcoef <- guyrot(accessD(wpstobj, level = j,
index = i), grot[J - j])/(sqrt(2.)^
(J - j))
m[, cnt] <- trans(pkcoef)
level[cnt] <- j
pktix[cnt] <- i
lcnt <- lcnt + 1.
cnt <- cnt + 1.
if (prcnt > 0) {
if(lcnt %% prcnt == 0.) {
lcnt <- 0.
cat(".")
}
}
}
cat("\n")
}
cat("\n")
l <- list(m = m, level = level, pktix = pktix, nlevels = J)
oldClass(l) <- "w2m"
l
}
"compgrot" <-
function(J, filter.number, family)
{
if(filter.number == 1. && family == "DaubExPhase") {
grot <- (2.^(0.:(J - 1.)) - 1.)
}
else {
grot <- (1.:J)^2.
grot[1.] <- 2.
grot <- cumsum(grot)
}
grot
}
"logabs" <-
function(x)
logb(x^2.)
"bestm" <-
function(w2mobj, y, percentage = 50.)
{
#
# Compute desired number of bases
#
ndata <- #
nrow(w2mobj$m)
#
# Actual number of bases
#
dbasis <- #
as.integer((percentage * ndata)/100.)
nbasis <- ncol(w2mobj$m)
cv <- rep(0., nbasis)
for(i in 1.:nbasis) {
cv[i] <- cor(w2mobj$m[, i], y)
}
cv[is.na(cv)] <- 0.
sv <- rev(sort.list(abs(cv)))[1.:dbasis]
ixvec <- 1.:nbasis
l <- list(m = w2mobj$m[, sv], ixvec = ixvec[sv], pktix = w2mobj$
pktix[sv], level = w2mobj$level[sv], nlevels = w2mobj$
nlevels, cv = cv[sv])
oldClass(l) <- "w2m"
l
}
"print.w2m" <-
function(x, maxbasis = 10., ...)
{
w2mobj <- x
cat("Contains SWP coefficients\n")
cat("Original time series length: ", nrow(w2mobj$m), "\n")
cat("Number of bases: ", ncol(w2mobj$m), "\n")
lbasis <- min(maxbasis, ncol(w2mobj$m))
if(is.null(w2mobj$ixvec)) {
cat("Raw object\n")
mtmp <- cbind(w2mobj$level[1.:lbasis], w2mobj$pktix[
1.:lbasis])
dimnames(mtmp) <- list(NULL, c("Level", "Pkt Index"))
}
else {
cat("Some basis selection performed\n")
mtmp <- cbind(w2mobj$level[1.:lbasis], w2mobj$pktix[
1.:lbasis], w2mobj$ixvec[1.:lbasis], w2mobj$
cv[1.:lbasis])
dimnames(mtmp) <- list(NULL, c("Level", "Pkt Index",
"Orig Index", "Score"))
}
print(mtmp)
if(ncol(w2mobj$m) > maxbasis)
cat("etc. etc.\n")
invisible()
}
"print.wpstRO" <-
function(x, maxbasis = 10., ...)
{
wpstRO <- x
cat("Stationary wavelet packet regression object\n")
cat("Composite object containing components:")
cat("Original time series length: ", nrow(wpstRO$df), "\n")
cat("Number of bases: ", ncol(wpstRO$df) - 1., "\n")
lbasis <- min(maxbasis, ncol(wpstRO$df) - 1.)
if(is.null(wpstRO$ixvec)) {
cat("Raw object\n")
mtmp <- cbind(wpstRO$level[1.:lbasis], wpstRO$pktix[
1.:lbasis])
dimnames(mtmp) <- list(NULL, c("Level", "Pkt Index"))
}
else {
cat("Some basis selection performed\n")
mtmp <- cbind(wpstRO$level[1.:lbasis], wpstRO$pktix[
1.:lbasis], wpstRO$ixvec[1.:lbasis], wpstRO$
cv[1.:lbasis])
dimnames(mtmp) <- list(NULL, c("Level", "Pkt Index",
"Orig Index", "Score"))
}
print(mtmp)
if(ncol(wpstRO$df) > maxbasis)
cat("etc. etc.\n")
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/wavethresh/R/NSextra.r
|
".onAttach"<-
function(...)
{
wvrelease()
}
#
# Create environment for some WaveThresh functions (PsiJ, ipndacw) to store
# results for reuse. Less efficient than previous versions of WaveThresh
# but plays more nicely with current R and CRAN practice.
#
if (!exists("WTEnv", mode="environment")) {
WTEnv <- new.env()
}
"LinaMayrand3" <-
structure(list(S = structure(c(-0.0662912607362388-0.0855811337270078i,
-0.0662912607362388+0.0855811337270078i, 0.0352266456251514+0i,
0.332671113131273+0i, 0.110485434560398-0.0855811337270078i,
0.110485434560398+0.0855811337270078i, -0.0854411265843329+0i,
0.806890861720468+0i, 0.662912607362388+0.171163681667578i, 0.662912607362388-0.171163681667578i,
-0.135010726159072+0i, 0.45987820885317+0i, 0.662912607362388+0.171163681667578i,
0.662912607362388-0.171163681667578i, 0.45987820885317+0i, -0.135010726159072+0i,
0.110485434560398-0.0855811337270078i, 0.110485434560398+0.0855811337270078i,
0.806890861720468+0i, -0.0854411265843329+0i, -0.0662912607362388-0.0855811337270078i,
-0.0662912607362388+0.0855811337270078i, 0.332671113131273+0i,
0.0352266456251514+0i), .Dim = as.integer(c(4, 6))), W = structure(c(-0.0662912607362388+0.0855811337270078i,
-0.0662912607362388-0.0855811337270078i, 0.332671113131273+0i,
0.0352266456251514+0i, -0.110485434560398-0.0855811337270078i,
-0.110485434560398+0.0855811337270078i, -0.806890861720468+0i,
0.0854411265843329+0i, 0.662912607362388-0.171163681667578i,
0.662912607362388+0.171163681667578i, 0.45987820885317+0i, -0.135010726159072+0i,
-0.662912607362388+0.171163681667578i, -0.662912607362388-0.171163681667578i,
0.135010726159072+0i, -0.45987820885317+0i, 0.110485434560398+0.0855811337270078i,
0.110485434560398-0.0855811337270078i, -0.0854411265843329+0i,
0.806890861720468+0i, 0.0662912607362388-0.0855811337270078i,
0.0662912607362388+0.0855811337270078i, -0.0352266456251514+0i,
-0.332671113131273+0i), .Dim = as.integer(c(4, 6)))), .Names = c("S",
"W"))
"LinaMayrand4" <-
structure(list(S = structure(c(-0.0177682977370364-0.0843076215447475i,
0.102008915752387-0.140888496674900i, 0.512949613906065+0.139761114430506i,
0.682186908447622+0.309503739778537i, 0.261320230715269-0.0265993641984858i,
-0.0829326081014193-0.196341989489948i, -0.0493947656694662-0.0288541287014151i,
0.00584356522937926+0.0277267464287373i), .Dim = as.integer(c(1,
8))), W = structure(c(-0.00584356522937926+0.0277267464287373i,
-0.0493947656694662+0.0288541287014151i, 0.0829326081014193-0.196341989489948i,
0.261320230715269+0.0265993641984858i, -0.682186908447622+0.309503739778537i,
0.512949613906065-0.139761114430506i, -0.102008915752387-0.140888496674900i,
-0.0177682977370364+0.0843076215447475i), .Dim = as.integer(c(1,
8)))), .Names = c("S", "W"))
"LinaMayrand5" <-
structure(list(S = structure(c(0.0104924505144049+0.0205904370844365i,
-0.0131549130788862+0.0190001547113654i, -0.0480171707489855-0.0286805385686857i,
0.00443868969370267-0.0660029379744943i, -0.0171289081256946+0.00872852869497756i,
-0.0407762717133288-0.0282317864304761i, -0.0457735601342806-0.0701496826501424i,
0.109045176430938-0.153497807951817i, -0.080639704153759-0.117947473548549i,
0.0139497502179911-0.217696442313413i, 0.342248869674118+0.0140988497709936i,
0.423036269003173+0.0594750872271794i, 0.151379708479645-0.0942236567554891i,
0.245969162830182-0.123232560001445i, 0.772484323772727+0.144605393302011i,
0.642829163846022+0.350360717350611i, 0.643003234585088+0.182852164538766i,
0.501119052917861+0.350160634132963i, 0.479618312994977+0.059046616665079i,
0.375016379640746+0.0994046669755474i, 0.643003234585088+0.182852164538766i,
0.501119052917861+0.350160634132963i, -0.0564771558731019-0.0836581495806555i,
-0.0349735956831048-0.248283003884364i, 0.151379708479645-0.0942236567554891i,
0.245969162830182-0.123232560001445i, -0.0809927427988999-0.0456676283259696i,
-0.106064370637416-0.113222843833651i, -0.080639704153759-0.117947473548549i,
0.0139497502179911-0.217696442313413i, 0.0450707806910314+0.0140988497709936i,
-0.0103356606306847+0.0594750872271794i, -0.0171289081256946+0.00872852869497756i,
-0.0407762717133288-0.0282317864304761i, 0.0142495119522009+0.00120270047413905i,
0.0106798133845187+0.0203460275629919i, 0.0104924505144049+0.0205904370844365i,
-0.0131549130788862+0.0190001547113654i, -0.00819760743953431-0.00489641086342034i,
0.000541697299744814-0.00805499281231948i), .Dim = as.integer(c(4,
10))), W = structure(c(0.0104924505144049-0.0205904370844365i,
-0.0131549130788862-0.0190001547113654i, -0.00819760743953431+0.00489641086342034i,
0.000541697299744814+0.00805499281231948i, 0.0171289081256946+0.00872852869497756i,
0.0407762717133288-0.0282317864304761i, -0.0142495119522009+0.00120270047413905i,
-0.0106798133845187+0.0203460275629919i, -0.080639704153759+0.117947473548549i,
0.0139497502179911+0.217696442313413i, 0.0450707806910314-0.0140988497709936i,
-0.0103356606306847-0.0594750872271794i, -0.151379708479645-0.0942236567554891i,
-0.245969162830182-0.123232560001445i, 0.0809927427988999-0.0456676283259696i,
0.106064370637416-0.113222843833651i, 0.643003234585088-0.182852164538766i,
0.501119052917861-0.350160634132963i, -0.0564771558731019+0.0836581495806555i,
-0.0349735956831048+0.248283003884364i, -0.643003234585088+0.182852164538766i,
-0.501119052917861+0.350160634132963i, -0.479618312994977+0.059046616665079i,
-0.375016379640746+0.0994046669755474i, 0.151379708479645+0.0942236567554891i,
0.245969162830182+0.123232560001445i, 0.772484323772727-0.144605393302011i,
0.642829163846022-0.350360717350611i, 0.080639704153759-0.117947473548549i,
-0.0139497502179911-0.217696442313413i, -0.342248869674118+0.0140988497709936i,
-0.423036269003173+0.0594750872271794i, -0.0171289081256946-0.00872852869497756i,
-0.0407762717133288+0.0282317864304761i, -0.0457735601342806+0.0701496826501424i,
0.109045176430938+0.153497807951817i, -0.0104924505144049+0.0205904370844365i,
0.0131549130788862+0.0190001547113654i, 0.0480171707489855-0.0286805385686857i,
-0.00443868969370267-0.0660029379744943i), .Dim = as.integer(c(4,
10)))), .Names = c("S", "W"))
"comp.theta" <-
function(djk, Sigma.inv)
{
#
# Takes in the complex wavelet coefficient d_{j,k} and the inverse
# of the covariance matrix Sigma. Returns the scalar statistic
# theta_{j,k}; this is \chi^2_2 if the coefficient contains
# only noise.
#
if(!is.complex(djk)) stop(
"comp.theta should only be used on complex wavelet coefficients."
)
tmp <- cbind(Re(djk), Im(djk))
tmp <- diag(tmp %*% Sigma.inv %*% t(tmp))
return(tmp)
}
"cthr.negloglik" <-
function(parvec, dstarvec, Sigma, Sigma.inv, twopirtdetS, code)
{
#
# Compute -log likelihood of sample dstar from
# mixture of bivariate normal distributions.
#
# Each row of dstarvec should contain one coefficient.
#
if(code == "C") {
SigVec <- c(Sigma[1, 1], Sigma[1, 2], Sigma[2, 2])
di <- dstarvec[, 2]
dr <- dstarvec[, 1]
pnd <- length(di)
pans <- 0
Cout <- .C("Ccthrnegloglik",
parvec = as.double(parvec),
SigVec = as.double(SigVec),
di = as.double(di),
dr = as.double(dr),
pnd = as.integer(pnd),
pans = as.double(pans), PACKAGE = "wavethresh")
return(Cout$pans)
}
else {
p <- parvec[1]
tmp <- parvec[3] * sqrt(parvec[2] * parvec[4])
V <- matrix(c(parvec[2], tmp, tmp, parvec[4]), byrow = TRUE, ncol
= 2)
VpS <- V + Sigma
detVpS <- VpS[1, 1] * VpS[2, 2] - VpS[1, 2] * VpS[2, 1]
VpS.inv <- matrix(c(VpS[2, 2], - VpS[1, 2], - VpS[2, 1],
VpS[1, 1]), ncol = 2, byrow = TRUE)/detVpS
twopirtdetVpS <- 2 * pi * sqrt(detVpS)
tmp <- apply(dstarvec, 1, cthreb.mixden, p = p, twopirtdetS =
twopirtdetS, twopirtdetVpS = twopirtdetVpS, Sigma.inv
= Sigma.inv, VpS.inv = VpS.inv)
return( - sum(log(tmp)))
}
}
"cthreb.mixden" <-
function(dstar, p, twopirtdetS, twopirtdetVpS, Sigma.inv, VpS.inv)
{
#
# Compute density fn. of dstar from normal mixture
#
den1 <- exp(-0.5 * t(dstar) %*% VpS.inv %*% dstar)/twopirtdetVpS
den2 <- exp(-0.5 * t(dstar) %*% Sigma.inv %*% dstar)/twopirtdetS
return(p * den1 + (1 - p) * den2)
}
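#
# Illustrative sketch (not part of the original source; identity matrices
# are assumed purely for demonstration):
#   Sigma <- diag(2); V <- diag(2); VpS <- V + Sigma
#   cthreb.mixden(dstar = c(0.5, -0.2), p = 0.1,
#       twopirtdetS = 2 * pi * sqrt(det(Sigma)),
#       twopirtdetVpS = 2 * pi * sqrt(det(VpS)),
#       Sigma.inv = solve(Sigma), VpS.inv = solve(VpS))
# This evaluates p * N(0, V + Sigma) + (1 - p) * N(0, Sigma) at dstar.
#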
"cthreb.odds" <-
function(coefs, p, V, Sig, code = "NAG")
{
#
# Takes in coefs from a given level with EB-chosen prior parameters
# p and V and DWT covariance matrix Sig.
#
# Returns posterior weights of coefficients being non-zero.
#
if(code == "C" || code == "NAG") {
dr <- coefs[, 1]
di <- coefs[, 2]
nd <- length(dr)
SigVec <- c(Sig[1, 1], Sig[1, 2], Sig[2, 2])
VVec <- c(V[1, 1], V[1, 2], V[2, 2])
pp <- p
ans <- rep(0, nd)
odds <- rep(0, nd)
Cout <- .C("Ccthrcalcodds",
pnd = as.integer(nd),
dr = as.double(dr),
di = as.double(di),
VVec = as.double(VVec),
SigVec = as.double(SigVec),
pp = as.double(p),
ans = as.double(ans),
odds = as.double(odds),PACKAGE = "wavethresh")
ptilde <- Cout$ans
}
else {
VpS <- V + Sig
detS <- Sig[1, 1] * Sig[2, 2] - Sig[1, 2]^2
detVpS <- VpS[1, 1] * VpS[2, 2] - VpS[1, 2]^2
mat <- solve(Sig) - solve(V + Sig)
odds <- apply(coefs, 1, odds.matrix.mult, mat = mat)
# Take care of excessively huge odds giving NAs in exp(odds/2)
odds[odds > 1400] <- 1400
odds <- p/(1 - p) * sqrt(detS/detVpS) * exp(odds/2)
ptilde <- odds/(1 + odds)
}
if(any(is.na(ptilde))) {
print("NAs in ptilde; setting those values to one")
ptilde[is.na(ptilde)] <- 1
}
return(ptilde)
}
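#
# Illustrative sketch (not part of the original source). Any 'code' value
# other than "C" or "NAG" selects the pure R branch, e.g. with made-up
# coefficients:
#   coefs <- cbind(rnorm(8), rnorm(8))   # columns hold Re and Im parts
#   cthreb.odds(coefs, p = 0.2, V = diag(2), Sig = 0.5 * diag(2), code = "R")
# The result gives, for each row, the posterior probability that the
# corresponding coefficient is non-zero.
#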
"cthreb.thresh" <-
function(coefs, ptilde, V, Sig, rule, code)
{
#
# Takes in coefs from a given level with EB-chosen
# prior covariance matrix V, posterior weights ptilde
# and DWT covariance matrix Sig.
#
# Returns thresholded coefficients; how the thresholding is
# done depends on rule:
# rule == "hard": ptilde < 1/2 -> zero, otherwise
# keep unchanged (kill or keep).
# rule == "soft": ptilde < 1/2 -> zero, otherwise
# use posterior mean (kill or shrink).
# rule == "mean": use posterior mean (no zeros).
#
if(rule == "hard") {
coefs[ptilde <= 0.5, ] <- 0
return(coefs)
}
else if(code == "C" || code == "NAG") {
nd <- length(coefs[, 1])
dr <- coefs[, 1]
di <- coefs[, 2]
ansr <- rep(0, nd)
ansi <- rep(0, nd)
VVec <- c(V[1, 1], V[1, 2], V[2, 2])
SigVec <- c(Sig[1, 1], Sig[1, 2], Sig[2, 2])
Cout <- .C("Cpostmean",
pnd = as.integer(nd),
dr = as.double(dr),
di = as.double(di),
VVec = as.double(VVec),
SigVec = as.double(SigVec),
ptilde = as.double(ptilde),
ansr = as.double(ansr),
ansi = as.double(ansi),PACKAGE = "wavethresh")
coefs <- cbind(Cout$ansr, Cout$ansi)
}
else {
stop("Unknown code or rule")
}
if(rule == "mean")
return(coefs)
coefs[ptilde <= 0.5, ] <- 0
return(coefs)
}
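#
# Illustrative sketch (not part of the original source): hard thresholding
# zeroes rows whose posterior weight is at most 1/2 and keeps the rest, so
#   coefs <- cbind(c(2, 0.1), c(-1, 0.05))
#   cthreb.thresh(coefs, ptilde = c(0.9, 0.2), V = diag(2), Sig = diag(2),
#       rule = "hard", code = "C")
# returns the first row unchanged and the second row set to zero.
#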
"cthresh" <-
function(data, j0 = 3, dwwt = NULL, dev = madmad, rule = "hard", filter.number
= 3.1, family = "LinaMayrand", plotfn = FALSE, TI = FALSE,
details = FALSE, policy = "mws", code = "NAG", tol = 0.01)
{
#
# Limited parameter checking
#
n <- length(data)
nlevels <- IsPowerOfTwo(n)
if(is.na(nlevels))
stop("Data should be of length a power of two.")
if((rule != "hard") & (rule != "soft") & (rule != "mean")) {
warning(paste("Unknown rule", rule, "so hard thresholding used"
))
rule <- "hard"
}
if((policy != "mws") & (policy != "ebayes")) {
warning(paste("Unknown policy", policy,
"so using multiwavelet style thresholding"))
policy <- "mws"
}
#
# If 5 vanishing moments is called for, average over all
# Lina-Mayrand wavelets with 5 vanishing moments by recursively
# calling cthresh; if filter.number=0 use all LinaMayrand wavelets
#
if(filter.number == 3 & ((family == "LinaMayrand") || (family ==
"Lawton"))) {
filter.number <- 3.1
family <- "LinaMayrand"
}
else if(filter.number == 4 & family == "LinaMayrand")
filter.number <- 4.1
else if((filter.number == 5) & (family == "LinaMayrand")) {
est1 <- cthresh(data, j0 = j0, dev = dev, rule = rule,
filter.number = 5.1, TI = TI, policy =
policy, details = FALSE, plotfn = FALSE, code = code, tol = tol
)
est2 <- cthresh(data, j0 = j0, dev = dev, rule = rule,
filter.number = 5.2, TI = TI, policy =
policy, details = FALSE, plotfn = FALSE, code = code, tol = tol
)
est3 <- cthresh(data, j0 = j0, dev = dev, rule = rule,
filter.number = 5.3, TI = TI, policy =
policy, details = FALSE, plotfn = FALSE, code = code, tol = tol
)
est4 <- cthresh(data, j0 = j0, dev = dev, rule = rule,
filter.number = 5.4, TI = TI, policy =
policy, details = FALSE, plotfn = FALSE, code = code, tol = tol
)
estimate <- (est1 + est2 + est3 + est4)/4
if(plotfn) {
x <- (1:n)/n
plot(x, data, ylim = range(data, Re(estimate)))
lines(x, Re(estimate), lwd = 2, col = 2)
}
return(estimate)
}
else if((filter.number == 0) & (family == "LinaMayrand")) {
est1 <- cthresh(data, j0 = j0, dev = dev, rule = rule,
filter.number = 3.1, TI = TI, policy =
policy, details = FALSE, plotfn = FALSE, code = code, tol = tol
)
est2 <- cthresh(data, j0 = j0, dev = dev, rule = rule,
filter.number = 4.1, TI = TI, policy =
policy, details = FALSE, plotfn = FALSE, code = code, tol = tol
)
est3 <- cthresh(data, j0 = j0, dev = dev, rule = rule,
filter.number = 5.1, TI = TI, policy =
policy, details = FALSE, plotfn = FALSE, code = code, tol = tol
)
est4 <- cthresh(data, j0 = j0, dev = dev, rule = rule,
filter.number = 5.2, TI = TI, policy =
policy, details = FALSE, plotfn = FALSE, code = code, tol = tol
)
est5 <- cthresh(data, j0 = j0, dev = dev, rule = rule,
filter.number = 5.3, TI = TI, policy =
policy, details = FALSE, plotfn = FALSE, code = code, tol = tol
)
est6 <- cthresh(data, j0 = j0, dev = dev, rule = rule,
filter.number = 5.4, TI = TI, policy =
policy, details = FALSE, plotfn = FALSE, code = code, tol = tol
)
estimate <- (est1 + est2 + est3 + est4 + est5 + est6)/6
if(plotfn) {
x <- (1:n)/n
plot(x, data, ylim = range(data, Re(estimate)))
lines(x, Re(estimate), lwd = 2, col = 2)
}
return(estimate)
}
#
# Take required type of wavelet transform.
#
if(TI==TRUE) data.wd <- wst(data, filter.number = filter.number, family =
family) else data.wd <- wd(data, filter.number =
filter.number, family = family)
#
# Generate covariance matrices
#
if(is.null(dwwt)) dwwt <- make.dwwt(nlevels = nlevels, filter.number =
filter.number, family = family)
sigsq <- dev(Re(accessD(data.wd, level = nlevels - 1))) + dev(Im(
accessD(data.wd, level = nlevels - 1)))
Sigma <- array(0, c(nlevels, 2, 2))
Sigma[, 1:2, 1:2] <- (sigsq * Im(dwwt))/2
Sigma[, 1, 1] <- (sigsq * (1 + Re(dwwt)))/2
Sigma[, 2, 2] <- (sigsq * (1 - Re(dwwt)))/2
thr.wd <- data.wd
if(policy == "mws") {
#
# Do multiwavelet style universal thresholding
#
if(rule == "mean") {
warning("Can't use posterior mean with multiwavelet style thresholding. Using soft thresholding instead"
)
rule <- "soft"
}
lambda <- 2 * log(n)
for(j in j0:(nlevels - 1)) {
coefs <- accessD(data.wd, level = j)
Sigma.inv <- solve(Sigma[j + 1, , ])
thetaj <- comp.theta(coefs, Sigma.inv)
if(rule == "hard")
coefs[abs(thetaj) < lambda] <- 0
else {
k <- Re(coefs)/Im(coefs)
thetahat <- pmax(0, thetaj - lambda)
varr <- Sigma[j + 1, 1, 1]
vari <- Sigma[j + 1, 2, 2]
covar <- Sigma[j + 1, 1, 2]
bhatsq <- (varr * vari - covar^2) * thetahat
bhatsq <- bhatsq/(vari * k^2 - 2 * covar * k +
varr)
coefs <- complex(modulus = sqrt(bhatsq * (k^2 +
1)), argument = Arg(coefs))
}
thr.wd <- putD(thr.wd, level = j, v = coefs)
}
}
else {
#
# Do empirical Bayes shrinkage/thresholding.
# Start by finding parameters:
#
EBpars <- find.parameters(data.wd = data.wd, dwwt = dwwt, j0 =
j0, code = code, tol = tol, Sigma = Sigma)
p <- c(EBpars$pars[, 1])
Sigma <- EBpars$Sigma
V <- array(0, dim = c(nlevels - 1, 2, 2))
for(i in j0:(nlevels - 1))
V[i, , ] <- matrix(EBpars$pars[i, c(2, 3, 3, 4)],
ncol = 2)
#
# Do thresholding.
#
for(j in j0:(nlevels - 1)) {
coefs <- accessD(data.wd, level = j)
coefs <- cbind(Re(coefs), Im(coefs))
ptilde <- cthreb.odds(coefs, p = p[j], V = V[j, ,
], Sig = Sigma[j + 1, , ], code = code)
coefs.thr <- cthreb.thresh(coefs, ptilde = ptilde,
V = V[j, , ], Sig = Sigma[j + 1, , ], rule =
rule, code = code)
thr.wd <- putD(thr.wd, level = j, v = complex(real =
coefs.thr[, 1], imaginary = coefs.thr[, 2]))
}
}
#
# Reconstruct
#
if(TI) data.rec <- AvBasis(thr.wd) else data.rec <- wr(thr.wd)
#
# Plot data and estimate
#
if(plotfn) {
x <- (1:n)/n
plot(x, data, ylim = range(data, Re(data.rec)))
lines(x, Re(data.rec), lwd = 2, col = 2)
}
#
# Return either just the estimate or an unwieldy list.
#
if(details == FALSE) invisible(data.rec) else if(policy == "ebayes")
invisible(list(data = data, data.wd = data.wd, thr.wd = thr.wd,
estimate = data.rec, Sigma = Sigma, sigsq = sigsq,
rule = rule, EBpars = EBpars$pars, wavelet = list(
filter.number, family)))
else invisible(list(data = data, data.wd = data.wd, thr.wd = thr.wd,
estimate = data.rec, Sigma = Sigma, sigsq = sigsq,
rule = rule, wavelet = list(filter.number, family)))
}
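#
# Illustrative usage sketch (not part of the original source):
#   v <- DJ.EX(n = 256, noisy = TRUE, rsnr = 5)
#   est <- cthresh(v$doppler, j0 = 3, policy = "mws")
#   plot((1:256)/256, Re(est), type = "l")
# cthresh denoises using complex-valued (Lina-Mayrand) wavelets; Re(est)
# is the usual estimate of the underlying signal.
#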
"filter.select" <-
function(filter.number, family = "DaubLeAsymm", constant = 1)
{
G <- NULL
if(family == "DaubExPhase") {
family <- "DaubExPhase"
#
#
# The following wavelet coefficients are taken from
# Daubechies, I (1988) Orthonormal Bases of Wavelets
# Communications on Pure and Applied Mathematics. Page 980
# or Ten Lectures on Wavelets, Daubechies, I, 1992
# CBMS-NSF Regional Conference Series, page 195, Table 6.1
#
# Comment from that table reads:
# "The filter coefficients for the compactly supported wavelets
# with extremal phase and highest number of vanishing moments
# compatible with their support width".
#
if(filter.number == 1) {
#
#
# This is for the Haar basis. (not in Daubechies).
#
H <- rep(0, 2)
H[1] <- 1/sqrt(2)
H[2] <- H[1]
filter.name <- c("Haar wavelet")
}
else if(filter.number == 2) {
H <- rep(0, 4)
H[1] <- 0.482962913145
H[2] <- 0.836516303738
H[3] <- 0.224143868042
H[4] <- -0.129409522551
filter.name <- c("Daub cmpct on ext. phase N=2")
}
else if(filter.number == 3) {
H <- rep(0, 6)
H[1] <- 0.33267055295
H[2] <- 0.806891509311
H[3] <- 0.459877502118
H[4] <- -0.13501102001
H[5] <- -0.085441273882
H[6] <- 0.035226291882
filter.name <- c("Daub cmpct on ext. phase N=3")
}
else if(filter.number == 4) {
H <- rep(0, 8)
H[1] <- 0.230377813309
H[2] <- 0.714846570553
H[3] <- 0.63088076793
H[4] <- -0.027983769417
H[5] <- -0.187034811719
H[6] <- 0.030841381836
H[7] <- 0.032883011667
H[8] <- -0.010597401785
filter.name <- c("Daub cmpct on ext. phase N=4")
}
else if(filter.number == 5) {
H <- rep(0, 10)
H[1] <- 0.160102397974
H[2] <- 0.603829269797
H[3] <- 0.724308528438
H[4] <- 0.138428145901
H[5] <- -0.242294887066
H[6] <- -0.032244869585
H[7] <- 0.07757149384
H[8] <- -0.006241490213
H[9] <- -0.012580752
H[10] <- 0.003335725285
filter.name <- c("Daub cmpct on ext. phase N=5")
}
else if(filter.number == 6) {
H <- rep(0, 12)
H[1] <- 0.11154074335
H[2] <- 0.494623890398
H[3] <- 0.751133908021
H[4] <- 0.315250351709
H[5] <- -0.226264693965
H[6] <- -0.129766867567
H[7] <- 0.097501605587
H[8] <- 0.02752286553
H[9] <- -0.031582039318
H[10] <- 0.000553842201
H[11] <- 0.004777257511
H[12] <- -0.001077301085
filter.name <- c("Daub cmpct on ext. phase N=6")
}
else if(filter.number == 7) {
H <- rep(0, 14)
H[1] <- 0.077852054085
H[2] <- 0.396539319482
H[3] <- 0.729132090846
H[4] <- 0.469782287405
H[5] <- -0.143906003929
H[6] <- -0.224036184994
H[7] <- 0.071309219267
H[8] <- 0.080612609151
H[9] <- -0.038029936935
H[10] <- -0.016574541631
H[11] <- 0.012550998556
H[12] <- 0.000429577973
H[13] <- -0.001801640704
H[14] <- 0.0003537138
filter.name <- c("Daub cmpct on ext. phase N=7")
}
else if(filter.number == 8) {
H <- rep(0, 16)
H[1] <- 0.054415842243
H[2] <- 0.312871590914
H[3] <- 0.675630736297
H[4] <- 0.585354683654
H[5] <- -0.015829105256
H[6] <- -0.284015542962
H[7] <- 0.000472484574
H[8] <- 0.12874742662
H[9] <- -0.017369301002
H[10] <- -0.044088253931
H[11] <- 0.013981027917
H[12] <- 0.008746094047
H[13] <- -0.004870352993
H[14] <- -0.000391740373
H[15] <- 0.000675449406
H[16] <- -0.000117476784
filter.name <- c("Daub cmpct on ext. phase N=8")
}
else if(filter.number == 9) {
H <- rep(0, 18)
H[1] <- 0.038077947364
H[2] <- 0.243834674613
H[3] <- 0.60482312369
H[4] <- 0.657288078051
H[5] <- 0.133197385825
H[6] <- -0.293273783279
H[7] <- -0.096840783223
H[8] <- 0.148540749338
H[9] <- 0.030725681479
H[10] <- -0.067632829061
H[11] <- 0.000250947115
H[12] <- 0.022361662124
H[13] <- -0.004723204758
H[14] <- -0.004281503682
H[15] <- 0.001847646883
H[16] <- 0.000230385764
H[17] <- -0.000251963189
H[18] <- 3.934732e-05
filter.name <- c("Daub cmpct on ext. phase N=9")
}
else if(filter.number == 10) {
H <- rep(0, 20)
H[1] <- 0.026670057901
H[2] <- 0.188176800078
H[3] <- 0.527201188932
H[4] <- 0.688459039454
H[5] <- 0.281172343661
H[6] <- -0.249846424327
H[7] <- -0.195946274377
H[8] <- 0.127369340336
H[9] <- 0.093057364604
H[10] <- -0.071394147166
H[11] <- -0.029457536822
H[12] <- 0.033212674059
H[13] <- 0.003606553567
H[14] <- -0.010733175483
H[15] <- 0.001395351747
H[16] <- 0.001992405295
H[17] <- -0.000685856695
H[18] <- -0.000116466855
H[19] <- 9.358867e-05
H[20] <- -1.3264203e-05
filter.name <- c("Daub cmpct on ext. phase N=10")
}
else {
stop("Unknown filter number for Daubechies wavelets with extremal phase and highest number of vanishing moments..."
)
}
}
else if(family == "DaubLeAsymm") {
family <- "DaubLeAsymm"
#
#
# The following wavelet coefficients are taken from
# Ten Lectures on Wavelets, Daubechies, I, 1992
# CBMS-NSF Regional Conference Series, page 198, Table 6.3
#
# Comment from that table reads:
# "The low pass filter coefficients for the "least-asymmetric"
# compactly supported wavelets with maximum number of
# vanishing moments, for N = 4 to 10".
#
if(filter.number == 4) {
H <- rep(0, 8)
H[1] <- -0.107148901418
H[2] <- -0.041910965125
H[3] <- 0.703739068656
H[4] <- 1.136658243408
H[5] <- 0.421234534204
H[6] <- -0.140317624179
H[7] <- -0.017824701442
H[8] <- 0.045570345896
filter.name <- c("Daub cmpct on least asymm N=4")
H <- H/sqrt(2)
}
else if(filter.number == 5) {
H <- rep(0, 10)
H[1] <- 0.038654795955
H[2] <- 0.041746864422
H[3] <- -0.055344186117
H[4] <- 0.281990696854
H[5] <- 1.023052966894
H[6] <- 0.89658164838
H[7] <- 0.023478923136
H[8] <- -0.247951362613
H[9] <- -0.029842499869
H[10] <- 0.027632152958
filter.name <- c("Daub cmpct on least asymm N=5")
H <- H/sqrt(2)
}
else if(filter.number == 6) {
H <- rep(0, 12)
H[1] <- 0.021784700327
H[2] <- 0.004936612372
H[3] <- -0.166863215412
H[4] <- -0.068323121587
H[5] <- 0.694457972958
H[6] <- 1.113892783926
H[7] <- 0.477904371333
H[8] <- -0.102724969862
H[9] <- -0.029783751299
H[10] <- 0.06325056266
H[11] <- 0.002499922093
H[12] <- -0.011031867509
filter.name <- c("Daub cmpct on least asymm N=6")
H <- H/sqrt(2)
}
else if(filter.number == 7) {
H <- rep(0, 14)
H[1] <- 0.003792658534
H[2] <- -0.001481225915
H[3] <- -0.017870431651
H[4] <- 0.043155452582
H[5] <- 0.096014767936
H[6] <- -0.070078291222
H[7] <- 0.024665659489
H[8] <- 0.758162601964
H[9] <- 1.085782709814
H[10] <- 0.408183939725
H[11] <- -0.198056706807
H[12] <- -0.152463871896
H[13] <- 0.005671342686
H[14] <- 0.014521394762
filter.name <- c("Daub cmpct on least asymm N=7")
H <- H/sqrt(2)
}
else if(filter.number == 8) {
H <- rep(0, 16)
H[1] <- 0.002672793393
H[2] <- -0.0004283943
H[3] <- -0.021145686528
H[4] <- 0.005386388754
H[5] <- 0.069490465911
H[6] <- -0.038493521263
H[7] <- -0.073462508761
H[8] <- 0.515398670374
H[9] <- 1.099106630537
H[10] <- 0.68074534719
H[11] <- -0.086653615406
H[12] <- -0.202648655286
H[13] <- 0.010758611751
H[14] <- 0.044823623042
H[15] <- -0.000766690896
H[16] <- -0.004783458512
filter.name <- c("Daub cmpct on least asymm N=8")
H <- H/sqrt(2)
}
else if(filter.number == 9) {
H <- rep(0, 18)
H[1] <- 0.001512487309
H[2] <- -0.000669141509
H[3] <- -0.014515578553
H[4] <- 0.012528896242
H[5] <- 0.087791251554
H[6] <- -0.02578644593
H[7] <- -0.270893783503
H[8] <- 0.049882830959
H[9] <- 0.873048407349
H[10] <- 1.015259790832
H[11] <- 0.337658923602
H[12] <- -0.077172161097
H[13] <- 0.000825140929
H[14] <- 0.042744433602
H[15] <- -0.016303351226
H[16] <- -0.018769396836
H[17] <- 0.000876502539
H[18] <- 0.001981193736
filter.name <- c("Daub cmpct on least asymm N=9")
H <- H/sqrt(2)
}
else if(filter.number == 10) {
H <- rep(0, 20)
H[1] <- 0.001089170447
H[2] <- 0.000135245020
H[3] <- -0.01222064263
H[4] <- -0.002072363923
H[5] <- 0.064950924579
H[6] <- 0.016418869426
H[7] <- -0.225558972234
H[8] <- -0.100240215031
H[9] <- 0.667071338154
H[10] <- 1.0882515305
H[11] <- 0.542813011213
H[12] <- -0.050256540092
H[13] <- -0.045240772218
H[14] <- 0.07070356755
H[15] <- 0.008152816799
H[16] <- -0.028786231926
H[17] <- -0.001137535314
H[18] <- 0.006495728375
H[19] <- 8.0661204e-05
H[20] <- -0.000649589896
filter.name <- c("Daub cmpct on least asymm N=10")
H <- H/sqrt(2)
}
else {
stop("Unknown filter number for Daubechies wavelets with\n least asymmetry and highest number of vanishing moments..."
)
}
}
else if (family == "Coiflets") {
family <- "Coiflets"
if (filter.number == 1) {
H <- rep(0, 6)
H[1] <- -0.051429728471
H[2] <- 0.238929728471
H[3] <- 0.602859456942
H[4] <- 0.272140543058
H[5] <- -0.051429972847
H[6] <- -0.011070271529
filter.name <- c("Coiflets N=1")
H <- H * sqrt(2)
}
else if (filter.number == 2) {
H <- rep(0, 12)
H[1] <- 0.0115876
H[2] <- -0.02932014
H[3] <- -0.04763959
H[4] <- 0.273021
H[5] <- 0.5746824
H[6] <- 0.2948672
H[7] <- -0.05408561
H[8] <- -0.04202648
H[9] <- 0.01674441
H[10] <- 0.003967884
H[11] <- -0.001289203
H[12] <- -0.0005095054
filter.name <- c("Coiflets N=2")
H <- H * sqrt(2)
}
else if (filter.number == 3) {
H <- rep(0, 18)
H[1] <- -0.002682419
H[2] <- 0.005503127
H[3] <- 0.01658356
H[4] <- -0.04650776
H[5] <- -0.04322076
H[6] <- 0.2865033
H[7] <- 0.5612853
H[8] <- 0.3029836
H[9] <- -0.05077014
H[10] <- -0.05819625
H[11] <- 0.02443409
H[12] <- 0.01122924
H[13] <- -0.006369601
H[14] <- -0.001820459
H[15] <- 0.0007902051
H[16] <- 0.0003296652
H[17] <- -5.019277e-05
H[18] <- -2.446573e-05
filter.name <- c("Coiflets N=3")
H <- H * sqrt(2)
}
else if (filter.number == 4) {
H <- rep(0, 24)
H[1] <- 0.000630961
H[2] <- -0.001152225
H[3] <- -0.005194524
H[4] <- 0.01136246
H[5] <- 0.01886724
H[6] <- -0.05746423
H[7] <- -0.03965265
H[8] <- 0.2936674
H[9] <- 0.5531265
H[10] <- 0.3071573
H[11] <- -0.04711274
H[12] <- -0.06803813
H[13] <- 0.02781364
H[14] <- 0.01773584
H[15] <- -0.01075632
H[16] <- -0.004001013
H[17] <- 0.002652666
H[18] <- 0.0008955945
H[19] <- -0.0004165006
H[20] <- -0.0001838298
H[21] <- 4.408035e-05
H[22] <- 2.208286e-05
H[23] <- -2.304942e-06
H[24] <- -1.262175e-06
filter.name <- c("Coiflets N=4")
H <- H * sqrt(2)
}
else if (filter.number == 5) {
H <- rep(0, 30)
H[1] <- -0.0001499638
H[2] <- 0.0002535612
H[3] <- 0.001540246
H[4] <- -0.002941111
H[5] <- -0.007163782
H[6] <- 0.01655207
H[7] <- 0.0199178
H[8] <- -0.06499726
H[9] <- -0.03680007
H[10] <- 0.2980923
H[11] <- 0.5475054
H[12] <- 0.3097068
H[13] <- -0.04386605
H[14] <- -0.07465224
H[15] <- 0.02919588
H[16] <- 0.02311078
H[17] <- -0.01397369
H[18] <- -0.00648009
H[19] <- 0.004783001
H[20] <- 0.001720655
H[21] <- -0.001175822
H[22] <- -0.000451227
H[23] <- 0.0002137298
H[24] <- 9.93776e-05
H[25] <- -2.92321e-05
H[26] <- -1.5072e-05
H[27] <- 2.6408e-06
H[28] <- 1.4593e-06
H[29] <- -1.184e-07
H[30] <- -6.73e-08
filter.name <- c("Coiflets N=5")
H <- H * sqrt(2)
}
else {
stop("Unknown filter number for Coiflet wavelets with\n least asymmetry and highest number of vanishing moments...")
}
}
else if(family == "MagKing") {
family <- "MagKing"
if(filter.number == 4) {
H <- c(1-1i, 4-1i, 4+1i, 1+1i)/10
G <- c(-1-2i, 5+2i, -5+2i, 1-2i)/14
filter.name <- c("MagareyKingsbury Wavelet 4-tap")
}
else stop("Only have 4-tap filter at present")
}
else if(family == "Nason") {
family <- "Nason"
if(filter.number == 3) {
H <- c(-0.066291+0.085581i,
0.110485+0.085558i,
0.662912-0.171163i,
0.662912-0.171163i,
0.110485+0.085558i,
-0.066291+0.085581i)
G <- c(-0.066291+0.085581i,
-0.110485-0.085558i,
0.662912-0.171163i,
-0.662912+0.171163i
, 0.110485+0.085558i,
0.066291-0.085581i)
filter.name <- c("Nason Complex Wavelet 6-tap")
}
else stop("Only have 6-tap filter at present")
}
else if(family == "Lawton") {
family <- "Lawton"
if(filter.number == 3) {
H <- c(-0.066291+0.085581i,
0.110485+0.085558i,
0.662912-0.171163i,
0.662912-0.171163i,
0.110485+0.085558i,
-0.066291+0.085581i)
G <- c(-0.066291-0.085581i,
-0.110485+0.085558i,
0.662912+0.171163i,
-0.662912-0.171163i
, 0.110485-0.085558i,
0.066291+0.085581i)
filter.name <- c("Lawton Complex Wavelet 6-tap")
}
else stop("Only have 6-tap filter at present")
}
else if(family == "LittlewoodPaley") {
family <- "LittlewoodPaley"
#
#
# Define the function that computes the coefficients
#
hn <- function(n)
{
if(n == 0)
return(1)
else {
pin2 <- (pi * 1:n)/2
pin2 <- (sin(pin2)/pin2)
return(c(rev(pin2), 1, pin2))
}
}
# Next line changed in 4.6.4: added division by sqrt(2)
H <- hn(filter.number)/sqrt(2)
filter.name <- paste("Littlewood-Paley, N=", filter.number)
}
else if(family == "Yates") {
if(filter.number != 1)
stop("Only filter number 1 exists for Yates wavelet")
family <- "Yates"
H <- c(-1, 1)/sqrt(2)
filter.name <- "Yates"
}
else if(family == "LinaMayrand") {
origfn <- filter.number
nsolution <- as.character(filter.number)
dotpos <- regexpr("\\.", nsolution)
leftint <- substring(nsolution, first = 1, last = dotpos - 1)
rightint <- substring(nsolution, first = dotpos + 1, last =
nchar(nsolution))
if(nchar(nsolution) == 0)
nsolution <- 1
else nsolution <- as.numeric(rightint)
filter.number <- as.numeric(leftint)
matname <- paste(family, filter.number, sep = "")
if(!exists(matname)) {
stop(paste("Filter matrix \"", matname,
"\" does not exist", sep = ""))
}
else {
fm <- get(matname)
if(nsolution > nrow(fm$S))
stop(paste("Solution number ", nsolution,
" is too big. Filter matrix ", matname,
" only has ", nrow(fm$S), " solutions")
)
H <- fm$S[nsolution, ]
G <- fm$W[nsolution, ]
filter.name <- paste("Lina Mayrand, J=", filter.number,
" (nsolution=", nsolution, ")", sep = "")
}
filter.number <- origfn
}
else {
stop("Unknown family")
}
H <- H/constant
return(list(H = H, G = G, name = filter.name, family = family,
filter.number = filter.number))
}
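#
# Illustrative sketch (not part of the original source):
#   f <- filter.select(filter.number = 2, family = "DaubExPhase")
#   f$name      # "Daub cmpct on ext. phase N=2"
#   sum(f$H^2)  # approximately 1 for these orthonormal filters
# For the complex-valued families (e.g. "LinaMayrand") the returned list
# also carries the high-pass filter in f$G.
#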
"find.parameters" <-
function(data.wd, dwwt, j0, code, tol, Sigma)
{
#
# Preliminaries
#
nlevels <- nlevelsWT(data.wd)
pars <- matrix(0, ncol = 4, nrow = nlevels - 1)
dimnames(pars) <- list(paste("level", 1:(nlevels - 1)), c("p",
"var(re)", "covar(re,im)", "var(im)"))
lower <- c(tol, tol, tol - 1, tol)
upper <- c(1 - tol, 1000, 1 - tol, 1000)
#
# Calculate the covariance matrix of white noise put
# through the DWT:
#
detSigma <- rep(0, nlevels)
Sigma.inv <- array(0, c(nlevels, 2, 2))
for(i in 1:nlevels) {
detSigma[i] <- Sigma[i, 1, 1] * Sigma[i, 2, 2] - Sigma[i, 1,
2]^2
Sigma.inv[i, , ] <- solve(Sigma[i, , ])
}
#
# Now search at each level in turn.
#
for(j in j0:(nlevels - 1)) {
#
# Get a starting point for the
# search over p_j and V_j
#
coefs <- accessD(data.wd, level = j)
re <- Re(coefs)
im <- Im(coefs)
start <- c(min(1 - 10 * tol, 0.5^(j - j0)), var(re), cor(re,
im), var(im))
#
# Find the MML parameter values
#
coefs <- accessD(data.wd, level = j)
dstarvec <- cbind(Re(coefs), Im(coefs))
if(code == "NAG") {
write(c(Sigma[j + 1, 1, 1], Sigma[j + 1, 1, 2], Sigma[
j + 1, 2, 2]), file = "cthresh.maxloglik.data")
write(length(re), file = "cthresh.maxloglik.data",
append = TRUE)
write(t(cbind(re, im)), file = "cthresh.maxloglik.data",
append = TRUE, ncolumns = 2)
write(start, file = "cthresh.maxloglik.start")
write(t(cbind(lower, upper)), file =
"cthresh.maxloglik.start", append = TRUE)
system("./cthresh.maxloglik")
tmp <- scan(file = "cthresh.maxloglik.out", multi.line
= TRUE, quiet = TRUE)
pars[j, ] <- tmp[1:4]
pars[j, 3] <- pars[j, 3] * sqrt(pars[j, 2] * pars[
j, 4])
ifail <- tmp[6]
if(ifail > 0)
warning(paste("At level", j,
"NAG routine e04jyf returned ifail",
ifail))
system("rm cthresh.maxloglik.out cthresh.maxloglik.data cthresh.maxloglik.start"
)
}
else {
if(exists("optim"))
tmp <- optim(start, cthr.negloglik, method =
"L-BFGS-B", lower = lower,
upper = upper, dstarvec = dstarvec, Sigma =
Sigma[j + 1, , ], Sigma.inv = Sigma.inv[
j + 1, , ], twopirtdetS = 2 * pi * sqrt(
detSigma[j + 1]), code = code)$par
else
tmp <- nlminb(start, cthr.negloglik, lower = lower,
upper = upper, dstarvec = dstarvec, Sigma =
Sigma[j + 1, , ], Sigma.inv = Sigma.inv[
j + 1, , ], twopirtdetS = 2 * pi * sqrt(
detSigma[j + 1]), code = code)$parameters
pars[j, ] <- tmp
pars[j, 3] <- pars[j, 3] * sqrt(pars[j, 2] * pars[
j, 4])
}
}
invisible(list(pars = pars, Sigma = Sigma))
}
"make.dwwt" <-
function(nlevels, filter.number = 3.1, family = "LinaMayrand")
{
#
# Given a choice of wavelet and number of
# resolution levels, compute the distinct
# elements of diag(WW^T).
#
zero.wd <- wd(rep(0, 2^nlevels), filter.number = filter.number, family
= family)
dwwt <- rep(0, nlevels)
tmp.wd <- putD(zero.wd, v = 1, level = 0)
tmp <- Conj(wr(tmp.wd))
#
# tmp contains the row of W which gives the mother wavelet
# coefficient. Need Conj() as the inverse DWT corresponds to
# Conj(W^T). Now get the corresponding element of diag(WW^T)
# by summing the squared elements of tmp.
#
# Then repeat for each resolution level.
#
dwwt[1] <- sum(tmp * tmp)
for(lev in 1:(nlevels - 1)) {
tmp.wd <- putD(zero.wd, v = c(1, rep(0, 2^lev - 1)), level =
lev)
tmp <- Conj(wr(tmp.wd))
dwwt[lev + 1] <- sum(tmp * tmp)
}
return(dwwt)
}
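#
# Illustrative sketch (not part of the original source):
#   dwwt <- make.dwwt(nlevels = 5)
#   length(dwwt)   # 5: one entry per resolution level
# These are the level-wise diagonal elements of W W^T that cthresh uses to
# build the covariance matrices of the complex wavelet coefficients.
#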
"odds.matrix.mult" <-
function(coef, mat)
{
return(t(coef) %*% mat %*% coef)
}
"test.dataCT" <-
function(type = "ppoly", n = 512, signal = 1, rsnr = 7, plotfn = FALSE)
{
x <- seq(0., 1., length = n + 1)[1:n]
if(type == "ppoly") {
y <- rep(0., n)
xsv <- (x <= 0.5)
y[xsv] <- -16. * x[xsv]^3. + 12. * x[xsv]^2.
xsv <- (x > 0.5) & (x <= 0.75)
y[xsv] <- (x[xsv] * (16. * x[xsv]^2. - 40. * x[xsv] + 28.))/
3. - 1.5
xsv <- x > 0.75
y[xsv] <- (x[xsv] * (16. * x[xsv]^2. - 32. * x[xsv] + 16.))/
3.
}
else if(type == "blocks") {
t <- c(0.1, 0.13, 0.15, 0.23, 0.25, 0.4, 0.44,
0.65, 0.76, 0.78, 0.81)
h <- c(4., -5., 3., -4., 5., -4.2, 2.1, 4.3,
-3.1, 2.1, -4.2)
y <- rep(0., n)
for(i in seq(1., length(h))) {
y <- y + (h[i] * (1. + sign(x - t[i])))/2.
}
}
else if(type == "bumps") {
t <- c(0.1, 0.13, 0.15, 0.23, 0.25, 0.4, 0.44,
0.65, 0.76, 0.78, 0.81)
h <- c(4., 5., 3., 4., 5., 4.2, 2.1, 4.3,
3.1, 5.1, 4.2)
w <- c(0.005, 0.005, 0.006, 0.01, 0.01, 0.03,
0.01, 0.01, 0.005, 0.008, 0.005)
y <- rep(0, n)
for(j in 1:length(t)) {
y <- y + h[j]/(1. + abs((x - t[j])/w[j]))^4.
}
}
else if(type == "heavi")
y <- 4. * sin(4. * pi * x) - sign(x - 0.3) -
sign(0.72 - x)
else if(type == "doppler") {
eps <- 0.05
y <- sqrt(x * (1. - x)) * sin((2. * pi * (1. + eps))/(x + eps))
}
else {
cat(c("test.dataCT: unknown test function type", type, "\n"))
cat(c("Terminating\n"))
return("NoType")
}
y <- y/sqrt(var(y)) * signal
ynoise <- y + rnorm(n, 0, signal/rsnr)
if(plotfn == TRUE) {
if(type == "ppoly")
mlab <- "Piecewise polynomial"
if(type == "blocks")
mlab <- "Blocks"
if(type == "bumps")
mlab <- "Bumps"
if(type == "heavi")
mlab <- "HeaviSine"
if(type == "doppler")
mlab <- "Doppler"
plot(x, y, type = "l", lwd = 2, main = mlab, ylim = range(
c(y, ynoise)))
lines(x, ynoise, col = 2)
lines(x, y)
}
return(list(x = x, y = y, ynoise = ynoise, type = type, rsnr = rsnr))
}
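#
# Illustrative usage sketch (not part of the original source):
#   tst <- test.dataCT(type = "doppler", n = 256, rsnr = 5)
#   names(tst)   # "x" "y" "ynoise" "type" "rsnr"
#   est <- cthresh(tst$ynoise)
# test.dataCT generates the standard test signals plus Gaussian noise at
# the requested root signal-to-noise ratio.
#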
"wd"<-
function(data, filter.number = 10, family = "DaubLeAsymm", type = "wavelet", bc
= "periodic", verbose = FALSE, min.scale = 0, precond = TRUE)
{
if(verbose == TRUE)
cat("wd: Argument checking...")
if(!is.atomic(data))
stop("Data is not atomic")
DataLength <- length(data) #
#
# Check that we have a power of 2 data elements
#
nlevels <- nlevelsWT(data)
if(is.na(nlevels)) stop("Data length is not power of two") #
#
# Check for correct type
#
if(type != "wavelet" && type != "station")
stop("Unknown type of wavelet decomposition")
if(type == "station" && bc != "periodic") stop(
"Can only do periodic boundary conditions with station"
) #
#
# Select the appropriate filter
#
if(verbose == TRUE)
cat("...done\nFilter...")
if(bc != "interval") filter <- filter.select(filter.number =
filter.number, family = family) #
#
# Build the first/last database
#
if(verbose == TRUE)
cat("...selected\nFirst/last database...")
fl.dbase <- first.last(LengthH = length(filter$H), DataLength =
DataLength, type = type, bc = bc) #
#
#
# Check if we are doing "wavelets on the interval". If so, do it!
#
if(bc == "interval") {
ans <- wd.int(data = data, preferred.filter.number =
filter.number, min.scale = min.scale, precond = precond
)
fl.dbase <- first.last(LengthH = length(filter$H), DataLength
= DataLength, type = type, bc = bc, current.scale =
min.scale) #
filter <- list(name = paste("CDV", filter.number, sep = ""),
family = "CDV", filter.number = filter.number)
l <- list(transformed.vector = ans$transformed.vector,
current.scale = ans$current.scale, filters.used = ans$
filters.used, preconditioned = ans$preconditioned, date
= ans$date, nlevels = IsPowerOfTwo(length(ans$
transformed.vector)), fl.dbase = fl.dbase, type = type,
bc = bc, filter = filter)
class(l) <- "wd"
return(l)
}
#
# Put in the data
#
C <- rep(0, fl.dbase$ntotal)
C[1:DataLength] <- data #
if(verbose == TRUE)
error <- 1
else error <- 0
if(verbose == TRUE) cat("built\n") #
#
# Compute the decomposition
#
if(verbose == TRUE)
cat("Decomposing...\n")
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary condition")
ntype <- switch(type,
wavelet = 1,
station = 2)
if(is.null(filter$G)) {
wavelet.decomposition <- .C("wavedecomp",
C = as.double(C),
D = as.double(rep(0, fl.dbase$ntotal.d)),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
nlevels = as.integer(nlevels),
firstC = as.integer(fl.dbase$first.last.c[, 1]),
lastC = as.integer(fl.dbase$first.last.c[, 2]),
offsetC = as.integer(fl.dbase$first.last.c[, 3]),
firstD = as.integer(fl.dbase$first.last.d[, 1]),
lastD = as.integer(fl.dbase$first.last.d[, 2]),
offsetD = as.integer(fl.dbase$first.last.d[, 3]),
ntype = as.integer(ntype),
nbc = as.integer(nbc),
error = as.integer(error), PACKAGE = "wavethresh")
}
else {
wavelet.decomposition <- .C("comwd",
CR = as.double(Re(C)),
CI = as.double(Im(C)),
LengthC = as.integer(fl.dbase$ntotal),
DR = as.double(rep(0, fl.dbase$ntotal.d)),
DI = as.double(rep(0, fl.dbase$ntotal.d)),
LengthD = as.integer(fl.dbase$ntotal.d),
HR = as.double(Re(filter$H)),
HI = as.double( - Im(filter$H)),
GR = as.double(Re(filter$G)),
GI = as.double( - Im(filter$G)),
LengthH = as.integer(length(filter$H)),
nlevels = as.integer(nlevels),
firstC = as.integer(fl.dbase$first.last.c[, 1]),
lastC = as.integer(fl.dbase$first.last.c[, 2]),
offsetC = as.integer(fl.dbase$first.last.c[, 3]),
firstD = as.integer(fl.dbase$first.last.d[, 1]),
lastD = as.integer(fl.dbase$first.last.d[, 2]),
offsetD = as.integer(fl.dbase$first.last.d[, 3]),
ntype = as.integer(ntype),
nbc = as.integer(nbc),
error = as.integer(error), PACKAGE = "wavethresh")
}
if(verbose == TRUE)
cat("done\n")
error <- wavelet.decomposition$error
if(error != 0) {
cat("Error ", error, " occured in wavedecomp\n")
stop("Error")
}
if(is.null(filter$G)) {
l <- list(C = wavelet.decomposition$C, D =
wavelet.decomposition$D, nlevels =
nlevelsWT(wavelet.decomposition), fl.dbase = fl.dbase,
filter = filter, type = type, bc = bc, date = date())
}
else {
l <- list(C = complex(real = wavelet.decomposition$CR, imaginary =
wavelet.decomposition$CI), D = complex(real =
wavelet.decomposition$DR, imaginary = wavelet.decomposition$DI
), nlevels = nlevelsWT(wavelet.decomposition), fl.dbase =
fl.dbase, filter = filter, type = type, bc = bc, date
= date())
}
class(l) <- "wd"
return(l)
}
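#
# Illustrative usage sketch (not part of the original source):
#   y <- rnorm(64)
#   ywd <- wd(y, filter.number = 2, family = "DaubExPhase")
#   accessD(ywd, level = 4)   # coefficients at the second-finest level
#   all.equal(wr(ywd), y)     # wr inverts the transform
#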
"wr.wd"<-
function(wd, start.level = 0, verbose = FALSE, bc = wd$bc, return.object = FALSE,
filter.number = wd$filter$filter.number, family = wd$filter$family, ...)
{
if(IsEarly(wd)) {
ConvertMessage()
stop()
}
if(verbose == TRUE) cat("Argument checking...") #
#
# Check class of wd
#
if(verbose == TRUE)
cat("Argument checking\n")
ctmp <- class(wd)
if(is.null(ctmp))
stop("wd has no class")
else if(ctmp != "wd")
stop("wd is not of class wd")
if(start.level < 0)
stop("start.level must be nonnegative")
if(start.level >= nlevelsWT(wd))
stop("start.level must be less than the number of levels")
if(is.null(wd$filter$filter.number))
stop("NULL filter.number for wd")
if(bc != wd$bc)
warning("Boundary handling is different to original")
if(wd$type == "station")
stop("Use convert to generate wst object and then AvBasis or InvBasis"
)
if(wd$bc == "interval") {
warning("All optional arguments ignored for \"wavelets on the interval\" transform"
)
return(wr.int(wd))
}
type <- wd$type
filter <- filter.select(filter.number = filter.number, family = family)
LengthH <- length(filter$H) #
#
# Build the reconstruction first/last database
#
if(verbose == TRUE)
cat("...done\nFirst/last database...")
r.first.last.c <- wd$fl.dbase$first.last.c[(start.level + 1):(wd$
nlevels + 1), ] #
r.first.last.d <- matrix(wd$fl.dbase$first.last.d[(start.level + 1):(wd$
nlevels), ], ncol = 3)
ntotal <- r.first.last.c[1, 3] + r.first.last.c[1, 2] - r.first.last.c[
1, 1] + 1
names(ntotal) <- NULL
C <- accessC(wd, level = start.level, boundary = TRUE)
C <- c(rep(0, length = (ntotal - length(C))), C)
Nlevels <- nlevelsWT(wd)- start.level
error <- 0 #
#
# Load object code
#
if(verbose == TRUE)
cat("...built\n")
if(verbose == TRUE) {
cat("Reconstruction...")
error <- 1
}
ntype <- switch(type,
wavelet = 1,
station = 2)
if(is.null(ntype))
stop("Unknown type of decomposition")
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary handling")
if(!is.complex(wd$D)) {
wavelet.reconstruction <- .C("waverecons",
C = as.double(C),
D = as.double(wd$D),
H = as.double(filter$H),
LengthH = as.integer(LengthH),
nlevels = as.integer(Nlevels),
firstC = as.integer(r.first.last.c[, 1]),
lastC = as.integer(r.first.last.c[, 2]),
offsetC = as.integer(r.first.last.c[, 3]),
firstD = as.integer(r.first.last.d[, 1]),
lastD = as.integer(r.first.last.d[, 2]),
offsetD = as.integer(r.first.last.d[, 3]),
ntype = as.integer(ntype),
nbc = as.integer(nbc),
error = as.integer(error), PACKAGE = "wavethresh")
}
else {
wavelet.reconstruction <- .C("comwr",
CR = as.double(Re(C)),
CI = as.double(Im(C)),
LengthC = as.integer(length(C)),
DR = as.double(Re(wd$D)),
DI = as.double(Im(wd$D)),
LengthD = as.integer(length(wd$D)),
HR = as.double(Re(filter$H)),
HI = as.double(Im(filter$H)),
GR = as.double(Re(filter$G)),
GI = as.double(Im(filter$G)),
LengthH = as.integer(LengthH),
nlevels = as.integer(Nlevels),
firstC = as.integer(r.first.last.c[, 1]),
lastC = as.integer(r.first.last.c[, 2]),
offsetC = as.integer(r.first.last.c[, 3]),
firstD = as.integer(r.first.last.d[, 1]),
lastD = as.integer(r.first.last.d[, 2]),
offsetD = as.integer(r.first.last.d[, 3]),
ntype = as.integer(ntype),
nbc = as.integer(nbc),
error = as.integer(error), PACKAGE = "wavethresh")
}
if(verbose == TRUE)
cat("done\n")
error <- wavelet.reconstruction$error
if(error != 0) {
cat("Error code returned from waverecons: ", error, "\n")
stop("waverecons returned error")
}
fl.dbase <- wd$fl.dbase
if(!is.complex(wd$D)) {
l <- list(C = wavelet.reconstruction$C, D =
wavelet.reconstruction$D, fl.dbase = fl.dbase, nlevels
= nlevelsWT(wd), filter = filter, type = type, bc = bc,
date = date())
}
else {
l <- list(C = complex(real = wavelet.reconstruction$CR, imaginary =
wavelet.reconstruction$CI), D = complex(real =
wavelet.reconstruction$DR, imaginary = wavelet.reconstruction$
DI), fl.dbase = fl.dbase, nlevels = nlevelsWT(wd), filter
= filter, type = type, bc = bc, date = date())
}
class(l) <- "wd"
if(return.object == TRUE)
return(l)
else return(accessC(l))
stop("Shouldn't get here\n")
}
"wst"<-
function(data, filter.number = 10, family = "DaubLeAsymm", verbose = FALSE)
{
if(verbose == TRUE)
cat("Argument checking...")
DataLength <- length(data) #
#
# Check that we have a power of 2 data elements
#
nlevels <- log(DataLength)/log(2)
if(round(nlevels) != nlevels)
stop("The length of data is not a power of 2") #
if(verbose == TRUE) {
cat("There are ", nlevels, " levels\n")
}
#
# Select the appropriate filter
#
if(verbose == TRUE)
cat("...done\nFilter...")
filter <- filter.select(filter.number = filter.number, family = family)
#
#
# Compute the decomposition
#
if(verbose == TRUE)
cat("Decomposing...\n")
newdata <- c(rep(0, DataLength * nlevels), data)
Carray <- newdata
error <- 0 #
#
# See whether we are using complex wavelets
#
if(is.null(filter$G)) {
wavelet.station <- .C("wavepackst",
Carray = as.double(Carray),
newdata = as.double(newdata),
DataLength = as.integer(DataLength),
levels = as.integer(nlevels),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
error = as.integer(error), PACKAGE = "wavethresh")
}
else {
wavelet.station <- .C("comwst",
CaR = as.double(Re(Carray)),
CaI = as.double(Im(Carray)),
newdataR = as.double(Re(newdata)),
newdataI = as.double(Im(newdata)),
DataLength = as.integer(DataLength),
levels = as.integer(nlevels),
HR = as.double(Re(filter$H)),
HI = as.double( - Im(filter$H)),
GR = as.double(Re(filter$G)),
GI = as.double( - Im(filter$G)),
LengthH = as.integer(length(filter$H)),
error = as.integer(error), PACKAGE = "wavethresh")
}
if(wavelet.station$error != 0)
stop(paste("Memory error in wavepackst (or comwst). Code ",
wavelet.station))
if(is.null(filter$G)) {
wpm <- matrix(wavelet.station$newdata, ncol = DataLength, byrow
= TRUE)
Carray <- matrix(wavelet.station$Carray, ncol = DataLength,
byrow = TRUE)
}
else {
newdata <- complex(real = wavelet.station$newdataR, imaginary =
wavelet.station$newdataI)
Carray <- complex(real = wavelet.station$CaR, imaginary =
wavelet.station$CaI)
wpm <- matrix(newdata, ncol = DataLength, byrow = TRUE)
Carray <- matrix(Carray, ncol = DataLength, byrow = TRUE)
}
wp <- list(wp = wpm, Carray = Carray, nlevels = nlevels, filter =
filter, date = date())
class(wp) <- "wst"
wp
}
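#
# Illustrative usage sketch (not part of the original source):
#   y <- rnorm(64)
#   ywst <- wst(y, filter.number = 2, family = "DaubExPhase")
#   all.equal(AvBasis(ywst), y)   # averaging over all shifted bases recovers y
# wst computes the packet-ordered non-decimated (stationary) wavelet
# transform used by cthresh when TI = TRUE.
#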
"AutoBasis"<-
function(wp, verbose = FALSE, zilchtol = 1e-08,entropy = Shannon.entropy)
{
if (!inherits(wp, "wp")) {
stop("Can only operate on wavelet packet objects")
}
if(IsEarly(wp)) {
ConvertMessage()
stop()
}
#
#
# Including the original data set there are nlevels levels. Labelled
# 0,...,nlevels-1. Level nlevels-1 is the original data set.
#
nlevels <- nlevelsWT(wp)
for(i in 1:(nlevels - 1)) {
NPBaseLev <- 2^(nlevels - i)
PKLength <- 2^i
if(verbose == TRUE) {
cat("Base level is ", i)
cat(" Number of packets is ", NPBaseLev, "\n")
cat(" Packet Length is ", PKLength, "\n")
}
scan()
for(j in 0:(NPBaseLev - 1)) {
p1 <- getpacket(wp, level = (i - 1), index = 2 * j)
p2 <- getpacket(wp, level = (i - 1), index = 2 * j + 1)
p <- getpacket(wp, level = i, index = j)
if(verbose == TRUE) {
cat("Comparing: (", i, ",", j, ") with ")
cat("(", (i - 1), ",", 2 * j, ") + (", (i - 1),
",", 2 * j + 1, ")\n")
}
if(is.na(p1[1]) || is.na(p2[1])) {
if(verbose == TRUE) {
cat("Upper Level is not eligible for")
cat(" incorporation. Moving on...\n")
}
wp <- putpacket(wp, lev = i, index = j, packet
= rep(NA, length = length(p)))
}
else {
e1 <- entropy(p1, zilchtol)
e2 <- entropy(p2, zilchtol)
e <- entropy(p, zilchtol)
if(verbose == TRUE) {
cat("Entropy:", signif(e, 3), "?", signif(e1,
3), "+", signif(e2, 3), "=", signif(e1 + e2,
3))
}
if(e < e1 + e2 || (is.infinite(e) && is.infinite(e1) &&
is.infinite(e2))) {
wp <- putpacket(wp, level = (i - 1), index =
2 * j, packet = rep(NA, length = PKLength/2
))
wp <- putpacket(wp, level = (i - 1), index =
2 * j + 1, packet = rep(NA, length =
PKLength/2))
}
else {
wp <- putpacket(wp, level = i, index = j,
packet = rep(NA, length = PKLength))
}
if(e < e1 + e2 || (is.infinite(e) && is.infinite(e1) &&
is.infinite(e2)))
cat(" REPLACE\n")
else cat(" KEEP\n")
}
}
}
wp
}
"AvBasis"<-
function(...)
UseMethod("AvBasis")
"AvBasis.wst"<-
function(wst, Ccode = TRUE, ...)
{
nlevels <- nlevelsWT(wst)
if(is.null(wst$filter$G)) {
if(Ccode == FALSE) {
answer <- av.basis(wst, level = nlevels - 1, ix1 = 0,
ix2 = 1, filter = wst$filter)
}
else {
error <- 0
answer <- rep(0, 2^nlevels)
H <- wst$filter$H
aobj <- .C("av_basisWRAP",
wstR = as.double(wst$wp),
wstC = as.double(wst$Carray),
LengthData = as.integer(length(answer)),
level = as.integer(nlevels - 1),
H = as.double(H),
LengthH = as.integer(length(H)),
answer = as.double(answer),
error = as.integer(error), PACKAGE = "wavethresh")
if(aobj$error != 0)
stop(paste("av_basisWRAP returned error code",
aobj$error))
answer <- aobj$answer
}
}
else {
error <- 0
answerR <- answerI <- rep(0, 2^nlevels)
H <- wst$filter$H
G <- wst$filter$G
aobj <- .C("comAB_WRAP",
wstR = as.double(Re(wst$wp)),
wstI = as.double(Im(wst$wp)),
wstCR = as.double(Re(wst$Carray)),
wstCI = as.double(Im(wst$Carray)),
LengthData = as.integer(length(answerR)),
level = as.integer(nlevels - 1),
HR = as.double(Re(H)),
HI = as.double(Im(H)),
GR = as.double(Re(G)),
GI = as.double(Im(G)),
LengthH = as.integer(length(H)),
answerR = as.double(answerR),
answerI = as.double(answerI),
error = as.integer(error), PACKAGE = "wavethresh")
if(aobj$error != 0)
stop(paste("av_basisWRAP returned error code", aobj$
error))
answer <- complex(real = aobj$answerR, imaginary = aobj$answerI)
}
answer
}
"AvBasis.wst2D"<-
function(wst2D, ...)
{
filter <- wst2D$filter
amdim <- dim(wst2D$wst2D)
im <- matrix(0, nrow = amdim[2]/2, ncol = amdim[2]/2)
ans <- .C("SAvBasis",
am = as.double(wst2D$wst2D),
d1 = as.integer(amdim[1]),
d12 = as.integer(amdim[1] * amdim[2]),
TheSmooth = as.double(im),
levj = as.integer(amdim[1]),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
error = as.integer(0), PACKAGE = "wavethresh")
if(ans$error != 0)
stop(paste("Error code was ", ans$error))
matrix(ans$TheSmooth, nrow = amdim[2]/2)
}
"BAYES.THR"<-
function(data, alpha = 0.5, beta = 1, filter.number = 8, family = "DaubLeAsymm",
bc = "periodic", dev = var, j0 = 5, plotfn = FALSE)
{
#
#------------Estimation of C1 and C2 via universal thresholding-----------------
#
ywd <- wd(data, filter.number = filter.number, family = family, bc = bc
)
sigma <- sqrt(dev(accessD(ywd, level = (nlevelsWT(ywd) - 1))))
uvt <- threshold(ywd, policy = "universal", type = "soft", dev = dev,
by.level = FALSE, levels = (nlevelsWT(ywd) - 1), return.threshold = TRUE)
universal <- threshold(ywd, policy = "manual", value = uvt, type =
"soft", dev = dev, levels = j0:(nlevelsWT(ywd) - 1))
nsignal <- rep(0, nlevelsWT(ywd))
sum2 <- rep(0, nlevelsWT(ywd))
for(j in 0:(nlevelsWT(ywd) - 1)) {
coefthr <- accessD(universal, level = j)
nsignal[j + 1] <- sum(abs(coefthr) > 0)
if(nsignal[j + 1] > 0)
sum2[j + 1] <- sum(coefthr[abs(coefthr) > 0]^2)
}
C <- seq(1000, 15000, 50)
l <- rep(0, length(C))
lev <- seq(0, nlevelsWT(ywd) - 1)
v <- 2^( - alpha * lev)
for(i in 1:length(C)) {
l[i] <- 0.5 * sum(- nsignal * (log(sigma^2 + C[i] * v) + 2 * log(pnorm(( - sigma * sqrt(2 * log(2^nlevelsWT(ywd))))/
sqrt(sigma^2 + C[i] * v)))) - sum2/2/(sigma^2 + C[i] * v))
}
C1 <- C[l == max(l)]
tau2 <- C1 * v
p <- 2 * pnorm(( - sigma * sqrt(2 * log(2^nlevelsWT(ywd))))/sqrt(sigma^2 +
tau2))
if(beta == 1)
C2 <- sum(nsignal/p)/nlevelsWT(ywd)
else C2 <- (1 - 2^(1 - beta))/(1 - 2^((1 - beta) * nlevelsWT(ywd))) * sum(
nsignal/p)
pr <- pmin(1, C2 * 2^( - beta * lev))
rat <- tau2/(sigma^2 + tau2) #
#
#----------------------Bayesian Thresholding------------------------------------
#
bayesian <- ywd
for(j in 0:(nlevelsWT(ywd)- 1)) {
coef <- accessD(ywd, level = j)
w <- (1 - pr[j + 1])/pr[j + 1]/sqrt((sigma^2 * rat[j + 1])/tau2[
j + 1]) * exp(( - rat[j + 1] * coef^2)/2/sigma^2)
z <- 0.5 * (1 + pmin(w, 1))
median <- sign(coef) * pmax(0, rat[j + 1] * abs(coef) - sigma *
sqrt(rat[j + 1]) * qnorm(z))
bayesian <- putD(bayesian, level = j, v = median)
}
bayesrec <- wr(bayesian) #
#---------------Resulting plots--------------------------------------------
#
if(plotfn == TRUE) {
x <- seq(1, length(data))/length(data)
par(mfrow = c(1, 2))
plot(x, data, type = "l", ylab = "(a) Data")
plot(x, bayesrec, type = "l", ylab = "(b) BayesThresh", ylim =
c(min(data), max(data)))
}
return(bayesrec)
}
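#
# Illustrative usage sketch (not part of the original source):
#   v <- DJ.EX(n = 512, noisy = TRUE, rsnr = 4)
#   est <- BAYES.THR(v$bumps, alpha = 0.5, beta = 1, plotfn = FALSE)
# BAYES.THR returns the BayesThresh smooth of the noisy data as a vector
# of the same length as the input.
#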
"BMdiscr"<-
function(BP)
{
dm <- lda(x = BP$BasisMatrix, grouping = BP$groups) #
BMd <- list(BP = BP, dm = dm)
}
"Best1DCols"<-
function(w2d, mincor = 0.7)
{
m <- w2d$m
level <- w2d$level
pktix <- w2d$pktix
nbasis <- length(level)
corvec <- rep(0, nbasis)
#
# Note: we don't calculate the first one, since the
# first basis function is a constant, and so we know
# the correlation will be zero
#
for(i in 2:nbasis) {
corvec[i] <- cor(m[, i], w2d$groups)
}
corvec <- abs(corvec)
sv <- corvec > mincor
if (sum(sv) < 2)
stop("Not enough variables. Decrease mincor")
m <- m[, sv]
level <- level[sv]
pktix <- pktix[sv]
corvec <- corvec[sv]
sl <- rev(sort.list(corvec))
l <- list(nlevels = nlevelsWT(w2d), BasisMatrix = m[, sl], level = level[
sl], pkt = pktix[sl], basiscoef = corvec[sl], groups = w2d$groups)
class(l) <- "BP"
l
}
"CWCV"<-
function(ynoise, ll, x = 1:length(ynoise), filter.number = 10, family =
"DaubLeAsymm", thresh.type = "soft", tol = 0.01, maxits=500,
verbose = 0, plot.it
= TRUE, interptype = "noise")
{
#
# Switch on verbosity for function calls if necessary
#
if(verbose == 2) CallsVerbose <- TRUE else CallsVerbose <- FALSE
if(verbose == 1)
cat("WaveletCV: Wavelet model building\nThinking ")
n <- length(ynoise)
ywd <- wd(ynoise, filter.number = filter.number, family = family,
verbose = CallsVerbose)
univ.threshold <- threshold(ywd, type = thresh.type, return.threshold
= TRUE, lev = ll:(nlevelsWT(ywd)- 1), verbose = CallsVerbose,
policy = "universal")[1]
if(verbose == 1) {
cat("Universal threshold: ", univ.threshold, "\n")
cat("Now doing universal threshold reconstruction...")
}
yuvtwd <- threshold(ywd, type = thresh.type, lev = ll:(nlevelsWT(ywd)- 1),
verbose = CallsVerbose, policy = "universal")
if(verbose == 1)
cat("done\nNow reconstructing...")
yuvtwr <- wr(yuvtwd, verbose = CallsVerbose)
if(verbose == 1)
cat("done\nNow plotting universal thresholded\n")
if(plot.it == TRUE) {
oldpar <- par(mfrow = c(2, 2))
matplot(x, cbind(ynoise, yuvtwr), type = "l", main =
"Universal Threshold Reconstruction", xlab = "x", col
= c(3, 2), lty = c(3, 2))
}
filter <- filter.select(filter.number = filter.number, family = family)
N <- length(ynoise)
nlevels <- log(N)/log(2)
ssq <- 0
if(verbose > 0)
error <- 1
else error <- 0
if(round(nlevels) != nlevels)
stop("Datalength not power of 2")
fl.dbase <- first.last(length(filter$H), N/2)
C <- rep(0, fl.dbase$ntotal)
D <- rep(0, fl.dbase$ntotal.d)
ntt <- switch(thresh.type,
hard = 1,
soft = 2)
if(is.null(ntt))
stop("Unknown threshold type")
interptype <- switch(interptype,
noise = 1,
normal = 2)
if(is.null(interptype))
stop("Unknown interptype")
bc <- "periodic"
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary conditions")
xvthresh <- 0
if(verbose == 1)
cat("Now optimising cross-validated error estimate\n")
ans <- .C("CWaveletCV",
noisy = as.double(ynoise),
nnoisy = as.integer(N),
univ.threshold = as.double(univ.threshold),
C = as.double(C),
D = as.double(D),
LengthD = as.integer(length(D)),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
levels = as.integer(nlevels),
firstC = as.integer(fl.dbase$first.last.c[, 1]),
lastC = as.integer(fl.dbase$first.last.c[, 2]),
offsetC = as.integer(fl.dbase$first.last.c[, 3]),
firstD = as.integer(fl.dbase$first.last.d[, 1]),
lastD = as.integer(fl.dbase$first.last.d[, 2]),
offsetD = as.integer(fl.dbase$first.last.d[, 3]),
ntt = as.integer(ntt),
ll = as.integer(ll),
nbc = as.integer(nbc),
tol = as.double(tol),
maxits = as.integer(maxits),
xvthresh = as.double(xvthresh),
interptype = as.integer(interptype),
error = as.integer(error), PACKAGE = "wavethresh")
if (ans$error == 1700) {
message("Algorithm not converging (yet).")
message("Maybe increase number of maximum iterations (maxits or cvmaxits)?")
message("Or increase tolerance (tol or cvtol) a bit?")
message("Wanted to achieve tolerance of ", tol,
" but have actually achieved: ", ans$tol)
message("Check levels you are thresholding, especially if length of data set is small. E.g. if n<=16 then default levels argument probably should be changed.")
stop(paste("Maximum number of iterations", maxits, " exceeded."))
}
else if(ans$error != 0) {
cat("Error code ", ans$error, "\n")
stop("There was an error")
}
#
#
# Now do the reconstruction using xvthresh
#
xvwd <- threshold(ywd, policy = "manual", value = ans$xvthresh, type =
thresh.type, lev = ll:(nlevelsWT(ywd)- 1))
xvwddof <- dof(xvwd)
xvwr <- wr(xvwd)
if(plot.it == TRUE)
matplot(x, cbind(ynoise, yuvtwr, xvwr), type = "l", main =
"XV Threshold Reconstruction", xlab = "x", col = c(3, 2,
1))
fkeep <- NULL
xkeep <- NULL
list(x = x, ynoise = ynoise, xvwr = xvwr, yuvtwr = yuvtwr, xvthresh =
ans$xvthresh, uvthresh = univ.threshold, xvdof = xvwddof, uvdof
= dof(yuvtwd), xkeep = xkeep, fkeep = fkeep)
}
"CWavDE"<-
function(x, Jmax, threshold = 0, nout = 100, primary.resolution = 1,
filter.number = 10, family = "DaubLeAsymm", verbose = 0, SF = NULL, WV
= NULL)
{
rx <- range(x)
xout <- rep(0, nout)
fout <- rep(0, nout)
kmin <- 0
kmax <- 0
kminW <- rep(0, Jmax)
kmaxW <- rep(0, Jmax)
xminW <- rep(0, Jmax)
xmaxW <- rep(0, Jmax) #
# Generate the scaling function and the wavelet if they're not supplied
#
if(is.null(SF)) {
if(verbose > 0)
cat("Computing scaling function\n")
SF <- draw.default(filter.number = filter.number, family =
family, plot.it = FALSE, scaling.function = TRUE, enhance = FALSE)
}
if(is.null(WV)) {
if(verbose > 0)
cat("Computing wavelet function\n")
WV <- draw.default(filter.number = filter.number, family =
family, plot.it = FALSE, enhance = FALSE)
}
swv <- support(filter.number = filter.number, family = family) #
error <- 0
ans <- .C("CWavDE",
x = as.double(x),
n = as.integer(length(x)),
minx = as.double(rx[1]),
maxx = as.double(rx[2]),
Jmax = as.integer(Jmax),
threshold = as.double(threshold),
xout = as.double(xout),
fout = as.double(fout),
nout = as.integer(nout),
primary.resolution = as.double(primary.resolution),
SFx = as.double(SF$x),
SFy = as.double(SF$y),
lengthSF = as.integer(length(SF$x)),
WVx = as.double(WV$x),
WVy = as.double(WV$y),
lengthWV = as.integer(length(WV$x)),
kmin = as.integer(kmin),
kmax = as.integer(kmax),
kminW = as.integer(kminW),
kmaxW = as.integer(kmaxW),
xminW = as.double(xminW),
xmaxW = as.double(xmaxW),
phiLH = as.double(swv$phi.lh),
phiRH = as.double(swv$phi.rh),
psiLH = as.double(swv$psi.lh),
psiRH = as.double(swv$psi.rh),
verbose = as.integer(verbose),
error = as.integer(error), PACKAGE = "wavethresh")
if(ans$error != 0)
stop(paste("CWavDE returned error code", ans$error))
l <- list(x = ans$xout, y = ans$fout, sfix = ans$kmin:ans$kmax, wvixmin
= ans$kminW, wvixmax = ans$kmaxW)
l
}
"CanUseMoreThanOneColor"<-
function()
{
#
# In the S version of this code it was possible to interrogate certain
# graphics devices to see how many colors they display.
# Most users these days will be using X11, or quartz or pdf which can
# so this routine is fixed now to return true.
return(TRUE)
}
"ConvertMessage"<-
function()
{
cat("Your wavelet object is from an old release of wavethresh.\n")
cat("Please apply the function convert() to your object.\n")
cat("This will update it to the most up to date release.\n")
cat("e.g. if the name of your wavelet object is \"fred\" then type:\n")
cat("fred <- convert(fred)\n")
}
"Crsswav"<-
function(noisy, value = 1, filter.number = 10, family = "DaubLeAsymm",
thresh.type = "hard", ll = 3)
{
filter <- filter.select(filter.number = filter.number, family = family)
N <- length(noisy)
nlevels <- log(N)/log(2)
ssq <- 0
error <- 0
if(round(nlevels) != nlevels)
stop("Datalength not power of 2")
fl.dbase <- first.last(length(filter$H), N/2)
C <- rep(0, fl.dbase$ntotal)
D <- rep(0, fl.dbase$ntotal.d)
ntt <- switch(thresh.type,
hard = 1,
soft = 2)
if(is.null(ntt))
stop("Unknown threshold type")
bc <- "periodic"
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary conditions")
ans <- .C("Crsswav",
noisy = as.double(noisy),
nnoisy = as.integer(N),
value = as.double(value),
C = as.double(C),
D = as.double(D),
LengthD = as.integer(length(D)),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
levels = as.integer(nlevels),
firstC = as.integer(fl.dbase$first.last.c[, 1]),
lastC = as.integer(fl.dbase$first.last.c[, 2]),
offsetC = as.integer(fl.dbase$first.last.c[, 3]),
firstD = as.integer(fl.dbase$first.last.d[, 1]),
lastD = as.integer(fl.dbase$first.last.d[, 2]),
offsetD = as.integer(fl.dbase$first.last.d[, 3]),
ntt = as.integer(ntt),
ll = as.integer(ll),
nbc = as.integer(nbc),
ssq = as.double(ssq),
error = as.integer(error), PACKAGE = "wavethresh")
if(ans$error != 0) {
cat("Error code ", ans$error, "\n")
stop("There was an error")
}
cat("The answer was ", ans$ssq, "\n")
return(list(ssq = ans$ssq, value = value, type = thresh.type, lev = ll:(
nlevels - 1)))
}
"Cthreshold"<-
function(wd, thresh.type = "soft", value = 0, levels = 3:(nlevelsWT(wd)- 1))
{
D <- wd$D
Dlevels <- nlevelsWT(wd)- 1
error <- 0
ntt <- switch(thresh.type,
hard = 1,
soft = 2)
if(is.null(ntt))
stop("Unknown thresh.type")
nbc <- switch(wd$bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary conditions")
ans <- .C("Cthreshold",
D = as.double(D),
LengthD = as.integer(wd$fl.dbase$ntotal.d),
firstD = as.integer(wd$fl.dbase$first.last.d[, 1]),
lastD = as.integer(wd$fl.dbase$first.last.d[, 2]),
offsetD = as.integer(wd$fl.dbase$first.last.d[, 3]),
Dlevels = as.integer(Dlevels),
ntt = as.integer(ntt),
value = as.double(value),
levels = as.integer(levels),
qlevels = as.integer(length(levels)),
nbc = as.integer(nbc),
error = as.integer(error), PACKAGE = "wavethresh")
if(ans$error != 0) {
stop("Error occurred")
cat("Error code was ", ans$error, "\n")
}
wd$D <- ans$D
wd
}
"DJ.EX"<-
function(n = 1024, signal = 7, rsnr = 7, noisy = FALSE, plotfn = FALSE)
{
x <- seq(1, n)/n
#--------------------Blocks---------------------------------------------------
t <- c(0.1, 0.13, 0.15, 0.23, 0.25, 0.4, 0.44, 0.65, 0.76, 0.78, 0.81)
h1 <- c(4, -5, 3, -4, 5, -4.2, 2.1, 4.3, -3.1, 2.1, -4.2)
blocks <- rep(0, n)
for(i in seq(1, length(h1))) {
blocks <- blocks + (h1[i] * (1 + sign(x - t[i])))/2
}
#--------------------Bumps----------------------------------------------------
h2 <- c(4, 5, 3, 4, 5, 4.2, 2.1, 4.3, 3.1, 5.1, 4.2)
w <- c(0.005, 0.005, 0.006, 0.01, 0.01, 0.03, 0.01, 0.01, 0.005, 0.008,
0.005)
bumps <- rep(0, n)
for(i in seq(1, length(h2))) {
bumps <- bumps + h2[i] * pmax(0, (1 - abs((x - t[i])/w[i])))^4
}
#-------------------HeaviSine-------------------------------------------------
heavi <- 4 * sin(4 * pi * x) - sign(x - 0.3) - sign(0.72 - x)
#--------------------Doppler--------------------------------------------------
eps <- 0.05
doppler <- sqrt(x * (1 - x)) * sin((2 * pi * (1 - eps))/(x + eps))
#------------------------Normalization----------------------------------------
blocks <- blocks/sqrt(var(blocks)) * signal
bumps <- bumps/sqrt(var(bumps)) * signal
heavi <- heavi/sqrt(var(heavi)) * signal
doppler <- doppler/sqrt(var(doppler)) * signal
if(noisy == TRUE) {
values <- list(blocks = blocks + rnorm(n, 0, signal/rsnr),
bumps = bumps + rnorm(n, 0, signal/rsnr), heavi = heavi +
rnorm(n, 0, signal/rsnr), doppler = doppler + rnorm(n,
0, signal/rsnr))
}
else {
values <- list(blocks = blocks, bumps = bumps, heavi = heavi,
doppler = doppler)
}
if(plotfn == TRUE) {
par(mfrow = c(3, 2))
plot(x, values$blocks, type = "l", ylab = "(a) Blocks")
plot(x, values$bumps, type = "l", ylab = "(b) Bumps")
plot(x, values$heavi, type = "l", ylab = "(c) HeaviSine")
plot(x, values$doppler, type = "l", ylab = "(d) Doppler")
}
return(values)
}
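#
# Illustrative usage sketch (not part of the original source):
#   v <- DJ.EX(n = 512, signal = 7, rsnr = 7, noisy = TRUE)
#   names(v)   # "blocks" "bumps" "heavi" "doppler"
#   ywd <- wd(v$blocks)
# DJ.EX returns the four classic Donoho-Johnstone test signals, optionally
# with Gaussian noise added at root signal-to-noise ratio rsnr.
#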
"FullWaveletCV"<-
function(noisy, ll = 3, type = "soft", filter.number = 10, family =
"DaubLeAsymm", tol = 0.01, verbose = 0)
{
noisywd <- wd(noisy, filter.number = filter.number, family = family)
softuv <- threshold(noisywd, levels = ll:(nlevelsWT(noisywd)- 1), type =
"soft", policy = "universal", dev = madmad, return.thresh = TRUE)
H <- filter.select(filter.number = filter.number, family = family)$H
ntt <- switch(type,
hard = 1,
soft = 2)
error <- verbose
xvthresh <- 0
ans <- .C("FullWaveletCV",
noisy = as.double(noisy),
nnoisy = as.integer(length(noisy)),
UniversalThresh = as.double(softuv),
H = as.double(H),
LengthH = as.integer(length(H)),
ntt = as.integer(ntt),
ll = as.integer(ll),
tol = as.double(tol),
xvthresh = as.double(xvthresh),
error = as.integer(error), PACKAGE = "wavethresh")
if(ans$error != 0) {
cat("Error code returned was ", ans$error, "\n")
stop("Error detected from C routine")
}
ans$xvthresh
}
"GenW"<-
function(n = 8, filter.number = 10, family = "DaubLeAsymm", bc = "periodic")
{
z <- rep(0, n)
if(bc == "periodic") {
w <- matrix(0, nrow = n, ncol = n)
for(i in 1:n) {
v <- z
v[i] <- 1
wobj <- wd(v, filter.number = filter.number, family =
family, bc = bc)
w[i, 1] <- accessC(wobj, lev = 0)
w[i, 2:n] <- wobj$D
}
}
else {
w <- NULL
for(i in 1:n) {
v <- z
v[i] <- 1
wobj <- wd(v, filter.number = filter.number, family =
family, bc = bc)
wrow <- c(accessC(wobj, lev = 0, boundary = TRUE), wobj$D)
w <- rbind(w, wrow)
}
}
w
}
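#
# Usage sketch for the function above (illustrative only): with periodic
# boundary conditions the returned matrix comes from an orthonormal wavelet
# transform, so its crossproduct should be numerically the identity, e.g.
#	W <- GenW(n = 16, filter.number = 2, family = "DaubExPhase")
#	max(abs(crossprod(W) - diag(16)))	# expected to be essentially zero (~1e-15)
#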
"GetRSSWST"<-
function(ndata, threshold, levels, family = "DaubLeAsymm", filter.number = 10,
type = "soft", norm = l2norm, verbose = 0, InverseType = "average")
{
thverb <- FALSE
if(verbose > 1)
thverb <- TRUE
if(InverseType != "average" && InverseType != "minent") stop(paste(
"Unknown InverseType: ", InverseType)) #
# Get odds and evens
#
oddsv <- seq(from = 1, to = length(ndata), by = 2)
evensv <- seq(from = 2, to = length(ndata), by = 2)
odata <- ndata[oddsv]
edata <- ndata[evensv] #
#
# Build odd thresholded estimate, then, threshold and rebuild
#
odataWST <- wst(odata, filter.number = filter.number, family = family)
odataWSTt <- threshold.wst(odataWST, levels = levels, policy = "manual",
value = threshold, verbose = thverb)
if(InverseType == "average")
odataWSTr <- AvBasis.wst(odataWSTt) #
else if(InverseType == "minent") {
odataNV <- MaNoVe(odataWSTt)
cat("ODD Node Vector\n")
cat("---------------\n")
print(odataNV)
odataWSTr <- InvBasis.wst(odataWSTt, nv = odataNV)
}
else stop(paste("Unknown InverseType: ", InverseType))
ip <- (odataWSTr[1:(length(odataWSTr) - 1)] + odataWSTr[2:length(
odataWSTr)])/2
ip <- c(ip, (odataWSTr[length(odataWSTr)] + odataWSTr[1])/2) #
#
# Now compute prediction error
#
pe <- norm(ip, edata) #
#
# Now repeat all the above the other way around.
#
#
# Build even thresholded estimate, then, threshold and rebuild
#
edataWST <- wst(edata, filter.number = filter.number, family = family)
edataWSTt <- threshold.wst(edataWST, levels = levels, policy = "manual",
value = threshold, verbose = thverb)
if(InverseType == "average")
edataWSTr <- AvBasis.wst(edataWSTt) #
else if(InverseType == "minent") {
edataNV <- MaNoVe(edataWSTt)
cat("EVEN Node Vector\n")
cat("---------------\n")
print(edataNV)
edataWSTr <- InvBasis.wst(edataWSTt, nv = edataNV)
}
else stop(paste("Unknown InverseType: ", InverseType))
ip <- (edataWSTr[1:(length(edataWSTr) - 1)] + edataWSTr[2:length(
edataWSTr)])/2
ip <- c(ip, (edataWSTr[length(edataWSTr)] + edataWSTr[1])/2) #
#
# Now compute prediction error
#
pe <- (pe + norm(ip, odata))/2
if(verbose != 0) {
cat("For threshold value\n")
print(threshold)
cat("The pe estimate is ", pe, "\n")
}
pe
}
"HaarConcat"<-
function()
{
x1 <- HaarMA(n = 128, order = 1)
x2 <- HaarMA(n = 128, order = 2)
x3 <- HaarMA(n = 128, order = 3)
x4 <- HaarMA(n = 128, order = 4)
c(x1, x2, x3, x4)
}
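#
# Usage sketch: the function above returns a single series of length 512
# built from four concatenated Haar MA processes of orders 1 to 4, e.g.
#	x <- HaarConcat()
#	length(x)	# expected: 512
#	ts.plot(x)
#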
"HaarMA"<-
function(n, sd = 1, order = 5)
{
#
# Generate Haar MA realization
#
# n - number of observations; sd - standard deviation of increments; order - MA order
#
z <- rnorm(n = n + (2^order) - 1, mean = 0, sd = sd)
J <- order
x <- rep(0, n)
for(i in (2^J):(2^(J - 1) + 1))
x <- x + z[i:(n + i - 1)]
for(i in (2^(J - 1)):1)
x <- x - z[i:(n + i - 1)]
x <- x * 2^( - J/2)
return(x)
}
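#
# Usage sketch: simulate and plot a Haar MA process of order 5 with unit
# increment standard deviation, e.g.
#	set.seed(1)
#	y <- HaarMA(n = 256, sd = 1, order = 5)
#	ts.plot(y)
#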
"InvBasis"<-
function(...)
UseMethod("InvBasis")
"InvBasis.wp"<-
function(wp, nvwp, pktlist, verbose = FALSE, ...)
{
nlev <- nlevelsWT(wp)
if(missing(pktlist)) {
pktlist <- print.nvwp(nvwp, printing = FALSE)
if(nlev != nlevelsWT(nvwp)) {
stop("The node vector you supplied cannot have arisen from the wavelet packet object you supplied as they have different numbers of levels"
)
}
}
lpkts <- length(pktlist$level)
ndata <- 2^nlev
cfvc <- rep(0, ndata)
ixvc <- cfvc
counter <- 0
for(i in 1:lpkts) {
lev <- pktlist$level[i]
pkt <- pktlist$pkt[i]
coefs <- getpacket(wp, level = lev, index = pkt)
pklength <- 2^lev
pkleftix <- pkt * pklength + 1
pkrightix <- pkleftix + pklength - 1
cfvc[pkleftix:pkrightix] <- coefs
ixvc[pkleftix:pkrightix] <- counter
if(verbose == TRUE) {
cat("Level: ", lev, "\n")
cat("Packet: ", pkt, "\n")
cat("coefs: ")
print(coefs)
cat("---\n")
cat("Packet length: ", pklength, "\n")
cat("Packet left ix: ", pkleftix, "\n")
cat("Packet right ix: ", pkrightix, "\n")
cat("ixvc: ")
print(ixvc)
cat("---\n")
cat("cfvc: ")
print(cfvc)
cat("---\n")
}
counter <- counter + 1
}
if(verbose == TRUE) {
cat("SWEEPER Stage\n")
}
sweeper <- rle(ixvc)$lengths
mx <- min(sweeper)
while(mx < ndata) {
ix <- ((1:length(sweeper))[sweeper == mx])[1]
csweeper <- cumsum(c(1, sweeper))[1:length(sweeper)]
lix <- sweeper[ix]
rix <- sweeper[ix + 1]
if(lix != rix)
stop(paste(
"wavethresh error: lix and rix are not the same. lix is ",
lix, " rix is ", rix))
if(verbose == TRUE) {
cat("Sweeper: ")
print(sweeper)
cat("Cumsum Sweeper: ")
print(csweeper)
cat("At sweeper index position ", ix, "\n")
cat("Left ix is ", lix, "\n")
cat("Right ix is ", rix, "\n")
cat("Corresponds to ", csweeper[ix], csweeper[ix + 1],
"\n")
}
cfixl <- csweeper[ix]
cfixr <- csweeper[ix + 1]
pklength <- lix
c.in <- cfvc[cfixl:(cfixl + pklength - 1)]
d.in <- cfvc[cfixr:(cfixr + pklength - 1)]
c.out <- conbar(c.in, d.in, wp$filter)
cfvc[cfixl:(cfixr + pklength - 1)] <- c.out
sweeper <- sweeper[ - ix]
sweeper[ix] <- rix + lix
mx <- min(sweeper)
}
cfvc
}
"InvBasis.wst"<-
function(wst, nv, ...)
{
#
#
# Perform an inverse on wst given specification in nv
#
# indexlist is a list of packet indices for access into appropriate levels of
# wst, nrsteps will be the number of reconstruction steps
#
pnv <- print.nv(nv, printing = FALSE)
indexlist <- rev(pnv$indexlist)
rvector <- pnv$rvector
nrsteps <- length(indexlist) #
#
# blevel is the bottom level in the decomposition
#
blevel <- nlevelsWT(nv) - nrsteps #
#
# Now extract the data and put it all in a vector
#
rdata <- getpacket(wst, level = blevel, index = indexlist[1], type =
"C")
ldata <- length(rdata)
D <- getpacket(wst, level = blevel, index = indexlist[1])
rdata <- c(rdata, D)
ldata <- c(ldata, length(D))
for(i in 2:nrsteps) {
D <- getpacket(wst, level = (blevel + i - 1), index = indexlist[
i])
rdata <- c(rdata, D)
ldata <- c(ldata, length(D))
}
error <- 0
invswr <- .C("wavepackrecon",
rdata = as.double(rdata),
ldata = as.integer(ldata),
nrsteps = as.integer(nrsteps),
rvector = as.integer(rvector),
H = as.double(wst$filter$H),
LengthH = as.integer(length(wst$filter$H)),
error = as.integer(error), PACKAGE = "wavethresh")
if(invswr$error != 0)
stop(paste("Error code was ", invswr$error,
" from wavepackrecon"))
return(invswr$rdata)
}
"IsEarly"<-
function(x)
UseMethod("IsEarly")
"IsEarly.default"<-
function(x)
{
return(FALSE)
}
"IsEarly.wd"<-
function(x)
{
if(is.null(x$date))
return(TRUE)
else return(FALSE)
}
"IsPowerOfTwo"<-
function(n)
{
tvec <- (n == trunc(n))
r <- log(n)/log(2)
tvec <- tvec & (r == trunc(r))
r[tvec == FALSE] <- NA
r
}
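#
# Usage sketch: the function above returns log2(n) when n is a power of two
# and NA otherwise, working elementwise, e.g.
#	IsPowerOfTwo(c(4, 7, 16))	# expected: 2 NA 4
#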
"LocalSpec"<-
function(...)
UseMethod("LocalSpec")
"LocalSpec.wd"<-
function(wdS, lsmooth = "none", nlsmooth = FALSE, prefilter = TRUE, verbose = FALSE,
lw.number = wdS$filter$filter.number, lw.family = wdS$filter$family,
nlw.number = wdS$filter$filter.number, nlw.family = wdS$filter$family,
nlw.policy = "LSuniversal", nlw.levels = 0:(nlevelsWT(wdS) - 1), nlw.type
= "hard", nlw.by.level = FALSE, nlw.value = 0, nlw.dev = var, nlw.boundary
	= FALSE, nlw.verbose = FALSE, nlw.cvtol = 0.01, nlw.Q = 0.05,
	nlw.alpha = 0.05, nlw.transform = I, nlw.inverse = I,
debug.spectrum = FALSE, ...)
{
#
#
# Check the class of the object
#
cwd <- class(wdS)
if(is.null(cwd) || cwd != "wd")
stop("Object must be of class wd to perform energy computation"
)
else if(wdS$type != "station")
stop("swd type should be station (nondecimated)")
lnlevels <- nlevelsWT(wdS)
N <- 2^lnlevels
if(verbose == TRUE) cat("Original data length was:", N, "\n") #
#
# Decide whether to do no smoothing, Fourier smoothing or wavelet
# linear smoothing.
#
if(lsmooth == "none") {
#
#
# Just square the coefficients in the wdS object
#
if(verbose == TRUE) cat("Squaring coefficients on level: ")
for(i in (lnlevels - 1):0) {
if(verbose == TRUE)
cat(i, " ")
v <- accessD(wdS, level = i)
if(debug.spectrum == TRUE)
spectrum(v, spans = c(11, 9, 7))
v <- v^2
if(debug.spectrum == TRUE)
spectrum(v, spans = c(11, 9, 7))
wdS <- putD(wdS, level = i, v = v)
}
if(verbose == TRUE)
cat("\n")
}
else if(lsmooth == "Fourier") {
#
# Perform smoothing using Fourier methods.
# For each level take the real cts Fourier transform and smooth
# by removing a proportion of the coefficients and inverting the
# transform.
#
# The amount of smoothing is controlled by the fracsmooth variable
# Initially this is set to 1/2 as the frequencies we want to remove
# are 1/2 to 1. When we move a level up the frequencies we want to
# remove are above 1/4 and so on. Note that smoothing starts at
# level J-2 (not J-1 as these are the frequencies between 1 and 2
# and I'm not sure what to do with these yet).
#
#
if(verbose == TRUE) {
cat("Performing Fourier linear smoothing\n")
cat("Processing level: ")
}
fracsmooth <- 1/2
for(i in (lnlevels - 2):0) {
v <- accessD(wdS, level = i)
if(debug.spectrum == TRUE)
spectrum(v, spans = c(11, 9, 7))
if(verbose == TRUE) cat(i, " ") #
#
# Do prefiltering if necessary. This low-passes the actual coefficients
# so that the cut-off is at the highest frequency of the current
# (Littlewood-Paley) wavelet.
#
if(prefilter == TRUE) {
if(verbose == TRUE)
cat("prefilter\n")
vfft <- rfft(v)
n <- length(vfft)
start <- 1 + n * fracsmooth
if(start <= n)
vfft[max(1, start):n] <- 0
v <- rfftinv(vfft)
if(debug.spectrum == TRUE)
spectrum(v, spans = c(11, 9, 7))
}
#
#
# Square the coefficients!
#
v <- v^2
if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7)
) #
#
# Now carry out the Fourier smoothing.
#
vfft <- rfft(v)
n <- length(vfft)
start <- 1 + n * fracsmooth #
#
# Maybe use something like this to adapt to
# the shape of the wavelet?
# start <- start * 0.77
#
if(start <= n)
vfft[max(1, start):n] <- 0
v <- rfftinv(vfft)
fracsmooth <- fracsmooth/2
if(debug.spectrum == TRUE && i != 0)
spectrum(v, spans = c(11, 9, 7))
wdS <- putD(wdS, level = i, v = v)
}
if(verbose == TRUE)
cat("\nSquaring top level only\n")
v <- accessD(wdS, level = lnlevels - 1)
if(debug.spectrum == TRUE)
spectrum(v, spans = c(11, 9, 7))
v <- v^2
if(debug.spectrum == TRUE)
spectrum(v, spans = c(11, 9, 7))
wdS <- putD(wdS, level = lnlevels - 1, v)
}
else if(lsmooth == "wavelet") {
#
#
# Do LINEAR wavelet smoothing
#
if(verbose == TRUE) {
cat("Performing LINEAR wavelet smoothing\n")
cat("Processing level ")
}
fracsmooth <- 1/2
for(i in 0:(lnlevels - 2)) {
if(verbose == TRUE)
cat(i, " ")
v <- accessD(wdS, level = i) #
#
# Do prefiltering if necessary. This low-passes the actual coefficients
# so that the cut-off is at the highest frequency of the current
# (Littlewood-Paley) wavelet.
#
if(debug.spectrum == TRUE)
spectrum(v, spans = c(11, 9, 7))
if(prefilter == TRUE) {
if(verbose == TRUE)
cat("prefilter\n")
vfft <- rfft(v)
n <- length(vfft)
start <- 1 + n * fracsmooth
if(start <= n)
vfft[max(1, start):n] <- 0
v <- rfftinv(vfft)
if(debug.spectrum == TRUE)
spectrum(v, spans = c(11, 9, 7))
}
#
#
# Square the coefficients
#
v <- v^2 #
#
# Now do the linear wavelet smoothing. This takes each level (i), applies
# the standard discrete wavelet transform and nulls levels higher than
# the one we are at (j>i). The inverse transform is then applied and
# the coefficients restored in the wdS object.
#
if(debug.spectrum == TRUE)
spectrum(v, spans = c(11, 9, 7))
realwd <- wd(v, filter.number = lw.number, family = lw.family)
realwd <- nullevels(realwd, levels = (i + 1):(nlevelsWT(
realwd) - 1))
v <- wr(realwd)
if(debug.spectrum == TRUE && i != 0)
spectrum(v, spans = c(11, 9, 7))
wdS <- putD(wdS, level = i, v = v)
}
if(verbose == TRUE)
cat("\nSquaring top level only\n")
v <- accessD(wdS, level = lnlevels - 1)
if(debug.spectrum == TRUE)
spectrum(v, spans = c(11, 9, 7))
v <- v^2
if(debug.spectrum == TRUE)
spectrum(v, spans = c(11, 9, 7))
wdS <- putD(wdS, level = lnlevels - 1, v)
}
else stop(paste("Unknown lsmooth:", lsmooth)) #
if(nlsmooth == TRUE) {
if(verbose == TRUE) {
cat("Performing non-linear wavelet smoothing\n")
cat("Processing level: ")
}
for(i in ((lnlevels - 1):0)) {
if(verbose == TRUE)
cat(i, " ")
v <- accessD(wdS, level = i)
v <- nlw.transform(v)
vwd <- wd(v, filter.number = nlw.number, family = nlw.family)
vwdt <- threshold(vwd, levels = nlw.levels, type =
nlw.type, policy = nlw.policy, by.level =
nlw.by.level, value = nlw.value, dev = nlw.dev,
boundary = nlw.boundary, verbose = nlw.verbose,
cvtol = nlw.cvtol, Q = nlw.Q, alpha = nlw.alpha
)
v <- wr(vwdt)
v <- nlw.inverse(v)
wdS <- putD(wdS, level = i, v = v)
}
if(verbose == TRUE)
cat("\n")
}
wdS
}
"LocalSpec.wst"<-
function(wst, ...)
{
LocalSpec.wd(convert.wst(wst), ...)
}
"MaNoVe"<-
function(...)
UseMethod("MaNoVe")
"MaNoVe.wp"<-
function(wp, verbose = FALSE, ...)
{
nlevels <- nlevelsWT(wp)
LengthData <- dim(wp$wp)[[2]]
upperctrl <- rep(0, LengthData - 1)
upperl <- upperctrl
firstl <- rev(c(0, cumsum(2^(0:(nlevels - 2)))))
if(verbose == TRUE)
verbose <- 1
error <- 0
tmp <- .C("wpCmnv",
wp = as.double(wp$wp),
LengthData = as.integer(LengthData),
nlevels = as.integer(nlevels),
upperctrl = as.integer(upperctrl),
upperl = as.double(upperl),
firstl = as.integer(firstl),
verbose = as.integer(verbose),
error = as.integer(error), PACKAGE = "wavethresh")
if(tmp$error != 0)
stop(paste("Error condition ", tmp$error,
" reported from wpCmnv")) #
node.list <- vector("list", nlevels)
matchcodes <- c("T", "B")
vlength <- 2^(nlevels - 1) #
#
# Convert C to S
#
firstl <- firstl + 1
for(i in 1:nlevels) {
first <- firstl[i]
sv <- first:(first + vlength - 1)
node.list[[i]]$upperctrl <- matchcodes[tmp$upperctrl[sv]]
node.list[[i]]$upperl <- tmp$upperl[sv]
vlength <- vlength/2
}
node.vector <- list(node.list = node.list, nlevels = nlevels)
class(node.vector) <- "nvwp"
node.vector
}
"MaNoVe.wst"<-
function(wst, entropy = Shannon.entropy, verbose = FALSE, stopper = FALSE, alg = "C", ...)
{
#
# Make a node vector. Use C code rather than the slow S code
#
if(alg == "C") {
if(verbose == TRUE)
cat("Using C code version\n")
nlevels <- nlevelsWT(wst)
# node.vector <- vector("list", nlevels)
# matchcodes <- c("S", "L", "R")
LengthData <- dim(wst$wp)[[2]]
upperctrl <- rep(0, LengthData - 1)
upperl <- upperctrl
firstl <- rev(c(0, cumsum(2^(0:(nlevels - 2)))))
if(verbose == TRUE)
verbose <- 1
error <- 0
tmp <- .C("Cmnv",
wst = as.double(wst$wp),
wstC = as.double(wst$Carray),
LengthData = as.integer(LengthData),
nlevels = as.integer(nlevels),
upperctrl = as.integer(upperctrl),
upperl = as.double(upperl),
firstl = as.integer(firstl),
verbose = as.integer(verbose),
error = as.integer(error), PACKAGE = "wavethresh")
if(tmp$error != 0)
stop(paste("Error condition ", tmp$error,
" reported from Cmnv")) #
node.list <- vector("list", nlevels)
matchcodes <- c("S", "L", "R")
vlength <- 2^(nlevels - 1) #
#
# Convert C to S
#
firstl <- firstl + 1
for(i in 1:nlevels) {
first <- firstl[i]
sv <- first:(first + vlength - 1)
node.list[[i]]$upperctrl <- matchcodes[tmp$upperctrl[sv
]]
node.list[[i]]$upperl <- tmp$upperl[sv]
vlength <- vlength/2
}
node.vector <- list(node.list = node.list, nlevels = nlevels)
}
else {
if(verbose == TRUE)
cat("Using S code version\n")
nlevels <- nlevelsWT(wst)
node.vector <- vector("list", nlevels)
matchcodes <- c("S", "L", "R")
for(i in 0:(nlevels - 1)) {
if(verbose == TRUE)
cat("Lower level: ", i, "\n")
nll <- 2^(nlevels - i)
lowerl <- rep(0, nll)
nul <- nll/2
upperl <- rep(0, nul)
upperctrl <- rep("", nul)
if(verbose == TRUE)
cat("Packets. Lower: ", nll, " Upper ", nul,
"\n")
for(j in 0:(nul - 1)) {
if(verbose == TRUE)
cat("Upper level index: ", j, "\n")
kl <- 2 * j
kr <- 2 * j + 1
mother.entropy <- entropy(getpacket(wst, level
= i + 1, index = j, type = "C"))
if(i == 0) {
daughter.left.entropy <- entropy(c(getpacket(
wst, level = i, index = kl), getpacket(wst,
level = i, index = kl, type = "C")))
daughter.right.entropy <- entropy(c(getpacket(
wst, level = i, index = kr), getpacket(wst,
level = i, index = kr, type = "C")))
}
else {
if(verbose == TRUE)
cat("Left Ent C contrib ", node.vector[[i]]$
upperl[kl + 1], "\n")
daughter.left.entropy <- entropy(getpacket(
wst, level = i, index = kl)) + node.vector[[
i]]$upperl[kl + 1]
if(verbose == TRUE)
cat("Right Ent C contrib ", node.vector[[i
]]$upperl[kr + 1], "\n")
daughter.right.entropy <- entropy(getpacket(
wst, level = i, index = kr)) + node.vector[[
i]]$upperl[kr + 1]
}
if(verbose == TRUE) {
cat("\tMother ent.: ", mother.entropy, "\n")
cat("\tDaug. l .ent: ", daughter.left.entropy,
"\n")
cat("\tDaug. r .ent: ",
daughter.right.entropy, "\n")
}
ents <- c(mother.entropy, daughter.left.entropy,
daughter.right.entropy)
pos <- match(min(ents), ents)
upperctrl[j + 1] <- matchcodes[pos]
upperl[j + 1] <- min(ents)
if(verbose == TRUE)
cat("\tSelected ", upperctrl[j + 1], upperl[j +
1], "\n")
if(stopper == TRUE)
scan()
}
node.vector[[i + 1]] <- list(upperctrl = upperctrl,
upperl = upperl)
if(verbose == TRUE)
print(node.vector)
}
node.vector <- list(node.list = node.vector, nlevels = nlevels)
}
class(node.vector) <- "nv"
node.vector
}
"PsiJ"<-
function(J, filter.number = 10, family = "DaubLeAsymm", tol = 1e-100, OPLENGTH
= 10^7, verbose=FALSE)
{
if (verbose==TRUE)
cat("Computing PsiJ\n")
now <- proc.time()[1:2]
if(J >= 0)
stop("J must be negative integer")
if(J - round(J) != 0)
stop("J must be an integer")
Psiorig <- Psiname(J = J, filter.number = filter.number, family =
family) #
#
# See if matrix already exists. If so, return it
#
if(exists(Psiorig, envir=WTEnv)) {
if (verbose==TRUE)
cat("Returning precomputed version\n")
speed <- proc.time()[1:2] - now
if (verbose==TRUE)
cat("Took ", sum(speed), " seconds\n")
return(get(Psiorig, envir=WTEnv))
}
H <- filter.select(filter.number = filter.number, family = family)$H
wout <- rep(0, OPLENGTH)
rlvec <- rep(0, - J)
error <- 0
answer <- .C("PsiJ",
J = as.integer( - J),
H = as.double(H),
LengthH = as.integer(length(H)),
tol = as.double(tol),
wout = as.double(wout),
lwout = as.integer(length(wout)),
rlvec = as.integer(rlvec),
error = as.integer(error), PACKAGE = "wavethresh")
if(answer$error != 0) {
if(answer$error == 160)
cat("Increase ", OPLENGTH, " to be larger than ",
answer$lwout, "\n")
stop(paste("Error code was ", answer$error))
}
speed <- proc.time()[1:2] - now
if (verbose==TRUE)
cat("Took ", sum(speed), " seconds\n")
m <- vector("list", - J)
lj <- c(0, cumsum(2 * answer$rlvec - 1))
for(j in 1:( - J))
m[[j]] <- answer$wout[(lj[j] + 1):lj[j + 1]]
assign(Psiorig, m, envir=WTEnv)
m
}
"PsiJmat"<-
function(J, filter.number = 10, family = "DaubLeAsymm", OPLENGTH = 10^7)
{
J <- - J
P <- PsiJ( - J, filter.number = filter.number, family = family,
OPLENGTH = OPLENGTH)
nc <- length(P[[J]])
nr <- J
m <- matrix(0, nrow = nr, ncol = nc)
m[J, ] <- P[[J]]
for(j in 1:(J - 1)) {
lj <- length(P[[j]])
nz <- (nc - lj)/2
z <- rep(0, nz)
m[j, ] <- c(z, P[[j]], z)
}
m
}
"Psiname"<-
function(J, filter.number, family)
{
if(J >= 0)
stop("J must be a negative integer")
return(paste("Psi.", - J, ".", filter.number, ".", family, sep = ""))
}
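#
# Usage sketch: the name is assembled directly from the arguments, e.g.
#	Psiname(-2, filter.number = 10, family = "DaubLeAsymm")
#	# expected: "Psi.2.10.DaubLeAsymm"
#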
"ScalingFunction"<-
function(filter.number = 10, family = "DaubLeAsymm", resolution = 4096,
itlevels = 50)
{
if(is.na(IsPowerOfTwo(resolution)))
stop("Resolution must be a power of two")
res <- 4 * resolution #
#
# Select filter and work out some fixed constants
#
H <- filter.select(filter.number = filter.number, family = family)$H
lengthH <- length(H)
ll <- lengthH
v <- rep(0, res) #
#
# Set initial coefficient to 1 in 2nd position on 1st level
#
v[2] <- 1 #
#
# Now iterate the successive filtering operations to build up the scaling
# function. The actual filtering is carried out by the C routine CScalFn.
#
for(it in 1:itlevels) {
ans <- rep(0, res)
z <- .C("CScalFn",
v = as.double(v),
ans = as.double(ans),
res = as.integer(res),
H = as.double(H),
lengthH = as.integer(lengthH), PACKAGE = "wavethresh") #
#
# We only ever take the first half of the result
#
v <- z$ans[1:(res/2)] #
#
# Set all other coefficients equal to zero. (This is because
# rounding errors sometimes cause small values to appear).
#
v[ - ((2^it + 1):(2^it + ll))] <- 0
# plot(seq(from = 0, to = 2 * filter.number - 1, length = ll), v[(
# 2^it + 1):(2^it + ll)], type = "l")
v <- sqrt(2) * v
llbef <- ll
vbef <- v #
#
# Check to see if the next iteration would send the number
# of coefficients over the resolution that we can have.
# Exit the loop if it does.
#
if(2^(it + 1) + lengthH + ll * 2 - 2 > res/2) {
cit <- it
break
}
#
#
# ll is the number of coefficients that are nonzero in
# any particular run. This formula updates ll for next time
# round.
#
ll <- lengthH + ll * 2 - 2 #
#
# Add some zeroes to v to make it the right length.
#
v <- c(v, rep(0, res - length(v)))
}
list(x = seq(from = 0, to = 2 * filter.number - 1, length = llbef), y
= vbef[(2^cit + 1):(2^cit + llbef)])
}
"Shannon.entropy"<-
function(v, zilchtol = 1e-300)
{
vsq <- v^2
if(sum(vsq) < zilchtol)
return(0)
else {
vsq[vsq == 0] <- 1
return( - sum(vsq * log(vsq)))
}
}
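#
# Worked example for the entropy above: it computes -sum(v^2 * log(v^2)),
# treating exact zeros as contributing nothing. For v = c(1, 1)/sqrt(2) each
# squared entry is 0.5, so the result is -2 * 0.5 * log(0.5) = log(2), e.g.
#	Shannon.entropy(c(1, 1)/sqrt(2))	# expected: ~0.6931472
#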
"TOgetthrda1"<-
function(dat, alpha)
{
datsq <- sort(dat^2)
a <- TOonebyone1(datsq, alpha)
if(length(a) == length(datsq))
if(1 - pchisq(datsq[1], 1) < alpha)
ggg <- 0
else ggg <- sqrt(datsq[1])
else ggg <- sqrt(datsq[length(datsq) - length(a) + 1])
return(ggg)
}
"TOgetthrda2"<-
function(dat, alpha)
{
a <- TOonebyone2(dat, alpha)
if(length(a) == length(dat))
if(1 - pchisq(min(dat), 1) < alpha)
ggg <- 0
else ggg <- sqrt(min(dat))
else ggg <- sqrt(max(dat[sort(order(dat)[1:(length(dat) - length(a) + 1
)])]))
return(ggg)
}
"TOkolsmi.chi2"<-
function(dat)
{
n <- length(dat)
return(max(abs(cumsum(dat) - ((1:n) * sum(dat))/n))/sqrt(2 * n))
}
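#
# Worked example: the statistic is the largest absolute deviation of the
# cumulative sum of dat from its linear trend, scaled by sqrt(2 * n); a
# constant sequence therefore gives exactly zero, e.g.
#	TOkolsmi.chi2(c(1, 1, 1, 1))	# expected: 0
#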
"TOonebyone1"<-
function(dat, alpha)
{
i <- length(dat)
cc <- 1 - pchisq(dat[i], 1)^i
while(cc[length(cc)] < alpha && i > 1) {
i <- i - 1
cc <- c(cc, 1 - pchisq(dat[i], 1)^i)
}
return(cc)
}
"TOonebyone2"<-
function(dat, alpha)
{
	crit <- c(seq(0.28, 1.49, by = 0.01), seq(1.5, 2.48, by = 0.02))
alph <- c(0.99999899999999997, 0.999996, 0.99999099999999996,
0.99997899999999995, 0.99995400000000001, 0.99990900000000005,
0.99982899999999997, 0.99969699999999995, 0.99948899999999996,
0.99917400000000001, 0.99871500000000002, 0.99807100000000004,
0.99719199999999997, 0.99602800000000002, 0.99452399999999996,
0.99262300000000003, 0.99026999999999998, 0.98741000000000001,
0.98399499999999995, 0.97997800000000002, 0.97531800000000002,
0.96998300000000004, 0.96394500000000005, 0.95718599999999998,
0.94969400000000004, 0.94146600000000003, 0.93250299999999997,
0.922817, 0.91242299999999998, 0.90134400000000003,
0.88960499999999998, 0.87724000000000002, 0.86428199999999999,
0.85077100000000005, 0.83677500000000005, 0.82224699999999995,
0.80732300000000001, 0.79201299999999997, 0.77636300000000003,
0.76041800000000004, 0.74421999999999999, 0.72781099999999999,
0.71123499999999995, 0.69452899999999995, 0.67773499999999998,
0.660887, 0.64401900000000001, 0.62716700000000003,
0.61036000000000001, 0.59362800000000004, 0.57699800000000001,
0.56049499999999997, 0.54414300000000004, 0.52795899999999996,
0.51197000000000004, 0.49619200000000002, 0.48063400000000001,
0.46531800000000001, 0.45025599999999999, 0.43545400000000001,
0.42093000000000003, 0.40668399999999999, 0.39273000000000002,
0.37907200000000002, 0.36571399999999998, 0.35266199999999998,
0.339918, 0.327484, 0.31536399999999998, 0.30355599999999999,
0.29205999999999999, 0.28087400000000001, 0.27000000000000002,
0.259434, 0.24917400000000001, 0.23921999999999999,
0.22956599999999999, 0.22020600000000001, 0.21113999999999999,
0.20236399999999999, 0.19387199999999999, 0.18565799999999999,
0.17771799999999999, 0.17005000000000001, 0.16264400000000001,
0.155498, 0.14860599999999999, 0.141962, 0.13555800000000001,
0.129388, 0.12345200000000001, 0.117742, 0.11225, 0.10697,
0.101896, 0.097028000000000003, 0.092352000000000004,
0.087868000000000002, 0.083568000000000003,
0.079444000000000001, 0.075495000000000007,
0.071711999999999998, 0.068092, 0.064630000000000007,
0.061317999999999998, 0.058152000000000002,
0.055128000000000003, 0.052243999999999999,
0.049487999999999997, 0.046857999999999997,
0.044350000000000001, 0.041959999999999997,
0.039682000000000002, 0.037513999999999999, 0.035448, 0.033484,
0.031618, 0.029842, 0.028153999999999998, 0.026551999999999999,
0.02503, 0.023588000000000001, 0.022218000000000002,
0.019689999999999999, 0.017422, 0.015389999999999999,
0.013573999999999999, 0.011952000000000001, 0.010508,
0.0092230000000000003, 0.0080829999999999999,
0.0070720000000000002, 0.0061770000000000002,
0.0053880000000000004, 0.0046909999999999999, 0.004078,
0.0035400000000000002, 0.003068, 0.0026540000000000001,
0.0022929999999999999, 0.001977, 0.0017030000000000001,
0.001464, 0.001256, 0.0010759999999999999,
0.00092100000000000005, 0.00078700000000000005,
0.00067100000000000005, 0.00057200000000000003, 0.000484,
0.00041199999999999999, 0.00035, 0.00029500000000000001,
0.00025000000000000001, 0.00021000000000000001,
0.00017799999999999999, 0.00014799999999999999, 0.000126,
0.00010399999999999999, 8.7999999999999998e-05,
7.3999999999999996e-05, 6.0000000000000002e-05, 5.1e-05,
4.1999999999999998e-05, 3.4999999999999997e-05,
3.0000000000000001e-05, 2.4000000000000001e-05,
2.0000000000000002e-05, 1.5999999999999999e-05,
1.2999999999999999e-05, 1.1e-05, 9.0000000000000002e-06)
if(alpha < min(alph) || alpha > max(alph))
stop("alpha =", alpha, "is out of range")
ind <- match(TRUE, alpha > alph)
critval <- crit[ind - 1] + ((alph[ind - 1] - alpha) * (crit[ind] - crit[
ind - 1]))/(alph[ind - 1] - alph[ind])
i <- length(dat)
cc <- TOkolsmi.chi2(dat)
while(cc[length(cc)] > critval && i > 1) {
i <- i - 1
cc <- c(cc, TOkolsmi.chi2(dat[sort(order(dat)[1:i])]))
}
return(cc)
}
"TOshrinkit"<-
function(coeffs, thresh)
{
sign(coeffs) * pmax(abs(coeffs) - thresh, 0)
}
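#
# Worked example: soft shrinkage moves every coefficient towards zero by the
# threshold and sets anything smaller than the threshold to zero, e.g.
#	TOshrinkit(c(-3, 0.5, 2), thresh = 1)	# expected: -2 0 1
#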
"TOthreshda1"<-
function(ywd, alpha = 0.05, verbose = FALSE, return.threshold = FALSE)
{
if(verbose)
cat("Argument checking\n")
ctmp <- class(ywd)
if(is.null(ctmp))
stop("ywd has no class")
else if(ctmp != "wd")
stop("ywd is not of class wd")
if(alpha <= 0 || alpha >= 1)
stop("alpha out of range")
ans <- ywd
n <- length(ywd$D)
nlev <- log(n + 1, base = 2) - 1
i <- nlev
iloc <- 1
while(i >= 0) {
gg <- ywd$D[iloc:(iloc + 2^i - 1)]
thresh <- TOgetthrda1(gg, alpha)
if(verbose) {
cat(paste("At level ", i, ", the threshold is ", thresh,
"\n", sep = ""))
}
if(return.threshold)
if(i == nlev)
rt <- thresh
else rt <- c(thresh, rt)
else ans$D[iloc:(iloc + 2^i - 1)] <- TOshrinkit(ywd$D[iloc:(
iloc + 2^i - 1)], thresh)
iloc <- iloc + 2^i
i <- i - 1
}
if(return.threshold)
return(rt)
else return(ans)
}
"TOthreshda2"<-
function(ywd, alpha = 0.05, verbose = FALSE, return.threshold = FALSE)
{
if(verbose)
cat("Argument checking\n")
ctmp <- class(ywd)
if(is.null(ctmp))
stop("ywd has no class")
else if(ctmp != "wd")
stop("ywd is not of class wd")
	if(alpha <= 9e-06 || alpha >= 0.999999)
stop("alpha out of range")
ans <- ywd
n <- length(ywd$D)
nlev <- log(n + 1, base = 2) - 1
i <- nlev
iloc <- 1
while(i >= 0) {
gg <- ywd$D[iloc:(iloc + 2^i - 1)]
thresh <- TOgetthrda2(gg^2, alpha)
if(verbose) {
cat(paste("At level ", i, ", the threshold is ", thresh,
"\n", sep = ""))
}
if(return.threshold)
if(i == nlev)
rt <- thresh
else rt <- c(thresh, rt)
else ans$D[iloc:(iloc + 2^i - 1)] <- TOshrinkit(ywd$D[iloc:(
iloc + 2^i - 1)], thresh)
iloc <- iloc + 2^i
i <- i - 1
}
if(return.threshold)
return(rt)
else return(ans)
}
"WaveletCV"<-
function(ynoise, x = 1:length(ynoise), filter.number = 10, family =
"DaubLeAsymm", thresh.type = "soft", tol = 0.01, verbose = 0, plot.it
= TRUE, ll = 3)
{
#
# Switch on verbosity for function calls if necessary
#
if(verbose == 2) CallsVerbose <- TRUE else CallsVerbose <- FALSE
if(verbose == 1)
cat("WaveletCV: Wavelet model building\nThinking ")
n <- length(ynoise)
ywd <- wd(ynoise, filter.number = filter.number, family = family,
verbose = CallsVerbose)
univ.threshold <- threshold(ywd, type = thresh.type, return.threshold
= TRUE, lev = ll:(nlevelsWT(ywd) - 1), verbose = CallsVerbose,
policy="universal")[1]
if(verbose == 1) {
cat("Universal threshold: ", univ.threshold, "\n")
cat("Now doing universal threshold reconstruction...")
}
yuvtwd <- threshold(ywd, type = thresh.type, lev = ll:(nlevelsWT(ywd) - 1),
verbose = CallsVerbose, policy="universal")
if(verbose == 1)
cat("done\nNow reconstructing...")
yuvtwr <- wr(yuvtwd, verbose = CallsVerbose)
if(verbose == 1)
cat("done\nNow plotting universal thresholded\n")
if(plot.it == TRUE) {
oldpar <- par(mfrow = c(2, 2))
matplot(x, cbind(ynoise, yuvtwr), type = "l", main =
"Universal Threshold Reconstruction", xlab = "x", col
= c(3, 2), lty = c(3, 2))
}
if(verbose == 1)
cat("Now optimising cross-validated error estimate\n")
	R <- 0.61803399
C <- 1 - R
ax <- 0
bx <- univ.threshold/2
cx <- univ.threshold
x0 <- ax
x3 <- cx
if(abs(cx - bx) > abs(bx - ax)) {
x1 <- bx
x2 <- bx + C * (cx - bx)
}
else {
x2 <- bx
x1 <- bx - C * (bx - ax)
}
fa <- rsswav(ynoise, value = ax, filter.number = filter.number, family
= family, thresh.type = thresh.type, ll = ll)$ssq
fb <- rsswav(ynoise, value = bx, filter.number = filter.number, family
= family, thresh.type = thresh.type, ll = ll)$ssq
fc <- rsswav(ynoise, value = cx, filter.number = filter.number, family
= family, thresh.type = thresh.type, ll = ll)$ssq
f1 <- rsswav(ynoise, value = x1, filter.number = filter.number, family
= family, thresh.type = thresh.type, ll = ll)$ssq
f2 <- rsswav(ynoise, value = x2, filter.number = filter.number, family
= family, thresh.type = thresh.type, ll = ll)$ssq
xkeep <- c(ax, cx, x1, x2)
fkeep <- c(fa, fc, f1, f2)
if(plot.it == TRUE) {
plot(c(ax, bx, cx), c(fa, fb, fc))
text(c(x1, x2), c(f1, f2), lab = c("1", "2"))
}
cnt <- 3
while(abs(x3 - x0) > tol * (abs(x1) + abs(x2))) {
cat("x0=", x0, "x1=", x1, "x2=", x2, "x3=", x3, "\n")
cat("f1=", f1, "f2=", f2, "\n")
if(f2 < f1) {
x0 <- x1
x1 <- x2
x2 <- R * x1 + C * x3
f1 <- f2
f2 <- rsswav(ynoise, value = x2, filter.number =
filter.number, family = family, thresh.type =
thresh.type, ll = ll)
if(verbose == 2) {
cat("SSQ: ", signif(f2$ssq, 3), " DF: ", f2$df,
"\n")
}
else if(verbose == 1)
cat(".")
f2 <- f2$ssq
xkeep <- c(xkeep, x2)
fkeep <- c(fkeep, f2)
if(plot.it == TRUE)
text(x2, f2, lab = as.character(cnt))
cnt <- cnt + 1
}
else {
x3 <- x2
x2 <- x1
x1 <- R * x2 + C * x0
f2 <- f1
f1 <- rsswav(ynoise, value = x1, filter.number =
filter.number, family = family, thresh.type =
thresh.type, ll = ll)
if(verbose == 2)
cat("SSQ: ", signif(f1$ssq, 3), " DF: ", f1$df,
"\n")
else if(verbose == 1)
cat(".")
f1 <- f1$ssq
xkeep <- c(xkeep, x1)
fkeep <- c(fkeep, f1)
if(plot.it == TRUE)
text(x1, f1, lab = as.character(cnt))
cnt <- cnt + 1
}
}
if(f1 < f2)
tmp <- x1
else tmp <- x2
x1 <- tmp/sqrt(1 - log(2)/log(n))
if(verbose == 1)
cat("Correcting to ", x1, "\n")
else if(verbose == 1)
cat("\n")
xvwd <- threshold(ywd, policy = "manual", value = x1, type =
thresh.type, lev = ll:(nlevelsWT(ywd)- 1))
xvwddof <- dof(xvwd)
xvwr <- wr(xvwd)
if(plot.it == TRUE)
matplot(x, cbind(ynoise, yuvtwr, xvwr), type = "l", main =
"XV Threshold Reconstruction", xlab = "x", col = c(3, 2,
1))
g <- sort.list(xkeep)
xkeep <- xkeep[g]
fkeep <- fkeep[g]
list(x = x, ynoise = ynoise, xvwr = xvwr, yuvtwr = yuvtwr, xvthresh =
x1, uvthresh = univ.threshold, xvdof = xvwddof, uvdof = dof(
yuvtwd), xkeep = xkeep, fkeep = fkeep)
}
"Whistory"<-
function(...)
UseMethod("Whistory")
"Whistory.wst"<-
function(wst, all = FALSE, ...)
{
ntimes <- length(wst$date)
	if(ntimes == 1) {
		cat("This object has not been modified\n")
		return(invisible())
	}
cat("This object has been modified ", ntimes - 1, " times\n")
cat("The date of the last mod was ", wst$date[ntimes], "\n")
cat("That modification was\n")
cat(wst$history[ntimes - 1], "\n")
if(all == TRUE) {
cat("Complete history\n")
cat("Modification dates\n")
for(i in 1:ntimes)
cat(wst$date[i], "\n")
cat("Modification record\n")
for(i in 1:ntimes)
cat(wst$history[i - 1], "\n")
}
invisible()
}
"accessC"<-
function(...)
UseMethod("accessC")
"accessC.mwd"<-
function(mwd, level = nlevelsWT(mwd), ...)
{
#
# Get smoothed data from multiple wavelet structure.
#
ctmp <- class(mwd)
if(is.null(ctmp))
stop("mwd has no class")
else if(ctmp != "mwd")
stop("mwd is not of class mwd")
if(level < 0)
stop("Must have a positive level")
else if(level > nlevelsWT(mwd))
stop("Cannot exceed maximum number of levels")
level <- level + 1
first.last.c <- mwd$fl.dbase$first.last.c
first.level <- first.last.c[level, 1]
last.level <- first.last.c[level, 2]
offset.level <- first.last.c[level, 3]
n <- last.level + 1 - first.level
coeffs <- mwd$C[, (offset.level + 1):(offset.level + n)]
return(coeffs)
}
"accessC.wd"<-
function(wd, level = nlevelsWT(wd), boundary = FALSE, aspect = "Identity", ...)
{
if(IsEarly(wd)) {
ConvertMessage()
stop()
}
ctmp <- class(wd)
if(is.null(ctmp))
stop("wd has no class")
else if(ctmp != "wd")
stop("wd is not of class wd")
if(level < 0)
stop("Must have a positive level")
else if(level > nlevelsWT(wd))
stop(paste("Cannot exceed maximum number of levels", nlevelsWT(wd)
))
if(wd$bc == "interval") {
if(level != wd$current.scale)
stop(paste(
"Requested wd object was decomposed to level ",
wd$current.scale,
" and so for \"wavelets on the interval\" objects I can only show this level for the scaling function coefficients\n"
))
first.level <- wd$fl.dbase$first.last.c[1]
last.level <- wd$fl.dbase$first.last.c[2]
offset.level <- wd$fl.dbase$first.last.c[3]
n <- last.level - first.level + 1
coefs <- wd$transformed.vector[(offset.level + 1 - first.level):
(offset.level + n - first.level)]
}
else {
level <- level + 1
first.last.c <- wd$fl.dbase$first.last.c
first.level <- first.last.c[level, 1]
last.level <- first.last.c[level, 2]
offset.level <- first.last.c[level, 3]
if(boundary == TRUE) {
n <- last.level - first.level + 1
coefs <- wd$C[(offset.level + 1):(offset.level + n)]
}
else {
type <- wd$type
if(type == "wavelet")
n <- 2^(level - 1)
else if(type == "station")
n <- 2^nlevelsWT(wd)
else stop("Unknown type component")
coefs <- wd$C[(offset.level + 1 - first.level):(
offset.level + n - first.level)]
}
}
if(aspect == "Identity")
return(coefs)
else {
fn <- get(aspect)
return(fn(coefs))
}
}
"accessC.wp"<-
function(wp, ...)
{
stop("A wavelet packet object does not have ``levels'' of father wavelet coefficients. Use accessD to obtain levels of father and mother coefficients"
)
}
"accessC.wst"<-
function(wst, level, aspect = "Identity", ...)
{
#
#
# Get all coefficients at a particular level
# First work out how many packets there are at this level
#
nlevels <- nlevelsWT(wst)
if(level < 0)
stop("level must nonnegative")
else if(level > nlevels)
stop(paste("level must be smaller than ", nlevels - 1))
coefs <- wst$Carray[level + 1, ]
if(aspect == "Identity")
return(coefs)
else {
fn <- get(aspect)
return(fn(coefs))
}
}
"accessD"<-
function(...)
UseMethod("accessD")
"accessD.mwd"<-
function(mwd, level, ...)
{
#
# Get wavelet coefficients from multiple wavelet structure
#
ctmp <- class(mwd)
if(is.null(ctmp))
stop("mwd has no class")
else if(ctmp != "mwd")
stop("mwd is not of class mwd")
if(level < 0)
stop("Must have a positive level")
else if(level > (nlevelsWT(mwd) - 1))
stop("Cannot exceed maximum number of levels")
level <- level + 1
first.last.d <- mwd$fl.dbase$first.last.d
first.level <- first.last.d[level, 1]
last.level <- first.last.d[level, 2]
offset.level <- first.last.d[level, 3]
n <- last.level + 1 - first.level
coeffs <- mwd$D[, (offset.level + 1):(offset.level + n)]
return(coeffs)
}
"accessD.wd"<-
function(wd, level, boundary = FALSE, aspect = "Identity", ...)
{
if(IsEarly(wd)) {
ConvertMessage()
stop()
}
ctmp <- class(wd)
if(is.null(ctmp))
stop("wd has no class")
else if(ctmp != "wd")
stop("wd is not of class wd")
if(level < 0)
stop("Must have a positive level")
else if(level > (nlevelsWT(wd) - 1))
stop(paste("Cannot exceed maximum number of levels: ", wd$
nlevels - 1))
if(wd$bc == "interval") {
level <- level - wd$current.scale
objname <- deparse(substitute(wd))
if(level < 0)
stop(paste("The wd object: ", objname,
" was only decomposed down to level: ", wd$
current.scale, " Try a larger level"))
if(boundary == TRUE)
stop("There are no boundary elements in a wavelets on the interval transform!"
)
}
level <- level + 1
first.last.d <- wd$fl.dbase$first.last.d
first.level <- first.last.d[level, 1]
last.level <- first.last.d[level, 2]
offset.level <- first.last.d[level, 3]
if(boundary == TRUE) {
n <- last.level - first.level + 1
coefs <- wd$D[(offset.level + 1):(offset.level + n)]
}
else {
type <- wd$type
if(type == "wavelet") {
n <- 2^(level - 1)
if(wd$bc == "interval")
n <- last.level - first.level + 1
}
else if(type == "station")
n <- 2^nlevelsWT(wd)
else stop("Unknown type component")
if(wd$bc != "interval")
coefs <- wd$D[(offset.level + 1 - first.level):(
offset.level + n - first.level)]
else coefs <- wd$transformed.vector[(offset.level + 1 -
first.level):(offset.level + n - first.level)]
}
if(aspect == "Identity")
return(coefs)
else {
fn <- get(aspect)
return(fn(coefs))
}
}
"accessD.wd3D"<-
function(obj, level = nlevelsWT(obj) - 1, block, ...)
{
if(level < 0)
stop(paste("Level cannot be accessed. You tried to access level",
level, ". The minimum is zero"))
else if(level >= nlevelsWT(obj))
stop(paste("Level cannot be accessed. You tried to access level",
level, ". The maximum level is", nlevelsWT(obj) - 1))
halfsize <- 2^level
size <- dim(obj$a)[1]
GHH <- HGH <- GGH <- HHG <- GHG <- HGG <- GGG <- array(0, dim = rep(
halfsize, 3))
answer <- .C("getARRel",
Carray = as.double(obj$a),
size = as.integer(size),
level = as.integer(level),
GHH = as.double(GHH),
HGH = as.double(HGH),
GGH = as.double(GGH),
HHG = as.double(HHG),
GHG = as.double(GHG),
HGG = as.double(HGG),
GGG = as.double(GGG), PACKAGE = "wavethresh")
thedim <- rep(halfsize, 3) #
#
# Return HHH if level = 0
#
if(missing(block)) {
if(level == 0)
list(HHH = array(obj$a[1, 1, 1], dim = thedim), GHH =
array(answer$GHH, dim = thedim), HGH = array(
answer$HGH, dim = thedim), GGH = array(answer$
GGH, dim = thedim), HHG = array(answer$HHG, dim
= thedim), GHG = array(answer$GHG, dim =
thedim), HGG = array(answer$HGG, dim = thedim),
GGG = array(answer$GGG, dim = thedim))
else list(GHH = array(answer$GHH, dim = thedim), HGH = array(
answer$HGH, dim = thedim), GGH = array(answer$
GGH, dim = thedim), HHG = array(answer$HHG, dim
= thedim), GHG = array(answer$GHG, dim =
thedim), HGG = array(answer$HGG, dim = thedim),
GGG = array(answer$GGG, dim = thedim))
}
else {
if(level != 0 && block == "HHH")
stop("HHH only exists at level 0")
else return(switch(block,
HHH = array(obj$a[1, 1, 1], dim = thedim),
GHH = array(answer$GHH, dim = thedim),
HGH = array(answer$HGH, dim = thedim),
GGH = array(answer$GGH, dim = thedim),
HHG = array(answer$HHG, dim = thedim),
GHG = array(answer$GHG, dim = thedim),
HGG = array(answer$HGG, dim = thedim),
GGG = array(answer$GGG, dim = thedim)))
}
}
"accessD.wp"<-
function(wp, level, ...)
{
#
#
# Get all coefficients at a particular level
# First work out how many packets there are at this level
#
nlev <- nlevelsWT(wp)
if(level < 0)
stop("level must nonnegative")
else if(level > nlev - 1)
stop(paste("level must be smaller than ", nlev - 1))
npx <- 2^(nlev - level)
return(wp$wp[level + 1, ])
}
"accessD.wpst"<-
function(wpst, level, index, ...)
{
nlev <- nlevelsWT(wpst)
if(level < 0)
stop("Level must be greater than or equal to 0")
else if(level >= nlev)
stop(paste("Level must be less than ", nlev))
nwppkt <- 2^(nlev - level) #
#
# Check that packet index "index" is in range
#
if(index < 0)
stop("index must be greater than or equal to 0")
else if(index >= nwppkt)
stop(paste("index must be less than ", nwppkt))
primary.index <- c2to4(index) #
#
# Now compute extra multiples for lower levels
#
for(i in level:(nlev - 1)) {
em <- 2^(2 * nlev - 2 * i - 1)
primary.index <- c(primary.index, em + primary.index)
}
#
#
# Prepare some room for the answer
#
weave <- rep(0, 2^nlev)
ans <- .C("accessDwpst",
coefvec = as.double(wpst$wpst),
lansvec = as.integer(length(wpst$wpst)),
nlev = as.integer(nlev),
avixstart = as.integer(wpst$avixstart),
primary.index = as.integer(primary.index),
nwppkt = as.integer(nwppkt),
pklength = as.integer(2^level),
level = as.integer(level),
weave = as.double(weave),
lweave = as.double(length(weave)),
error = as.integer(0), PACKAGE = "wavethresh")
ans$weave
}
"accessD.wst"<-
function(wst, level, aspect = "Identity", ...)
{
#
#
# Get all coefficients at a particular level
# First work out how many packets there are at this level
#
nlevels <- nlevelsWT(wst)
if(level < 0)
stop("level must nonnegative")
else if(level > nlevels - 1)
stop(paste("level must be smaller than ", nlevels - 1))
npx <- 2^(nlevels - level)
coefs <- wst$wp[level + 1, ]
if(aspect == "Identity")
return(coefs)
else {
fn <- get(aspect)
return(fn(coefs))
}
}
"accessc"<-
function(irregwd.structure, level, boundary = FALSE)
{
ctmp <- class(irregwd.structure)
if(is.null(ctmp))
stop("irregwd.structure has no class")
else if(ctmp != "irregwd")
stop("irregwd.structure is not of class irregwd")
if(level < 0)
stop("Must have a positive level")
else if(level > (nlevelsWT(irregwd.structure) - 1))
stop("Cannot exceed maximum number of levels")
level <- level + 1
first.last.d <- irregwd.structure$fl.dbase$first.last.d
first.level <- first.last.d[level, 1]
last.level <- first.last.d[level, 2]
offset.level <- first.last.d[level, 3]
if(boundary == TRUE) {
n <- last.level - first.level + 1
coefs <- irregwd.structure$c[(offset.level + 1):(offset.level +
n)]
}
else {
n <- 2^(level - 1)
coefs <- irregwd.structure$c[(offset.level + 1 - first.level):(
offset.level + n - first.level)]
}
return(coefs)
}
"addpkt"<-
function(level, index, density, col, yvals)
{
if(density < 0 || density > 1)
stop("Density should be between 0 and 1")
density <- density * 40
y <- level
level <- level - 1
pktlength <- 2^level
x <- index * pktlength
h <- 1
w <- pktlength
if(missing(yvals))
drawbox(x, y, w, h, density = density, col = col)
else {
xco <- seq(from = x, to = x + w, length = length(yvals))
yco <- y + h/2 + (h * yvals)/(2 * max(abs(yvals)))
lines(xco, yco)
}
}
"av.basis"<-
function(wst, level, ix1, ix2, filter)
{
if(level != 0) {
cl <- conbar(av.basis(wst, level - 1, 2 * ix1, 2 * ix1 + 1,
filter), getpacket(wst, level = level, index = ix1),
filter = filter)
cr <- rotateback(conbar(av.basis(wst, level - 1, 2 * ix2, 2 *
ix2 + 1, filter), getpacket(wst, level = level, index
= ix2), filter = filter))
}
else {
cl <- conbar(getpacket(wst, level = level, index = ix1, type =
"C"), getpacket(wst, level = level, index = ix1),
filter)
cr <- rotateback(conbar(getpacket(wst, level = level, index =
ix2, type = "C"), getpacket(wst, level = level, index
= ix2), filter))
}
return(0.5 * (cl + cr))
}
"basisplot"<-
function(x, ...)
UseMethod("basisplot")
"basisplot.BP"<-
function(x, num = min(10, length(BP$level)), ...)
{
BP <- x
plotpkt(nlevelsWT(BP))
dnsvec <- BP$basiscoef[1:num]
dnsvec <- dnsvec/max(abs(dnsvec))
for(i in 1:num)
addpkt(BP$level[i], BP$pkt[i], dnsvec[i], col = 1)
}
"basisplot.wp"<-
function(x, draw.mode = FALSE, ...)
{
wp <- x
J <- nlevelsWT(wp)
oldl <- -1
zero <- rep(0, 2^J)
rh <- 2^(J - 1)
zwp <- wp(zero, filter.number = wp$filter$filter.number, family = wp$
filter$family)
plotpkt(J)
for(j in 0:(J - 1))
for(k in 0:(2^(J - j) - 1))
addpkt(j, k, 0, col = 1)
znv <- MaNoVe(zwp)
origznv <- znv
cat("Select packets: Left: select. Right: exit\n")
endit <- 0
while(endit == 0) {
n <- locator(n = 1)
if(length(n) == 0)
endit <- 1
else {
sellevel <- floor(n$y)
if(sellevel < 1 || sellevel > (J - 1))
cat("Click on shaded boxes\n")
else {
npkts <- 2^(J - sellevel)
if(n$x < 0 || n$x > rh)
cat("Click on shaded boxes\n")
else {
pknumber <- floor((npkts * n$x)/rh)
if(draw.mode == TRUE && oldl > -1) {
addpkt(oldl, oldpn, 1, col = 3)
}
addpkt(sellevel, pknumber, 1, col = 2)
znv$node.list[[sellevel]]$upperctrl[pknumber +
1] <- "T"
if(draw.mode == TRUE) {
oldl <- sellevel
oldpn <- pknumber
pktl <- 2^sellevel
nhalf <- floor(pktl/2)
pkt <- c(rep(0, nhalf), 1, rep(0, nhalf - 1
))
nzwp <- putpacket(zwp, level = sellevel,
index = pknumber, packet = pkt)
cat("Computing WAIT...")
ans <- InvBasis(nzwp, nv = znv)
cat("d o n e.\n")
znv <- origznv
dev.set()
ts.plot(ans, xlab = "x", ylab =
"Wavelet packet basis function")
dev.set()
}
}
}
}
}
znv
}
"c2to4"<-
function(index)
{
#
# Represent index in base 2. Then use this representation and think of
# it in base 4 to get the number
#
ans <- .C("c2to4",
index = as.integer(index),
answer = as.integer(0) ,PACKAGE = "wavethresh")
ans$answer
}
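#
# Worked example: the binary digits of index are re-read as base-4 digits,
# so index = 2 (binary 10) gives 1*4 + 0 = 4 and index = 3 (binary 11)
# gives 1*4 + 1 = 5, e.g.
#	c2to4(2)	# expected: 4
#	c2to4(3)	# expected: 5
#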
"compare.filters"<-
function(f1, f2)
{
if(f1$family != f2$family)
return(FALSE)
else if(f1$filter.number != f2$filter.number)
return(FALSE)
else return(TRUE)
}
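#
# Usage sketch: two filter descriptions are judged equal only when both the
# family and the filter number agree, e.g.
#	f1 <- filter.select(filter.number = 10, family = "DaubLeAsymm")
#	f2 <- filter.select(filter.number = 2, family = "DaubExPhase")
#	compare.filters(f1, f1)	# expected: TRUE
#	compare.filters(f1, f2)	# expected: FALSE
#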
"compress"<-
function(...)
UseMethod("compress")
"compress.default"<-
function(v, verbose = FALSE, ...)
{
n <- length(v)
r <- sum(v != 0)
if(n > 2 * r) {
position <- (1:n)[v != 0]
values <- v[position]
answer <- list(position = position, values = values,
original.length = n)
class(answer) <- "compressed"
if(verbose == TRUE)
cat("Compressed ", n, " into ", 2 * r, "(", signif((100 *
2 * r)/n, 3), "%)\n")
return(answer)
}
else {
answer <- list(vector = v)
class(answer) <- "uncompressed"
if(verbose == TRUE)
cat("No compression\n")
return(answer)
}
}
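#
# Usage sketch: a vector with few nonzero entries is stored as its nonzero
# positions and values (class "compressed"); otherwise it is returned
# unchanged (class "uncompressed"), e.g.
#	cv <- compress.default(c(0, 0, 5, 0, 0, 0, 0, 0))
#	cv$position	# expected: 3
#	cv$values	# expected: 5
#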
"compress.imwd"<-
function(x, verbose = FALSE, ...)
{
if(verbose == TRUE) cat("Argument checking...") #
#
# Check class of imwd
#
if(verbose == TRUE)
cat("Argument checking\n")
ctmp <- class(x)
if(is.null(ctmp))
stop("imwd has no class")
else if(ctmp != "imwd")
stop("imwd is not of class imwd")
squished <- list(nlevels = nlevelsWT(x), fl.dbase = x$fl.dbase,
filter = x$filter, w0Lconstant = x$w0Lconstant, type =
x$type, bc = x$bc) #
#
# Go round loop compressing each set of coefficients
#
for(level in 0:(nlevelsWT(x) - 1)) {
if(verbose == TRUE)
cat("Level ", level, "\n\t")
nm <- lt.to.name(level, "CD")
if(verbose == TRUE)
cat("CD\t")
squished[[nm]] <- compress.default(x[[nm]], verbose = verbose)
nm <- lt.to.name(level, "DC")
if(verbose == TRUE)
cat("\tDC\t")
squished[[nm]] <- compress.default(x[[nm]], verbose = verbose)
nm <- lt.to.name(level, "DD")
if(verbose == TRUE)
cat("\tDD\t")
squished[[nm]] <- compress.default(x[[nm]], verbose = verbose)
}
class(squished) <- c("imwdc")
if(verbose == TRUE)
cat("Overall compression: Was: ", w <- object.size(x),
" Now:", s <- object.size(squished), " (", signif((100 *
s)/w, 3), "%)\n")
squished
}
"conbar"<-
function(c.in, d.in, filter)
{
#
# S interface to C routine conbar
#
LengthCout <- 2 * length(c.in)
c.out <- rep(0, LengthCout)
answer <- .C("conbarL",
c.in = as.double(c.in),
LengthCin = as.integer(length(c.in)),
firstCin = as.integer(0),
d.in = as.double(d.in),
LengthDin = as.integer(length(d.in)),
firstDin = as.integer(0),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
c.out = as.double(c.out),
LengthCout = as.integer(LengthCout),
firstCout = as.integer(0),
lastCout = as.integer(LengthCout - 1),
type = as.integer(1),
bc = as.integer(1), PACKAGE = "wavethresh")
answer$c.out
}
"convert"<-
function(...)
UseMethod("convert")
"convert.wd"<-
function(wd, ...)
{
#
#
# Convert a wd station object into a wst object
#
#
# First create object of same size and type of desired return object.
#
if(wd$type != "station") stop(
"Object to convert must be of type \"station\" ")
n <- 2^nlevelsWT(wd)
dummy <- rep(0, n)
tmpwst <- wst(dummy, filter.number = wd$filter$filter.number, family = wd$
filter$family)
tmpwst$date <- wd$date #
#
# Now we've got the skeleton let's fill in all the details.
#
arrvec <- getarrvec(nlevelsWT(wd), sort = FALSE)
for(lev in (nlevelsWT(wd) - 1):1) {
ds <- accessD.wd(wd, level = lev)
cs <- accessC.wd(wd, level = lev)
ds <- ds[arrvec[, nlevelsWT(wd) - lev]]
cs <- cs[arrvec[, nlevelsWT(wd) - lev]]
tmpwst <- putD(tmpwst, level = lev, v = ds)
tmpwst <- putC(tmpwst, level = lev, v = cs)
}
#
#
# And put final level in for Cs and Ds (for wst only)
#
tmpwst <- putC(tmpwst, level = nlevelsWT(wd), v = accessC(wd, level = wd$
nlevels)) #
tmpwst <- putD(tmpwst, level = nlevelsWT(wd), v = accessC(wd, level = wd$
nlevels)) #
#
# And zeroth level
#
tmpwst <- putC(tmpwst, level = 0, v = accessC(wd, level = 0))
arrvec <- sort.list(levarr(1:n, levstodo = nlevelsWT(wd)))
tmpwst <- putD(tmpwst, level = 0, v = accessD(wd, level = 0)[arrvec])
tmpwst
}
"convert.wst"<-
function(wst, ...)
{
#
#
# Convert a wst object into a wd type station object
#
#
# First create object of same size and type of desired return object.
#
n <- 2^nlevelsWT(wst)
dummy <- rep(0, n)
tmpwd <- wd(dummy, type = "station", filter.number = wst$filter$filter.number,
family = wst$filter$family)
tmpwd$date <- wst$date #
#
# Now we've got the skeleton let's fill in all the details.
#
arrvec <- getarrvec(nlevelsWT(wst))
for(lev in (nlevelsWT(wst) - 1):1) {
ds <- accessD.wst(wst, level = lev)
cs <- accessC.wst(wst, level = lev)
ds <- ds[arrvec[, nlevelsWT(wst) - lev]]
cs <- cs[arrvec[, nlevelsWT(wst) - lev]]
ixs <- putD(tmpwd, level = lev, v = ds, index = TRUE)
tmpwd$D[ixs$ix1:ixs$ix2] <- ds
ixs <- putC(tmpwd, level = lev, v = cs, index = TRUE)
tmpwd$C[ixs$ix1:ixs$ix2] <- cs
}
#
#
# And put final level in for Cs
#
tmpwd <- putC(tmpwd, level = nlevelsWT(wst), v = accessC(wst, level = wst$
nlevels)) #
#
# And zeroth level
#
tmpwd <- putC(tmpwd, level = 0, v = accessC(wst, level = 0))
arrvec <- levarr(1:n, levstodo = nlevelsWT(wst))
tmpwd <- putD(tmpwd, level = 0, v = accessD(wst, level = 0)[arrvec])
tmpwd
}
"dof"<-
function(wd)
{
cwd <- class(wd)
if(is.null(cwd)) {
stop("Object has no class")
}
else if(cwd != "wd")
stop("Object is not of class wd")
else {
#
# Count number of non-zero coefficients
#
nlev <- nlevelsWT(wd) #
#
# nnonzero counts the number of nonzero coefficients
# This is already 1, since the C contains first level constant
#
nnonzero <- 1
for(i in 0:(nlev - 1)) {
nnonzero <- nnonzero + sum(accessD(wd, lev = i) != 0)
}
}
nnonzero
}
"doppler"<-
function(t)
{
	sqrt(t * (1 - t)) * sin((2 * pi * 1.05)/(t + 0.05))
}
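#
# Usage sketch: evaluate the Doppler test function above on a regular grid
# of [0, 1] and plot it, e.g.
#	x <- seq(0, 1, length = 512)
#	plot(x, doppler(x), type = "l")
#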
"draw"<-
function(...)
UseMethod("draw")
"draw.default"<-
function(filter.number = 10, family = "DaubLeAsymm", resolution = 8192, verbose
= FALSE, plot.it = TRUE, main = "Wavelet Picture", sub = zwd$filter$name,
xlab = "x", ylab = "psi", dimension = 1, twodplot = persp, enhance = TRUE,
	efactor = 0.05, scaling.function = FALSE, type = "l", ...)
{
if(is.na(IsPowerOfTwo(resolution)))
stop("Resolution must be a power of two")
if(scaling.function == FALSE) {
resolution <- resolution/2 #
#
# First obtain support widths
#
sp <- support(filter.number = filter.number, family = family, m
= 0, n = 0)
lh <- c(sp$phi.lh, sp$phi.rh)
lh <- lh[1]
rh <- sp$phi.rh + 2 * resolution - 1
if(verbose == TRUE)
cat("Support of highest resolution wavelets is [", lh,
", ", rh, "]\n") #
pic.support <- support(filter.number = filter.number, family =
family, m = 0, n = 0)
pic.support <- c(pic.support$psi.lh, pic.support$psi.rh) #
#
# Now go through all levels and see what is the lowest resolution wavelet
# that we can use to get the whole wavelet in the support range of the
# highest resolution wavelets.
#
lowest.level <- log(resolution)/log(2)
if(verbose == TRUE)
cat("Lowest level is: ", lowest.level, "\n")
selection <- NULL
candidates <- NULL
for(m in lowest.level:0) {
if(verbose == TRUE) cat("Level ", m, " testing\n") #
#
# Go through each wavelet at this level and find out
# its support. Then check to see if it lies in the
# union of the supports of the highest resolution
# wavelets, and select it if it does.
#
# In fact we examine all the ones that will fit, and choose one that
# is near the middle - to get a nice picture.
#
for(n in 0:(2^(lowest.level - m) - 1)) {
lhs <- support(filter.number = filter.number,
family = family, m = m, n = n)
rhs <- lhs$rh
lhs <- lhs$lh
if(verbose == TRUE)
cat("LHS: ", lhs, " RHS: ", rhs, "\n")
if((lhs >= lh) && (rhs <= rh)) {
candidates <- c(candidates, n)
if(verbose == TRUE)
cat("Level ", m, " Position: ", n,
" selected\n")
}
}
if(!is.null(candidates)) {
if(verbose == TRUE) {
cat("Candidates are \n")
print(candidates)
}
n <- floor(median(candidates))
if(verbose == TRUE)
cat("Choosing ", n, "\n")
selection <- list(m = m, n = n)
lhs <- support(filter.number = filter.number,
family = family, m = m, n = n)
rhs <- lhs$rh
lhs <- lhs$lh
break
}
if(!is.null(selection))
break
}
#
#
# If we haven't selected anything, then set the coefficient to
# be one of the highest resolution coefficients. ALL of these
# are guaranteed to be in the union of all their supports!
# The picture will be crap though!
#
if(is.null(selection)) selection <- list(m = 0, n = 0) #
#
# Build a wd object structure consisting solely of zeroes.
#
zwd <- wd(rep(0, length = resolution * 2), filter.number =
filter.number, family = family, bc = "symmetric") #
#
# Insert a vector containing a 1 where we want to put the coefficient
#
wd.lev <- lowest.level - selection$m
if(verbose == TRUE)
cat("Coefficient insertion at wd level: ", wd.lev, "\n"
)
if(wd.lev == 0)
pickout <- 1
else {
pickout <- rep(0, 2^wd.lev)
pickout[selection$n + 1] <- 1
}
zwd <- putD(zwd, level = wd.lev, v = pickout) #
#
# Reconstruct
#
zwr <- wr(zwd) #
#
# Scales
#
if(verbose == TRUE) {
cat("ps: ", pic.support[1], pic.support[2], "\n")
cat("lh,rh: ", lh, rh, "\n")
cat("lhs,rhs: ", lhs, rhs, "\n")
}
aymax <- ((pic.support[2] - pic.support[1]) * (rh - lh))/(rhs -
lhs)
ax <- pic.support[1] - (aymax * (lhs - lh))/(rh - lh)
ay <- ax + aymax
if(verbose == TRUE) cat("ax,ay ", ax, ay, "\n") #
#
# Scale up y values, because we're actually using a higher "resolution"
# wavelet than psi(x)
#
zwr <- zwr * sqrt(2)^(selection$m + 1) #
#
# Plot it if required
#
x <- seq(from = ax, to = ay, length = resolution * 2)
if(enhance == TRUE) {
sv <- (abs(zwr) > efactor * range(abs(zwr))[2])
sv <- (1:length(sv))[sv]
tr <- range(sv)
sv <- tr[1]:tr[2]
x <- x[sv]
zwr <- zwr[sv]
main <- paste(main, " (Enhanced)")
}
if(plot.it == TRUE) {
if(dimension == 1)
plot(x = x, y = zwr, main = main, sub = sub,
xlab = xlab, ylab = ylab, type = type, ...)
else if(dimension == 2) {
twodplot(x = x, y = x, z = outer(zwr, zwr),
xlab = xlab, ylab = xlab, zlab = ylab, ...)
title(main = main, sub = sub)
invisible()
}
else stop("Can only do 1 or 2 dimensional plots")
}
else {
if(dimension == 1)
return(list(x = x, y = zwr))
else if(dimension == 2)
return(list(x = x, y = x, z = outer(zwr, zwr)))
else stop("Can only do 1 or 2 dimensional plots")
}
}
else {
if(dimension != 1)
stop("Can only generate one-dimensional scaling function"
)
if(enhance == TRUE) {
enhance <- FALSE
warning("Cannot enhance picture of scaling function")
}
if(missing(main))
main <- "Scaling Function"
if(missing(ylab))
ylab <- "phi"
if(missing(sub))
sub <- filter.select(filter.number = filter.number,
family = family)$name
phi <- ScalingFunction(filter.number = filter.number, family =
family, resolution = resolution)
if(plot.it == TRUE) {
plot(x = phi$x, y = phi$y, main = main, sub = sub, xlab
= xlab, ylab = ylab, type = type, ...)
}
else return(list(x = phi$x, y = phi$y))
}
}
"draw.imwd"<-
function(wd, resolution = 128, ...)
{
filter <- wd$filter
draw.default(filter.number = filter$filter.number, family = filter$
family, dimension = 2, resolution = resolution, ...)
}
"draw.imwdc"<-
function(wd, resolution = 128, ...)
{
filter <- wd$filter
draw.default(filter.number = filter$filter.number, family = filter$
family, dimension = 2, resolution = resolution, ...)
}
"draw.mwd"<-
function(mwd, phi = 0, psi = 0, return.funct = FALSE, ...)
{
#draw.mwd
#
# plots one of the scaling or
# wavelet functions used to create mwd
#
#check phi and psi
if(phi > 0 && psi > 0) stop("only one of phi and psi should be nonzero"
)
if(phi == 0 && psi < 0)
stop("bad psi arguement")
if(phi < 0 && psi == 0)
stop("bad phi arguement")
if(phi == 0 && psi == 0)
phi <- 1
if(phi > mwd$filter$nphi)
stop("There aren't that many scaling functions")
if(psi > mwd$filter$npsi) stop("There aren't that many wavelets")
#for the specified case insert a single 1 and reconstruct.
if(phi != 0) {
main <- c("scaling function No.", phi)
M <- matrix(rep(0, 2 * mwd$filter$nphi), nrow = mwd$filter$nphi
)
M[phi, 1] <- 1
mwd$D <- matrix(rep(0, mwd$filter$npsi * mwd$fl.dbase$nvecs.d),
nrow = mwd$filter$npsi)
mwd <- putC.mwd(mwd, level = 1, M)
}
if(psi != 0) {
M <- matrix(rep(0, 2 * mwd$filter$npsi), nrow = mwd$filter$npsi
)
M[psi, 1] <- 1
mwd$C <- matrix(rep(0, mwd$filter$nphi * mwd$fl.dbase$nvecs.c),
nrow = mwd$filter$nphi)
mwd$D <- matrix(rep(0, mwd$filter$npsi * mwd$fl.dbase$nvecs.d),
nrow = mwd$filter$npsi)
mwd <- putD.mwd(mwd, level = 1, M)
}
fun <- mwr(mwd, start.level = 1)
x <- (2 * (0:(length(fun) - 1)))/length(fun) #
#
#plotit
plot(x, fun, type = "l", ...)
if(return.funct == TRUE)
return(fun)
}
"draw.wd"<-
function(wd, ...)
{
if(IsEarly(wd)) {
ConvertMessage()
stop()
}
filter <- wd$filter
draw.default(filter.number = filter$filter.number, family = filter$
family, type = "l", ...)
}
"draw.wp"<-
function(wp, level, index, plot.it = TRUE, main = "Wavelet Packet", sub = paste(wp$
name, " Level=", level, "Index= ", index), xlab = "Position", ylab =
"Wavelet Packet Value", ...)
{
tmp <- drawwp.default(level = level, index = index, filter.number = wp$
filter$filter.number, family = wp$filter$family, ...)
if(plot.it == TRUE) {
plot(1:length(tmp), y = tmp, main = main, sub = sub, xlab =
xlab, ylab = ylab, type = "l", ...)
}
else return(list(x = 1:length(tmp), y = tmp))
}
"draw.wst"<-
function(wst, ...)
{
filter <- wst$filter
draw.default(filter.number = filter$filter.number, family = filter$
family, type = "l", ...)
}
"drawbox"<-
function(x, y, w, h, density, col)
{
xc <- c(x, x + w, x + w, x)
yc <- c(y, y, y + h, y + h)
polygon(x = xc, y = yc, density = density, col = col)
}
"drawwp.default"<-
function(level, index, filter.number = 10, family = "DaubLeAsymm", resolution
= 64 * 2^level)
{
#
# First construct a zeroed wp object
#
z <- rep(0, resolution) #
#
# Now take the wp transform
#
zwp <- wp(z, filter.number = filter.number, family = family) #
#
#
# The packet to install
#
if(level == 0) {
newpkt <- 1
}
else {
newpkt <- rep(0, 2^level)
newpkt[(2^level)/2] <- 1
}
zwp <- putpacket(zwp, level = level, index = index, packet = newpkt) #
#
# Now set up the packet list
#
nlev <- nlevelsWT(zwp)
npkts <- 2^(nlev - level)
levvec <- rep(level, npkts)
pkt <- 0:(npkts - 1)
basiscoef <- rep(0, npkts)
pktlist <- list(nlevels = nlev, level = levvec, pkt = pkt) #
#
# Do the inverse
#
zwr <- InvBasis(zwp, pktlist = pktlist)
zwr
}
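#
# A minimal sketch of drawwp.default(): it returns the values of one wavelet
# packet basis function which can then be plotted. The level/index pair below
# is an arbitrary illustrative choice; kept inside if (FALSE) so sourcing this
# file does not run it.
#
if (FALSE) {
        wpfn <- drawwp.default(level = 2, index = 3, filter.number = 10,
                family = "DaubLeAsymm")
        plot(wpfn, type = "l")
}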
"ewspec"<-
function(x, filter.number = 10, family = "DaubLeAsymm", UseLocalSpec = TRUE, DoSWT
= TRUE, WPsmooth = TRUE, verbose = FALSE, smooth.filter.number = 10,
smooth.family = "DaubLeAsymm", smooth.levels = 3:(nlevelsWT(WPwst) - 1),
smooth.dev = madmad, smooth.policy = "LSuniversal", smooth.value = 0,
smooth.by.level = FALSE, smooth.type = "soft", smooth.verbose = FALSE,
smooth.cvtol = 0.01, smooth.cvnorm = l2norm, smooth.transform = I,
smooth.inverse = I)
{
#
#
# Coarser is an old parameter, not needed now
#
coarser <- 0
if(verbose) cat("Smoothing then inversion\n") #
#
# First compute the SWT
#
if(DoSWT == TRUE) {
if(verbose)
cat("Computing nondecimated wavelet transform of data\n")
xwdS <- wd(x, filter.number = filter.number, family = family,
type = "station")
}
else xwdS <- x
if(UseLocalSpec == TRUE) {
if(verbose)
cat("Computing raw wavelet periodogram\n")
xwdWP <- LocalSpec(xwdS, lsmooth = "none", nlsmooth = FALSE)
}
else xwdWP <- x
J <- nlevelsWT(xwdWP) #
#
# Compute the vSNK matrix
#
if(verbose)
cat("Computing A matrix\n")
rm <- ipndacw( - J, filter.number = filter.number, family = family) #
# Compute the inverse of the vSNK matrix
#
if(verbose)
cat("Computing inverse of A\n")
irm <- solve(rm) #
#
# Create a matrix to store the wavelet periodogram in
#
if(verbose)
cat("Putting wavelet periodogram into a matrix\n")
WavPer <- matrix(0, nrow = (J - coarser), ncol = 2^J) #
#
# Now create the Wavelet Periodogram matrix
#
#       n.b. j=J is the coarsest scale (level 0 in wavethresh notation)
#       and j=1 is the finest scale (level J-1 in wavethresh notation)
#
# Conversion is j -> J-j
#
for(j in 1:(J - coarser)) {
WavPer[j, ] <- accessD(xwdWP, lev = J - j)
}
#
#
# Smooth the wavelet periodogram
#
if(WPsmooth == TRUE) {
if(verbose) {
cat("Smoothing the wavelet periodogram\n")
cat("Smoothing level: ")
}
for(j in 1:(J - coarser)) {
if(verbose)
cat(J - j)
WP <- WavPer[j, ]
WP <- smooth.transform(WP)
WPwst <- wst(WP, filter.number = smooth.filter.number,
family = smooth.family)
if(verbose == TRUE)
cat(".w")
WPwstT <- threshold.wst(WPwst, levels = smooth.levels,
dev = smooth.dev, policy = smooth.policy, value
= smooth.value, by.level = smooth.by.level,
type = smooth.type, verbose = smooth.verbose,
cvtol = smooth.cvtol, cvnorm = smooth.cvnorm)
if(verbose == TRUE)
cat(".t")
WPwsrR <- AvBasis(WPwstT)
if(verbose == TRUE)
cat(".i")
WavPer[j, ] <- smooth.inverse(WPwsrR)
}
if(verbose == TRUE)
cat("\n")
}
#
#
# Need a smaller inverse Rainer matrix if don't do all levels
#
irm <- irm[1:(J - coarser), 1:(J - coarser)] #
#
# Now multiply the inverse matrix into the WavPer
#
S <- irm %*% WavPer #
#
# Store these levels in the xwdS object
#
xwdS <- xwdWP
for(j in 1:(J - coarser)) {
xwdS <- putD(xwdS, lev = J - j, v = S[j, ])
}
if(coarser > 0)
for(j in (J - coarser + 1):J)
xwdS <- putD(xwdS, lev = J - j, v = rep(0, 2^J))
list(S = xwdS, WavPer = xwdWP, rm = rm, irm = irm)
}
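#
# A minimal sketch of ewspec(): estimate the evolutionary wavelet spectrum of
# a series whose length is a power of two. The simulated input is an
# illustrative assumption; kept inside if (FALSE) so sourcing this file does
# not run it.
#
if (FALSE) {
        x <- rnorm(1024)
        sp <- ewspec(x)
        plot(sp$S, main = "Estimated evolutionary wavelet spectrum")
}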
"example.1"<-
function()
{
x <- seq(0, 1, length = 513)
x <- x[1:512]
y <- rep(0, length(x))
xsv <- (x <= 0.5) # Left hand end
y[xsv] <- -16 * x[xsv]^3 + 12 * x[xsv]^2
xsv <- (x > 0.5) & (x <= 0.75) # Middle section
y[xsv] <- (x[xsv] * (16 * x[xsv]^2 - 40 * x[xsv] + 28))/3 - 1.5
xsv <- x > 0.75 #Right hand end
y[xsv] <- (x[xsv] * (16 * x[xsv]^2 - 32 * x[xsv] + 16))/3
list(x = x, y = y)
}
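#
# A minimal sketch using example.1(): it returns a piecewise polynomial test
# signal on 512 points which can be plotted directly. Kept inside if (FALSE)
# so sourcing this file does not run it.
#
if (FALSE) {
        v <- example.1()
        plot(v$x, v$y, type = "l")
}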
"first.last"<-
function(LengthH, DataLength, type = "wavelet", bc = "periodic", current.scale
= 0)
{
if(type == "station" && bc != "periodic")
stop("Can only do periodic boundary conditions with station")
if(type != "station" && type != "wavelet")
stop("Type can only be wavelet or station")
levels <- log(DataLength)/log(2)
first.last.c <- matrix(0, nrow = levels + 1, ncol = 3, dimnames = list(
NULL, c("First", "Last", "Offset")))
first.last.d <- matrix(0, nrow = levels - current.scale, ncol = 3,
dimnames = list(NULL, c("First", "Last", "Offset")))
if(bc == "periodic") {
# Periodic boundary correction
if(type == "wavelet") {
first.last.c[, 1] <- rep(0, levels + 1)
first.last.c[, 2] <- 2^(0:levels) - 1
first.last.c[, 3] <- rev(c(0, cumsum(rev(1 +
first.last.c[, 2]))[1:levels]))
first.last.d[, 1] <- rep(0, levels)
first.last.d[, 2] <- 2^(0:(levels - 1)) - 1
first.last.d[, 3] <- rev(c(0, cumsum(rev(1 +
first.last.d[, 2]))[1:(levels - 1)]))
ntotal <- 2 * DataLength - 1
ntotal.d <- DataLength - 1
}
else if(type == "station") {
first.last.c[, 1] <- rep(0, levels + 1)
first.last.c[, 2] <- 2^levels - 1
first.last.c[, 3] <- rev(c(0, cumsum(rev(1 +
first.last.c[, 2]))[1:levels]))
first.last.d[, 1] <- rep(0, levels)
first.last.d[, 2] <- 2^levels - 1
first.last.d[, 3] <- rev(c(0, cumsum(rev(1 +
first.last.d[, 2]))[1:(levels - 1)]))
ntotal <- (levels + 1) * 2^levels
ntotal.d <- levels * 2^levels
}
}
else if(bc == "symmetric") {
# Symmetric boundary reflection
first.last.c[levels + 1, 1] <- 0
first.last.c[levels + 1, 2] <- DataLength - 1
first.last.c[levels + 1, 3] <- 0
ntotal <- first.last.c[levels + 1, 2] - first.last.c[levels + 1,
1] + 1
ntotal.d <- 0
for(i in levels:1) {
first.last.c[i, 1] <- trunc(0.5 * (1 - LengthH +
first.last.c[i + 1, 1]))
first.last.c[i, 2] <- trunc(0.5 * first.last.c[i + 1, 2
])
first.last.c[i, 3] <- first.last.c[i + 1, 3] +
first.last.c[i + 1, 2] - first.last.c[i + 1, 1] +
1
first.last.d[i, 1] <- trunc(0.5 * (first.last.c[i + 1,
1] - 1))
first.last.d[i, 2] <- trunc(0.5 * (first.last.c[i + 1,
2] + LengthH - 2))
if(i != levels) {
first.last.d[i, 3] <- first.last.d[i + 1, 3] +
first.last.d[i + 1, 2] - first.last.d[i + 1,
1] + 1
}
ntotal <- ntotal + first.last.c[i, 2] - first.last.c[i,
1] + 1
ntotal.d <- ntotal.d + first.last.d[i, 2] -
first.last.d[i, 1] + 1
}
}
else if(bc == "interval") {
first.last.d[, 1] <- rep(0, levels - current.scale)
first.last.d[, 3] <- 2^(current.scale:(levels - 1))
first.last.d[, 2] <- first.last.d[, 3] - 1
first.last.c <- c(0, 2^current.scale - 1, 0)
return(list(first.last.c = first.last.c, first.last.d =
first.last.d))
}
else {
stop("Unknown boundary correction method")
}
names(ntotal) <- NULL
names(ntotal.d) <- NULL
list(first.last.c = first.last.c, ntotal = ntotal, first.last.d =
first.last.d, ntotal.d = ntotal.d)
}
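#
# A minimal sketch of first.last(): build the coefficient bookkeeping database
# for a length-4 filter and 32 data points (illustrative values, default
# periodic wavelet settings). Kept inside if (FALSE) so sourcing this file
# does not run it.
#
if (FALSE) {
        fl <- first.last(LengthH = 4, DataLength = 32)
        fl$first.last.c
        fl$first.last.d
}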
"firstdot"<-
function(s)
{
ls <- length(s)
nc <- nchar(s)
fd <- rep(0, ls)
for(i in 1:ls) {
for(j in 1:nc[i]) {
ss <- substring(s[i], j, j)
if(ss == ".") {
fd[i] <- j
break
}
}
}
fd
}
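#
# A minimal sketch of firstdot(): it returns the position of the first "." in
# each string (the example strings are illustrative). Kept inside if (FALSE)
# so sourcing this file does not run it.
#
if (FALSE) {
        firstdot(c("rm.4.10.DaubLeAsymm", "w8.d"))     # 3 3
}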
"getarrvec"<-
function(nlevels, sort = TRUE)
{
n <- 2^nlevels
v <- 1:n
arrvec <- matrix(0, nrow = n, ncol = nlevels - 1)
if(sort == TRUE) {
for(i in 1:ncol(arrvec))
arrvec[, i] <- sort.list(levarr(v, i))
}
else {
for(i in 1:ncol(arrvec))
arrvec[, i] <- levarr(v, i)
}
arrvec
}
"getpacket"<-
function(...)
UseMethod("getpacket")
"getpacket.wp"<-
function(wp, level, index, ...)
{
if(!inherits(wp, "wp"))
stop("wp object is not of class wp")
if(level > nlevelsWT(wp))
stop("Not that many levels in wp object")
unit <- 2^level
LocalIndex <- unit * index + 1
if(index > 2^(nlevelsWT(wp) - level) - 1) {
cat("Index was too high, maximum for this level is ", 2^(wp$
nlevels - level) - 1, "\n")
stop("Error occured")
}
if(LocalIndex < 0)
stop("Index must be non-negative")
packet <- wp$wp[level + 1, (LocalIndex:(LocalIndex + unit - 1))]
packet
}
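#
# A minimal sketch of getpacket.wp(): extract one packet of coefficients from
# a wavelet packet object (the data and filter choice are illustrative
# assumptions). Kept inside if (FALSE) so sourcing this file does not run it.
#
if (FALSE) {
        y <- rnorm(16)
        ywp <- wp(y, filter.number = 2, family = "DaubExPhase")
        getpacket(ywp, level = 2, index = 1)    # one packet of 2^2 coefficients
}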
"getpacket.wpst"<-
function(wpst, level, index, ...)
{
nlev <- nlevelsWT(wpst)
if(level < 0)
stop("Level must be greater than or equal to 0")
else if(level > nlev)
stop(paste("Level must be less than or equal to ", nlev))
npkts <- 4^(nlev - level)
if(index < 0)
stop("Packet index must be greater than or equal to 0")
else if(index > npkts - 1)
stop(paste("Packet index must be less than or equal to ", npkts -
1))
pktlength <- 2^level
lix <- 1 + wpst$avixstart[level + 1] + pktlength * index
rix <- lix + pktlength - 1
wpst$wpst[lix:rix]
}
"getpacket.wst"<-
function(wst, level, index, type = "D", aspect = "Identity", ...)
{
if(type != "D" && type != "C")
stop("Type of access must be C or D")
class(wst) <- "wp"
if(type == "C")
wst$wp <- wst$Carray
coefs <- getpacket.wp(wst, level = level, index = index)
if(aspect == "Identity")
return(coefs)
else {
fn <- get(aspect)
return(fn(coefs))
}
}
"getpacket.wst2D"<-
function(wst2D, level, index, type = "S", Ccode = TRUE, ...)
{
nlev <- nlevelsWT(wst2D)
if(level > nlev - 1)
stop(paste("Maximum level is ", nlev - 1, " you supplied ",
level))
else if(level < 0)
stop(paste("Minimum level is 0 you supplied ", level))
if(type != "S" && type != "H" && type != "V" && type != "D")
stop("Type must be one of S, H, V or D")
if(nchar(index) != nlev - level)
stop(paste("Index must be ", nlev - level,
" characters long for level ", level))
for(i in 1:nchar(index)) {
s1 <- substring(index, i, i)
if(s1 != "0" && s1 != "1" && s1 != "2" && s1 != "3")
stop(paste("Character ", i,
" in index is not a 0, 1, 2 or 3. It is ", s1))
}
if(Ccode == TRUE) {
ntype <- switch(type,
S = 0,
H = 1,
V = 2,
D = 3)
amdim <- dim(wst2D$wst2D)
sl <- 2^level
out <- matrix(0, nrow = sl, ncol = sl)
ans <- .C("getpacketwst2D",
am = as.double(wst2D$wst2D),
d1 = as.integer(amdim[1]),
d12 = as.integer(amdim[1] * amdim[2]),
maxlevel = as.integer(nlev - 1),
level = as.integer(level),
index = as.integer(index),
ntype = as.integer(ntype),
out = as.double(out),
sl = as.integer(sl), PACKAGE = "wavethresh")
return(matrix(ans$out, nrow = ans$sl))
}
else {
x <- y <- 0
ans <- .C("ixtoco",
level = as.integer(level),
maxlevel = as.integer(nlev - 1),
index = as.integer(index),
x = as.integer(x),
y = as.integer(y), PACKAGE = "wavethresh")
cellength <- 2^level
tmpx <- switch(type,
S = 0,
H = 0,
V = cellength,
D = cellength)
tmpy <- switch(type,
S = 0,
H = cellength,
V = 0,
D = cellength)
x <- ans$x + tmpx + 1
y <- ans$y + tmpy + 1
cat("x ", x, "y: ", y, "x+cellength-1 ", x + cellength - 1,
"y+cellength-1", y + cellength - 1, "\n")
return(wst2D$wst2D[level + 1, x:(x + cellength - 1), y:(y +
cellength - 1)])
}
}
"guyrot"<-
function(v, n)
{
l <- length(v)
n <- n %% l
if(n == 0)
return(v)
tmp <- v[(l - n + 1):l]
v[(n + 1):l] <- v[1:(l - n)]
v[1:n] <- tmp
v
}
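#
# A minimal sketch of guyrot(): cyclically rotate a vector by n places. Kept
# inside if (FALSE) so sourcing this file does not run it.
#
if (FALSE) {
        guyrot(1:8, 3)  # 6 7 8 1 2 3 4 5
}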
"image.wd"<-
function(x, strut = 10, type = "D", transform = I, ...)
{
if(x$type != "station")
stop("You have not supplied a nondecimated wd object")
nlev <- nlevelsWT(x)
if(type == "D" ) {
m <- matrix(0, nrow = nlev, ncol = 2^nlev)
for(i in 0:(nlev - 1)) {
                        m[i + 1, ] <- accessD(x, lev = i)
}
}
if(type == "C") {
mC <- matrix(0, nrow = nlev + 1, ncol = 2^nlev)
for(i in 0:nlev) {
                        mC[i + 1, ] <- accessC(x, lev = i)
}
}
nr <- nlev
mz <- matrix(0, nrow = nlev, ncol = 2^nlev)
if(type == "D") {
image(transform(m[rep(1:nr, rep(strut, nr)), ]),
main="Wavelet coefficients")
}
else if(type == "C")
image(transform(mC[rep(1:nr, rep(strut, nr)), ]),
main = "Scaling function coefficients")
}
"image.wst"<-
function(x, nv, strut = 10, type = "D", transform = I, ...)
{
m <- x$wp
mC <- x$Carray
nr <- nrow(m)
nlev <- nlevelsWT(x)
mz <- matrix(0, nrow = nrow(mC), ncol = ncol(mC))
if(!missing(nv)) {
pknums <- print.nv(nv, printing = FALSE)$indexlist
mpk <- matrix(0, nrow = nrow(mC), ncol = ncol(mC))
for(i in seq(along = pknums)) {
lev <- nlev - i + 1
pklength <- 2^(lev - 1)
f <- pknums[i] * pklength + 1
l <- f + pklength - 1
mpk[lev, f:l] <- 1
}
}
if(type == "D") {
image(transform(m[rep(1:nr, rep(strut, nr)), ]),
main =
"Wavelet coefficients")
}
else if(type == "C")
image(transform(mC[rep(1:nr, rep(strut, nr)), ]),
main =
"Scaling function coefficients"
)
}
"imwd"<-
function(image, filter.number = 10, family = "DaubLeAsymm", type = "wavelet",
bc = "periodic", RetFather = TRUE, verbose = FALSE)
{
if(verbose == TRUE)
cat("Argument checking...")
if(nrow(image) != ncol(image))
stop("Number of rows and columns in image are not identical")
if(verbose == TRUE) cat("...done\nFilter...") #
#
# Select wavelet filter
#
filter <- filter.select(filter.number = filter.number, family = family)
Csize <- nrow(image) #
#
# Check that Csize is a power of 2
#
nlev <- IsPowerOfTwo(Csize)
if(is.na(nlev)) stop(paste("The image size (", Csize,
") is not a power of 2")) #
#
# Set-up first/last database
#
if(verbose == TRUE)
cat("...selected\nFirst/last database...")
fl.dbase <- first.last(LengthH = length(filter$H), DataLength = Csize,
bc = bc, type = type)
first.last.c <- fl.dbase$first.last.c
first.last.d <- fl.dbase$first.last.d #
#
# Set up answer list
#
image.decomp <- list(nlevels = nlev, fl.dbase = fl.dbase, filter =
filter, type = type, bc = bc, date = date()) #
#
#
if(verbose == TRUE) cat("...built\n") #
#
# Ok, go round loop doing decompositions
#
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary handling")
if(type == "station" && bc == "symmetric")
stop("Cannot do nondecimated transform with symmetric boundary conditions"
)
ntype <- switch(type,
wavelet = 1,
station = 2)
if(is.null(ntype)) stop("Unknown type of transform") #
#
# Load up original image
#
smoothed <- as.vector(image)
if(verbose == TRUE) {
cat(bc, " boundary handling\n")
cat("Decomposing...")
}
for(level in seq(nrow(first.last.d), 1, -1)) {
if(verbose == TRUE)
cat(level - 1, "")
LengthCin <- first.last.c[level + 1, 2] - first.last.c[level +
1, 1] + 1
LengthCout <- first.last.c[level, 2] - first.last.c[level, 1] +
1
LengthDout <- first.last.d[level, 2] - first.last.d[level, 1] +
1
ImCC <- rep(0, (LengthCout * LengthCout))
ImCD <- rep(0, (LengthCout * LengthDout))
ImDC <- rep(0, (LengthDout * LengthCout))
ImDD <- rep(0, (LengthDout * LengthDout))
error <- 0
z <- .C("StoIDS",
C = as.double(smoothed),
Csize = as.integer(LengthCin),
firstCin = as.integer(first.last.c[level + 1, 1]),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
LengthCout = as.integer(LengthCout),
firstCout = as.integer(first.last.c[level, 1]),
lastCout = as.integer(first.last.c[level, 2]),
LengthDout = as.integer(LengthDout),
firstDout = as.integer(first.last.d[level, 1]),
lastDout = as.integer(first.last.d[level, 2]),
ImCC = as.double(ImCC),
ImCD = as.double(ImCD),
ImDC = as.double(ImDC),
ImDD = as.double(ImDD),
nbc = as.integer(nbc),
ntype = as.integer(ntype),
error = as.integer(error), PACKAGE = "wavethresh")
error <- z$error
if(error != 0) {
cat("Error was ", error, "\n")
stop("Error reported")
}
smoothed <- z$ImCC
if(RetFather == TRUE) {
nm <- lt.to.name(level - 1, "CC")
image.decomp[[nm]] <- z$ImCC
}
nm <- lt.to.name(level - 1, "CD")
image.decomp[[nm]] <- z$ImCD
nm <- lt.to.name(level - 1, "DC")
image.decomp[[nm]] <- z$ImDC
nm <- lt.to.name(level - 1, "DD")
image.decomp[[nm]] <- z$ImDD
}
if(verbose == TRUE)
cat("\nReturning answer...\n")
image.decomp$w0Lconstant <- smoothed
image.decomp$bc <- bc
image.decomp$date <- date()
class(image.decomp) <- "imwd"
image.decomp
}
"imwr"<-
function(...)
UseMethod("imwr")
"imwr.imwd"<-
function(imwd, bc = imwd$bc, verbose = FALSE, ...)
{
if(verbose == TRUE) cat("Argument checking...") #
#
# Check class of imwd
#
ctmp <- class(imwd)
if(is.null(ctmp))
stop("imwd has no class")
else if(ctmp != "imwd")
stop("imwd is not of class imwd")
if(imwd$type == "station")
stop("Cannot invert nonodecimated wavelet transform using imwr")
filter <- imwd$filter
if(verbose == TRUE)
cat("...done\nFirst/last database...")
fl.dbase <- imwd$fl.dbase
first.last.c <- fl.dbase$first.last.c
first.last.d <- fl.dbase$first.last.d
if(verbose == TRUE)
cat("...extracted\n")
ImCC <- imwd$w0Lconstant
if(verbose == TRUE) cat("Reconstructing...") #
#
# Ok, go round loop doing reconstructions
#
for(level in seq(2, 1 + nlevelsWT(imwd))) {
if(verbose == TRUE)
cat(level - 1, " ")
LengthCin <- first.last.c[level - 1, 2] - first.last.c[level -
1, 1] + 1
LengthCout <- first.last.c[level, 2] - first.last.c[level, 1] +
1
LengthDin <- first.last.d[level - 1, 2] - first.last.d[level -
1, 1] + 1
error <- 0
ImOut <- rep(0, LengthCout^2)
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary handling")
z <- .C("StoIRS",
ImCC = as.double(ImCC),
ImCD = as.double(imwd[[lt.to.name(level - 2, "CD")]]),
ImDC = as.double(imwd[[lt.to.name(level - 2, "DC")]]),
ImDD = as.double(imwd[[lt.to.name(level - 2, "DD")]]),
LengthCin = as.integer(LengthCin),
firstCin = as.integer(first.last.c[level - 1, 1]),
LengthDin = as.integer(LengthDin),
firstDin = as.integer(first.last.d[level - 1, 1]),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
LengthCout = as.integer(LengthCout),
firstCout = as.integer(first.last.c[level, 1]),
lastCout = as.integer(first.last.c[level, 2]),
ImOut = as.double(ImOut),
nbc = as.integer(nbc),
error = as.integer(error), PACKAGE = "wavethresh")
error <- z$error
if(error != 0) {
cat("Error was ", error, "\n")
stop("Error reported")
}
# Do something with ImOut
ImCC <- z$ImOut
}
if(verbose == TRUE)
cat("\nReturning image\n") # Return the image
matrix(ImCC, nrow = 2^(nlevelsWT(imwd)))
}
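#
# A minimal round-trip sketch for imwd()/imwr(): decompose a square image with
# side length a power of two and reconstruct it (the random test image and
# filter choice are illustrative assumptions). Kept inside if (FALSE) so
# sourcing this file does not run it.
#
if (FALSE) {
        m <- matrix(rnorm(32 * 32), nrow = 32)
        mimwd <- imwd(m, filter.number = 2, family = "DaubExPhase")
        mrec <- imwr(mimwd)
        max(abs(mrec - m))      # should be close to zero
}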
"imwr.imwdc"<-
function(imwd, verbose = FALSE, ...)
{
if(verbose == TRUE)
cat("Uncompressing...\n")
imwd2 <- uncompress(imwd, ver = verbose)
if(verbose == TRUE)
cat("Reconstructing...\n")
imwr(imwd2, verbose = verbose, ...)
}
"ipndacw"<-
function(J, filter.number = 10, family = "DaubLeAsymm", tol = 1e-100, verbose
= FALSE, ...)
{
if(verbose == TRUE)
cat("Computing ipndacw\n")
now <- proc.time()[1:2]
if(J >= 0)
stop("J must be negative integer")
if(J - round(J) != 0)
stop("J must be an integer") #
rmnorig <- rmname(J = J, filter.number = filter.number, family = family
) #
#
# See if matrix already exists. If so, return it
#
rm.there <- rmget(requestJ = - J, filter.number = filter.number,
family = family)
if(!is.null(rm.there)) {
if(verbose == TRUE)
cat("Returning precomputed version: using ", rm.there,
"\n")
speed <- proc.time()[1:2] - now
if(verbose == TRUE)
cat("Took ", sum(speed), " seconds\n")
rmnexists <- rmname(J = - rm.there, filter.number =
filter.number, family = family)
tmp <- get(rmnexists, envir=WTEnv)[1:( - J), 1:( - J)]
assign(rmnorig, tmp, envir=WTEnv)
return(tmp)
}
#
#
# See if partially computed matrix exists. If so, use it.
#
if(J != -1) {
for(j in (1 + J):(-1)) {
rmn <- rmname(J = j, filter.number = filter.number,
family = family)
if(exists(rmn, envir=WTEnv)) {
if(verbose == TRUE) {
cat("Partial matrix: ", rmn, " exists (")
cat(paste(round(100 - (100 * (j * j))/(J * J),
digits = 1), "% left to do)\n", sep = ""))
}
fmat <- rep(0, J * J)
H <- filter.select(filter.number =
filter.number, family = family)$H
error <- 0
answer <- .C("rainmatPARTIAL",
J = as.integer( - J),
j = as.integer( - j),
H = as.double(H),
LengthH = as.integer(length(H)),
fmat = as.double(fmat),
tol = as.double(tol),
error = as.integer(error), PACKAGE = "wavethresh")
if(answer$error != 0)
stop(paste("Error code was ", answer$error))
m <- matrix(answer$fmat, nrow = - J)
m[1:( - j), 1:( - j)] <- get(rmn, envir=WTEnv)
nm <- as.character(-1:J)
dimnames(m) <- list(nm, nm)
speed <- proc.time()[1:2] - now
if(verbose == TRUE)
cat("Took ", sum(speed), " seconds\n")
assign(rmnorig, m, envir=WTEnv)
return(m)
}
}
}
#
#
# Otherwise have to compute whole matrix
#
fmat <- rep(0, J * J)
H <- filter.select(filter.number = filter.number, family = family)$H
error <- 0
answer <- .C("rainmatPARENT",
J = as.integer( - J),
H = as.double(H),
LengthH = as.integer(length(H)),
fmat = as.double(fmat),
tol = as.double(tol),
error = as.integer(error), PACKAGE = "wavethresh")
if(answer$error != 0)
stop(paste("Error code was ", answer$error))
speed <- proc.time()[1:2] - now
if(verbose == TRUE)
cat("Took ", sum(speed), " seconds\n")
m <- matrix(answer$fmat, nrow = - J)
nm <- as.character(-1:J)
dimnames(m) <- list(nm, nm)
assign(rmnorig, m, envir=WTEnv)
m
}
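#
# A minimal sketch of ipndacw(): compute the inner product matrix of discrete
# autocorrelation wavelets down to level -4 (the filter choice is an
# illustrative assumption). Kept inside if (FALSE) so sourcing this file does
# not run it.
#
if (FALSE) {
        ipndacw(-4, filter.number = 1, family = "DaubExPhase")
}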
"irregwd"<-
function(gd, filter.number = 2, family = "DaubExPhase", bc = "periodic",
verbose = FALSE)
{
type <- "wavelet"
if(verbose == TRUE)
cat("wd: Argument checking...")
ctmp <- class(gd)
if(is.null(ctmp))
stop("gd has no class")
else if(ctmp != "griddata")
stop("gd is not of class griddata")
data <- gd$gridy
if(!is.atomic(data))
stop("Data is not atomic")
DataLength <- length(data) #
#
# Check that we have a power of 2 data elements
#
nlevels <- nlevelsWT(data) #
if(is.na(nlevels)) stop("Data length is not power of two")
# Check for correct type
#
if(type != "wavelet" && type != "station")
stop("Unknown type of wavelet decomposition")
if(type == "station" && bc != "periodic") stop(
"Can only do periodic boundary conditions with station"
) #
# Select the appropriate filter
#
if(verbose == TRUE)
cat("...done\nFilter...")
filter <- filter.select(filter.number = filter.number, family = family)
#
#
# Build the first/last database
#
if(verbose == TRUE)
cat("...selected\nFirst/last database...")
fl.dbase <- first.last(LengthH = length(filter$H), DataLength =
DataLength, type = type, bc = bc) #
#
# Put in the data
#
C <- rep(0, fl.dbase$ntotal)
C[1:DataLength] <- data #
if(verbose == TRUE)
error <- 1
else error <- 0
if(verbose == TRUE) cat("built\n") #
#
# Compute the decomposition
#
if(verbose == TRUE)
cat("Decomposing...\n")
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary condition")
ntype <- switch(type,
wavelet = 1,
station = 2)
if(is.null(filter$G)) {
wavelet.decomposition <- .C("wavedecomp",
C = as.double(C),
D = as.double(rep(0, fl.dbase$ntotal.d)),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
nlevels = as.integer(nlevels),
firstC = as.integer(fl.dbase$first.last.c[, 1]),
lastC = as.integer(fl.dbase$first.last.c[, 2]),
offsetC = as.integer(fl.dbase$first.last.c[, 3]),
firstD = as.integer(fl.dbase$first.last.d[, 1]),
lastD = as.integer(fl.dbase$first.last.d[, 2]),
offsetD = as.integer(fl.dbase$first.last.d[, 3]),
ntype = as.integer(ntype),
nbc = as.integer(nbc),
error = as.integer(error), PACKAGE = "wavethresh")
tmp <- .C("computec",
n = as.integer(length(gd$Gleft)),
c = as.double(rep(0, fl.dbase$ntotal.d)),
gridn = as.integer(length(gd$G)),
G = as.double(gd$G),
Gindex = as.integer(gd$Gindex),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
nbc = as.integer(nbc), PACKAGE = "wavethresh")
}
else {
wavelet.decomposition <- .C("comwd",
CR = as.double(Re(C)),
CI = as.double(Im(C)),
LengthC = as.integer(fl.dbase$ntotal),
DR = as.double(rep(0, fl.dbase$ntotal.d)),
DI = as.double(rep(0, fl.dbase$ntotal.d)),
LengthD = as.integer(fl.dbase$ntotal.d),
HR = as.double(Re(filter$H)),
HI = as.double( - Im(filter$H)),
GR = as.double(Re(filter$G)),
GI = as.double( - Im(filter$G)),
LengthH = as.integer(length(filter$H)),
nlevels = as.integer(nlevels),
firstC = as.integer(fl.dbase$first.last.c[, 1]),
lastC = as.integer(fl.dbase$first.last.c[, 2]),
offsetC = as.integer(fl.dbase$first.last.c[, 3]),
firstD = as.integer(fl.dbase$first.last.d[, 1]),
lastD = as.integer(fl.dbase$first.last.d[, 2]),
offsetD = as.integer(fl.dbase$first.last.d[, 3]),
ntype = as.integer(ntype),
nbc = as.integer(nbc),
error = as.integer(error), PACKAGE = "wavethresh")
}
if(verbose == TRUE)
cat("done\n")
error <- wavelet.decomposition$error
if(error != 0) {
cat("Error ", error, " occured in wavedecomp\n")
stop("Error")
}
if(is.null(filter$G)) {
l <- list(C = wavelet.decomposition$C, D =
wavelet.decomposition$D, c = tmp$c * (tmp$c > 0),
nlevels = nlevelsWT(wavelet.decomposition), fl.dbase =
fl.dbase, filter = filter, type = type, bc = bc, date
= date())
}
else {
l <- list(C = complex(real = wavelet.decomposition$CR,
imaginary =
wavelet.decomposition$CI), D = complex(real =
wavelet.decomposition$DR, imaginary = wavelet.decomposition$DI
), nlevels = nlevelsWT(wavelet.decomposition), fl.dbase =
fl.dbase, filter = filter, type = type, bc = bc, date
= date())
}
class(l) <- "irregwd"
return(l)
}
"l2norm"<-
function(u, v)
sqrt(sum((u - v)^2))
"levarr"<-
function(v, levstodo)
{
if(levstodo != 0) {
sv <- seq(from = 1, to = length(v), by = 2)
return(c(levarr(v[sv], levstodo - 1), levarr(v[ - sv], levstodo -
1)))
}
else return(v)
}
"linfnorm"<-
function(u, v)
{
max(abs(u - v))
}
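#
# A minimal sketch of l2norm() and linfnorm(): Euclidean and supremum
# distances between two vectors. Kept inside if (FALSE) so sourcing this file
# does not run it.
#
if (FALSE) {
        u <- c(3, 0, 0)
        v <- c(0, 4, 0)
        l2norm(u, v)    # 5
        linfnorm(u, v)  # 4
}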
"lt.to.name"<-
function(level, type)
{
#
# This function converts the level and type (horizontal, vertical, diagonal)
# of wavelet coefficients to a character string "wnLx" which should be
# interpreted as "nth Level, coefficients x", where x is 1, 2 or 3 in the
# scheme of Mallat. (So 1 is horizontal, 2 is vertical and 3 is diagonal).
# w is on the front to indicate that these are wavelet coefficients
#
return(paste("w", as.character(level), "L", switch(type,
CD = "1",
DC = "2",
DD = "3",
CC = "4"), sep = ""))
}
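#
# A minimal sketch of lt.to.name(): map a level and coefficient type to the
# component name used inside imwd objects. Kept inside if (FALSE) so sourcing
# this file does not run it.
#
if (FALSE) {
        lt.to.name(3, "CD")     # "w3L1", horizontal detail at level 3
        lt.to.name(0, "DD")     # "w0L3", diagonal detail at level 0
}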
"madmad"<-
function(x)
mad(x)^2
"makegrid"<-
function(t, y, gridn = 2^(floor(log(length(t) - 1, 2)) + 1))
{
#
# 30th October 2018. Enhancements to do some argument sanity checks
#
lt <- length(t)
ly <- length(y)
if (lt != ly)
stop("Length of t and y vectors has to be the same")
isp2gridn <- IsPowerOfTwo(gridn)
if (is.na(isp2gridn))
stop("Length of gridn has to be a power of two")
tmp <- .C("makegrid",
x = as.double(t),
y = as.double(y),
n = length(t),
gridt = as.double(rep(0, gridn)),
gridy = as.double(rep(0, gridn)),
gridn = as.integer(gridn),
G = as.double(rep(0, gridn)),
Gindex = as.integer(rep(0, gridn)), PACKAGE = "wavethresh")
l <- list(gridt = tmp$gridt, gridy = tmp$gridy, G = tmp$G, Gindex = tmp$
Gindex)
class(l) <- "griddata"
l
}
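#
# A minimal sketch combining makegrid() and irregwd() (defined above):
# interpolate irregularly spaced data onto a dyadic grid and take the
# irregular-design wavelet transform. The simulated data and filter choice are
# illustrative assumptions. Kept inside if (FALSE) so sourcing this file does
# not run it.
#
if (FALSE) {
        t <- sort(runif(100))
        y <- sin(2 * pi * t) + rnorm(100, sd = 0.1)
        gd <- makegrid(t, y)                    # grids onto 128 points here
        iw <- irregwd(gd, filter.number = 2)
        plot(iw)
}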
"makewpstDO"<-
function(timeseries, groups, filter.number = 10, family = "DaubExPhase", mincor
= 0.69999999999999996)
{
#
#
# Using the data in timeseries (which should be a length a power of two)
# and the group information (only two groups presently). Create an object
# of class wpstDO (nondecimated wavelet packet Discrimination Object).
#
# Given this wpstDO and another timeseries a function exists to predict
# the group membership of each timeseries element
#
#
# First build nondecimated wavelet packet object
#
twpst <- wpst(timeseries, filter.number = filter.number, family =
family) #
#
# Now convert this to a w2d object including the group information.
#
tw2d <- wpst2discr(wpstobj = twpst, groups = groups) #
#
# Now extract the best 1D classifying columns.
#
tBP <- Best1DCols(w2d = tw2d, mincor = mincor) #
#
# Do a discriminant analysis
#
tBPd <- BMdiscr(tBP)
l <- list(BPd = tBPd, BP = tBP, filter = twpst$filter)
class(l) <- "wpstDO"
l
}
"mfilter.select"<-
function(type = "Geronimo")
{
#
# mfilter.select
# returns the filter information for a specified
# multiple wavelet basis
#
# Copyright Tim Downie 1995-6.
#
#
if(type == "Geronimo") {
name <- "Geronimo Multiwavelets"
nphi <- 2
npsi <- 2
NH <- 4
ndecim <- 2
H <- rep(0, 16)
G <- rep(0, 16)
H[1] <- 0.42426406871193001
H[2] <- 0.80000000000000004
H[3] <- -0.050000000000000003
H[4] <- -0.21213203435596001
H[5] <- 0.42426406871193001
H[7] <- 0.45000000000000001
H[8] <- 0.70710678118655002
H[11] <- 0.45000000000000001
H[12] <- -0.21213203435596001
H[15] <- -0.050000000000000003 #
# H6,9,10,13,14,16 are zero.
#
G[1] <- -0.050000000000000003
G[2] <- -0.21213203435596401
G[3] <- 0.070710678118654793
G[4] <- 0.29999999999999999
G[5] <- 0.45000000000000001
G[6] <- -0.70710678118654802
G[7] <- -0.63639610306789296
G[9] <- 0.45000000000000001
G[10] <- -0.21213203435596401
G[11] <- 0.63639610306789296
G[12] <- -0.29999999999999999
G[13] <- -0.050000000000000003
G[15] <- -0.070710678118654793 #
# G8,14,16 are zero.
#
}
else if(type == "Donovan3") {
name <- "Donovan Multiwavelets, 3 functions"
nphi <- 3
npsi <- 3
NH <- 4
ndecim <- 2
H <- rep(0, 36)
G <- rep(0, 36)
H[2] <- ( - sqrt(154) * (3 + 2 * sqrt(5)))/3696
H[3] <- (sqrt(14) * (2 + 5 * sqrt(5)))/1232
H[10] <- ( - sqrt(2) * (3 + 2 * sqrt(5)))/44
H[11] <- (sqrt(154) * (67 + 30 * sqrt(5)))/3696
H[12] <- (sqrt(14) * (-10 + sqrt(5)))/112
H[19] <- 1/sqrt(2)
H[20] <- (sqrt(154) * (67 - 30 * sqrt(5)))/3696
H[21] <- (sqrt(14) * (10 + sqrt(5)))/112
H[23] <- (3 * sqrt(2))/8
H[24] <- (sqrt(22) * (-4 + sqrt(5)))/88
H[26] <- (sqrt(22) * (32 + 7 * sqrt(5)))/264
H[27] <- (sqrt(2) * (-5 + 4 * sqrt(5)))/88
H[28] <- (sqrt(2) * (-3 + 2 * sqrt(5)))/44
H[29] <- (sqrt(154) * (-3 + 2 * sqrt(5)))/3696
H[30] <- (sqrt(14) * (-2 + 5 * sqrt(5)))/1232
H[31] <- sqrt(154)/22
H[32] <- (3 * sqrt(2))/8
H[33] <- (sqrt(22) * (4 + sqrt(5)))/88
H[34] <- - sqrt(70)/22
H[35] <- (sqrt(22) * (-32 + 7 * sqrt(5)))/264
H[36] <- ( - sqrt(2) * (5 + 4 * sqrt(5)))/88 #
# H1,4,5,6,7,8,9,13,14,15,16,17,18,22,25 are zero.
#
G[5] <- (sqrt(154) * (3 + 2 * sqrt(5)))/3696
G[6] <- ( - sqrt(14) * (2 + 5 * sqrt(5)))/1232
G[8] <- ( - sqrt(7) * (1 + sqrt(5)))/336
G[9] <- (sqrt(77) * (-1 + 3 * sqrt(5)))/1232
G[13] <- (sqrt(2) * (3 + 2 * sqrt(5)))/44
G[14] <- ( - sqrt(154) * (67 + 30 * sqrt(5)))/3696
G[15] <- (sqrt(14) * (10 - sqrt(5)))/112
G[16] <- ( - sqrt(11) * (1 + sqrt(5)))/44
G[17] <- (sqrt(7) * (29 + 13 * sqrt(5)))/336
G[18] <- (sqrt(77) * (-75 + 17 * sqrt(5)))/1232
G[20] <- (sqrt(77) * (-2 + sqrt(5)))/264
G[21] <- (sqrt(7) * (13 - 6 * sqrt(5)))/88
G[22] <- 1/sqrt(2)
G[23] <- (sqrt(154) * (-67 + 30 * sqrt(5)))/3696
G[24] <- ( - sqrt(14) * (10 + sqrt(5)))/112
G[26] <- (sqrt(7) * (-29 + 13 * sqrt(5)))/336
G[27] <- ( - sqrt(77) * (75 + 17 * sqrt(5)))/1232
G[28] <- 13/22
G[29] <- ( - sqrt(77) * (2 + sqrt(5)))/264
G[30] <- ( - sqrt(7) * (13 + 6 * sqrt(5)))/88
G[31] <- (sqrt(2) * (3 - 2 * sqrt(5)))/44
G[32] <- (sqrt(154) * (3 - 2 * sqrt(5)))/3696
G[33] <- (sqrt(14) * (2 - 5 * sqrt(5)))/1232
G[34] <- (sqrt(11) * (1 - sqrt(5)))/44
G[35] <- (sqrt(7) * (1 - sqrt(5)))/336
G[36] <- ( - sqrt(77) * (1 + 3 * sqrt(5)))/1232 #
# G1,2,3,4,7,10,11,12,19,25 are zero.
#
}
else (stop("bad filter specified\n"))
return(list(type = type, name = name, nphi = nphi, npsi = npsi, NH = NH,
ndecim = ndecim, H = H, G = G))
}
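#
# A minimal sketch of mfilter.select(): retrieve the Geronimo multiwavelet
# filter coefficients and its basic dimensions. Kept inside if (FALSE) so
# sourcing this file does not run it.
#
if (FALSE) {
        gf <- mfilter.select("Geronimo")
        gf$name         # "Geronimo Multiwavelets"
        gf$nphi         # 2 scaling functions
        gf$NH           # 4 filter matrices
}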
"mfirst.last"<-
function(LengthH, nlevels, ndecim, type = "wavelet", bc = "periodic")
{
#
# mfirst.last
# Sets up a coefficient data base for a multiple wavelet object
# The structure is analogous to that used in first.last
# but returns more information required by mwd and mwr.
#
# Copyright Tim Downie 1995-1996
#
#
if(type != "wavelet") stop("Type can only be wavelet")
first.last.c <- matrix(0, nrow = nlevels + 1, ncol = 3, dimnames = list(
NULL, c("First", "Last", "Offset")))
first.last.d <- matrix(0, nrow = nlevels, ncol = 3, dimnames = list(
NULL, c("First", "Last", "Offset")))
if(bc == "periodic") {
# Periodic boundary correction
if(type == "wavelet") {
first.last.c[, 1] <- rep(0, nlevels + 1)
first.last.c[, 2] <- ndecim^(0:nlevels) - 1
first.last.c[, 3] <- rev(c(0, cumsum(rev(1 +
first.last.c[, 2]))[1:nlevels]))
first.last.d[, 1] <- rep(0, nlevels)
first.last.d[, 2] <- ndecim^(0:(nlevels - 1)) - 1
first.last.d[, 3] <- rev(c(0, cumsum(rev(1 +
first.last.d[, 2]))[1:(nlevels - 1)]))
nvecs.c <- first.last.c[1, 3] + 1
nvecs.d <- first.last.d[1, 3] + 1
}
else if(type == "station") {
#
#
# in case nondecimated Multiple wavelet transform is implemented
# then this code might be of use (will need adapting)
#
first.last.c[, 1] <- rep(0, nlevels + 1)
first.last.c[, 2] <- 2^nlevels - 1
first.last.c[, 3] <- rev(c(0, cumsum(rev(1 +
first.last.c[, 2]))[1:nlevels]))
first.last.d[, 1] <- rep(0, nlevels)
first.last.d[, 2] <- 2^nlevels - 1
first.last.d[, 3] <- rev(c(0, cumsum(rev(1 +
first.last.d[, 2]))[1:(nlevels - 1)]))
ntotal <- (nlevels + 1) * 2^nlevels
ntotal.d <- nlevels * 2^nlevels
}
}
else if(bc == "symmetric") {
# Symmetric boundary reflection
first.last.c[nlevels + 1, 1] <- 0
first.last.c[nlevels + 1, 2] <- 2^nlevels - 1
first.last.c[nlevels + 1, 3] <- 0
nvecs.c <- first.last.c[nlevels + 1, 2] - first.last.c[nlevels +
1, 1] + 1
nvecs.d <- 0
for(i in nlevels:1) {
first.last.c[i, 1] <- trunc(0.5 * (1 - LengthH +
first.last.c[i + 1, 1]))
first.last.c[i, 2] <- trunc(0.5 * first.last.c[i + 1, 2
])
first.last.c[i, 3] <- first.last.c[i + 1, 3] +
first.last.c[i + 1, 2] - first.last.c[i + 1, 1] +
1
first.last.d[i, 1] <- trunc(0.5 * (first.last.c[i + 1,
1] - 1))
first.last.d[i, 2] <- trunc(0.5 * (first.last.c[i + 1,
2] + LengthH - 2))
if(i != nlevels) {
first.last.d[i, 3] <- first.last.d[i + 1, 3] +
first.last.d[i + 1, 2] - first.last.d[i + 1,
1] + 1
}
nvecs.c <- nvecs.c + first.last.c[i, 2] - first.last.c[
i, 1] + 1
nvecs.d <- nvecs.d + first.last.d[i, 2] - first.last.d[
i, 1] + 1
}
}
else {
stop("Unknown boundary correction method")
}
names(nvecs.c) <- NULL
names(nvecs.d) <- NULL
list(first.last.c = first.last.c, nvecs.c = nvecs.c, first.last.d =
first.last.d, nvecs.d = nvecs.d)
}
"modernise"<-
function(...)
UseMethod("modernise")
"modernise.wd"<-
function(wd, ...)
{
if(IsEarly(wd)) {
cat("Converting wavelet object to latest release\n")
wd$type <- "wavelet"
wd$date <- date()
}
else cat("Object is already up to date\n")
wd
}
"mpostfilter"<-
function(C, prefilter.type, filter.type, nphi, npsi, ndecim, nlevels, verbose
= FALSE)
{
ndata <- ndecim^nlevels * nphi
if(prefilter.type == "Repeat")
ndata <- ndecim^(nlevels - 1) * nphi
data <- rep(0, ndata)
if(filter.type == "Geronimo") {
if(prefilter.type == "Minimal") {
if(verbose == TRUE)
cat(" O.K.\nPostfilter (Minimal)\n")
w <- 1
data[(1:(ndata/2)) * 2 - 1] <- 2/w * C[2, (1:(ndata/2))
]
data[(1:(ndata/2)) * 2] <- - sqrt(2)/w * C[1, (1:(
ndata/2))] + 4/w * C[2, (1:(ndata/2))]
}
else if(prefilter.type == "Identity") {
if(verbose == TRUE)
cat(" O.K.\nPostfilter (identity)\n")
data[(1:(ndata/2)) * 2 - 1] <- C[1, (1:(ndata/2))]
data[(1:(ndata/2)) * 2] <- C[2, (1:(ndata/2))]
}
else if(prefilter.type == "Repeat") {
if(verbose == TRUE)
cat(" O.K.\nPostfilter (weighted average)\n")
for(k in 1:ndata)
data[k] <- (C[2, k] + C[1, k]/sqrt(2))/2
}
else if(prefilter.type == "Interp" || prefilter.type ==
"default") {
if(verbose == TRUE)
cat(" O.K.\nPostfilter (interpolation)\n")
t <- sqrt(96/25)
u <- sqrt(3)
data[2 * (1:(ndata/2))] <- u * C[2, (1:(ndata/2))]
data[2 * (2:(ndata/2)) - 1] <- t * C[1, (2:(ndata/2))] -
0.29999999999999999 * (data[2 * (2:(ndata/2)) -
2] + data[2 * (2:(ndata/2))])
data[1] <- t * C[1, 1] - 0.29999999999999999 * (data[
ndata] + data[2])
}
else if(prefilter.type == "Xia") {
if(verbose == TRUE)
cat(" O.K.\nPostfilter (Xia)\n")
epsilon1 <- 0
epsilon2 <- 0.10000000000000001
root2 <- sqrt(2)
x <- (2 * root2)/(5 * (root2 * epsilon2 - epsilon1))
a <- (x - epsilon1 + epsilon2 * 2 * root2)/2
b <- (x + epsilon1 - epsilon2 * 2 * root2)/2
c <- (x + 4 * epsilon1 - epsilon2 * 3 * root2)/(root2 *
2)
d <- (x - 4 * epsilon1 + epsilon2 * 3 * root2)/(root2 *
2)
data[2 * (1:(ndata/2))] <- d * C[1, 1:(ndata/2)] - b *
C[2, 1:(ndata/2)]
data[2 * (1:(ndata/2)) - 1] <- a * C[2, 1:(ndata/2)] -
c * C[1, 1:(ndata/2)]
}
else if(prefilter.type == "Roach1") {
q1 <- 0.32982054290000001
q2 <- 0.23184851840000001
q3 <- 0.8187567536
q4 <- -0.29459505809999997
q5 <- -0.1629787369
q6 <- 0.23184851840000001
q7 <- -0.23184851840000001
q8 <- -0.1629787369
q9 <- 0.29459505809999997
q10 <- 0.8187567536
q11 <- -0.23184851840000001
q12 <- 0.32982054290000001
nn <- (ndata - 2)/2
QB <- matrix(c(q2, q1, q8, q7), ncol = 2, byrow = TRUE)
QA <- matrix(c(q4, q3, q10, q9), ncol = 2, byrow = TRUE)
QZ <- matrix(c(q6, q5, q12, q11), ncol = 2, byrow = TRUE)
partition <- matrix(data, nrow = 2, byrow = FALSE)
partition[, (2:nn)] <- QB %*% C[, (2:nn) - 1] + QA %*%
C[, (2:nn)] + QZ %*% C[, (2:nn) + 1]
partition[, 1] <- QB %*% C[, nn + 1] + QA %*% C[, 1] +
QZ %*% C[, 2]
partition[, nn + 1] <- QB %*% C[, nn] + QA %*% C[, nn +
1] + QZ %*% C[, 1]
data <- c(partition)
}
else if(prefilter.type == "Roach3") {
q1 <- 0.084397403440000004
q2 <- -0.0036003129089999999
q3 <- 0.084858161210000005
q4 <- 0.99279918550000001
q5 <- -0.00015358592229999999
q6 <- -0.0036003129089999999
q7 <- -0.0036003129089999999
q8 <- 0.00015358592229999999
q9 <- 0.99279918550000001
q10 <- -0.084858161210000005
q11 <- -0.0036003129089999999
q12 <- -0.084397403440000004
nn <- (ndata - 2)/2
QZ <- matrix(c(q7, q8, q1, q2), ncol = 2, byrow = TRUE)
QA <- matrix(c(q9, q10, q3, q4), ncol = 2, byrow = TRUE)
QB <- matrix(c(q11, q12, q5, q6), ncol = 2, byrow = TRUE)
partition <- matrix(data, nrow = 2, byrow = FALSE)
partition[, (2:nn)] <- QB %*% C[, (2:nn) - 1] + QA %*%
C[, (2:nn)] + QZ %*% C[, (2:nn) + 1]
partition[, 1] <- QB %*% C[, nn + 1] + QA %*% C[, 1] +
QZ %*% C[, 2]
partition[, nn + 1] <- QB %*% C[, nn] + QA %*% C[, nn +
1] + QZ %*% C[, 1]
data <- c(partition)
}
else stop("Specified postfilter not available for given multiwavelet"
)
}
else if(filter.type == "Donovan3") {
if(prefilter.type == "Identity") {
if(verbose == TRUE)
cat(" O.K.\nPostfilter (identity)\n")
data[(1:(ndata/3)) * 3 - 2] <- C[1, (1:(ndata/3))]
data[(1:(ndata/3)) * 3 - 1] <- C[2, (1:(ndata/3))]
data[(1:(ndata/3)) * 3] <- C[3, (1:(ndata/3))]
}
else if(prefilter.type == "Linear") {
cat(" O.K.\nPostfilter (Linear)\n")
if(verbose == TRUE)
data[(1:(ndata/3)) * 3 - 2] <- C[1, (1:(ndata/3
))] * -4.914288 + 4.914288 * C[2, (1:(ndata/3
))]
data[(1:(ndata/3)) * 3 - 1] <- C[1, (1:(ndata/3))] *
-2.778375 + 3.778375 * C[2, (1:(ndata/3))]
data[(1:(ndata/3)) * 3] <- C[1, (1:(ndata/3))] *
-2.298365 + 3.298365 * C[2, (1:(ndata/3))] + C[
3, (1:(ndata/3))]
}
else if(prefilter.type == "Interp" || prefilter.type ==
"default") {
if(verbose == TRUE)
cat(" O.K.\nPostfilter (interpolation)\n")
w <- sqrt(5)
lc <- length(data)/3
data[3 * (0:(lc - 1)) + 1] <- C[1, 1:lc] * sqrt(11/7)
data[2] <- ( - (2 + 6 * w) * C[1, lc] - (3 + 2 * w) * C[
1, 1] + 6 * sqrt(77) * C[2, 1] + ((103 - 24 * w
) * sqrt(7))/(16 - 5 * w) * C[3, 1])/(9 * sqrt(
77))
data[3 * (1:(lc - 1)) + 2] <- ( - (2 + 6 * w) * C[1, 1:(
lc - 1)] - (3 + 2 * w) * C[1, (2:lc)] + 6 *
sqrt(77) * C[2, (2:lc)] + ((103 - 24 * w) *
sqrt(7))/(16 - 5 * w) * C[3, (2:lc)])/(9 * sqrt(
77))
data[3] <- ((-3 + 2 * w)/(3 * sqrt(231)) * C[1, lc] + (
-2
+ 6 * w)/(3 * sqrt(231)) * C[1, 1] + 2/sqrt(3) *
C[2, 1] + (306 - 112 * w)/((16 - 5 * w) * 3 *
sqrt(33)) * C[3, 1])/sqrt(3)
data[3 * (2:lc)] <- ((-3 + 2 * w)/(3 * sqrt(231)) * C[1,
(1:(lc - 1))] + (-2 + 6 * w)/(3 * sqrt(231)) *
C[1, (2:lc)] + 2/sqrt(3) * C[2, (2:lc)] + (306 -
112 * w)/((16 - 5 * w) * 3 * sqrt(33)) * C[3, (
2:lc)])/sqrt(3)
}
else stop("Specified postfilter not available for given multiwavelet"
)
}
else stop("No postfilters for type of multiwavelet")
return(data)
}
"mprefilter"<-
function(data, prefilter.type, filter.type, nlevels, nvecs.c, nphi, npsi,
ndecim, verbose = FALSE)
{
#function that takes original data and computes the starting level
#coefficients for the wavelet decomposition
#
ndata <- length(data)
C <- matrix(rep(0, nvecs.c * nphi), nrow = nphi) #
#jump to type of multiwavelet
if(filter.type == "Geronimo") {
if(prefilter.type == "Minimal") {
if(verbose == TRUE)
cat(" O.K.\nPrefilter (Minimal)...")
w <- 1
C[1, 1:(ndata/2)] <- w * sqrt(2) * data[(1:(ndata/2)) *
2 - 1] - w/sqrt(2) * data[(1:(ndata/2)) * 2]
C[2, 1:(ndata/2)] <- w * 0.5 * data[(1:(ndata/2)) * 2 -
1]
}
else if(prefilter.type == "Identity") {
if(verbose == TRUE)
cat(" O.K.\nPrefilter (Identity)...")
for(l in 1:nphi) {
C[l, 1:(ndata/nphi)] <- data[(0:((ndata/nphi) -
1)) * nphi + l]
}
}
else if(prefilter.type == "Repeat") {
if(verbose == TRUE)
cat(" O.K.\nRepeating signal...")
C[1, 1:(ndata)] <- data[1:ndata] * sqrt(2)
C[2, 1:(ndata)] <- data[1:ndata]
}
else if(prefilter.type == "Interp" || prefilter.type ==
"default") {
if(verbose == TRUE)
cat(" O.K.\nPrefilter (interpolation)...")
r <- sqrt(25/96)
s <- sqrt(1/3)
a <- -0.29999999999999999
C[2, (1:(ndata/2))] <- s * data[2 * (1:(ndata/2))]
C[1, 1] <- r * (data[1] - a * (data[ndata] + data[2]))
C[1, (2:(ndata/2))] <- r * (data[2 * (2:(ndata/2)) - 1] -
a * (data[2 * (2:(ndata/2)) - 2] + data[2 * (2:(
ndata/2))]))
}
else if(prefilter.type == "Xia") {
if(verbose == TRUE)
cat(" O.K.\nPrefilter (Xia) ...")
epsilon1 <- 0
epsilon2 <- 0.10000000000000001
root2 <- sqrt(2)
x <- (2 * root2)/(5 * (root2 * epsilon2 - epsilon1))
a <- (x - epsilon1 + epsilon2 * 2 * root2)/2
b <- (x + epsilon1 - epsilon2 * 2 * root2)/2
c <- (x + 4 * epsilon1 - epsilon2 * 3 * root2)/(root2 *
2)
d <- (x - 4 * epsilon1 + epsilon2 * 3 * root2)/(root2 *
2)
C[1, (1:(ndata/2))] <- a * data[2 * (1:(ndata/2))] + b *
data[2 * (1:(ndata/2)) - 1]
C[2, (1:(ndata/2))] <- c * data[2 * (1:(ndata/2))] + d *
data[2 * (1:(ndata/2)) - 1]
}
else if(prefilter.type == "Roach1") {
q1 <- 0.32982054290000001
q2 <- 0.23184851840000001
q3 <- 0.8187567536
q4 <- -0.29459505809999997
q5 <- -0.1629787369
q6 <- 0.23184851840000001
q7 <- -0.23184851840000001
q8 <- -0.1629787369
q9 <- 0.29459505809999997
q10 <- 0.8187567536
q11 <- -0.23184851840000001
q12 <- 0.32982054290000001
QB <- matrix(c(q2, q1, q8, q7), ncol = 2, byrow = TRUE)
QA <- matrix(c(q4, q3, q10, q9), ncol = 2, byrow = TRUE)
QZ <- matrix(c(q6, q5, q12, q11), ncol = 2, byrow = TRUE)
nn <- (ndata - 2)/2
partition <- matrix(data, nrow = 2, byrow = FALSE)
C[, (2:nn)] <- QB %*% partition[, (2:nn) - 1] + QA %*%
partition[, (2:nn)] + QZ %*% partition[, (2:nn) +
1]
C[, 1] <- QB %*% partition[, nn + 1] + QA %*% partition[
, 1] + QZ %*% partition[, 2]
C[, nn + 1] <- QB %*% partition[, nn] + QA %*%
partition[, nn + 1] + QZ %*% partition[, 1]
}
else if(prefilter.type == "Roach3") {
q1 <- 0.084397403440000004
q2 <- -0.0036003129089999999
q3 <- 0.084858161210000005
q4 <- 0.99279918550000001
q5 <- -0.00015358592229999999
q6 <- -0.0036003129089999999
q7 <- -0.0036003129089999999
q8 <- 0.00015358592229999999
q9 <- 0.99279918550000001
q10 <- -0.084858161210000005
q11 <- -0.0036003129089999999
q12 <- -0.084397403440000004
nn <- (ndata - 2)/2
QB <- matrix(c(q7, q8, q1, q2), ncol = 2, byrow = FALSE)
QA <- matrix(c(q9, q10, q3, q4), ncol = 2, byrow = FALSE)
QZ <- matrix(c(q11, q12, q5, q6), ncol = 2, byrow = FALSE)
partition <- matrix(data, nrow = 2, byrow = FALSE)
C[, (2:nn)] <- QB %*% partition[, (2:nn) - 1] + QA %*%
partition[, (2:nn)] + QZ %*% partition[, (2:nn) +
1]
C[, 1] <- QB %*% partition[, nn + 1] + QA %*% partition[
, 1] + QZ %*% partition[, 2]
C[, nn + 1] <- QB %*% partition[, nn] + QA %*%
partition[, nn + 1] + QZ %*% partition[, 1]
}
else stop("Bad prefilter for specified multiwavelet filter")
}
else if(filter.type == "Donovan3") {
if(prefilter.type == "Identity") {
if(verbose == TRUE)
cat(" O.K.\nPrefilter (Identity)...")
for(l in 1:nphi) {
C[l, 1:(ndata/nphi)] <- data[(0:((ndata/nphi) -
1)) * nphi + l]
}
}
else if(prefilter.type == "Linear") {
if(verbose == TRUE)
cat(" O.K.\nPrefilter (Linear)...")
                        C[1, 1:(ndata/3)] <- data[3 * 0:((ndata/3) - 1) + 1] *
                                -0.76885512 + data[3 * 0:((ndata/3) - 1) + 2]
                        C[2, 1:(ndata/3)] <- data[3 * 0:((ndata/3) - 1) + 1] *
                                -0.56536682999999999 + data[3 * 0:((ndata/3) - 1) + 2]
C[3, 1:(ndata/3)] <- data[3 * 0:((ndata/3) - 1) + 1] *
0.097676540000000006 - data[3 * 0:((ndata/3) -
1) + 2] + data[3 * 1:(ndata/3)]
}
else if(prefilter.type == "Interp" || prefilter.type ==
"default") {
if(verbose == TRUE)
cat(" O.K.\nPrefilter (Interpolation)...")
w <- sqrt(5)
lc <- length(data)/3
C[1, 1:lc] <- data[3 * (0:(lc - 1)) + 1] * sqrt(7/11)
C[3, 1] <- ((sqrt(3) * (data[2] - data[3]) + (C[1, lc] * (
-1 + 8 * w))/3/sqrt(231) + (C[1, 1] * (1 + 8 *
w))/3/sqrt(231)) * 3 * sqrt(33) * (16 - 5 * w))/
(-203 + 88 * w)
C[3, 2:lc] <- ((sqrt(3) * (data[3 * (1:(lc - 1)) + 2] -
data[3 * (2:lc)]) + (C[1, 1:(lc - 1)] * (-1 + 8 *
w))/3/sqrt(231) + (C[1, 2:lc] * (1 + 8 * w))/3/
sqrt(231)) * 3 * sqrt(33) * (16 - 5 * w))/(-203 +
88 * w)
C[2, 1] <- ((sqrt(3) * data[2] + (C[1, lc] * (2 + 6 * w
))/3/sqrt(231) + (C[1, 1] * (3 + 2 * w))/3/sqrt(
231) - (C[3, 1] * (103 - 24 * w))/3/sqrt(33)/(
16 - 5 * w)) * sqrt(3))/2
C[2, 2:lc] <- ((sqrt(3) * data[3 * (1:(lc - 1)) + 2] + (
C[1, 1:(lc - 1)] * (2 + 6 * w))/3/sqrt(231) + (
C[1, 2:lc] * (3 + 2 * w))/3/sqrt(231) - (C[3, 2:
lc] * (103 - 24 * w))/3/sqrt(33)/(16 - 5 * w)) *
sqrt(3))/2
}
else stop("Bad prefilter for specified multiwavelet filter")
}
else stop("No prefilter for the multiwavelet filter")
return(C)
}
"mwd"<-
function(data, prefilter.type = "default", filter.type = "Geronimo", bc =
"periodic", verbose = FALSE)
{
#
#applies the Discrete Multiple Wavelet Transform to data
#copyright Tim Downie 1995-1996
#
if(verbose == TRUE) cat("Multiple wavelet decomposition\n")
if(verbose == TRUE)
cat("Checking Arguements...")
if(bc != "periodic")
stop("\nOnly periodic boundary conditions allowed at the moment"
)
filter <- mfilter.select(type = filter.type)
ndata <- length(data) #
#
# check ndata = filter$nphi * filter$ndecim ^ nlevels
#
#
nlevels <- log(ndata/filter$nphi)/log(filter$ndecim) #
#
# repeated signal prefilter has one extra level
#
if(prefilter.type == "Repeat")
nlevels <- nlevels + 1
if(nlevels != round(nlevels) || nlevels < 1)
stop("\nbad number of data points for this filter\n")
if(verbose == TRUE)
cat(" O.K.\nBuilding first/last database ...")
fl <- mfirst.last(LengthH = filter$NH, nlevels = nlevels, ndecim =
filter$ndecim, type = "wavelet", bc = bc) #
if(bc == "periodic")
nbc <- 1
else if(bc == "symmetric")
nbc <- 2
C <- mprefilter(data, prefilter.type, filter.type, nlevels, fl$nvecs.c,
filter$nphi, filter$npsi, filter$ndecim, verbose)
if(verbose == TRUE)
cat(" O.K.\nRunning decomposition algorithm...")
gwd <- .C("multiwd",
C = as.double(C),
lengthc = as.integer(fl$nvecs.c * filter$nphi),
D = as.double(rep(0, fl$nvecs.d * filter$npsi)),
lengthd = as.integer(fl$nvecs.d * filter$npsi),
nlevels = as.integer(nlevels),
nphi = as.integer(filter$nphi),
npsi = as.integer(filter$npsi),
ndecim = as.integer(filter$ndecim),
H = as.double(filter$H),
G = as.double(filter$G),
NH = as.integer(filter$NH),
lowerc = as.integer(fl$first.last.c[, 1]),
upperc = as.integer(fl$first.last.c[, 2]),
offsetc = as.integer(fl$first.last.c[, 3]),
lowerd = as.integer(fl$first.last.d[, 1]),
upperd = as.integer(fl$first.last.d[, 2]),
offsetd = as.integer(fl$first.last.d[, 3]),
nbc = as.integer(nbc), PACKAGE = "wavethresh") #
# the C function returns the C and D coefficients as a vector
# convert into a matrix with nphi rows.
#
gwd$C <- matrix(gwd$C, nrow = filter$nphi)
gwd$D <- matrix(gwd$D, nrow = filter$npsi)
outlist <- list(C = gwd$C, D = gwd$D, nlevels = nlevels, ndata = ndata,
filter = filter, fl.dbase = fl, type = "wavelet", bc = bc,
prefilter = prefilter.type, date = date())
class(outlist) <- "mwd"
if(verbose == TRUE)
cat(" O.K.\nReturning Multiple Wavelet Decomposition\n")
return(outlist)
}
"mwr"<-
function(mwd, prefilter.type = mwd$prefilter, verbose = FALSE, start.level = 0,
returnC = FALSE)
{
#function to reconstruct the data from an object of class mwd
#a multiwavelet decomposition
#Tim Downie
#last updated May 96
if(verbose == TRUE) cat("Multiple wavelet reconstruction\nArguement checking ..."
)
ctmp <- class(mwd)
if(is.null(ctmp))
stop("Input must have class mwd")
else if(ctmp != "mwd")
stop("Input must have class mwd")
if(mwd$prefilter != prefilter.type)
warning("The pre/postfilters are inconsistent\n")
if(start.level < 0 || start.level >= nlevelsWT(mwd)) stop(
"Start.level out of range\n") #
# keep the value of the Cs at level 0 reset all the others
#
if(verbose == TRUE)
cat(" O.K.\nInitialising variables ...")
C <- matrix(rep(0, length(mwd$C)), nrow = mwd$filter$nphi)
c0low <- mwd$fl.dbase$first.last.c[start.level + 1, 3] + 1
c0high <- c0low + mwd$fl.dbase$first.last.c[start.level + 1, 2] - mwd$
fl.dbase$first.last.c[start.level + 1, 1]
for(l in 1:mwd$filter$nphi)
C[l, c0low:c0high] <- mwd$C[l, c0low:c0high]
if(mwd$bc == "periodic")
nbc <- 1
else if(mwd$bc == "symmetric")
nbc <- 2
else stop("bad boundary conditions")
if(verbose == TRUE)
cat(" O.K.\nRunning Reconstruction algorithm...")
reconstr <- .C("multiwr",
C = as.double(C),
lengthc = as.integer(mwd$fl.dbase$ntotal),
D = as.double(mwd$D),
lengthd = as.integer(mwd$fl.dbase$ntotal.d),
nlevels = as.integer(nlevelsWT(mwd)),
nphi = as.integer(mwd$filter$nphi),
npsi = as.integer(mwd$filter$npsi),
ndecim = as.integer(mwd$filter$ndecim),
H = as.double(mwd$filter$H),
G = as.double(mwd$filter$G),
NH = as.integer(mwd$filter$NH),
lowerc = as.integer(mwd$fl.dbase$first.last.c[, 1]),
upperc = as.integer(mwd$fl.dbase$first.last.c[, 2]),
offsetc = as.integer(mwd$fl.dbase$first.last.c[, 3]),
lowerd = as.integer(mwd$fl.dbase$first.last.d[, 1]),
upperd = as.integer(mwd$fl.dbase$first.last.d[, 2]),
offsetd = as.integer(mwd$fl.dbase$first.last.d[, 3]),
nbc = as.integer(nbc),
startlevel = as.integer(start.level), PACKAGE = "wavethresh")
ndata <- mwd$filter$ndecim^nlevelsWT(mwd)* mwd$filter$nphi
reconstr$C <- matrix(reconstr$C, nrow = mwd$filter$nphi)
if(returnC == TRUE) {
if(verbose == TRUE)
cat(" O.K.\nReturning starting coefficients\n")
return(reconstr$C[, (1:(ndata/mwd$filter$nphi))])
}
if(verbose == TRUE)
cat(" O.K.\nApply post filter...")
ndata <- mwd$filter$ndecim^nlevelsWT(mwd)* mwd$filter$nphi
data <- mpostfilter(reconstr$C, prefilter.type, mwd$filter$type, mwd$
filter$nphi, mwd$filter$npsi, mwd$filter$ndecim, nlevelsWT(mwd),
verbose)
if(verbose == TRUE)
cat(" O.K.\nReturning data\n")
return(data)
}
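#
# A minimal round-trip sketch for mwd()/mwr(): decompose and reconstruct a
# series of length nphi * 2^J (128 here, an illustrative choice) with the
# default Geronimo filter and prefilter. Kept inside if (FALSE) so sourcing
# this file does not run it.
#
if (FALSE) {
        y <- rnorm(128)
        ymwd <- mwd(y)
        yrec <- mwr(ymwd)
        max(abs(yrec - y))      # should be small if the pre/postfilter pair is exact
}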
"newsure"<-
function(s, x)
{
x <- abs(x)
d <- length(x)
sl <- sort.list(x)
y <- x[sl]
sigma <- s[sl]
cy <- cumsum(y^2)
cy <- c(0, cy[1:(length(cy) - 1)])
csigma <- cumsum(sigma^2)
csigma <- c(0, csigma[1:(length(csigma) - 1)])
ans <- d - 2 * csigma + cy + d:1 * y^2
m <- min(ans)
index <- (1:length(ans))[m == ans]
return(y[index])
}
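#
# A minimal sketch of newsure(): a SURE-style threshold choice given
# per-coefficient noise standard deviations s and coefficients x (the unit
# noise level below is an illustrative assumption). Kept inside if (FALSE) so
# sourcing this file does not run it.
#
if (FALSE) {
        x <- rnorm(64)
        newsure(rep(1, 64), x)
}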
"nlevelsWT"<-
function(...)
UseMethod("nlevelsWT")
#"nlevels.default"<-
#function(object, ...)
#{
# if(is.null(object$nlevels)) {
# n <- length(object)
# return(IsPowerOfTwo(n))
# }
# else return(object$nlevels)
#}
#MAN: changed function below to cope with $nlevels deprecation (R-2.6.0 onwards).
"nlevelsWT.default"<-
function(object, ...)
{
if (is.list(object)){
                if(!is.null(object$nlevels)){ # "normal" object
return(object$nlevels)
}
else{
if(isa(object,"uncompressed")){ # 2 special cases
return(IsPowerOfTwo(object$v))
}
else if(isa(object, "griddata")){
return(IsPowerOfTwo(object$gridy))
}
else{ # what to do? e.g. tpwd,wpstDO,compressed classes.
print("I don't know what to do with this object!\n")
stop("unknown nlevels")
}
}
}
else{ #data should be atomic (numeric)...
return(IsPowerOfTwo(length(object)))
}
}
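#
# A minimal sketch of nlevelsWT(): for atomic data of dyadic length it returns
# log2 of the length; for wavethresh objects it returns the stored number of
# levels. Kept inside if (FALSE) so sourcing this file does not run it.
#
if (FALSE) {
        nlevelsWT(rep(0, 16))           # 4
        nlevelsWT(wd(rnorm(64)))        # 6
}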
"nullevels"<-
function(...)
UseMethod("nullevels")
"nullevels.imwd"<-
function(imwd, levelstonull, ...)
{
nlevels <- nlevelsWT(imwd)
if(max(levelstonull) > nlevels - 1)
stop(paste("Illegal level to null, maximum is ", nlevels - 1))
if(min(levelstonull) < 0)
stop(paste("Illegal level to null, minimum is ", nlevels - 1))
for(lev in levelstonull) {
n1 <- lt.to.name(lev, type = "CD")
n2 <- lt.to.name(lev, type = "DC")
n3 <- lt.to.name(lev, type = "DD")
imwd[[n1]] <- rep(0, length(imwd[[n1]]))
imwd[[n2]] <- rep(0, length(imwd[[n2]]))
imwd[[n3]] <- rep(0, length(imwd[[n3]]))
}
imwd
}
"nullevels.wd"<-
function(wd, levelstonull, ...)
{
nlevels <- nlevelsWT(wd)
if(max(levelstonull) > nlevels - 1)
stop(paste("Illegal level to null, maximum is ", nlevels - 1))
if(min(levelstonull) < 0)
stop(paste("Illegal level to null, minimum is ", nlevels - 1))
for(lev in levelstonull) {
d <- accessD(wd, level = lev)
d <- rep(0, length(d))
wd <- putD(wd, level = lev, v = d)
}
wd
}
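#
# A minimal sketch of nullevels.wd(): zero out the detail coefficients at the
# three coarsest levels of a decomposition (the data and filter choice are
# illustrative assumptions). Kept inside if (FALSE) so sourcing this file does
# not run it.
#
if (FALSE) {
        ywd <- wd(rnorm(128), filter.number = 2, family = "DaubExPhase")
        ywd0 <- nullevels(ywd, levelstonull = 0:2)
        accessD(ywd0, level = 1)        # now all zero
}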
"nullevels.wst"<-
function(wst, levelstonull, ...)
{
nullevels.wd(wst, levelstonull = levelstonull)
}
"numtonv"<-
function(number, nlevels)
{
if(nlevels < 1)
stop("nlevels cannot be less than 1")
if(number < 0)
stop("Number cannot be less than 0")
else if(number > 2^nlevels - 1)
stop(paste("Number cannot be more than", 2^nlevels - 1))
node.vector <- vector("list", nlevels)
matchcodes <- c("L", "R")
mask <- 2^(nlevels - 1)
cmc <- NULL
for(i in (nlevels - 1):0) {
index <- floor(number/mask)
if(index == 1)
number <- number - mask
mask <- mask/2
cmc <- c(cmc, index)
}
for(i in (nlevels - 1):0) {
index <- cmc[i + 1]
nul <- 2^(nlevels - i - 1)
upperl <- rep(0, nul)
upperctrl <- rep(matchcodes[index + 1], nul)
node.vector[[i + 1]] <- list(upperctrl = upperctrl, upperl =
upperl)
}
node.vector <- list(node.list = node.vector, nlevels = nlevels)
class(node.vector) <- "nv"
node.vector
}
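#
# A minimal sketch of numtonv(): convert a basis number into a node vector
# selecting one of the 2^nlevels shifted bases of a wst object, then invert
# with InvBasis (data, filter and basis number are illustrative assumptions).
# Kept inside if (FALSE) so sourcing this file does not run it.
#
if (FALSE) {
        ywst <- wst(rnorm(16), filter.number = 2, family = "DaubExPhase")
        nv <- numtonv(5, nlevelsWT(ywst))
        yrec <- InvBasis(ywst, nv)
}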
"plot.imwd"<-
function(x, scaling = "by.level", co.type = "abs", package = "R",
plot.type = "mallat", arrangement = c(3, 3), transform = FALSE, tfunction
= sqrt, ...)
{
#
#
# Check class of imwd
#
if(package != "R" && package != "S") stop("Unknown package")
ctmp <- class(x)
if(is.null(ctmp))
stop("imwd has no class")
else if(ctmp != "imwd")
stop("imwd is not of class imwd")
if(x$type == "station" && plot.type == "mallat")
stop("Cannot do Mallat type plot on nondecimated wavelet object")
Csize <- 2^(nlevelsWT(x))
m <- matrix(0, nrow = Csize, ncol = Csize)
first.last.d <- x$fl.dbase$first.last.d
first.last.c <- x$fl.dbase$first.last.c
if(plot.type == "mallat") {
for(level in (nlevelsWT(x)):1) {
ndata <- 2^(level - 1)
firstD <- first.last.d[level, 1]
lastD <- first.last.d[level, 2]
LengthD <- lastD - firstD + 1
sel <- seq(from = (1 - firstD), length = ndata) #
#
# Extract CD for this level
#
nm <- lt.to.name(level - 1, "CD")
msub1 <- matrix(x[[nm]], nrow = LengthD, ncol =
LengthD) #
#
# Extract DC for this level
#
nm <- lt.to.name(level - 1, "DC")
msub2 <- matrix(x[[nm]], nrow = LengthD, ncol =
LengthD) #
#
# Extract DD for this level
#
nm <- lt.to.name(level - 1, "DD")
msub3 <- matrix(x[[nm]], nrow = LengthD, ncol =
LengthD) #
#
#
# Work out if we want to display the absolute values or the actual
# values
#
if(co.type == "abs") {
msub1 <- abs(msub1)
msub2 <- abs(msub2)
msub3 <- abs(msub3)
}
else if(co.type == "mabs") {
msub1 <- - abs(msub1)
msub2 <- - abs(msub2)
msub3 <- - abs(msub3)
}
else if(co.type != "none")
stop("Unknown co.type")
if(transform == TRUE) {
msub1 <- tfunction(msub1)
msub2 <- tfunction(msub2)
msub3 <- tfunction(msub3)
}
if(scaling == "by.level") {
if(ndata == 1) {
r.m1 <- range(c(as.vector(msub1), as.vector(
msub2), as.vector(msub3)))
r.m2 <- r.m1
r.m3 <- r.m1
}
else {
r.m1 <- range(msub1)
r.m2 <- range(msub2)
r.m3 <- range(msub3)
}
if(r.m1[2] - r.m1[1] == 0) {
msub1[, ] <- 0
}
else {
mu1 <- 249/(r.m1[2] - r.m1[1])
msub1 <- mu1 * (msub1 - r.m1[1])
}
if(r.m2[2] - r.m2[1] == 0) {
msub2[, ] <- 0
}
else {
mu2 <- 249/(r.m2[2] - r.m2[1])
msub2 <- mu2 * (msub2 - r.m2[1])
}
if(r.m3[2] - r.m3[1] == 0) {
msub3[, ] <- 0
}
else {
mu3 <- 249/(r.m3[2] - r.m3[1])
msub3 <- mu3 * (msub3 - r.m3[1])
}
}
else {
range.msub <- range(c(msub1, msub2, msub3))
multiplier <- 255/(range.msub[2] - range.msub[1
])
msub1 <- multiplier * (msub1 - range.msub[1])
msub2 <- multiplier * (msub2 - range.msub[1])
msub3 <- multiplier * (msub3 - range.msub[1]) #
}
m[(ndata + 1):(2 * ndata), 1:ndata] <- msub1[sel, sel]
m[1:ndata, (ndata + 1):(2 * ndata)] <- msub2[sel, sel]
m[(ndata + 1):(2 * ndata), (ndata + 1):(2 * ndata)] <-
msub3[sel, sel]
}
if(package == "R") {
image(m, xaxt = "n", yaxt = "n",...)
axis(1, at = c(0, 2^((nlevelsWT(x)- 3):(nlevelsWT(x)))
))
axis(2, at = c(0, 2^((nlevelsWT(x)- 3):(nlevelsWT(x)))
))
}
else return(m)
}
else if(plot.type == "cols") {
oldpar <- par(mfrow = arrangement, pty = "s")
for(level in (nlevelsWT(x):1)) {
ndata <- 2^(level - 1)
firstD <- first.last.d[level, 1]
lastD <- first.last.d[level, 2]
LengthD <- lastD - firstD + 1
sel <- seq(from = (1 - firstD), length = ndata) #
#
# Extract CD for this level
#
nm <- lt.to.name(level - 1, "CD")
msub1 <- matrix(x[[nm]], nrow = LengthD, ncol =
LengthD) #
#
# Extract DC for this level
#
nm <- lt.to.name(level - 1, "DC")
msub2 <- matrix(x[[nm]], nrow = LengthD, ncol =
LengthD) #
#
# Extract DD for this level
#
nm <- lt.to.name(level - 1, "DD")
msub3 <- matrix(x[[nm]], nrow = LengthD, ncol =
LengthD) #
#
#
# Work out if we want to display the absolute values or the actual
# values
#
if(co.type == "abs") {
msub1 <- abs(msub1)
msub2 <- abs(msub2)
msub3 <- abs(msub3)
}
else if(co.type == "mabs") {
msub1 <- - abs(msub1)
msub2 <- - abs(msub2)
msub3 <- - abs(msub3)
}
else if(co.type != "none")
stop("Unknown co.type")
if(transform == TRUE) {
msub1 <- tfunction(msub1)
msub2 <- tfunction(msub2)
msub3 <- tfunction(msub3)
}
if(package == "R") {
xlabstr <- paste("Level", level - 1,
"(horizonatal)")
image(msub1, xlab = xlabstr)
xlabstr <- paste("Level", level - 1,
"(vertical)")
image(msub2, xlab = xlabstr)
xlabstr <- paste("Level", level - 1,
"(diagonal)")
image(msub3, xlab = xlabstr,...)
}
else {
warning("Not using R")
}
}
par(oldpar)
}
else stop("Unknown plot.type")
}
"plot.imwdc"<-
function(x, verbose = FALSE, ...)
{
imwd <- uncompress(x, verbose = verbose)
return(plot(imwd, ...))
}
plot.irregwd <-
function (x, xlabels, first.level = 1, main = "Wavelet Decomposition Coefficients",
scaling = "by.level", rhlab = FALSE, sub, ...)
{
ctmp <- class(x)
if (is.null(ctmp))
stop("irregwd has no class")
else if (ctmp != "irregwd")
stop("irregwd is not of class irregwd")
iwd <- x
wd <- x
class(wd) <- "wd"
levels <- nlevelsWT(wd)
nlevels <- levels - first.level
n <- 2^(levels - 1)
if (missing(sub))
sub <- wd$filter$name
plot(c(0, 0, n, n), c(0, nlevels + 1, nlevels + 1, 0), type = "n",
xlab = "Translate", ylab = "Resolution Level", main = main,
yaxt = "n", xaxt = "n", sub = sub, ...)
axis(2, at = 1:(nlevels), labels = ((levels - 1):first.level))
if (missing(xlabels)) {
axx <- c(0, 2^(nlevels - 2), 2^(nlevels - 1), 2^(nlevels -
1) + 2^(nlevels - 2), 2^nlevels)
axis(1, at = axx)
}
else {
axx <- pretty(1:n, n = 3)
if (axx[length(axx)] > n)
axx[length(axx)] <- n
axx[axx == 0] <- 1
axl <- signif(xlabels[axx], digits = 3)
axis(1, at = axx, labels = axl)
}
x <- 1:n
height <- 1
first.last.d <- wd$fl.dbase$first.last.d
axr <- NULL
if (scaling == "global") {
my <- 0
for (i in ((levels - 1):first.level)) {
y <- accessc(iwd, i)
my <- max(c(my, abs(y)))
}
}
for (i in ((levels - 1):first.level)) {
n <- 2^i
y <- accessc(iwd, i)
xplot <- x
ly <- length(y)
if (scaling == "by.level")
my <- max(abs(y))
y <- (0.5 * y)/my
axr <- c(axr, my)
segments(xplot, height, xplot, height + y)
if (i != first.level) {
x1 <- x[seq(1, n - 1, 2)]
x2 <- x[seq(2, n, 2)]
x <- (x1 + x2)/2
height <- height + 1
}
}
if (rhlab == TRUE)
axis(4, at = 1:length(axr), labels = signif(axr, 3))
axr
}
"plot.mwd"<-
function(x, first.level = 1, main = "Wavelet Decomposition Coefficients",
scaling = "compensated", rhlab = FALSE, sub = x$filter$name, NotPlotVal
= 0.050000000000000003, xlab = "Translate", ylab = "Resolution level",
return.scale = TRUE, colour = (2:(npsi + 1)), ...)
{
#plot.mwd
#plot a multiwavelet decomposition
#
#Tim Downie 1995-1996
#
#
# Check class of mwd
#
ctmp <- class(x)
if(is.null(ctmp))
stop("mwd has no class")
else if(ctmp == "wd")
stop("object is of class wd use plot.wd or plot")
else if(ctmp != "mwd")
stop("object is not of class mwd")
nlevels <- nlevelsWT(x)- first.level
mx <- x$ndata
xlabs <- seq(0, mx/2, length = 5)
plot(c(0, 0, mx, mx), c(0, nlevels + 1, nlevels + 1, 0), type = "n",
xlab = xlab, ylab = ylab, main = main, yaxt = "n", xaxt = "n",
sub=sub, ...)
axis(1, at = seq(0, mx, length = 5), labels = xlabs)
axis(2, at = 1:(nlevels), labels = (nlevelsWT(x)- 1):first.level)
delta <- 1
npsi <- x$filter$npsi
ndecim <- x$filter$ndecim
height <- 1
first.last.d <- x$fl.dbase$first.last.d
axr <- NULL
if(scaling == "global") {
my <- 0
for(i in ((nlevelsWT(x)- 1):first.level)) {
y <- c(accessD(x, i))
my <- max(c(my, abs(y)))
}
}
if(scaling == "compensated") {
my <- 0
for(i in ((nlevelsWT(x)- 1):first.level)) {
y <- c(accessD(x, i)) * x$filter$ndecim^(i/2)
my <- max(c(my, abs(y)))
}
}
for(i in ((nlevelsWT(x)- 1):first.level)) {
y <- c(accessD(x, i))
ly <- length(y)
n <- ly/npsi
if(scaling == "by.level")
my <- max(abs(y))
if(scaling == "compensated")
y <- y * ndecim^(i/2)
if(my == 0)
y <- rep(0, ly)
else y <- (0.5 * y)/my
axr <- c(axr, my)
xplot <- rep(((1:n) * mx)/(n + 1), rep(npsi, ly/npsi)) + (0:(
npsi - 1)) * delta
segments(xplot, height, xplot, height + y, col = colour)
height <- height + 1
}
if(rhlab == TRUE)
axis(4, at = 1:length(axr), labels = signif(axr, 3))
if(return.scale == TRUE)
return(axr)
else return(NULL)
}
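#
# Illustrative sketch (not part of the original source): a typical call
# sequence for this plot method. It assumes the mwd() constructor defined
# elsewhere in this package; the data are arbitrary Gaussian noise.
#
# y <- rnorm(256)
# ymwd <- mwd(y)
# plot(ymwd, scaling = "compensated", rhlab = TRUE)
#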
"plot.nvwp"<-
function(x, ...)
{
plotpkt(nlevelsWT(x))
pktlist <- print.nvwp(x, printing = FALSE)
for(i in 1:length(pktlist$level))
addpkt(pktlist$level[i], pktlist$pkt[i], 1, col = 1)
}
"plot.wd"<-
function(x, xlabvals, xlabchars, ylabchars, first.level = 0, main =
"Wavelet Decomposition Coefficients", scaling = "global", rhlab = FALSE,
	sub, NotPlotVal = 0.005, xlab = "Translate", ylab =
"Resolution Level", aspect = "Identity", ...)
{
if(IsEarly(x)) {
ConvertMessage()
stop()
}
if(is.complex(x$D) && aspect == "Identity") aspect <- "Mod" #
# Check class of wd
#
ctmp <- class(x)
if(is.null(ctmp))
stop("wd has no class")
else if(ctmp != "wd")
stop("wd is not of class wd")
levels <- nlevelsWT(x)
if(x$bc == "interval") {
if(first.level < x$current.scale)
warning(paste("plot.wd plotted from level", x$
current.scale,
" because \"wavelets on the interval\" transform was only computed to this level\n"
))
first.level <- x$current.scale
}
nlevels <- levels - first.level
type <- x$type
if(IsEarly(x)) {
ConvertMessage()
stop()
}
if(type == "wavelet")
n <- 2^(levels - 1)
else if(type == "station")
n <- 2^levels
else stop("Unknown type for wavelet object")
if(missing(sub))
sub <- paste(switch(type,
wavelet = "Standard transform",
station = "Nondecimated transform"), x$filter$name)
if(aspect != "Identity")
sub <- paste(sub, "(", aspect, ")")
plot(c(0, 0, n, n), c(0, nlevels + 1, nlevels + 1, 0), type = "n", xlab
= xlab, ylab = ylab, main = main, yaxt = "n", xaxt = "n", sub
= sub, ...)
yll <- (levels - 1):first.level
if(missing(ylabchars))
axis(2, at = 1:(nlevels), labels = yll)
else if(length(ylabchars) != nlevels)
stop(paste("Should have ", nlevels, " entries in ylabchars"))
else axis(2, at = 1:(nlevels), labels = ylabchars)
if(missing(xlabchars)) {
if(missing(xlabvals)) {
if(type == "wavelet")
axx <- c(0, 2^(levels - 3), 2^(levels - 2), 2^(
levels - 2) + 2^(levels - 3), 2^(levels - 1))
else axx <- c(0, 2^(levels - 2), 2^(levels - 1), 2^(
levels - 1) + 2^(levels - 2), 2^levels)
axis(1, at = axx)
}
else {
lx <- pretty(xlabvals, n = 4)
cat("lx is ", lx, "\n")
if(lx[1] < min(xlabvals))
lx[1] <- min(xlabvals)
if(lx[length(lx)] > max(xlabvals))
lx[length(lx)] <- max(xlabvals)
cat("lx is ", lx, "\n")
xix <- NULL
for(i in 1:length(lx)) {
u <- (xlabvals - lx[i])^2
xix <- c(xix, (1:length(u))[u == min(u)])
}
axx <- xix
if(type == "wavelet")
axx <- xix/2
axl <- signif(lx, digits = 2)
axis(1, at = axx, labels = axl)
}
}
else axis(1, at = xlabvals, labels = xlabchars)
myxx <- 1:n
height <- 1
first.last.d <- x$fl.dbase$first.last.d
axr <- NULL
if(scaling == "global") {
my <- 0
for(i in ((levels - 1):first.level)) {
y <- accessD(x, i, aspect = aspect)
my <- max(c(my, abs(y)))
}
}
if(scaling == "compensated") {
my <- 0
for(i in ((levels - 1):first.level)) {
y <- accessD(x, i, aspect = aspect) * 2^(i/2)
my <- max(c(my, abs(y)))
}
}
if(scaling == "super") {
my <- 0
for(i in ((levels - 1):first.level)) {
y <- accessD(x, i, aspect = aspect) * 2^i
my <- max(c(my, abs(y)))
}
}
shift <- 1
for(i in ((levels - 1):first.level)) {
y <- accessD(x, i, aspect = aspect)
if(type == "wavelet")
n <- 2^i
else {
y <- y[c((n - shift + 1):n, 1:(n - shift))]
shift <- shift * 2
}
xplot <- myxx
ly <- length(y)
if(scaling == "by.level")
my <- max(abs(y))
if(scaling == "compensated")
y <- y * 2^(i/2)
if(scaling == "super")
y <- y * 2^i
if(my == 0) {
y <- rep(0, length(y))
}
else y <- (0.5 * y)/my
axr <- c(axr, my)
if(max(abs(y)) > NotPlotVal)
segments(xplot, height, xplot, height + y)
if(i != first.level) {
if(type == "wavelet") {
x1 <- myxx[seq(1, n - 1, 2)]
x2 <- myxx[seq(2, n, 2)]
myxx <- (x1 + x2)/2
}
height <- height + 1
}
}
if(rhlab == TRUE)
axis(4, at = 1:length(axr), labels = signif(axr, digits=3))
axr
}
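#
# Illustrative sketch (not part of the original source): decomposing a
# vector with wd() (defined elsewhere in this package) and plotting the
# coefficients level by level. The data are arbitrary Gaussian noise.
#
# ynoise <- rnorm(512)
# ywd <- wd(ynoise, filter.number = 2, family = "DaubExPhase")
# plot(ywd, scaling = "by.level", main = "Coefficients of pure noise")
#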
"plot.wp"<-
function(x, nvwp = NULL, main = "Wavelet Packet Decomposition", sub,
first.level = 5, scaling = "compensated", dotted.turn.on = 5,
color.force = FALSE, WaveletColor = 2, NodeVecColor = 3, fast = FALSE,
SmoothedLines = TRUE, ...)
{
#
# Check class of wp
#
ctmp <- class(x)
if(is.null(ctmp))
stop("wp has no class")
else if(ctmp != "wp")
stop("wp is not of class wp")
levels <- nlevelsWT(x)
dotted.turn.on <- levels - dotted.turn.on
N <- 2^levels # The number of original data points
#
#
# Check validity of command line args
#
if(first.level < 0 || first.level > levels)
stop("first.level must between zero and the number of levels")
#
if(dotted.turn.on < 0 || dotted.turn.on > levels) stop(
"dotted.turn.on must between zero and number of levels"
) #
# Do subtitling
#
if(missing(sub)) sub <- paste("Filter: ", x$filter$name) #
#
# Set plotting region and do axes of plot
#
oldpar <- par(mfrow = c(1, 1))
if(!is.null(nvwp))
sub <- paste(sub, "(selected packets in color 3)")
plot(c(0, N + 1), c(-1, levels - first.level + 1), type = "n", main =
main, xlab = "Packet Number", ylab = "Resolution Level", yaxt
= "n", sub = sub, ...)
axis(2, at = 0:(levels - first.level), labels = levels:first.level) #
#
# Check out how to do things in a different colour if we can
#
if(color.force == FALSE) {
if(CanUseMoreThanOneColor() == FALSE) {
if(WaveletColor > 1) {
warning(
"Can't (or can't find out how) display wavelets in color"
)
WaveletColor <- 1
}
if(NodeVecColor > 1) {
warning(
"Can't (or can't find out how) display node vector packets in color"
)
NodeVecColor <- 1
}
}
}
origdata <- getpacket(x, lev = levels, index = 0) #
#
# Scaling for the original data is always the same
#
sf <- max(abs(origdata))
if(sf == 0) {
stop("Original data is the zero function\n")
}
scale.origdata <- (0.5 * origdata)/sf
lines(1:N, scale.origdata)
if(first.level == levels) return() #
#
# Draw the vertical separators if necessary
#
for(i in 1:(levels - first.level)) {
N <- N/2
if(i > dotted.turn.on)
break
else for(j in 1:(2^i - 1)) {
segments(N * (j - 1) + N + 0.5, i - 0.5, N * (j -
1) + N + 0.5, i + 0.5, lty = 2)
}
}
#
#
# Get all the coefficients
#
CoefMatrix <- x$wp #
#
# Remove the original data because we have already plotted it
#
CoefMatrix <- CoefMatrix[ - (levels + 1), ] #
# Compute Global Scale Factor if necessary
#
Sf <- 0
if(scaling == "global")
Sf <- max(abs(CoefMatrix), na.rm = TRUE)
else if(scaling == "compensated") {
for(i in 1:(levels - first.level)) {
Coefs <- CoefMatrix[levels - i + 1, ] * 2^((levels - i
)/2)
Sf <- max(c(Sf, abs(Coefs)), na.rm = TRUE)
}
}
if(scaling == "global")
sf <- Sf
if(is.null(nvwp)) {
#
# If there is no associated node vector then plot the wavelet packet
# table using the matrix of coefficients. This is faster than the
# packet by packet method that is used when we have a node vector
# (but probably not much)
#
#
for(i in 1:(levels - first.level)) {
PKLength <- 2^(levels - i)
Coefs <- CoefMatrix[levels - i + 1, ]
if(scaling == "by.level")
sf <- max(abs(Coefs), na.rm = TRUE)
else if(scaling == "compensated")
sf <- Sf/2^((levels - i)/2)
if(is.na(sf) || sf == 0)
Coefs <- rep(0, length(Coefs))
else Coefs <- (0.5 * Coefs)/sf
pkl <- 1:PKLength
if(SmoothedLines == TRUE)
lines(pkl, i + Coefs[pkl])
else segments(pkl, i, pkl, i + Coefs[pkl])
pkl <- PKLength + pkl
segments(pkl, i, pkl, i + Coefs[pkl], col=WaveletColor)
pkl <- (2 * PKLength + 1):length(Coefs)
segments(pkl, i, pkl, i + Coefs[pkl])
}
}
else {
pklist <- print.nvwp(nvwp, printing = FALSE)
for(i in 1:(levels - first.level)) {
#
# Scaling issues
#
Coefs <- CoefMatrix[levels - i + 1, ]
if(scaling == "by.level")
sf <- max(abs(Coefs), na.rm = TRUE)
else if(scaling == "compensated")
sf <- Sf/2^((levels - i)/2)
if(is.na(sf) || sf == 0)
Coefs <- rep(0, length(Coefs))
else Coefs <- (0.5 * Coefs)/sf
CoefMatrix[levels - i + 1, ] <- Coefs
x$wp <- CoefMatrix
the.lev <- levels - i
PKLength <- 2^the.lev
npkts <- 2^i
pkl <- 1:PKLength
for(j in 1:npkts) {
pkt <- getpacket(x, level = the.lev, index = j -
1)
lcol <- 1
if(any(pklist$level == the.lev)) {
lpklist <- pklist$pkt[pklist$level == the.lev
]
if(any(lpklist == (j - 1)))
lcol <- NodeVecColor
else if(j == 2)
lcol <- WaveletColor
}
else if(j == 2)
lcol <- WaveletColor
if(j == 1) {
if(SmoothedLines == TRUE)
lines(pkl, i + pkt, col=lcol)
else segments(pkl, i, pkl, i + pkt, col=lcol)
}
else segments(pkl, i, pkl, i + pkt, col=lcol)
pkl <- pkl + PKLength
}
}
}
invisible()
}
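#
# Illustrative sketch (not part of the original source): computing a
# wavelet packet table with wp() (defined elsewhere in this package)
# and displaying it with the method above.
#
# y <- rnorm(256)
# ywp <- wp(y, filter.number = 4, family = "DaubLeAsymm")
# plot(ywp, first.level = 3)
#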
"plot.wst"<-
function(x, main = "Nondecimated Wavelet (Packet) Decomposition", sub,
first.level = 5, scaling = "compensated", dotted.turn.on = 5, aspect =
"Identity", ...)
{
#
# Check class of wst
#
ctmp <- class(x)
if(is.null(ctmp))
stop("wst has no class")
else if(ctmp != "wst")
stop("wst is not of class wst")
levels <- nlevelsWT(x)
dotted.turn.on <- levels - dotted.turn.on
if(is.complex(x$wp) && aspect == "Identity")
aspect <- "Mod"
N <- 2^levels # The number of original data points
#
#
# Check validity of command line args
#
if(first.level < 0 || first.level > levels)
stop("first.level must between zero and the number of levels")
#
if(dotted.turn.on < 0 || dotted.turn.on > levels) stop(
"dotted.turn.on must between zero and number of levels"
) #
# Do subtitling
#
if(missing(sub)) sub <- paste("Filter: ", x$filter$name) #
#
# Set plotting region and do axes of plot
#
if(aspect != "Identity")
sub <- paste(sub, "(", aspect, ")")
plot(c(0, N + 1), c(-1, levels - first.level + 1), type = "n", main =
main, xlab = "Packet Number", ylab = "Resolution Level", yaxt
= "n", sub = sub, ...)
axis(2, at = 0:(levels - first.level), labels = levels:first.level) #
origdata <- getpacket(x, lev = levels, index = 0, aspect = aspect) #
#
# Scaling for the original data is always the same
#
sf <- max(abs(origdata))
if(sf == 0) {
scale.origdata <- rep(0, length(origdata))
}
else scale.origdata <- (0.5 * origdata)/sf
lines(1:N, scale.origdata)
if(first.level == levels) return() #
#
# Draw the vertical separators if necessary
#
for(i in 1:(levels - first.level)) {
N <- N/2
if(i > dotted.turn.on)
break
else for(j in 1:(2^i - 1)) {
segments(N * (j - 1) + N + 0.5, i - 0.5, N * (j -
1) + N + 0.5, i + 0.5, lty = 2)
}
}
#
#
# Get all the coefficients
#
if(aspect == "Identity")
CoefMatrix <- x$wp
else {
fn <- get(aspect)
CoefMatrix <- fn(x$wp)
}
CoefMatrix <- CoefMatrix[ - (levels + 1), ] #
# Compute Global Scale Factor if necessary
#
Sf <- 0
if(scaling == "global")
Sf <- max(abs(CoefMatrix), na.rm = TRUE)
else if(scaling == "compensated") {
for(i in 1:(levels - first.level)) {
Coefs <- CoefMatrix[levels - i + 1, ] * 2^((levels - i
)/2)
Sf <- max(c(Sf, abs(Coefs)), na.rm = TRUE)
}
}
if(scaling == "global")
sf <- Sf
for(i in 1:(levels - first.level)) {
PKLength <- 2^(levels - i)
Coefs <- CoefMatrix[levels - i + 1, ]
if(scaling == "by.level")
sf <- max(abs(Coefs), na.rm = TRUE)
else if(scaling == "compensated")
sf <- Sf/2^((levels - i)/2)
if(is.na(sf) || sf == 0)
Coefs <- rep(0, length(Coefs))
else Coefs <- (0.5 * Coefs)/sf
pkl <- 1:PKLength
segments(pkl, i, pkl, i + Coefs[pkl])
pkl <- PKLength + pkl
segments(pkl, i, pkl, i + Coefs[pkl])
pkl <- (2 * PKLength + 1):length(Coefs)
segments(pkl, i, pkl, i + Coefs[pkl])
}
}
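#
# Illustrative sketch (not part of the original source): plotting a
# packet-ordered nondecimated transform. Assumes wst() defined elsewhere
# in this package; simchirp() is defined later in this file.
#
# y <- simchirp(256)$y
# ywst <- wst(y, filter.number = 2, family = "DaubExPhase")
# plot(ywst, first.level = 3)
#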
"plot.wst2D"<-
function(x, plot.type = "level", main = "", ...)
{
nlev <- nlevelsWT(x)
sz <- dim(x$wst2D)[2]
if(plot.type == "level") {
for(i in 0:(nlev - 1)) {
image(matrix(x$wst2D[i + 1, , ], nrow = sz))
st <- paste("Level", i)
title(main = main, sub = st)
}
}
}
"plotpkt"<-
function(J)
{
x <- c(0, 2^(J - 1))
y <- c(0, J)
plot(x, y, type = "n", xlab = "Packet indices", ylab = "Level", xaxt =
"n")
axis(1, at = seq(from = 0, to = 2^(J - 1), by = 0.5), labels = 0:2^J)
}
"print.BP"<-
function(x, ...)
{
cat("BP class object. Contains \"best basis\" information\n")
cat("Components of object:")
print(names(x))
cat("Number of levels ", nlevelsWT(x), "\n")
cat("List of \"best\" packets\n")
m <- cbind(x$level, x$pkt, x$basiscoef)
dimnames(m) <- list(NULL, c("Level id", "Packet id", "Basis coef"))
print(m)
}
"print.imwd"<-
function(x, ...)
{
cat("Class 'imwd' : Discrete Image Wavelet Transform Object:\n")
cat(" ~~~~ : List with", length(x), "components with names\n")
cat(" ", names(x), "\n\n")
cat("$ wNLx are LONG coefficient vectors !\n")
cat("\nsummary(.):\n----------\n")
summary.imwd(x)
}
"print.imwdc"<-
function(x, ...)
{
cat("Class 'imwdc' : Compressed Discrete Image Wavelet Transform Object:\n"
)
cat(" ~~~~~ : List with", length(x), "components with names\n")
cat(" ", names(x), "\n\n")
cat("$ wNLx are LONG coefficient vectors !\n")
cat("\nsummary(.):\n----------\n")
summary.imwdc(x)
}
"print.mwd"<-
function(x, ...)
{
ctmp <- class(x)
if(is.null(ctmp))
stop("Input must have class mwd")
else if(ctmp != "mwd")
stop("Input must have class mwd")
cat("Class 'mwd' : Discrete Multiple Wavelet Transform Object:\n")
cat(" ~~~ : List with", length(x), "components with names\n")
cat(" ", names(x), "\n\n")
cat("$ C and $ D are LONG coefficient vectors !\n")
cat("\nCreated on :", x$date, "\n")
cat("Type of decomposition: ", x$type, "\n")
cat("\nsummary:\n----------\n")
summary.mwd(x)
}
"print.nv"<-
function(x, printing = TRUE, verbose = FALSE, ...)
{
if(verbose == TRUE & printing == TRUE) {
cat("Printing node vector as a list\n")
cat("------------------------------\n")
print(as.list(x))
cat("Printing node vector as format\n")
cat("------------------------------\n")
}
node.vector <- x$node.list
acsel <- 0
acsellist <- NULL
cntr <- 0
power <- 1
rvector <- 0
for(i in (nlevelsWT(x)- 1):0) {
nl <- node.vector[[i + 1]]
action <- nl$upperctrl[acsel + 1]
actent <- nl$upperl[acsel + 1]
cntr <- cntr + 1
if(action == "S") {
if(printing == TRUE)
cat("There are ", cntr,
" reconstruction steps\n")
return(invisible(list(indexlist = acsellist, rvector =
rvector)))
}
else if(action == "L")
acsel <- 2 * acsel
else {
acsel <- 2 * acsel + 1
rvector <- rvector + power
}
power <- power * 2
if(printing == TRUE) {
cat("Level : ", i, " Action is ", action)
cat(" (getpacket Index: ", acsel, ")\n")
}
acsellist <- c(acsellist, acsel)
}
if(printing == TRUE)
cat("There are ", cntr, " reconstruction steps\n")
invisible(list(indexlist = acsellist, rvector = rvector))
}
"print.nvwp"<-
function(x, printing = TRUE, ...)
{
nlev <- nlevelsWT(x)
pkt <- NULL
level <- NULL
decompose <- x$node.list[[nlev]]$upperctrl
if(decompose == "B") {
parent.decompose <- 0
for(i in nlev:1) {
child.lev <- i - 1
child.decompose <- sort(c(2 * parent.decompose, 2 *
parent.decompose + 1))
if(child.lev == 0)
ctrl <- rep("T", 2^nlev)
else ctrl <- x$node.list[[child.lev]]$upperctrl
for(j in 1:length(child.decompose)) {
if(ctrl[child.decompose[j] + 1] == "T") {
level <- c(level, child.lev)
pkt <- c(pkt, child.decompose[j])
if(printing == TRUE)
cat("Level: ", child.lev, " Packet: ",
child.decompose[j], "\n")
}
}
if(child.lev != 0) {
ctrl <- ctrl[child.decompose + 1]
sv <- ctrl == "B"
parent.decompose <- child.decompose[sv]
}
if (length(parent.decompose)==0)
break
}
}
else {
level <- nlev
pkt <- 0
if(printing == TRUE) {
cat("Original data is best packet!\n")
}
}
invisible(list(level = level, pkt = pkt))
}
"print.w2d"<-
function(x, ...)
{
cat("w2d class object.\n")
cat("A composite object containing the components\n")
cat("\t")
print(names(x))
cat("Number of levels: ", nlevelsWT(x), "\n")
cat("Number of data points: ", nrow(x$m), "\n")
cat("Number of bases: ", ncol(x$m), "\n")
cat("Groups vector: ")
print(x$k)
}
"print.wd"<-
function(x, ...)
{
if(IsEarly(x)) {
ConvertMessage()
stop()
}
cat("Class 'wd' : Discrete Wavelet Transform Object:\n")
cat(" ~~ : List with", length(x), "components with names\n")
cat(" ", names(x), "\n\n")
if(x$bc == "interval")
cat("$transformed.vector is a LONG coefficient vector!\n")
else cat("$C and $D are LONG coefficient vectors\n")
cat("\nCreated on :", x$date, "\n")
cat("Type of decomposition: ", x$type, "\n")
cat("\nsummary(.):\n----------\n")
summary.wd(x)
}
"print.wd3D"<-
function(x, ...)
{
if(IsEarly(x)) {
ConvertMessage()
stop()
}
cat("Class 'wd3d' : 3D DWT Object:\n")
cat(" ~~~~ : List with", length(x), "components with names\n")
cat(" ", names(x), "\n\n")
cat("$ a is the wavelet coefficient array\n")
cat("Dimension of a is ")
print(dim(x$a))
cat("\nCreated on :", x$date, "\n")
cat("\nsummary(.):\n----------\n")
summary.wd3D(x)
}
"print.wp"<-
function(x, ...)
{
if(IsEarly(x)) {
ConvertMessage()
stop()
}
cat("Class 'wp' : Wavelet Packet Object:\n")
cat(" ~~ : List with", length(x), "components with names\n")
cat(" ", names(x), "\n\n")
cat("$wp is the wavelet packet matrix\n")
cat("\nCreated on :", x$date, "\n")
cat("\nsummary(.):\n----------\n")
summary.wp(x)
}
"print.wpst"<-
function(x, ...)
{
if(IsEarly(x)) {
ConvertMessage()
stop()
}
cat("Class 'wpst' : Nondecimated Wavelet Packet Transform Object:\n")
cat(" ~~~ : List with", length(x), "components with names\n")
cat(" ", names(x), "\n\n")
cat("$wpst is a coefficient vector\n")
cat("\nCreated on :", x$date[1], "\n")
cat("\nsummary(.):\n----------\n")
summary.wpst(x)
}
"print.wpstCL"<-
function(x, ...)
{
cat("wpstCL class object\n")
cat("Results of applying discriminator to time series\n")
cat("Components: ", names(x), "\n")
}
"print.wpstDO"<-
function(x, ...)
{
cat("Nondecimated wavelet packet discrimination object\n")
cat("Composite object containing components:")
print(names(x))
cat("Fisher's discrimination: done\n")
cat("BP component has the following information\n")
print(x$BP)
}
"print.wst"<-
function(x, ...)
{
if(IsEarly(x)) {
ConvertMessage()
stop()
}
cat("Class 'wst' : Packet-ordered Nondecimated Wavelet Transform Object:\n")
cat(" ~~~ : List with", length(x), "components with names\n")
cat(" ", names(x), "\n\n")
cat("$wp and $Carray are the coefficient matrices\n")
cat("\nCreated on :", x$date[1], "\n")
cat("\nsummary(.):\n----------\n")
summary.wst(x)
}
"print.wst2D"<-
function(x, ...)
{
cat("Class 'wst2D' : 2D Packet-ordered Nondecimated Wavelet Transform Object:\n")
cat(" ~~~~~ : List with", length(x), "components with names\n")
cat(" ", names(x), "\n\n")
cat("$wst2D is the coefficient array\n")
cat("\nCreated on :", x$date[1], "\n")
cat("\nsummary(.):\n----------\n")
summary.wst2D(x)
}
"putC"<-
function(...)
UseMethod("putC")
"putC.mwd"<-
function(mwd, level, M, boundary = FALSE, index = FALSE, ...)
{
#
#putC.mwd, changes the C coefficients at the given level.
#Tim Downie
#last update May 1996
#
if(is.null(class(mwd))) stop("mwd is not class mwd object")
if(!inherits(mwd, "mwd"))
stop("mwd is not class mwd object")
if(level < 0)
stop("level too small")
else if(level > nlevelsWT(mwd))
stop("level too big")
flc <- mwd$fl.dbase$first.last.c[level + 1, ]
if(boundary == FALSE) {
if(mwd$type == "wavelet")
n <- 2^level
else n <- 2^nlevelsWT(mwd)
i1 <- flc[3] + 1 - flc[1]
i2 <- flc[3] + n - flc[1]
}
else {
n <- flc[2] - flc[1] + 1
i1 <- flc[3] + 1
i2 <- flc[3] + n
}
if(index == FALSE) {
if(length(M) != mwd$filter$npsi * n)
stop("The length of M is wrong")
mwd$C[, i1:i2] <- M
return(mwd)
}
else return(list(ix1 = i1, ix2 = i2))
}
"putC.wd"<-
function(wd, level, v, boundary = FALSE, index = FALSE, ...)
{
if(IsEarly(wd)) {
ConvertMessage()
stop()
}
if(!inherits(wd, "wd"))
stop("wd is not class wd object")
if(level < 0)
stop("level should be zero or larger")
else if(level > nlevelsWT(wd))
stop(paste("Level should be less than or equal to ", nlevelsWT(wd
)))
if(wd$bc == "interval") {
if(level != wd$current.scale)
stop(paste(
"Requested wd object was decomposed to level ",
wd$current.scale,
" and so for \"wavelets on the interval\" object\ns I can only alter this level for the scaling function coefficients\n"
))
first.level <- wd$fl.dbase$first.last.c[1]
last.level <- wd$fl.dbase$first.last.c[2]
offset.level <- wd$fl.dbase$first.last.c[3]
n <- last.level - first.level + 1
if(length(v) != n)
stop(paste(
"I think the length of \"v\" is wrong. I think it should be of length ",
n))
wd$transformed.vector[(offset.level + 1 - first.level):(
offset.level + n - first.level)] <- v
return(wd)
}
flc <- wd$fl.dbase$first.last.c[level + 1, ]
if(boundary == FALSE) {
if(wd$type == "wavelet")
n <- 2^level
else n <- 2^nlevelsWT(wd)
i1 <- flc[3] + 1 - flc[1]
i2 <- flc[3] + n - flc[1]
}
else {
n <- flc[2] - flc[1] + 1
i1 <- flc[3] + 1
i2 <- flc[3] + n
}
if(length(v) != n)
stop(paste("I think the length of \"v\" is wrong. I think it should be of length ",
n))
wd$C[i1:i2] <- v
if(index == FALSE)
return(wd)
else return(list(ix1 = i1, ix2 = i2))
}
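#
# Illustrative sketch (not part of the original source): replacing the
# father wavelet (scaling function) coefficients at one level and reading
# them back. Assumes wd() and accessC() defined elsewhere in this package.
#
# ywd <- wd(rnorm(128))
# newC <- rep(0, 2^3)                 # level 3 holds 2^3 = 8 coefficients
# ywd <- putC(ywd, level = 3, v = newC)
# accessC(ywd, level = 3)             # returns the eight zeroes just inserted
#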
"putC.wp"<-
function(wp, ...)
{
stop("A wavelet packet object does not have ``levels'' of father wavelet coefficients. Use putD to obtain levels of father and mother coefficients"
)
}
"putC.wst"<-
function(wst, level, value, ...)
{
#
#
# Insert father wavelet (scaling) coefficients "value" at a particular level
# First work out how many packets there are at this level
#
nlevels <- nlevelsWT(wst)
if(2^nlevels != length(value))
stop("Input data value of wrong length")
wst$Carray[level + 1, ] <- value
wst
}
"putD"<-
function(...)
UseMethod("putD")
"putD.mwd"<-
function(mwd, level, M, boundary = FALSE, index = FALSE, ...)
{
#
#putD.mwd
#replaces D coefficients at given level with M
#Tim Downie
#last update May 1996
#
#
if(is.null(class(mwd))) stop("mwd is not class mwd object")
if(!inherits(mwd, "mwd"))
stop("mwd is not class mwd object")
if(level < 0)
stop("level too small")
else if(level >= nlevelsWT(mwd))
stop("level too big")
fld <- mwd$fl.dbase$first.last.d[level + 1, ]
if(boundary == FALSE) {
if(mwd$type == "wavelet")
n <- 2^level
else n <- 2^nlevelsWT(mwd)
i1 <- fld[3] + 1 - fld[1]
i2 <- fld[3] + n - fld[1]
}
else {
n <- fld[2] - fld[1] + 1
i1 <- fld[3] + 1
i2 <- fld[3] + n
}
if(index == FALSE) {
if(length(M) != mwd$filter$npsi * n)
stop("The length of M is wrong")
mwd$D[, i1:i2] <- M
return(mwd)
}
else return(list(ix1 = i1, ix2 = i2))
}
"putD.wd"<-
function(wd, level, v, boundary = FALSE, index = FALSE, ...)
{
if(IsEarly(wd)) {
ConvertMessage()
stop()
}
if(!inherits(wd, "wd"))
stop("wd is not class wd object")
if(level < 0)
stop("level too small")
else if(level > nlevelsWT(wd)- 1)
stop(paste("Level too big. Maximum level is ", nlevelsWT(wd)- 1))
if(wd$bc == "interval") {
level <- level - wd$current.scale
objname <- deparse(substitute(wd))
if(level < 0)
stop(paste("The wd object: ", objname,
" was only decomposed down to level: ", wd$
current.scale, " Try a larger level"))
if(boundary == TRUE)
stop("There are no boundary elements in a wavelets on th\ne interval transform!"
)
}
fld <- wd$fl.dbase$first.last.d[level + 1, ]
if(boundary == FALSE) {
if(wd$type == "wavelet")
n <- 2^level
else n <- 2^nlevelsWT(wd)
if(wd$bc == "interval")
n <- fld[2] - fld[1] + 1
i1 <- fld[3] + 1 - fld[1]
i2 <- fld[3] + n - fld[1]
}
else {
n <- fld[2] - fld[1] + 1
i1 <- fld[3] + 1
i2 <- fld[3] + n
}
if(length(v) != n)
stop("I think that the length of v is wrong")
if(wd$bc == "interval")
wd$transformed.vector[i1:i2] <- v
else wd$D[i1:i2] <- v
if(index == FALSE)
return(wd)
else return(list(ix1 = i1, ix2 = i2))
}
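#
# Illustrative sketch (not part of the original source): zeroing the finest
# level of mother wavelet coefficients and reconstructing. Assumes wd(),
# nlevelsWT() and wr() defined elsewhere in this package.
#
# ywd <- wd(rnorm(128))
# fine <- nlevelsWT(ywd) - 1
# ywd <- putD(ywd, level = fine, v = rep(0, 2^fine))
# smoother <- wr(ywd)                 # reconstruction with finest detail removed
#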
"putD.wd3D"<-
function(x, v, ...)
{
truesize <- dim(x$a)[1]
nlx <- nlevelsWT(x)
vlev <- v$lev
va <- v$a
putDwd3Dcheck(lti = vlev, dima = dim(va), block = v$block, nlx = nlx)
Iarrayix <- switch(v$block,
HHH = 0,
GHH = 1,
HGH = 2,
GGH = 3,
HHG = 4,
GHG = 5,
HGG = 6,
GGG = 7)
if(Iarrayix == 0 && vlev != 0)
stop("Can only insert HHH into level 0")
if(is.null(Iarrayix))
stop(paste("Unknown block to insert: ", v$block))
tmp <- .C("putarr",
Carray = as.double(x$a),
truesize = as.integer(truesize),
level = as.integer(vlev),
Iarrayix = as.integer(Iarrayix),
Iarray = as.double(va), PACKAGE = "wavethresh")
x$a <- array(tmp$Carray, dim = dim(x$a))
x
}
"putD.wp"<-
function(wp, level, value, ...)
{
#
# Insert coefficients "value" into "wp" at resolution "level".
# First work out how many packets there are at this level
#
nlev <- nlevelsWT(wp)
if(2^nlev != length(value))
stop("Input data value of wrong length")
wp$wp[level + 1, ] <- value
wp
}
"putD.wst"<-
function(wst, level, value, ...)
{
#
#
# Insert mother wavelet (detail) coefficients "value" at a particular level
# First work out how many packets there are at this level
#
nlevels <- nlevelsWT(wst)
if(2^nlevels != length(value))
stop("Input data value of wrong length")
wst$wp[level + 1, ] <- value
wst
}
"putDwd3Dcheck"<-
function(lti, dima, block, nlx)
{
if(lti < 0)
stop(paste("Level cannot be negative for block:", block))
else if(lti > nlx - 1)
stop(paste("Maximum level for block: ", block, " is ", nlx - 1)
)
if(length(dima) != 3)
stop(paste(block, "array is not three-dimensional"))
if(any(dima != dima[1]))
stop(paste(block, " dimensions are not all the same"))
arrdimlev <- IsPowerOfTwo(dima[1])
if(is.na(arrdimlev))
stop(paste(block, " dimensions are not power of two"))
if(arrdimlev != lti)
stop(paste(block,
"dimensions will not fit into cube at that level"))
}
"putpacket"<-
function(...)
UseMethod("putpacket")
"putpacket.wp"<-
function(wp, level, index, packet, ...)
{
# cat("PUTPACKET: Level:", level, " Index:", index, " Pack Length ",
# length(packet), "\n")
if(!inherits(wp, "wp")) stop("wp object is not of class wp")
if(level > nlevelsWT(wp))
stop("Not that many levels in wp object")
unit <- 2^level
LocalIndex <- unit * index + 1
if(index > 2^(nlevelsWT(wp)- level) - 1) {
cat("Index was too high, maximum for this level is ", 2^(wp$
nlevels - level) - 1, "\n")
stop("Error occured")
}
if(LocalIndex < 0)
stop("Index must be non-negative")
if(length(packet) != unit)
stop("Packet is not of correct length\n")
wp$wp[level + 1, (LocalIndex:(LocalIndex + unit - 1))] <- packet
wp
}
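#
# Illustrative sketch (not part of the original source): extracting a packet
# with getpacket() (defined elsewhere in this package) and writing a
# modified version back with the method above.
#
# ywp <- wp(rnorm(64))
# pk <- getpacket(ywp, level = 4, index = 1)
# ywp <- putpacket(ywp, level = 4, index = 1, packet = pk * 0)
#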
"putpacket.wst"<-
function(wst, level, index, packet, ...)
{
class(wst) <- "wp"
l <- putpacket.wp(wst, level = level, index = index, packet = packet)
class(l) <- "wst"
l
}
"putpacket.wst2D"<-
function(wst2D, level, index, type = "S", packet, Ccode = TRUE, ...)
{
cellength <- 2^level
nlev <- nlevelsWT(wst2D)
if(!is.matrix(packet))
stop("packet should be a matrix")
nr <- nrow(packet)
nc <- ncol(packet)
if(nr != nc)
stop("packet should be a square matrix")
else if(nr != cellength)
stop(paste("packet matrix should be square of dimension ",
cellength, " if you're inserting at level ", level,
" not ", nr))
if(level > nlev - 1)
stop(paste("Maximum level is ", nlev - 1, " you supplied ",
level))
else if(level < 0)
stop(paste("Minimum level is 0 you supplied ", level))
if(type != "S" && type != "H" && type != "V" && type != "D")
stop("Type must be one of S, H, V or D")
if(nchar(index) != nlev - level)
stop(paste("Index must be ", nlev - level,
" characters long for level ", level))
for(i in 1:nchar(index)) {
s1 <- substring(index, i, i)
if(s1 != "0" && s1 != "1" && s1 != "2" && s1 != "3")
stop(paste("Character ", i,
" in index is not a 0, 1, 2 or 3. It is ", s1))
}
if(Ccode == TRUE) {
ntype <- switch(type,
S = 0,
H = 1,
V = 2,
D = 3)
amdim <- dim(wst2D$wst2D)
ans <- .C("putpacketwst2D",
am = as.double(wst2D$wst2D),
d1 = as.integer(amdim[1]),
d12 = as.integer(amdim[1] * amdim[2]),
maxlevel = as.integer(nlev - 1),
level = as.integer(level),
index = as.integer(index),
ntype = as.integer(ntype),
packet = as.double(packet),
sl = as.integer(nr), PACKAGE = "wavethresh")
wst2D$wst2D <- array(ans$am, dim = amdim)
}
else {
x <- y <- 0
ans <- .C("ixtoco",
level = as.integer(level),
maxlevel = as.integer(nlev - 1),
index = as.integer(index),
x = as.integer(x),
y = as.integer(y), PACKAGE = "wavethresh")
tmpx <- switch(type,
S = 0,
H = 0,
V = cellength,
D = cellength)
tmpy <- switch(type,
S = 0,
H = cellength,
V = 0,
D = cellength)
x <- ans$x + tmpx + 1
y <- ans$y + tmpy + 1
cat("x ", x, "y: ", y, "x+cellength-1 ", x + cellength - 1,
"y+cellength-1", y + cellength - 1, "\n")
wst2D$wst2D[level + 1, x:(x + cellength - 1), y:(y + cellength -
1)] <- packet
}
wst2D
}
"rcov"<-
function(x)
{
#
#rcov
#
#computes a robust correlation matrix of x
# x must be a matrix with the columns as observations
#which is the opposite to the S function var (don't get confused!)
#Method comes from Huber's "Robust Statistics"
#
if(!is.matrix(x)) stop("x must be a matrix")
m <- dim(x)[1]
n <- dim(x)[2]
b1 <- b2 <- b3 <- 0
a <- rep(0, m)
sigma <- matrix(rep(0, m^2), nrow = m)
for(i in 1:m) {
a[i] <- 1/mad(x[i, ])
sigma[i, i] <- 1/a[i]^2
}
if(m > 1) {
for(i in 2:m)
for(j in 1:(i - 1)) {
b1 <- mad(a[i] * x[i, ] + a[j] * x[j, ])^2
b2 <- mad(a[i] * x[i, ] - a[j] * x[j, ])^2
b3 <- mad(a[j] * x[j, ] - a[i] * x[i, ])^2
sigma[i, j] <- (b1 - b2)/((b1 + b2) * a[i] * a[
j])
sigma[j, i] <- (b1 - b3)/((b1 + b3) * a[i] * a[
j])
}
}
return(sigma)
}
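#
# Illustrative sketch (not part of the original source): rcov() expects the
# observations in the columns, so a 2 x n matrix gives a robust 2 x 2
# covariance estimate. The data here are arbitrary correlated Gaussians.
#
# z1 <- rnorm(200)
# z2 <- 0.5 * z1 + rnorm(200)
# rcov(rbind(z1, z2))
#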
"rfft"<-
function(x)
{
# given a vector x computes the real continuous fourier transform of
# x; ie regards x as points on a periodic function on [0,1] starting at
# 0 and finding the coefficients of the functions 1, sqrt(2)cos 2 pi t,
# sqrt(2) sin 2 pi t, etc that give an expansion of the interpolant of
# x The number of terms in the expansion is the length of x.
# If x is of even length, the last
# coefficient will be that of a cosine term with no matching sine.
#
nx <- length(x)
z <- fft(x)
z1 <- sqrt(2) * z[2:(1 + floor(nx/2))]
rz <- c(Re(z)[1], as.vector(rbind(Re(z1), - Im(z1))))/nx
return(rz[1:nx])
}
"rfftinv"<-
function(rz, n = length(rz))
{
# Inverts the following transform----
# given a vector rz computes the inverse real continuous fourier transform of
# rz; ie regards rz as the coefficients of the expansion of a
# periodic function f in terms of the functions
# 1, sqrt(2)cos 2 pi t, sqrt(2) sin 2 pi t, etc .
# The output of the function is f evaluated
# at a regular grid of n points, starting at 0.
# If n is not specified it is taken to be the length of rz;
# the results are unpredictable if n < length(rz).
#
nz <- length(rz)
z <- complex(n)
nz1 <- floor(nz/2)
nz2 <- ceiling(nz/2) - 1
z[1] <- rz[1] + (0i)
z[2:(nz1 + 1)] <- (1/sqrt(2)) * rz[seq(from = 2, by = 2, length = nz1)]
z[2:(nz2 + 1)] <- z[2:(nz2 + 1)] - (1i) * (1/sqrt(2)) * rz[seq(from = 3,
by = 2, length = nz2)]
z[n:(n + 1 - nz1)] <- Conj(z[2:(nz1 + 1)])
x <- Re(fft(z, inverse = TRUE))
return(x)
}
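#
# Illustrative sketch (not part of the original source): rfft() and rfftinv()
# invert one another on a regular grid, so the round trip should reproduce
# the input up to rounding error.
#
# x <- rnorm(16)
# max(abs(rfftinv(rfft(x)) - x))      # numerically zero
#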
"rfftwt"<-
function(xrfft, wt)
{
# weight the real fourier series xrfft of even length
# by a weight sequence wt
# The first term of xrfft is left alone, and the weights are
# then applied to pairs of terms in xrfft.
# wt is of length half n .
xsrfft <- xrfft * c(1, rep(wt, c(rep(2, length(wt) - 1), 1)))
return(xsrfft)
}
"rm.det"<-
function(wd.int.obj)
{
len <- length(wd.int.obj$transformed.vector)
n <- len
maxscale <- log(len, 2)
minscale <- wd.int.obj$current.scale
for(i in c(maxscale:(minscale + 1)))
n <- n/2
for(i in c((n + 1):len))
wd.int.obj$transformed.vector[i] <- 0
return(wd.int.obj)
}
"rmget"<-
function(requestJ, filter.number, family)
{
ps <- paste("rm.*.", filter.number, ".", family, sep = "")
cand <- objects(envir = WTEnv, pattern = ps)
if(length(cand) == 0)
return(NULL)
cand <- substring(cand, first = 4)
candfd <- firstdot(cand)
cand <- as.numeric(substring(cand, first = 1, last = candfd - 1))
cand <- cand[cand >= requestJ]
if(length(cand) == 0)
return(NULL)
else return(min(cand))
}
"rmname"<-
function(J, filter.number, family)
{
if(J >= 0)
stop("J must be a negative integer")
return(paste("rm.", - J, ".", filter.number, ".", family, sep = ""))
}
"rotateback"<-
function(v)
{
lv <- length(v)
v[c(lv, 1:(lv - 1))]
}
"rsswav"<-
function(noisy, value = 1, filter.number = 10, family = "DaubLeAsymm",
thresh.type = "hard", ll = 3)
{
lo <- length(noisy)
oodd <- noisy[seq(from = 1, by = 2, length = lo/2)]
oeven <- noisy[seq(from = 2, by = 2, length = lo/2)] #
#
# Do decomposition of odd
#
oddwd <- wd(oodd, filter.number = filter.number, family = family)
oddwdt <- threshold(oddwd, policy = "manual", value = value, type =
thresh.type, lev = ll:(nlevelsWT(oddwd)- 1))
oddwr <- wr(oddwdt) #
# Interpolate evens
#
eint <- (c(oeven[1], oeven) + c(oeven, oeven[length(oeven)]))/2
eint <- eint[1:(length(eint) - 1)]
ssq1 <- ssq(eint, oddwr) #
# ts.plot(oddwr, main = paste("Odd plot, ssq=", ssq1)) #
# Now do decomposition of even
#
evenwd <- wd(oeven, filter.number = filter.number, family = family)
evenwdt <- threshold(evenwd, policy = "manual", value = value, type =
thresh.type, lev = ll:(nlevelsWT(evenwd)- 1))
evenwr <- wr(evenwdt) #
#
# Interpolate odds
#
oint <- (c(oodd[1], oodd) + c(oodd, oodd[length(oodd)]))/2
oint <- oint[1:(length(oint) - 1)]
ssq2 <- ssq(oint, evenwr)
# ts.plot(evenwr, main = paste("Even plot, ssq=", ssq2))
answd <- wd(noisy, filter.number = filter.number, family = family)
ll <- list(ssq = (ssq1 + ssq2)/2, df = dof(threshold(answd, policy =
"manual", value = value, type = thresh.type, lev = ll:(answd$
nlevels - 1))))
return(ll)
}
"simchirp"<-
function(n = 1024)
{
	x <- 1e-05 + seq(from = -1, to = 1, length = n + 1)[1:
n]
y <- sin(pi/x)
list(x = x, y = y)
}
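#
# Illustrative sketch (not part of the original source): the simulated chirp
# is a useful test signal for the plotting methods above. Assumes wd()
# defined elsewhere in this package.
#
# chirp <- simchirp(1024)
# plot(chirp$x, chirp$y, type = "l")
# plot(wd(chirp$y, filter.number = 2, family = "DaubExPhase"))
#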
"ssq"<-
function(u, v)
{
sum((u - v)^2)
}
"summary.imwd"<-
function(object, ...)
{
#
#
# Check class of imwd
#
ctmp <- class(object)
if(is.null(ctmp))
stop("imwd has no class")
else if(ctmp != "imwd")
stop("imwd is not of class imwd")
first.last.c <- object$fl.dbase$first.last.c
pix <- first.last.c[nlevelsWT(object)+ 1, 2] - first.last.c[nlevelsWT(object)+
1, 1] + 1
cat("UNcompressed image wavelet decomposition structure\n")
cat("Levels: ", nlevelsWT(object), "\n")
cat("Original image was", pix, "x", pix, " pixels.\n")
cat("Filter was: ", object$filter$name, "\n")
cat("Boundary handling: ", object$bc, "\n")
}
"summary.imwdc"<-
function(object, ...)
{
#
#
# Check class of imwdc
#
ctmp <- class(object)
if(is.null(ctmp))
stop("imwdc has no class")
else if(ctmp != "imwdc")
stop("imwdc is not of class imwdc")
first.last.c <- object$fl.dbase$first.last.c
pix <- first.last.c[nlevelsWT(object)+ 1, 2] - first.last.c[nlevelsWT(object)+
1, 1] + 1
cat("Compressed image wavelet decomposition structure\n")
cat("Levels: ", nlevelsWT(object), "\n")
cat("Original image was", pix, "x", pix, " pixels.\n")
cat("Filter was: ", object$filter$name, "\n")
cat("Boundary handling: ", object$bc, "\n")
}
"summary.mwd"<-
function(object, ...)
{
	ctmp <- class(object)
if(is.null(ctmp))
stop("Input must have class mwd")
else if(ctmp != "mwd")
stop("Input must have class mwd")
cat("Length of original: ", object$ndata, "\n")
cat("Levels: ", nlevelsWT(object), "\n")
cat("Filter was: ", object$filter$name, "\n")
cat("Scaling fns: ", object$filter$nphi, "\n")
cat("Wavelet fns: ", object$filter$npsi, "\n")
cat("Prefilter: ", object$prefilter, "\n")
cat("Scaling factor: ", object$filter$ndecim, "\n")
cat("Boundary handling: ", object$bc, "\n")
cat("Transform type: ", object$type, "\n")
cat("Date: ", object$date, "\n")
}
"summary.wd"<-
function(object, ...)
{
if(IsEarly(object)) {
ConvertMessage()
stop()
}
if(object$bc != "interval")
pix <- length(accessC(object))
else pix <- 2^nlevelsWT(object)
cat("Levels: ", nlevelsWT(object), "\n")
cat("Length of original: ", pix, "\n")
cat("Filter was: ", object$filter$name, "\n")
cat("Boundary handling: ", object$bc, "\n")
if(object$bc == "interval")
if(object$preconditioned == TRUE)
cat("Preconditioning is ON\n")
else cat("Preconditioning is OFF\n")
cat("Transform type: ", object$type, "\n")
cat("Date: ", object$date, "\n")
}
"summary.wd3D"<-
function(object, ...)
{
if(IsEarly(object)) {
ConvertMessage()
stop()
}
cat("Levels: ", nlevelsWT(object), "\n")
cat("Filter number was: ", object$filter.number, "\n")
cat("Filter family was: ", object$family, "\n")
cat("Date: ", object$date, "\n")
}
"summary.wp"<-
function(object, ...)
{
if(IsEarly(object)) {
ConvertMessage()
stop()
}
wpdim <- dim(object$wp)
cat("Levels: ", nlevelsWT(object), "\n")
cat("Length of original: ", wpdim[2], "\n")
cat("Filter was: ", object$filter$name, "\n")
}
"summary.wpst"<-
function(object, ...)
{
if(IsEarly(object)) {
ConvertMessage()
stop()
}
pix <- 2^nlevelsWT(object)
cat("Levels: ", nlevelsWT(object), "\n")
cat("Length of original: ", pix, "\n")
cat("Filter was: ", object$filter$name, "\n")
cat("Date: ", object$date[1], "\n")
if(length(object$date) != 1)
cat("This object has been modified. Use \"Whistory\" to find out what's happened\n"
)
}
"summary.wst"<-
function(object, ...)
{
if(IsEarly(object)) {
ConvertMessage()
stop()
}
pix <- 2^nlevelsWT(object)
cat("Levels: ", nlevelsWT(object), "\n")
cat("Length of original: ", pix, "\n")
cat("Filter was: ", object$filter$name, "\n")
cat("Date: ", object$date[1], "\n")
if(length(object$date) != 1)
cat("This object has been modified. Use \"Whistory\" to find out what's happened\n"
)
}
"summary.wst2D"<-
function(object, ...)
{
nlev <- nlevelsWT(object)
cat("Levels: ", nlev, "\n")
cat("Length of original: ", 2^nlev, "x", 2^nlev, "\n")
cat("Filter was: ", object$filter$name, "\n")
cat("Date: ", object$date[1], "\n")
if(length(object$date) != 1)
cat("This object has been modified. Use \"Whistory\" to find out what's happened\n"
)
}
"support"<-
function(filter.number = 10, family = "DaubLeAsymm", m = 0, n = 0)
{
m <- m + 1
if(family == "DaubExPhase") {
a <- - (filter.number - 1)
b <- filter.number
lh <- 2^( + m) * (a + n)
rh <- 2^( + m) * (b + n)
return(list(lh = lh, rh = rh, psi.lh = - (filter.number - 1),
psi.rh = filter.number, phi.lh = 0, phi.rh = 2 *
filter.number - 1))
}
else if(family == "DaubLeAsymm") {
a <- - (filter.number - 1)
b <- filter.number
lh <- 2^( + m) * (a + n)
rh <- 2^( + m) * (b + n)
return(list(lh = lh, rh = rh, psi.lh = - (filter.number - 1),
psi.rh = filter.number, phi.lh = 0, phi.rh = 2 *
filter.number - 1))
}
else {
stop(paste("Family: ", family, " not supported for support!\n")
)
}
}
"sure"<-
function(x)
{
#
# The SURE function of Donoho and Johnstone
# Finds the minimum
#
x <- abs(x)
d <- length(x)
y <- sort(x) #
#
# Form cumulative sum
#
cy <- cumsum(y^2)
cy <- c(0, cy[1:(length(cy) - 1)]) #
#
# Now the answer
#
ans <- d - 2 * 1:d + cy + d:1 * y^2 # cat("ans is\n")
# print(ans)
m <- min(ans)
index <- (1:length(ans))[m == ans]
return(y[index])
}
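#
# Illustrative sketch (not part of the original source): sure() returns the
# threshold minimising Stein's unbiased risk estimate for coefficients that
# have already been standardised to unit noise variance.
#
# d <- c(rnorm(100), rnorm(10, mean = 5))   # mostly noise, a few signal values
# sure(d)                                   # SURE-minimising threshold
#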
"threshold"<-
function(...)
UseMethod("threshold")
"threshold.imwd"<-
function(imwd, levels = 3:(nlevelsWT(imwd)- 1), type = "hard", policy =
"universal", by.level = FALSE, value = 0, dev = var, verbose = FALSE,
	return.threshold = FALSE, compression = TRUE, Q = 0.05, ...)
{
#
#
# Check class of imwd
#
if(verbose == TRUE) cat("Argument checking\n")
ctmp <- class(imwd)
if(is.null(ctmp))
stop("imwd has no class")
else if(ctmp != "imwd")
stop("imwd is not of class imwd")
if(policy != "universal" && policy != "manual" && policy !=
"probability" && policy != "fdr")
stop("Only policys are universal, manual, fdr and probability at present"
)
if(type != "hard" && type != "soft")
stop("Only hard or soft thresholding at present")
r <- range(levels)
if(r[1] < 0)
stop("levels out of range, level too small")
if(r[2] > nlevelsWT(imwd)- 1)
stop("levels out of range, level too big")
if(r[1] > nlevelsWT(imwd)- 1) {
warning("no thresholding done")
return(imwd)
}
if(r[2] < 0) {
warning("no thresholding done")
return(imwd)
}
nthresh <- length(levels)
d <- NULL
n <- 2^(2 * nlevelsWT(imwd)) #
# Decide which policy to adopt
# The next if-else construction should define a vector called
# "thresh" that contains the threshold value for each level
# in "levels". This may be the same threshold value
# a global threshold.
#
if(policy == "universal") {
if(verbose == TRUE)
cat("Universal policy...")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh) {
d <- c(d, imwd[[lt.to.name(levels[i], "CD")]],
imwd[[lt.to.name(levels[i], "DC")]], imwd[[
lt.to.name(levels[i], "DD")]])
}
noise.level <- sqrt(dev(d))
thresh <- sqrt(2 * log(n)) * noise.level
if(verbose == TRUE)
cat("Global threshold is: ", thresh, "\n")
thresh <- rep(thresh, length = nthresh)
}
else {
if(verbose == TRUE)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
for(i in 1:nthresh) {
d <- c(imwd[[lt.to.name(levels[i], "CD")]],
imwd[[lt.to.name(levels[i], "DC")]], imwd[[
lt.to.name(levels[i], "DD")]])
noise.level <- sqrt(dev(d))
thresh[i] <- sqrt(2 * log(n)) * noise.level
if(verbose == TRUE)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
else if(policy == "manual") {
if(verbose == TRUE)
cat("Manual policy...\n")
thresh <- rep(value, length = nthresh)
if(length(value) != 1 && length(value) != nthresh)
warning("your threshold is not the same length as number of levels"
)
}
else if(policy == "fdr") {
#
#
# Threshold chosen by FDR-procedure
#
if(verbose == TRUE) cat("FDR policy...")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh) {
d <- c(d, imwd[[lt.to.name(levels[i], "CD")]],
imwd[[lt.to.name(levels[i], "DC")]], imwd[[
lt.to.name(levels[i], "DD")]])
}
if(length(value) != 1)
stop("Length of value should be 1")
noise.level <- sqrt(dev(c(imwd[[lt.to.name(levels[
nthresh], "CD")]], imwd[[lt.to.name(levels[
nthresh], "DC")]], imwd[[lt.to.name(levels[
nthresh], "DD")]])))
minit <- n
dinit <- d
thinit <- qnorm(1 - Q/2) * noise.level
if(log(n, 2) > 15)
ninit <- 4
else {
if(log(n, 2) > 12)
ninit <- 3
else {
if(log(n, 2) > 10)
ninit <- 2
else ninit <- 1
}
}
for(k in seq(1, ninit)) {
dinit1 <- dinit[abs(dinit) >= thinit]
minit <- length(dinit1)
if(minit == 0)
thresh <- max(abs(d)) * 1.0001
else {
thinit <- qnorm(1 - (Q * minit)/(2 * n)) *
noise.level
minit1 <- length(dinit1[abs(dinit1) >= thinit
])
if(minit1 == minit || minit1 == 0)
break
dinit <- dinit1
}
}
if(noise.level > 0) {
m <- length(d)
minit <- length(dinit)
p <- (2 - 2 * pnorm(abs(dinit)/noise.level))
index <- order(p)
j <- seq(1, minit)
m0 <- max(j[p[index] <= (Q * j)/m])
if(m0 != "NA" && m0 < minit)
thresh <- abs(dinit[index[m0]])
else {
if(m0 == "NA")
thresh <- max(abs(dinit)) * 1.0001
else thresh <- 0
}
}
else thresh <- 0
thresh <- rep(thresh, length = nthresh)
if(verbose == TRUE)
cat("Global threshold is: ", thresh[1], "\n",
"sigma is: ", noise.level, "\n")
}
else {
if(verbose == TRUE)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
for(i in 1:nthresh) {
d <- c(imwd[[lt.to.name(levels[i], "CD")]],
imwd[[lt.to.name(levels[i], "DC")]], imwd[[
lt.to.name(levels[i], "DD")]])
m <- length(d)
noise.level <- sqrt(dev(d))
thinit <- qnorm(1 - Q/2) * noise.level
dinit <- d[abs(d) >= thinit]
minit <- length(dinit)
if(minit == 0)
thresh[i] <- max(abs(d)) * 1.0001
else {
if(noise.level > 0) {
p <- (2 - 2 * pnorm(abs(dinit)/noise.level)
)
index <- order(p)
j <- seq(1, minit)
m0 <- max(j[p[index] <= (Q * j)/m])
if(m0 != "NA" && m0 < minit)
thresh[i] <- abs(dinit[index[m0]])
else {
if(m0 == "NA")
thresh[i] <- max(abs(dinit)) * 1.0001
else thresh[i] <- 0
}
}
else thresh[i] <- 0
}
if(verbose == TRUE)
cat("Threshold for level: ", levels[i], "is",
thresh[i], "\n")
}
}
}
else if(policy == "probability") {
if(verbose == TRUE)
cat("Probability policy...")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh) {
d <- c(d, imwd[[lt.to.name(levels[i], "CD")]],
imwd[[lt.to.name(levels[i], "DC")]], imwd[[
lt.to.name(levels[i], "DD")]])
}
if(length(value) != 1)
stop("Length of value should be 1")
thresh <- rep(quantile(abs(d), prob = value), length =
nthresh)
if(verbose == TRUE)
cat("Global threshold is: ", thresh[1], "\n")
}
else {
if(verbose == TRUE)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
if(length(value) == 1)
value <- rep(value, nthresh)
if(length(value) != nthresh)
stop("Wrong number of probability values")
for(i in 1:nthresh) {
d <- c(imwd[[lt.to.name(levels[i], "CD")]],
imwd[[lt.to.name(levels[i], "DC")]], imwd[[
lt.to.name(levels[i], "DD")]])
thresh[i] <- quantile(abs(d), prob = value[i])
if(verbose == TRUE)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
if(return.threshold == TRUE)
return(thresh)
for(i in 1:nthresh) {
dCD <- imwd[[lt.to.name(levels[i], "CD")]]
dDC <- imwd[[lt.to.name(levels[i], "DC")]]
dDD <- imwd[[lt.to.name(levels[i], "DD")]]
if(type == "hard") {
dCD[abs(dCD) <= thresh[i]] <- 0
dDC[abs(dDC) <= thresh[i]] <- 0
dDD[abs(dDD) <= thresh[i]] <- 0
if(verbose == TRUE) {
cat("Level: ", levels[i], " there are ", sum(
dCD == 0), ":", sum(dDC == 0), ":", sum(dDD ==
0), " zeroes and: ")
cat(sum(dCD != 0), ":", sum(dDC != 0), ":", sum(
dDD != 0), " nonzeroes\n")
}
}
else if(type == "soft") {
dCD <- sign(dCD) * (abs(dCD) - thresh[i]) * (abs(dCD) >
thresh[i])
dDC <- sign(dDC) * (abs(dDC) - thresh[i]) * (abs(dDC) >
thresh[i])
dDD <- sign(dDD) * (abs(dDD) - thresh[i]) * (abs(dDD) >
thresh[i])
if(verbose == TRUE) {
cat("Level: ", levels[i], " there are ", sum(
dCD == 0), ":", sum(dDC == 0), ":", sum(dDD ==
0), " zeroes and: ")
cat(sum(dCD != 0), ":", sum(dDC != 0), ":", sum(
dDD != 0), " nonzeroes\n")
}
}
imwd[[lt.to.name(levels[i], "CD")]] <- dCD
imwd[[lt.to.name(levels[i], "DC")]] <- dDC
imwd[[lt.to.name(levels[i], "DD")]] <- dDD
}
if(compression == TRUE)
return(compress(imwd, verbose = verbose))
else return(imwd)
}
"threshold.imwdc"<-
function(imwdc, verbose = FALSE, ...)
{
warning("You are probably thresholding an already thresholded object")
imwd <- uncompress(imwdc, verbose = verbose)
	return(threshold(imwd, verbose = verbose, ...))
}
"threshold.irregwd"<-
function(irregwd, levels = 3:(nlevelsWT(wd)- 1), type = "hard", policy =
"universal", by.level = FALSE, value = 0, dev = var, boundary = FALSE, verbose
= FALSE, return.threshold = FALSE, force.sure = FALSE, cvtol = 0.01, Q =
	0.05, alpha = 0.05, ...)
{
if(verbose == TRUE)
cat("threshold.irregwd:\n")
	if(IsEarly(irregwd)) {
ConvertMessage()
stop()
}
#
# Check class of wd
#
if(verbose == TRUE)
cat("Argument checking\n")
ctmp <- class(irregwd)
if(is.null(ctmp))
stop("irregwd has no class")
else if(ctmp != "irregwd")
stop("irregwd is not of class irregwd")
wd <- irregwd
class(wd) <- "wd"
if(policy != "universal" && policy != "manual" && policy !=
"probability" && policy != "sure" && policy != "mannum" &&
policy != "cv" && policy != "fdr" && policy != "op1" && policy !=
"op2" && policy != "LSuniversal")
stop("Only policys are universal, manual, mannum, sure, LSuniversal, cv, op1, op2 and probability at present"
)
if(type != "hard" && type != "soft")
stop("Only hard or soft thresholding at present")
r <- range(levels)
if(r[1] < 0)
stop("levels out of range, level too small")
if(r[2] > nlevelsWT(wd)- 1)
stop("levels out of range, level too big")
if(r[1] > nlevelsWT(wd)- 1) {
warning("no thresholding done")
return(wd)
}
if(r[2] < 0) {
warning("no thresholding done")
return(wd)
}
n <- 2^nlevelsWT(wd)
nthresh <- length(levels) #
# Estimate sigma
if(by.level == FALSE) {
d <- NULL
ccc <- NULL
for(i in 1:nthresh) {
d <- c(d, accessD(wd, level = levels[i], boundary =
boundary))
ccc <- c(ccc, accessc(irregwd, level = levels[i],
boundary = boundary))
}
		ind <- (1:length(d))[abs(ccc) > 1e-05]
sigma <- sqrt(dev(d[ind]/sqrt(ccc[ind])))
sigma <- rep(sigma, nthresh)
}
	else {
		sigma <- rep(0, nthresh)
		for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary = boundary
)
ccc <- accessc(irregwd, level = levels[i], boundary =
boundary)
			ind <- (1:length(d))[abs(ccc) > 1e-05]
sigma[i] <- sqrt(dev(d[ind]/sqrt(ccc[ind])))
}
}
if(verbose == TRUE)
print(sigma)
d <- NULL
ccc <- NULL #
# Check to see if we're thresholding a complex wavelet transform.
# We can only do certain things in this case
#
if(is.complex(wd$D)) {
stop("Complex transform not implemented")
}
#
#
# Decide which policy to adopt
# The next if-else construction should define a vector called
# "thresh" that contains the threshold value for each level
# in "levels". This may be the same threshold value
# a global threshold.
#
if(policy == "universal") {
#
#
# Donoho and Johnstone's universal policy
#
if(verbose == TRUE) cat("Universal policy...")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh)
d <- c(d, accessD(wd, level = levels[i],
boundary = boundary))
nd <- length(d)
thresh <- sqrt(2 * log(nd))
if(verbose == TRUE)
cat("Global threshold is: ", thresh, "\n")
thresh <- rep(thresh, length = nthresh)
}
else {
if(verbose == TRUE)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary =
boundary)
nd <- length(d)
thresh[i] <- sqrt(2 * log(nd))
if(verbose == TRUE)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
expo <- 1
}
else if(policy == "LSuniversal") {
#
#
# The universal policy modified for local spectral smoothing
# This should only be used via the LocalSpec function
#
if(verbose == TRUE) cat("Local spectral universal policy...")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh)
d <- c(d, accessD(wd, level = levels[i],
boundary = boundary))
nd <- length(d)
thresh <- log(nd)
if(verbose == TRUE)
cat("Global threshold is: ", thresh, "\n")
thresh <- rep(thresh, length = nthresh)
}
else {
if(verbose == TRUE)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary =
boundary)
nd <- length(d)
thresh[i] <- log(nd)
if(verbose == TRUE)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
expo <- 1
}
else if(policy == "sure") {
if(type == "hard")
stop("Can only do soft thresholding with sure policy")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh) {
d <- c(d, accessD(wd, level = levels[i],
boundary = boundary))
ccc <- c(ccc, accessc(irregwd, level = levels[i
], boundary = boundary))
}
			ind <- (1:length(d))[abs(ccc) > 1e-05]
nd <- length(ind)
neta.d <- (log(nd, base = 2)^(3/2))
sd2 <- (sum((d[ind]/(sigma[1] * ccc)[ind])^2 - 1)/nd)
if(verbose == TRUE) {
cat("neta.d is ", neta.d, "\nsd2 is ", sd2,
"\n")
cat("nd is ", nd, "\n")
cat("noise.level ", noise.level, "\n")
}
if(force.sure == TRUE || sd2 > neta.d/sqrt(nd)) {
if(verbose == TRUE) {
cat("SURE: Using SURE\n")
}
thresh <- newsure(sqrt(ccc) * sigma[1], d)
expo <- 0
}
else {
if(verbose == TRUE)
cat("SURE: (sparse) using sqrt 2log n\n")
thresh <- sqrt(2 * log(nd))
}
thresh <- rep(thresh, length = nthresh)
if(verbose == TRUE)
cat("Global threshold is ", thresh, "\n")
}
else {
#
#
# By level is true
#
print("Sure for level- and coefficient-dependenet thresholding is not adapted"
)
if(verbose == TRUE)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
collect <- NULL
for(i in 1:nthresh)
collect <- c(collect, accessD(wd, level =
levels[i], boundary = boundary))
noise.level <- sqrt(dev(collect))
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary =
boundary)
nd <- length(d)
neta.d <- (log(nd, base = 2)^(3/2))
sd2 <- (sum((d/noise.level)^2 - 1)/nd)
if(verbose == TRUE) {
cat("neta.d is ", neta.d, "\nsd2 is ", sd2,
"\n")
cat("nd is ", nd, "\n")
cat("noise.level ", noise.level, "\n")
}
if(force.sure == TRUE || sd2 > neta.d/sqrt(nd)) {
if(verbose == TRUE) {
cat("SURE: Using SURE\n")
}
thresh[i] <- sure(d/noise.level)
}
else {
if(verbose == TRUE)
cat("SURE: (sparse) using sqrt 2log n\n")
thresh[i] <- sqrt(2 * log(nd))
}
if(verbose == TRUE)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
else if(policy == "manual") {
#
#
# User supplied threshold policy
#
if(verbose == TRUE) cat("Manual policy\n")
thresh <- rep(value, length = nthresh)
expo <- 1
if(length(value) != 1 && length(value) != nthresh)
warning("your threshold is not the same length as number of levels"
)
}
else if(policy == "mannum") {
if(verbose == TRUE) {
cat("Manual policy using ", value, " of the")
cat(" largest coefficients\n")
}
if(value < 1) {
stop("Have to select an integer larger than 1 for value"
)
}
else if(value > length(wd$D)) {
stop(paste("There are only ", length(wd$D),
" coefficients, you specified ", value))
}
coefs <- wd$D
scoefs <- sort(abs(coefs))
scoefs <- min(rev(scoefs)[1:value])
wd$D[abs(wd$D) < scoefs] <- 0
return(wd)
}
else if(policy == "probability") {
#
#
# Threshold is quantile based
#
if(verbose == TRUE) cat("Probability policy...")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh)
d <- c(d, accessD(wd, level = levels[i],
boundary = boundary))
if(length(value) != 1)
stop("Length of value should be 1")
thresh <- rep(quantile(abs(d), prob = value), length =
nthresh)
if(verbose == TRUE)
cat("Global threshold is: ", thresh[1], "\n")
}
else {
if(verbose == TRUE)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
if(length(value) == 1)
value <- rep(value, nthresh)
if(length(value) != nthresh)
stop("Wrong number of probability values")
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary =
boundary)
thresh[i] <- quantile(abs(d), prob = value[i])
if(verbose == TRUE)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
if(return.threshold == TRUE)
return(thresh)
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary = boundary)
ccc <- accessc(irregwd, level = levels[i], boundary = boundary)
actthresh <- thresh[i] * (sigma[i] * sqrt(ccc))^expo
# is vector
if(type == "hard") {
d[abs(d) <= actthresh] <- 0
if(verbose == TRUE)
cat("Level: ", levels[i], " there are ", sum(d ==
0), " zeroes\n")
}
else if(type == "soft") {
d <- (d * (abs(d) - actthresh) * (abs(d) > actthresh))/
abs(d)
d[is.na(d)] <- 0
}
wd <- putD(wd, level = levels[i], v = d, boundary = boundary)
}
wd
}
"threshold.mwd"<-
function(mwd, levels = 3:(nlevelsWT(mwd)- 1), type = "hard", policy = "universal",
boundary = FALSE, verbose = FALSE, return.threshold = FALSE, threshold = 0, covtol
 = 1e-09, robust = TRUE, return.chisq = FALSE, bivariate = TRUE, ...)
{
#threshold.mwd
#thresholds a multiple wavelet object
#Tim Downie
#last updated May 1996
#
#
# Check arguments
#
if(verbose == TRUE) cat("threshold.mwd:\n")
if(verbose == TRUE)
cat("Argument checking\n")
ctmp <- class(mwd)
if(is.null(ctmp))
stop("mwd has no class")
else if(ctmp != "mwd")
stop("mwd is not of class mwd")
if(policy != "manual" && policy != "universal" && policy !=
"visushrink")
stop("Only policies are universal manual and visushrink at present"
)
if(type != "hard" && type != "soft")
stop("Only hard or soft thresholding at present")
nlevels <- nlevelsWT(mwd)
npsi <- mwd$filter$npsi
r <- range(levels)
if(r[1] < 0)
stop("levels out of range, level too small")
if(r[2] > nlevelsWT(mwd)- 1)
stop("levels out of range, level too big")
if(r[1] > nlevelsWT(mwd)- 1) {
warning("no thresholding done, returning input")
return(mwd)
}
if(r[2] < 0) {
warning("no thresholding done, returning input")
return(mwd)
}
if(policy == "manual" && threshold <= 0) stop(
"If you want manual thresholding, you must supply\na positive threshold"
) #
#
	#Apply a single-wavelet policy to the multiple wavelet coefficients.
	#So far only universal thresholding;
	#visushrink can be done by using the single-wavelet policy.
#
if(bivariate == FALSE) {
if(verbose == TRUE)
cat("Thresholding multiple wavelets using single wavelet universal thresholding\n"
)
noise.level <- rep(0, npsi)
thresh <- rep(0, npsi)
ninlev <- rep(0, length(levels))
if(robust == FALSE)
dev <- var
else dev <- mad
D <- NULL
for(i in levels) {
index <- i + 1 - levels[1]
ninlev[index] <- dim(accessD(mwd, level = i, boundary
= boundary))[2]
D <- matrix(c(D, accessD(mwd, level = i, boundary =
boundary)), nrow = npsi)
}
nD <- dim(D)[2]
for(i in 1:npsi) {
noise.level[i] <- sqrt(dev(D[i, ]))
if(policy == "visushrink")
thresh[i] <- (sqrt(2 * log(nD)) * noise.level[i
])/sqrt(nD)
else if(policy == "manual")
thresh[i] <- threshold[i]
else thresh[i] <- (sqrt(2 * log(nD)) * noise.level[i])
}
if(verbose == TRUE) {
cat("Threshold for each wavelet is: ", thresh, "\n")
cat("noise levels are : ", noise.level, "\n")
}
for(i in 1:npsi) {
d <- D[i, ]
if(type == "hard") {
d[abs(d) <= thresh[i]] <- 0
}
else if(type == "soft") {
d <- sign(d) * (abs(d) - thresh[i]) * (abs(d) >
thresh[i])
}
D[i, ] <- d
}
jump <- 1
for(i in levels) {
index <- i + 1 - levels[1]
mwd <- putD(mwd, level = i, M = D[, jump:(jump + ninlev[
index] - 1)], boundary = boundary)
jump <- jump + ninlev[index]
}
if(return.threshold == TRUE)
return(thresh)
else return(mwd)
}
#
#
#If we get here then do Multivariate thresholding
#
if(policy == "universal" || policy == "manual") {
n <- 0
nj <- rep(0, length(levels))
chisq <- NULL
chisqkeep <- NULL
chisqnewkeep <- NULL
for(i in 1:length(levels)) {
level <- levels[i]
d <- accessD(mwd, level = level)
nj[i] <- dim(d)[2]
Y <- rep(0, nj[i])
# VHAT is the Var/Covar matrix of the data at each level
# estimated using normal estimates or robust estimates
#
if(robust == FALSE)
VHAT <- var(t(d))
if(robust == TRUE) VHAT <- rcov(d) #
# If the smallest eigen value of VHAT is less than covtol
# we may run into problems when inverting VHAT
# so code chisq as -1 and return the same vector coeff as was input
#
if(min(abs(eigen(VHAT, only.values = TRUE)$values)) <
covtol) {
warning(paste(
"singular variance structure in level ",
level, "this level not thresholded"))
Y <- rep(-1, nj[i])
}
else {
VINV <- solve(VHAT)
for(s in 1:npsi)
Y <- Y + d[s, ]^2 * VINV[s, s]
for(s in 2:npsi)
for(t in 1:(s - 1))
Y <- Y + 2 * d[s, ] * d[t, ] * VINV[s, t]
n <- n + nj[i] #
# The above line means that the threshold is calculated using only
# the thresholdable coefficients.
}
chisq <- c(chisq, Y)
}
}
if(policy != "manual")
chithresh <- 2 * log(n)
else chithresh <- threshold
if(return.threshold == TRUE) {
return(chithresh)
}
if(return.chisq == TRUE)
return(chisq)
lc <- length(chisq)
dnew <- matrix(rep(0, 2 * lc), nrow = 2)
d <- NULL
for(i in 1:length(levels)) {
d <- matrix(c(d, accessD(mwd, level = levels[i])), nrow = 2)
}
if(type == "hard") {
for(i in 1:lc) {
keep <- 1 * ((chisq[i] >= chithresh) || (chisq[i] == -1
))
dnew[, i] <- d[, i] * keep
}
}
if(type == "soft") {
for(i in 1:lc) {
if(chisq[i] != -1)
chisqnew <- max(chisq[i] - chithresh, 0)
if(chisq[i] > 0)
shrink <- (max(chisq[i] - chithresh, 0))/chisq[
i]
else shrink <- 0
dnew[, i] <- d[, i] * shrink
}
}
low <- 1
for(i in 1:length(levels)) {
mwd <- putD(mwd, level = levels[i], M = dnew[, low:(low - 1 +
nj[i])])
low <- low + nj[i]
}
if(verbose == TRUE)
cat("returning wavelet decomposition\n")
return(mwd)
}
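#
# Illustrative sketch (not run): threshold a multiple wavelet (mwd)
# decomposition with the routine above and reconstruct with mwr.  The data
# vector is a placeholder of dyadic length; mwd and mwr are assumed to take
# their usual wavethresh arguments.
#
#   x <- rnorm(256)
#   xmwd <- mwd(x)
#   xmwdT <- threshold(xmwd, policy = "universal", type = "hard")
#   xhat <- mwr(xmwdT)
#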
"threshold.wd"<-
function(wd, levels = 3:(nlevelsWT(wd)- 1), type = "soft", policy = "sure",
by.level = FALSE, value = 0, dev = madmad, boundary = FALSE, verbose = FALSE,
return.threshold = FALSE, force.sure = FALSE, cvtol = 0.01,
cvmaxits=500, Q =
0.050000000000000003, OP1alpha = 0.050000000000000003, alpha = 0.5,
beta = 1, C1 = NA, C2 = NA, C1.start = 100, al.check=TRUE, ...)
{
if(verbose == TRUE)
cat("threshold.wd:\n")
if(IsEarly(wd)) {
ConvertMessage()
stop()
}
#
# Check class of wd
#
if(verbose == TRUE)
cat("Argument checking\n")
ctmp <- class(wd)
if(is.null(ctmp))
stop("wd has no class")
else if(ctmp != "wd")
stop("wd is not of class wd")
if(policy != "universal" && policy != "manual" && policy !=
"probability" && policy != "sure" && policy != "mannum" &&
policy != "cv" && policy != "fdr" && policy != "op1" && policy !=
"op2" && policy != "LSuniversal" && policy != "BayesThresh")
stop("Only policys are universal, BayesThresh, manual, mannum, sure, LSuniversal, cv, op1, op2 and probability at present"
)
if(type != "hard" && type != "soft")
stop("Only hard or soft thresholding at present")
r <- range(levels)
if(r[1] < 0)
stop("levels out of range, level too small. Minimum level is 0"
)
if(r[2] > nlevelsWT(wd) - 1)
stop(paste("levels out of range, level too big. Maximum level is",
nlevelsWT(wd) - 1))
if(r[1] > nlevelsWT(wd)- 1) {
warning("no thresholding done")
return(wd)
}
if(r[2] < 0) {
warning("no thresholding done")
return(wd)
}
if (al.check==TRUE)
if (all(sort(levels)==levels)==FALSE)
warning("Entries in levels vector are not ascending. Please check this is what you intend. If so, you can turn this warning off with al.check argument")
d <- NULL
n <- 2^nlevelsWT(wd)
nthresh <- length(levels) #
#
# Check to see if we're thresholding a complex wavelet transform.
# We can only do certain things in this case
#
if(is.complex(wd$D)) {
stop("Please use cthresh package for complex-valued wavelet shrinkage")
}
#
#
# Decide which policy to adopt
# The next if-else construction should define a vector called
# "thresh" that contains the threshold value for each level
# in "levels". This may be the same threshold value
# a global threshold.
#
if(policy == "universal") {
#
#
# Donoho and Johnstone's universal policy
#
if(verbose == TRUE) cat("Universal policy...")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh)
d <- c(d, accessD(wd, level = levels[i],
boundary = boundary))
noise.level <- sqrt(dev(d))
nd <- length(d)
thresh <- sqrt(2 * log(nd)) * noise.level
if(verbose == TRUE)
cat("Global threshold is: ", thresh, "\n")
thresh <- rep(thresh, length = nthresh)
}
else {
if(verbose == TRUE)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary =
boundary)
noise.level <- sqrt(dev(d))
nd <- length(d)
thresh[i] <- sqrt(2 * log(nd)) *
noise.level
if(verbose == TRUE)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
else if(policy == "LSuniversal") {
#
#
# The universal policy modified for local spectral smoothing
# This should only be used via the LocalSpec function
#
if(verbose == TRUE) cat("Local spectral universal policy...")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh)
d <- c(d, accessD(wd, level = levels[i],
boundary = boundary))
noise.level <- sqrt(dev(d))
nd <- length(d)
thresh <- log(nd) * noise.level
if(verbose == TRUE)
cat("Global threshold is: ", thresh, "\n")
thresh <- rep(thresh, length = nthresh)
}
else {
if(verbose == TRUE)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary =
boundary)
noise.level <- sqrt(dev(d))
nd <- length(d)
thresh[i] <- log(nd) * noise.level
if(verbose == TRUE)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
else if(policy == "sure") {
if(type == "hard")
stop("Can only do soft thresholding with sure policy")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh)
d <- c(d, accessD(wd, level = levels[i],
boundary = boundary))
noise.level <- sqrt(dev(d))
nd <- length(d)
neta.d <- (log(nd, base = 2)^(3/2))
sd2 <- (sum((d/noise.level)^2 - 1)/nd)
if(verbose == TRUE) {
cat("neta.d is ", neta.d, "\nsd2 is ", sd2,
"\n")
cat("nd is ", nd, "\n")
cat("noise.level ", noise.level, "\n")
}
if(force.sure == TRUE || sd2 > neta.d/sqrt(nd)) {
if(verbose == TRUE) {
cat("SURE: Using SURE\n")
}
thresh <- sure(d/noise.level)
}
else {
if(verbose == TRUE)
cat("SURE: (sparse) using sqrt 2log n\n")
thresh <- sqrt(2 * log(nd))
}
thresh <- rep(thresh * noise.level, length = nthresh)
if(verbose == TRUE)
cat("Global threshold is ", thresh, "\n")
}
else {
#
#
# By level is true
#
if(verbose == TRUE) cat("Level by level\n")
thresh <- rep(0, length = nthresh)
collect <- NULL
for(i in 1:nthresh)
collect <- c(collect, accessD(wd, level =
levels[i], boundary = boundary))
noise.level <- sqrt(dev(collect))
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary =
boundary)
nd <- length(d)
neta.d <- (log(nd, base = 2)^(3/2))
sd2 <- (sum((d/noise.level)^2 - 1)/nd)
if(verbose == TRUE) {
cat("neta.d is ", neta.d, "\nsd2 is ", sd2,
"\n")
cat("nd is ", nd, "\n")
cat("noise.level ", noise.level, "\n")
}
if(force.sure == TRUE || sd2 > neta.d/sqrt(nd)) {
if(verbose == TRUE) {
cat("SURE: Using SURE\n")
}
thresh[i] <- sure(d/noise.level)
}
else {
if(verbose == TRUE)
cat("SURE: (sparse) using sqrt 2log n\n")
thresh[i] <- sqrt(2 * log(nd))
}
if(verbose == TRUE)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
else if(policy == "BayesThresh") {
#
# Check that all hyperparameters of the prior are non-negative
#
if(alpha < 0) stop("parameter alpha is negative")
if(beta < 0)
stop("parameter beta is negative")
nthresh <- length(levels)
nsignal <- rep(0, nthresh)
noise.level <- sqrt(dev(accessD(wd, level = (nlevelsWT(wd)- 1))))
v <- 2^( - alpha * levels)
if(is.na(C1)) {
#
# Estimation of C1 and C2 via universal thresholding
#
if(C1.start < 0) stop("C1.start is negative")
universal <- threshold(wd, policy = "universal", type
= "hard", dev = dev, by.level = FALSE, levels =
levels)
sum2 <- rep(0, nthresh)
for(i in 1:nthresh) {
dun <- accessD(universal, level = levels[i])
nsignal[i] <- sum(abs(dun) > 10^-10)
if(nsignal[i] > 0)
sum2[i] <- sum(dun[abs(dun) > 0]^2)
}
if(sum(nsignal) == 0) {
wd <- nullevels(wd, levelstonu = levels)
if(verbose == TRUE)
cat(
"hyperparameters of the prior are: alpha = ",
alpha, "C1 = 0", "beta = ", beta,
"C2 = 0\n")
return(wd)
}
else {
fntoopt <- function(C, nsignal, noise.level, wd, sum2, v) {
ans<- nsignal * (log(noise.level^2 + C^2 *
v) - 2 * log(pnorm(( - noise.level * sqrt(2 *
log(2^nlevelsWT(wd))))/sqrt(noise.level^2 + C^2 *
v)))) + sum2/(noise.level^2 + C^2 * v)
sum(ans)
}
C1 <- optimize(f=fntoopt, interval=c(0, 50*sqrt(C1.start)),
nsignal=nsignal, noise.level=noise.level, wd=wd, sum2=sum2, v=v)$minimum^2
}
}
if(C1 < 0)
stop("parameter C1 is negative")
tau2 <- C1 * v
if(is.na(C2)) {
p <- 2 * pnorm(( - noise.level * sqrt(2 * log(2^wd$
nlevels)))/sqrt(noise.level^2 + tau2))
if(beta == 1)
C2 <- sum(nsignal/p)/nlevelsWT(wd)
else C2 <- (1 - 2^(1 - beta))/(1 - 2^((1 - beta) * wd$
nlevels)) * sum(nsignal/p)
}
if(C2 < 0)
stop("parameter C2 is negative")
if(verbose == TRUE) cat("noise.level is: ", round(noise.level, 4),
"\nhyperparameters of the prior are: alpha = ",
alpha, "C1 = ", round(C1, 4), "beta = ", beta,
"C2 = ", round(C2, 4), "\n") #
#
# Bayesian Thresholding
#
if(C1 == 0 | C2 == 0)
wd <- nullevels(wd, levelstonu = levels)
else {
pr <- pmin(1, C2 * 2^( - beta * levels))
rat <- tau2/(noise.level^2 + tau2) #
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i])
w <- (1 - pr[i])/pr[i]/sqrt((noise.level^2 *
rat[i])/tau2[i]) * exp(( - rat[i] * d^2)/2/
noise.level^2)
z <- 0.5 * (1 + pmin(w, 1))
d <- sign(d) * pmax(0, rat[i] * abs(d) -
noise.level * sqrt(rat[i]) * qnorm(z))
wd <- putD(wd, level = levels[i], v = d)
}
}
return(wd)
}
else if(policy == "cv") {
#
#
# Threshold chosen by cross-validation
#
if(verbose == TRUE) cat("Cross-validation policy\n") #
if(by.level == TRUE) stop(
"Cross-validation policy does not permit by.level\n\t\t\tthresholding (yet)"
) #
# Reconstruct the function for CWCV (this should be quick)
#
ynoise <- wr(wd)
thresh <- CWCV(ynoise = ynoise, x = 1:length(ynoise),
filter.number = wd$filter$filter.number, family = wd$
filter$family, thresh.type = type, tol = cvtol, maxits=cvmaxits,
verbose = 0, plot.it = FALSE, ll = min(levels))$xvthresh
thresh <- rep(thresh, length = nthresh)
}
else if(policy == "fdr") {
#
#
# Threshold chosen by FDR-procedure
#
if(verbose == TRUE) cat("FDR policy...")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh) {
d <- c(d, accessD(wd, level = levels[i],
boundary = boundary))
}
if(length(value) != 1)
stop("Length of value should be 1")
noise.level <- sqrt(dev(accessD(wd, level = (nlevelsWT(wd)-
1))))
minit <- length(d)
dinit <- d
thinit <- qnorm(1 - Q/2) * noise.level
if(log(n, 2) > 12)
ninit <- 3
else {
if(log(n, 2) > 10)
ninit <- 2
else ninit <- 1
}
for(k in seq(1, ninit)) {
dinit1 <- dinit[abs(dinit) >= thinit]
minit <- length(dinit1)
if(minit == 0)
thresh <- max(abs(d)) * 1.0001
else {
thinit <- qnorm(1 - (Q * minit)/(2 * n)) *
noise.level
minit1 <- length(dinit1[abs(dinit1) >= thinit
])
if(minit1 == minit || minit1 == 0)
break
dinit <- dinit1
}
}
if(noise.level > 0) {
m <- length(d)
minit <- length(dinit)
p <- (2 - 2 * pnorm(abs(dinit)/noise.level))
index <- order(p)
j <- seq(1, minit)
m0 <- max(j[p[index] <= (Q * j)/m])
if(m0 != "NA" && m0 < minit)
thresh <- abs(dinit[index[m0]])
else {
if(m0 == "NA")
thresh <- max(abs(dinit)) * 1.0001
else thresh <- 0
}
}
else thresh <- 0
thresh <- rep(thresh, length = nthresh)
if(verbose == TRUE)
cat("Global threshold is: ", thresh[1], "\n",
"sigma is: ", noise.level, "\n")
}
else {
if(verbose == TRUE)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary =
boundary)
m <- length(d)
noise.level <- sqrt(dev(d))
thinit <- qnorm(1 - Q/2) * noise.level
dinit <- d[abs(d) >= thinit]
minit <- length(dinit)
if(minit == 0)
thresh[i] <- max(abs(d)) * 1.0001
else {
if(noise.level > 0) {
p <- (2 - 2 * pnorm(abs(dinit)/noise.level)
)
index <- order(p)
j <- seq(1, minit)
m0 <- max(j[p[index] <= (Q * j)/m])
if(m0 != "NA" && m0 < minit)
thresh[i] <- abs(dinit[index[m0]])
else {
if(m0 == "NA")
thresh[i] <- max(abs(dinit)) * 1.0001
else thresh[i] <- 0
}
}
else thresh[i] <- 0
}
if(verbose == TRUE)
cat("Threshold for level: ", levels[i], "is",
thresh[i], "\n")
}
}
}
else if(policy == "op1") {
#
#
# Ogden and Parzen's first policy
#
if(verbose == TRUE) cat("Ogden and Parzen's first policy\n")
if(by.level == FALSE)
stop("Ogden and Parzen's first policy only computes level-dependent policies"
)
thresh <- TOthreshda1(ywd = wd, alpha = OP1alpha, verbose =
verbose, return.threshold = return.threshold)
return(thresh)
}
else if(policy == "op2") {
#
#
# Ogden and Parzen's second policy
#
if(verbose == TRUE) cat("Ogden and Parzen's second policy\n")
if(by.level == FALSE)
stop("Ogden and Parzen's second policy only computes level-dependent policies"
)
thresh <- TOthreshda2(ywd = wd, alpha = OP1alpha, verbose =
verbose, return.threshold = return.threshold)
return(thresh)
}
else if(policy == "manual") {
#
#
# User supplied threshold policy
#
if(verbose == TRUE) cat("Manual policy\n")
thresh <- rep(value, length = nthresh)
if(length(value) != 1 && length(value) != nthresh)
warning("your threshold is not the same length as number of levels"
)
}
else if(policy == "mannum") {
if(verbose == TRUE) {
cat("Manual policy using ", value, " of the")
cat(" largest coefficients\n")
}
if(value < 1) {
stop("Have to select an integer larger than 1 for value"
)
}
else if(value > length(wd$D)) {
stop(paste("There are only ", length(wd$D),
" coefficients, you specified ", value))
}
coefs <- wd$D
scoefs <- sort(abs(coefs))
scoefs <- min(rev(scoefs)[1:value])
wd$D[abs(wd$D) < scoefs] <- 0
return(wd)
}
else if(policy == "probability") {
#
#
# Threshold is quantile based
#
if(verbose == TRUE) cat("Probability policy...")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh)
d <- c(d, accessD(wd, level = levels[i],
boundary = boundary))
if(length(value) != 1)
stop("Length of value should be 1")
thresh <- rep(quantile(abs(d), prob = value), length =
nthresh)
if(verbose == TRUE)
cat("Global threshold is: ", thresh[1], "\n")
}
else {
if(verbose == TRUE)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
if(length(value) == 1)
value <- rep(value, nthresh)
if(length(value) != nthresh)
stop("Wrong number of probability values")
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary =
boundary)
thresh[i] <- quantile(abs(d), prob = value[i])
if(verbose == TRUE)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
if(return.threshold == TRUE)
return(thresh)
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary = boundary)
if(type == "hard") {
d[abs(d) <= thresh[i]] <- 0
}
else if(type == "soft") {
d <- (d * (abs(d) - thresh[i]) * (abs(d) > thresh[i]))/
abs(d)
d[is.na(d)] <- 0
}
if(verbose == TRUE)
cat("Level: ", levels[i], " there are ", sum(d == 0),
" zeroes\n")
wd <- putD(wd, level = levels[i], v = d, boundary = boundary)
}
wd
}
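#
# Illustrative sketch (not run): the standard denoising pipeline built around
# threshold.wd -- decompose, threshold, reconstruct.  The noisy vector is a
# placeholder of dyadic length.
#
#   ynoise <- rnorm(512)
#   ywd <- wd(ynoise, filter.number = 10, family = "DaubLeAsymm")
#   ywdT <- threshold(ywd, policy = "universal", type = "soft")
#   yhat <- wr(ywdT)
#
# Setting return.threshold = TRUE returns the computed threshold(s) rather
# than the thresholded object, which is handy for inspecting a policy.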
"threshold.wd3D"<-
function(wd3D, levels = 3:(nlevelsWT(wd3D)- 1), type = "hard", policy =
"universal", by.level = FALSE, value = 0, dev = var, verbose = FALSE,
return.threshold = FALSE, ...)
{
if(verbose == TRUE) cat("threshold.wd3D:\n") #
#
# Check class of wd3D
#
if(verbose == TRUE)
cat("Argument checking\n")
ctmp <- class(wd3D)
if(is.null(ctmp))
stop("wd3D has no class")
else if(ctmp != "wd3D")
stop("wd3D is not of class wd3D")
if(policy != "universal" && policy != "manual")
stop("Only policys are universal, manual")
if(type != "hard" && type != "soft")
stop("Only hard or soft thresholding at present")
r <- range(levels)
if(r[1] < 0)
stop("levels out of range, level too small")
if(r[2] > nlevelsWT(wd3D) - 1)
stop(paste("levels out of range, level too big. Maximum level is ",
nlevelsWT(wd3D) - 1))
if(r[1] > nlevelsWT(wd3D) - 1) {
warning("no thresholding done")
return(wd3D)
}
if(r[2] < 0) {
warning("no thresholding done")
return(wd3D)
}
d <- NULL
n <- (2^nlevelsWT(wd3D))^3
nthresh <- length(levels) #
#
#
# Decide which policy to adopt
# The next if-else construction should define a vector called
# "thresh" that contains the threshold value for each level
# in "levels". This may be the same threshold value
# a global threshold.
#
if(policy == "universal") {
#
#
# Donoho and Johnstone's universal policy
#
if(verbose == TRUE) cat("Universal policy...")
if(by.level == FALSE) {
if(verbose == TRUE)
cat("All levels at once\n")
for(i in 1:nthresh) {
v <- accessD(wd3D, level = levels[i])
d <- c(v$GHH, v$HGH, v$GGH, v$HHG, v$GHG, v$HGG,
v$GGG)
if(levels[i] == 0)
d <- c(d, v$HHH)
}
noise.level <- sqrt(dev(d))
nd <- length(d)
thresh <- sqrt(2 * log(nd)) * noise.level
if(verbose == TRUE)
cat("Global threshold is: ", thresh, "\n")
thresh <- rep(thresh, length = nthresh)
}
else {
if(verbose == TRUE)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
for(i in 1:nthresh) {
v <- accessD(wd3D, level = levels[i])
d <- c(v$GHH, v$HGH, v$GGH, v$HHG, v$GHG, v$HGG,
v$GGG)
if(levels[i] == 0)
d <- c(d, v$HHH)
noise.level <- sqrt(dev(d))
nd <- length(d)
thresh[i] <- sqrt(2 * log(nd)) * noise.level
if(verbose == TRUE)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
else if(policy == "manual") {
#
#
# User supplied threshold policy
#
if(verbose == TRUE) cat("Manual policy\n")
thresh <- rep(value, length = nthresh)
if(length(value) != 1 && length(value) != nthresh)
warning("your threshold is not the same length as number of levels"
)
}
if(return.threshold == TRUE)
return(thresh)
blocktypes <- c("GHH", "HGH", "GGH", "HHG", "GHG", "HGG", "GGG")
for(i in 1:nthresh) {
if(levels[i] == 0)
lblocks <- c("HHH", blocktypes)
else lblocks <- blocktypes
nblocks <- length(lblocks)
thedim <- rep(2^(levels[i]), 3)
for(j in 1:nblocks) {
d <- as.vector(accessD(wd3D, level = levels[i], block
= lblocks[j]))
if(type == "hard") {
d[abs(d) <= thresh[i]] <- 0
if(verbose == TRUE)
cat("Level: ", levels[i], " there are ", sum(
d == 0), " zeroes\n")
}
else if(type == "soft") {
d <- (d * (abs(d) - thresh[i]) * (abs(d) >
thresh[i]))/abs(d)
d[is.na(d)] <- 0
}
vinsert <- list(lev = levels[i], block = lblocks[j], a
= array(d, dim = thedim))
wd3D <- putD(wd3D, v = vinsert)
}
}
wd3D
}
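#
# Illustrative sketch (not run): threshold a three-dimensional wavelet
# decomposition and invert it.  The array is a placeholder with dyadic side
# length.
#
#   a <- array(rnorm(16^3), dim = c(16, 16, 16))
#   awd <- wd3D(a)
#   awdT <- threshold(awd, policy = "universal", type = "hard")
#   ahat <- wr3D(awdT)
#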
"threshold.wp"<-
function(wp, levels = 3:(nlevelsWT(wp) - 1), dev = madmad, policy = "universal",
value = 0, by.level = FALSE, type = "soft", verbose = FALSE, return.threshold
= FALSE, cvtol = 0.01, cvnorm = l2norm, add.history = TRUE, ...)
{
#
# Do some arg checking
#
rn <- range(levels)
	if(rn[1] < 0)
		stop("all selected levels must be nonnegative")
	if(rn[2] > nlevelsWT(wp) - 1)
		stop(paste("all selected levels must be no greater than",
			nlevelsWT(wp) - 1))
nr <- nrow(wp$wp)
nc <- ncol(wp$wp) #
#
# Figure out the threshold
#
if(policy == "manual") {
if(length(value) == 1) {
if(verbose == TRUE)
cat("Univariate threshold\n")
threshv <- rep(value, length(levels))
}
else if(length(value) == length(levels)) {
if(verbose == TRUE)
cat("Multivariate threshold\n")
threshv <- value
}
else stop("Manual policy. Your threshold vector is neither of length 1 or the length of the number of levels that you wish to threshold"
)
}
else if(policy == "universal") {
if(verbose == TRUE)
cat("Universal threshold\n")
if(by.level == FALSE) {
#
# Global threshold
#
d <- NULL
for(lev in 1:length(levels)) {
d <- c(d, accessD(wp, level = levels[lev]))
}
sigma <- dev(d)
threshv <- sqrt(2 * log(nc) * sigma)
threshv <- rep(threshv, length(levels))
}
else {
#
#
# Level by level threshold
#
threshv <- rep(0, length(levels))
for(lev in 1:length(levels)) {
d <- accessD(wp, level = levels[lev])
sigma <- dev(d)
threshv[lev] <- sqrt(2 * log(nc) * sigma)
}
}
}
if(verbose == TRUE) {
cat("Threshold is ")
print(threshv)
cat("\n")
}
#
#
# Now apply the threshold
#
if(return.threshold == TRUE)
return(threshv)
for(lev in 1:length(levels)) {
if(verbose == TRUE) {
cat("Applying threshold ", threshv[lev], " to level ",
levels[lev], "\n")
}
d <- accessD(wp, level = levels[lev])
if(type == "hard")
d[abs(d) <= threshv[lev]] <- 0
else if(type == "soft")
d <- sign(d) * (abs(d) - threshv[lev]) * (abs(d) >
threshv[lev])
wp <- putD(wp, level = levels[lev], v = d)
}
wp$date <- c(wp$date, date())
if(add.history == TRUE)
wp$history <- c(wp$history, paste("Thresholded:", paste(
as.character(threshv), collapse = "; "), "Levels: ",
paste(as.character(levels), collapse = "; "),
"Policy: ", policy, "Type: ", type))
wp
}
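#
# Illustrative sketch (not run): threshold wavelet packet coefficients and
# invert via a selected basis.  MaNoVe/InvBasis are the usual wavethresh
# route for wp objects and are assumed here rather than defined in this file.
#
#   x <- rnorm(256)
#   xwp <- wp(x, filter.number = 10, family = "DaubLeAsymm")
#   xwpT <- threshold(xwp, policy = "universal", type = "soft")
#   nv <- MaNoVe(xwpT)                    # choose a basis
#   xhat <- InvBasis(xwpT, nv)            # invert in that basis
#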
"threshold.wst"<-
function(wst, levels = 3:(nlevelsWT(wst) - 1), dev = madmad, policy = "universal",
value = 0, by.level = FALSE, type = "soft", verbose = FALSE, return.threshold
= FALSE, cvtol = 0.01, cvnorm = l2norm, add.history = TRUE, ...)
{
#
# Do some arg checking
#
call <- match.call()
rn <- range(levels)
	if(rn[1] < 0)
		stop("all selected levels must be nonnegative")
	if(rn[2] > nlevelsWT(wst) - 1)
		stop(paste("all selected levels must be no greater than",
			nlevelsWT(wst) - 1))
nr <- nrow(wst$wp)
nc <- ncol(wst$wp) #
#
# Figure out the threshold
#
if(policy == "manual") {
if(length(value) == 1) {
if(verbose == TRUE)
cat("Univariate threshold\n")
threshv <- rep(value, length(levels))
}
else if(length(value) == length(levels)) {
if(verbose == TRUE)
cat("Multivariate threshold\n")
threshv <- value
}
else stop("Manual policy. Your threshold vector is neither of length 1 or the length of the number of levels that you wish to threshold"
)
}
else if(policy == "universal") {
if(verbose == TRUE)
cat("Universal threshold\n")
if(by.level == FALSE) {
#
# Global threshold
#
d <- NULL
for(lev in 1:length(levels)) {
d <- c(d, accessD(wst, level = levels[lev]))
}
sigma <- dev(d)
threshv <- sqrt(2 * log(nc) * sigma)
threshv <- rep(threshv, length(levels))
}
else {
#
#
# Level by level threshold
#
threshv <- rep(0, length(levels))
for(lev in 1:length(levels)) {
d <- accessD(wst, level = levels[lev])
sigma <- dev(d)
threshv[lev] <- sqrt(2 * log(nc) * sigma)
}
}
}
else if(policy == "LSuniversal") {
if(verbose == TRUE)
cat("Local Spec universal threshold\n")
if(by.level == FALSE) {
#
# Global threshold
#
d <- NULL
for(lev in 1:length(levels)) {
d <- c(d, accessD(wst, level = levels[lev]))
}
sigma <- dev(d)
threshv <- log(nc) * sqrt(sigma)
threshv <- rep(threshv, length(levels))
}
else {
#
#
# Level by level threshold
#
threshv <- rep(0, length(levels))
for(lev in 1:length(levels)) {
d <- accessD(wst, level = levels[lev])
sigma <- dev(d)
threshv[lev] <- log(nc) * sqrt(sigma)
}
}
}
else if(policy == "sure") {
if(verbose == TRUE)
cat("SURE threshold\n")
if(by.level == FALSE) {
#
# Global threshold
#
d <- NULL
for(lev in 1:length(levels)) {
d <- c(d, accessD(wst, level = levels[lev]))
}
sigma <- sqrt(dev(d))
threshv <- sigma * sure(d/sigma)
threshv <- rep(threshv, length(levels))
}
else {
#
#
# Level by level threshold
#
threshv <- rep(0, length(levels))
for(lev in 1:length(levels)) {
d <- accessD(wst, level = levels[lev])
sigma <- sqrt(dev(d))
threshv[lev] <- sigma * sure(d/sigma)
}
}
}
else if(policy == "cv") {
if(verbose == TRUE)
cat("Cross-validation threshold\n")
ynoise <- AvBasis(wst)
if(by.level == TRUE) {
if(verbose == TRUE)
cat("by-level\n")
if(length(levels) != 1)
warning(
"Taking minimum level as first level for level-dependent cross-validation"
)
levels <- min(levels):(nlevelsWT(wst) - 1)
threshv <- wstCVl(ndata = ynoise, ll = min(levels),
type = type, filter.number = wst$filter$
filter.number, family = wst$filter$family, tol
= cvtol, verbose = 0, plot.it = FALSE, norm =
cvnorm, InverseType = "average")$xvthresh
if(verbose == TRUE)
cat("Cross-validation threshold is ", threshv,
"\n")
}
else {
if(verbose == TRUE)
cat("global\n")
threshv <- wstCV(ndata = ynoise, ll = min(levels), type
= type, filter.number = wst$filter$
filter.number, family = wst$filter$family, tol
= cvtol, verbose = 0, plot.it = FALSE, norm =
cvnorm, InverseType = "average")$xvthresh
threshv <- rep(threshv, length(levels))
}
}
else {
stop(paste("Unknown policy: ", policy))
}
if(verbose == TRUE) {
cat("Threshold is ")
print(threshv)
cat("\n")
}
#
#
# Now apply the threshold
#
if(return.threshold == TRUE)
return(threshv)
for(lev in 1:length(levels)) {
if(verbose == TRUE) {
cat("Applying threshold ", threshv[lev], " to level ",
levels[lev], "(type is ", type, ")\n")
}
d <- accessD(wst, level = levels[lev])
if(type == "hard")
d[abs(d) <= threshv[lev]] <- 0
else if(type == "soft")
d <- sign(d) * (abs(d) - threshv[lev]) * (abs(d) >
threshv[lev])
wst <- putD(wst, level = levels[lev], v = d)
}
wst$date <- c(wst$date, date())
if(add.history == TRUE)
wst$history <- c(wst$history, paste("Thresholded:", paste(
as.character(threshv), collapse = "; "), "Levels: ",
paste(as.character(levels), collapse = "; "),
"Policy: ", policy, "Type: ", type))
wst
}
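#
# Illustrative sketch (not run): denoising with the packet-ordered
# non-decimated transform -- threshold the wst object and average over all
# bases with AvBasis.  The noisy vector is a placeholder of dyadic length.
#
#   ynoise <- rnorm(512)
#   ywst <- wst(ynoise, filter.number = 10, family = "DaubLeAsymm")
#   ywstT <- threshold(ywst, policy = "universal", type = "soft")
#   yhat <- AvBasis(ywstT)
#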
"tpwd"<-
function(image, filter.number = 10, family = "DaubLeAsymm", verbose = FALSE)
{
if(!is.matrix(image))
stop("image should be a matrix")
nr <- nrow(image)
lr <- IsPowerOfTwo(nr)
if(is.na(lr))
stop(paste("Number of rows (", nr, ") should be a power of 2.")
)
nc <- ncol(image)
lc <- IsPowerOfTwo(nc)
if(is.na(lc))
stop(paste("Number of cols (", nc, ") should be a power of 2.")
)
bc <- "periodic"
type <- "wavelet"
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary condition")
ntype <- switch(type,
wavelet = 1,
station = 2) #
#
# Select the appropriate filter
#
if(verbose == TRUE)
cat("...done\nFilter...")
filter <- filter.select(filter.number = filter.number, family = family)
#
#
# Build the first/last database
#
if(verbose == TRUE)
cat("...selected\nFirst/last database...")
fl.dbaseR <- first.last(LengthH = length(filter$H), DataLength = nr,
type = type, bc = bc) #
fl.dbaseC <- first.last(LengthH = length(filter$H), DataLength = nc,
type = type, bc = bc) #
error <- 0
answer <- .C("tpwd",
image = as.double(image),
nr = as.integer(nr),
nc = as.integer(nc),
lr = as.integer(lr),
lc = as.integer(lc),
firstCr = as.integer(fl.dbaseR$first.last.c[, 1]),
lastCr = as.integer(fl.dbaseR$first.last.c[, 2]),
offsetCr = as.integer(fl.dbaseR$first.last.c[, 3]),
firstDr = as.integer(fl.dbaseR$first.last.d[, 1]),
lastDr = as.integer(fl.dbaseR$first.last.d[, 2]),
offsetDr = as.integer(fl.dbaseR$first.last.d[, 3]),
firstCc = as.integer(fl.dbaseC$first.last.c[, 1]),
lastCc = as.integer(fl.dbaseC$first.last.c[, 2]),
offsetCc = as.integer(fl.dbaseC$first.last.c[, 3]),
firstDc = as.integer(fl.dbaseC$first.last.d[, 1]),
lastDc = as.integer(fl.dbaseC$first.last.d[, 2]),
offsetDc = as.integer(fl.dbaseC$first.last.d[, 3]),
ntype = as.integer(ntype),
nbc = as.integer(nbc),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
error = as.integer(error), PACKAGE = "wavethresh")
theanswer <- list(tpwd = matrix(answer$image, nrow = nr, ncol = nc),
filter.number = filter.number, family = family, type = type, bc
= bc, date = date())
class(theanswer) <- "tpwd"
theanswer
}
"tpwr"<-
function(tpwdobj, verbose = FALSE)
{
if(!inherits(tpwdobj, "tpwd"))
stop("tpwdobj is not of class tpwd")
nr <- nrow(tpwdobj$tpwd)
lr <- IsPowerOfTwo(nr)
nc <- ncol(tpwdobj$tpwd)
lc <- IsPowerOfTwo(nc)
bc <- tpwdobj$bc
type <- tpwdobj$type
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
ntype <- switch(type,
wavelet = 1,
station = 2) #
#
# Select the appropriate filter
#
if(verbose == TRUE)
cat("...done\nFilter...")
filter <- filter.select(filter.number = tpwdobj$filter.number, family
= tpwdobj$family) #
#
# Build the first/last database
#
if(verbose == TRUE)
cat("...selected\nFirst/last database...")
fl.dbaseR <- first.last(LengthH = length(filter$H), DataLength = nr,
type = type, bc = bc) #
fl.dbaseC <- first.last(LengthH = length(filter$H), DataLength = nc,
type = type, bc = bc) #
error <- 0
answer <- .C("tpwr",
image = as.double(tpwdobj$tpwd),
nr = as.integer(nr),
nc = as.integer(nc),
lr = as.integer(lr),
lc = as.integer(lc),
firstCr = as.integer(fl.dbaseR$first.last.c[, 1]),
lastCr = as.integer(fl.dbaseR$first.last.c[, 2]),
offsetCr = as.integer(fl.dbaseR$first.last.c[, 3]),
firstDr = as.integer(fl.dbaseR$first.last.d[, 1]),
lastDr = as.integer(fl.dbaseR$first.last.d[, 2]),
offsetDr = as.integer(fl.dbaseR$first.last.d[, 3]),
firstCc = as.integer(fl.dbaseC$first.last.c[, 1]),
lastCc = as.integer(fl.dbaseC$first.last.c[, 2]),
offsetCc = as.integer(fl.dbaseC$first.last.c[, 3]),
firstDc = as.integer(fl.dbaseC$first.last.d[, 1]),
lastDc = as.integer(fl.dbaseC$first.last.d[, 2]),
offsetDc = as.integer(fl.dbaseC$first.last.d[, 3]),
ntype = as.integer(ntype),
nbc = as.integer(nbc),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
error = as.integer(error), PACKAGE = "wavethresh")
if(answer$error != 0)
stop(paste("Error code was ", answer$error))
theanswer <- matrix(answer$image, nrow = nr, ncol = nc)
theanswer
}
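#
# Illustrative sketch (not run): tpwd/tpwr form a tensor-product 2D wavelet
# transform and its inverse, so a round trip should recover the original
# matrix up to numerical error.  The matrix is a placeholder with dyadic
# dimensions.
#
#   m <- matrix(rnorm(64 * 64), nrow = 64, ncol = 64)
#   mtp <- tpwd(m)
#   mrec <- tpwr(mtp)
#   max(abs(m - mrec))                    # should be close to zero
#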
"uncompress"<-
function(...)
UseMethod("uncompress")
"uncompress.default"<-
function(v, verbose = FALSE, ...)
{
ctmp <- class(v)
if(is.null(ctmp)) {
stop("Object v has no class")
}
else if(ctmp == "uncompressed") {
if(verbose == TRUE)
cat("Not compressed\n")
return(unclass(v$vector))
}
else if(ctmp == "compressed") {
answer <- rep(0, length = v$original.length)
answer[v$position] <- v$values
if(verbose == TRUE)
cat("Uncompressed to length ", length(answer), "\n")
return(answer)
}
else stop("v has unknown class")
}
"uncompress.imwdc"<-
function(x, verbose = FALSE, ...)
{
if(verbose == TRUE)
cat("Argument checking\n")
ctmp <- class(x)
if(is.null(ctmp))
stop("imwd has no class")
else if(ctmp != c("imwdc"))
stop("imwd is not of class imwdc")
unsquished <- list(nlevels = nlevelsWT(x), fl.dbase = x$fl.dbase,
filter = x$filter, w0Lconstant = x$w0Lconstant, bc = x$
bc, type = x$type) #
#
# Go round loop compressing each set of coefficients
#
for(level in 0:(nlevelsWT(x)- 1)) {
if(verbose == TRUE)
cat("Level ", level, "\n\t")
nm <- lt.to.name(level, "CD")
if(verbose == TRUE)
cat("CD\t")
unsquished[[nm]] <- uncompress.default(x[[nm]], verbose =
verbose)
nm <- lt.to.name(level, "DC")
if(verbose == TRUE)
cat("\tDC\t")
unsquished[[nm]] <- uncompress.default(x[[nm]], verbose =
verbose)
nm <- lt.to.name(level, "DD")
if(verbose == TRUE)
cat("\tDD\t")
unsquished[[nm]] <- uncompress.default(x[[nm]], verbose =
verbose)
}
class(unsquished) <- "imwd"
if(verbose == TRUE)
cat("Overall inflation: Was: ", w <- object.size(x), " Now:",
s <- object.size(unsquished), " (", signif((100 * s)/w,
digits=3), "%)\n")
unsquished
}
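#
# Illustrative sketch (not run): thresholding an imwd object with the default
# settings produces a compressed imwdc object (many coefficients are exactly
# zero); uncompress expands it back to a full imwd.  The image is a
# placeholder with dyadic dimensions.
#
#   im <- matrix(rnorm(64 * 64), nrow = 64, ncol = 64)
#   imd <- imwd(im)
#   imdT <- threshold(imd)                # imwdc (compressed) object
#   imdU <- uncompress(imdT)              # back to a full imwd object
#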
"wavegrow"<-
function(n = 64, filter.number = 10, family = "DaubLeAsymm", type = "wavelet",
random = TRUE, read.value = TRUE, restart = FALSE)
{
nlev <- IsPowerOfTwo(n)
if(is.na(nlev))
stop("n is not a power of two")
coords <- vector("list", nlev)
if(type == "wavelet") {
x <- 1:(n/2)
coords[[nlev]] <- x
nn <- n/2
for(i in (nlev - 1):1) {
x1 <- x[seq(1, nn - 1, 2)]
x2 <- x[seq(2, nn, 2)]
x <- (x1 + x2)/2
nn <- nn/2
coords[[i]] <- x
}
}
else for(i in 1:nlev)
coords[[i]] <- 1:n
if(is.null(dev.list()))
stop("Please start 2 graphical devices before using me")
if(length(dev.list()) < 2)
stop("Please start another graphics device\n")
ndev <- length(dev.list())
gd1 <- dev.list()[ndev - 1]
gd2 <- dev.list()[ndev]
v <- rnorm(n, sd = 1e-10)
vwr <- v
vwdS <- wd(v, filter.number = filter.number, family = family, type = type)
toplev <- nlevelsWT(vwdS) - 1
ans <- "y"
while(ans == "y" | ans == "yes" | ans == "Y") {
dev.set(which = gd1)
ts.plot(v)
dev.set(which = gd2)
plot(vwdS, NotPlotVal = 0)
while(1) {
co <- locator(1)
if(is.null(co))
break
lev <- 1 + toplev - round(co$y)
cvec <- coords[[lev + 1]]
ix <- (cvec - co$x)^2
nvec <- length(cvec)
ix <- (1:nvec)[ix == min(ix)]
if(type == "station") {
ix <- ix - 2^(nlev - lev - 1)
ix <- ((ix - 1) %% n) + 1
}
cat("Level ", lev, " Coordinate ", ix, "\n")
if(random == TRUE)
new <- rnorm(1)
else {
if(read.value == TRUE) {
cat("Type in coefficient value ")
new <- scan(n = 1)
}
else new <- 1
}
v <- accessD(vwdS, lev = lev)
v[ix] <- new
vwdS <- putD(vwdS, lev = lev, v = v)
plot(vwdS, NotPlotVal = 0)
dev.set(which = gd1)
if(type == "station") {
vwdWST <- convert(vwdS)
vwr <- AvBasis(vwdWST)
}
else vwr <- wr(vwdS)
ts.plot(vwr)
dev.set(which = gd2)
if(restart == TRUE) {
v <- rep(1, n)
vwdS <- wd(v, filter.number = filter.number, family =
family, type = type)
}
}
cat("Do you want to continue? ")
ans <- readline()
if(ans == "y" | ans == "yes" | ans == "Y") {
v <- rnorm(n, sd = 1e-10)
vwdS <- wd(v, filter.number = filter.number, family = family,
type = type)
}
}
return(list(ts = vwr, wd = vwdS))
}
"wd.int"<-
function(data, preferred.filter.number, min.scale, precond)
{
storage.mode(data) <- "double"
storage.mode(preferred.filter.number) <- "integer"
storage.mode(min.scale) <- "integer"
storage.mode(precond) <- "logical"
size <- length(data)
storage.mode(size) <- "integer"
max.scale <- log(size, 2)
filter.history <- integer(max.scale - min.scale)
temp <- .C("dec",
vect = data,
size,
preferred.filter.number,
min.scale,
precond,
history = filter.history, PACKAGE = "wavethresh")
wav.int.object <- list(transformed.vector = temp$vect, current.scale =
min.scale, filters.used = temp$history, preconditioned =
precond, date = date())
return(wav.int.object)
}
"wd3D"<-
function(a, filter.number = 10, family = "DaubLeAsymm")
{
d <- dim(a)
if(length(d) != 3)
stop(paste("a is not a three-dimensional object"))
for(i in 1:3)
if(is.na(IsPowerOfTwo(d[i]))) stop(paste("Dimension ", i,
" of a is not of dyadic length"))
if(any(d != d[1]))
stop("Number of elements in each dimension is not identical")
error <- 0
nlevels <- IsPowerOfTwo(d[1])
H <- filter.select(filter.number = filter.number, family = family)$H
ans <- .C("wd3D",
Carray = as.double(a),
size = as.integer(d[1]),
H = as.double(H),
LengthH = as.integer(length(H)),
error = as.integer(error), PACKAGE = "wavethresh")
if(ans$error != 0)
stop(paste("Error code was ", ans$error))
l <- list(a = array(ans$Carray, dim = d), filter.number = filter.number,
family = family, date = date(), nlevels = nlevels)
class(l) <- "wd3D"
l
}
"wp"<-
function(data, filter.number = 10, family = "DaubLeAsymm", verbose = FALSE)
{
if(verbose == TRUE)
cat("Argument checking...")
DataLength <- length(data) #
#
# Check that we have a power of 2 data elements
#
nlevels <- log(DataLength)/log(2)
if(round(nlevels) != nlevels)
stop("The length of data is not a power of 2") #
if(verbose == TRUE) {
cat("There are ", nlevels, " levels\n")
}
#
# Select the appropriate filter
#
if(verbose == TRUE)
cat("...done\nFilter...")
filter <- filter.select(filter.number = filter.number, family = family)
#
#
# Compute the decomposition
#
if(verbose == TRUE)
cat("Decomposing...\n")
newdata <- c(rep(0, DataLength * nlevels), data)
wavelet.packet <- .C("wavepackde",
newdata = as.double(newdata),
DataLength = as.integer(DataLength),
levels = as.integer(nlevels),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)), PACKAGE = "wavethresh")
wpm <- matrix(wavelet.packet$newdata, ncol = DataLength, byrow = TRUE)
wp <- list(wp = wpm, nlevels = nlevels, filter = filter, date = date())
class(wp) <- "wp"
wp
}
"wpst"<-
function(data, filter.number = 10, family = "DaubLeAsymm", FinishLevel = 0)
{
nlev <- nlevelsWT(data)
n <- length(data)
	if(FinishLevel < 0)
		stop("FinishLevel must be nonnegative")
else if(FinishLevel >= nlev)
stop(paste("FinishLevel must be < ", nlev)) #
lansvec <- n * (2 * n - 1)
ansvec <- rep(0, lansvec) #
#
# Now create vector that keeps track of where levels start/stop
#
# Note that the vector avixstart stores index entry values in C
# notation. If you use it in Splus you'll have to add on 1
#
npkts <- function(level, nlev)
4^(nlev - level)
pktlength <- function(level)
2^level
avixstart <- rep(0, nlev + 1)
for(i in 1:nlev)
avixstart[i + 1] <- avixstart[i] + npkts(i - 1, nlev) *
pktlength(i - 1) #
#
# Copy in original data
#
ansvec[(avixstart[nlev + 1] + 1):lansvec] <- data #
#
# Call the C routine
#
filter <- filter.select(filter.number = filter.number, family = family)
ans <- .C("wpst",
ansvec = as.double(ansvec),
lansvec = as.integer(lansvec),
nlev = as.integer(nlev),
FinishLevel = as.integer(FinishLevel),
avixstart = as.integer(avixstart),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
error = as.integer(0), PACKAGE = "wavethresh")
rv <- list(wpst = ans$ansvec, nlevels = nlev, avixstart = avixstart,
filter = filter, date = date())
class(rv) <- "wpst"
rv
}
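#
# Illustrative sketch (not run): compute the non-decimated wavelet packet
# transform of a time series and extract one packet of coefficients.  The
# series is a placeholder of dyadic length.
#
#   x <- rnorm(128)
#   xwpst <- wpst(x, filter.number = 10, family = "DaubLeAsymm")
#   d40 <- accessD(xwpst, level = 4, index = 0)   # packet 0 at level 4
#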
"wpst2discr"<-
function(wpstobj, groups)
{
#
# Function to convert wpst object and associated groups vector into
# data matrix and k vector required as the input to the discr function.
#
# Input: wpstobj: a wpst object of a time-series
# groups: a vector of length ncases containing the group
# membership of each case.
#
# Returns: wpstm - a matrix. Number of rows is the number of cases
# The rows are ordered according to the group
# memberships of the cases. E.g. The first n1 rows
# contain the group 1 cases, the second n2 rows
# contain the group 2 cases, ... the ng rows
# contain the group g cases.
#
# level - a vector of length npkts. Each entry refers to
# the level that the col of wpstm comes from.
#
# pktix - a vector of length npkts. Each entry refers to
# the packet index that the col of wpstm comes from.
#
#
# k - a vector of length ng (the number of groups).
# k[1] contains the number of members for group 1,
# k[2] contains the number of members for group 2, ...
# k[ng] contains the number of members for group ng.
#
#
#
J <- nlev <- nlevelsWT(wpstobj)
grot <- compgrot(J, filter.number=2)
nbasis <- 2 * (2^nlev - 1)
ndata <- 2^nlev
m <- matrix(0, nrow = ndata, ncol = nbasis)
level <- rep(0, nbasis)
pktix <- rep(0, nbasis)
cnt <- 1
cat("Level: ")
for(j in 0:(nlev - 1)) {
cat(j, " ")
lcnt <- 0
npkts <- 2^(nlev - j)
prcnt <- as.integer(npkts/10)
if (prcnt == 0)
prcnt <- 1
for(i in 0:(npkts - 1)) {
pkcoef <- guyrot(accessD(wpstobj, level = j, index = i),
grot[J - j])/(sqrt(2)^(J - j))
m[, cnt] <- log(pkcoef^2)
level[cnt] <- j
pktix[cnt] <- i
lcnt <- lcnt + 1
cnt <- cnt + 1
if(lcnt %% prcnt == 0) {
lcnt <- 0
cat(".")
}
}
cat("\n")
}
cat("\n")
l <- list(m = m, groups = groups, level = level, pktix = pktix, nlevels = J)
class(l) <- "w2d"
l
}
"wpstCLASS"<-
function(newTS, wpstDO)
{
#
#
# Apply wpst to new TS
#
newwpst <- wpst(newTS, filter.number = wpstDO$filter$filter.number,
family = wpstDO$filter$family) #
#
# Extract the "best packets"
#
goodlevel <- wpstDO$BP$level
goodpkt <- wpstDO$BP$pkt
npkts <- length(goodpkt)
ndata <- length(newTS)
m <- matrix(0, nrow = ndata, ncol = npkts)
J <- nlevelsWT(newwpst)
grot <- compgrot(J, filter.number=2)
for(i in 1:npkts) {
j <- goodlevel[i]
m[, i] <- guyrot(accessD(newwpst, level = j, index = goodpkt[i]
), grot[J - j])/(sqrt(2)^(J - j))
m[, i] <- log(m[, i]^2)
}
mTd <- predict(wpstDO$BPd$dm, m)
l <- list(BasisMatrix=m, BasisMatrixDM=m%*%wpstDO$BPd$dm$scaling,
wpstDO=wpstDO, PredictedOP=mTd, PredictedGroups=mTd$class)
class(l) <- "wpstCL"
l
}
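#
# Illustrative sketch (not run): wpstCLASS classifies a new series using a
# discrimination object built by makewpstDO (defined elsewhere in the
# package; its exact arguments are assumed here).  xtrain and gtrain are
# placeholder training data and xnew is a new series of the same length.
#
#   wDO <- makewpstDO(timeser = xtrain, groups = gtrain)
#   pred <- wpstCLASS(xnew, wDO)
#   pred$PredictedGroups                  # predicted group labels
#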
"wr"<-
function(...)
UseMethod("wr")
"wr.int"<-
function(wav.int.object, ...)
{
data <- wav.int.object$transformed.vector
storage.mode(data) <- "double"
size <- length(data)
storage.mode(size) <- "integer"
filter.history <- wav.int.object$filters.used
storage.mode(filter.history) <- "integer"
current.scale <- wav.int.object$current.scale
storage.mode(current.scale) <- "integer"
precond <- wav.int.object$preconditioned
storage.mode(precond) <- "logical"
temp <- .C("rec",
vect = data,
size,
filter.history,
current.scale,
precond, PACKAGE = "wavethresh")
return(temp$vect)
}
"wr.mwd"<-
function(...)
{
#calling mwr directly would be better but
#just in case...
mwr(...)
}
"wr3D"<-
function(obj)
{
ClassObj <- class(obj)
if(is.null(ClassObj))
stop("obj has no class")
if(ClassObj != "wd3D")
stop("obj is not of class wd3D")
Carray <- obj$a
H <- filter.select(filter.number = obj$filter.number, family = obj$
family)$H
answer <- .C("wr3D",
Carray = as.double(Carray),
truesize = as.integer(dim(Carray)[1]),
H = as.double(H),
LengthH = as.integer(length(H)),
error = as.integer(0), PACKAGE = "wavethresh")
array(answer$Carray, dim = dim(Carray))
}
"wst2D"<-
function(m, filter.number = 10, family = "DaubLeAsymm")
{
nr <- nrow(m)
J <- IsPowerOfTwo(nr)
dimv <- c(J, 2 * nr, 2 * nr)
am <- array(0, dim = dimv)
filter <- filter.select(filter.number = filter.number, family = family)
error <- 0
ans <- .C("SWT2Dall",
m = as.double(m),
nm = as.integer(nr),
am = as.double(am),
J = as.integer(J),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
error = as.integer(error), PACKAGE = "wavethresh")
if(ans$error != 0)
stop(paste("Error code was ", ans$error))
l <- list(wst2D = array(ans$am, dim = dimv), nlevels = J, filter =
filter, date = date())
class(l) <- "wst2D"
l
}
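#
# Illustrative sketch (not run): the 2D packet-ordered non-decimated
# transform.  AvBasis (its wst2D method) averages over all bases and should
# return the original image when no coefficients have been altered.
#
#   m <- matrix(rnorm(64 * 64), nrow = 64, ncol = 64)
#   mwst2 <- wst2D(m)
#   mav <- AvBasis(mwst2)
#   max(abs(m - mav))                     # should be close to zero
#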
"wstCV"<-
function(ndata, ll = 3, type = "soft", filter.number = 10, family =
"DaubLeAsymm", tol = 0.01, verbose = 0, plot.it = FALSE, norm = l2norm,
InverseType = "average", uvdev = madmad)
{
nlev <- log(length(ndata))/log(2)
levels <- ll:(nlev - 1)
nwst <- wst(ndata, filter.number = filter.number, family = family)
uv <- threshold(nwst, levels = levels, type = type, policy =
"universal", dev = madmad, return.thresh = TRUE)[1]
if(verbose == 1)
cat("Now optimising cross-validated error estimate\n")
levels <- ll:(nlev - 2)
R <- 0.61803399000000003
C <- 1 - R
ax <- 0
bx <- uv/2
cx <- uv
x0 <- ax
x3 <- cx
if(abs(cx - bx) > abs(bx - ax)) {
x1 <- bx
x2 <- bx + C * (cx - bx)
}
else {
x2 <- bx
x1 <- bx - C * (bx - ax)
}
fa <- GetRSSWST(ndata, threshold = ax, levels = levels, type = type,
filter.number = filter.number, family = family, norm = norm,
verbose = verbose, InverseType = InverseType)
cat("Done 1\n")
fb <- GetRSSWST(ndata, threshold = bx, levels = levels, type = type,
filter.number = filter.number, family = family, norm = norm,
verbose = verbose, InverseType = InverseType)
cat("Done 2\n")
fc <- GetRSSWST(ndata, threshold = cx, levels = levels, type = type,
filter.number = filter.number, family = family, norm = norm,
verbose = verbose, InverseType = InverseType)
cat("Done 3\n")
f1 <- GetRSSWST(ndata, threshold = x1, levels = levels, type = type,
filter.number = filter.number, family = family, norm = norm,
verbose = verbose, InverseType = InverseType)
cat("Done 4\n")
f2 <- GetRSSWST(ndata, threshold = x2, levels = levels, type = type,
filter.number = filter.number, family = family, norm = norm,
verbose = verbose, InverseType = InverseType)
cat("Done 5\n")
xkeep <- c(ax, cx, x1, x2)
fkeep <- c(fa, fc, f1, f2)
if(plot.it == TRUE) {
plot(c(ax, bx, cx), c(fa, fb, fc))
text(c(x1, x2), c(f1, f2), lab = c("1", "2"))
}
cnt <- 3
while(abs(x3 - x0) > tol * (abs(x1) + abs(x2))) {
if(verbose > 0) {
cat("x0=", x0, "x1=", x1, "x2=", x2, "x3=", x3, "\n")
cat("f1=", f1, "f2=", f2, "\n")
}
if(f2 < f1) {
x0 <- x1
x1 <- x2
x2 <- R * x1 + C * x3
f1 <- f2
f2 <- GetRSSWST(ndata, threshold = x2, levels = levels,
type = type, filter.number = filter.number,
family = family, norm = norm, verbose = verbose,
InverseType = InverseType)
if(verbose == 2) {
cat("SSQ: ", signif(f2, digits=3), "\n")
}
else if(verbose == 1)
cat(".")
xkeep <- c(xkeep, x2)
fkeep <- c(fkeep, f2)
if(plot.it == TRUE)
text(x2, f2, lab = as.character(cnt))
cnt <- cnt + 1
}
else {
x3 <- x2
x2 <- x1
x1 <- R * x2 + C * x0
f2 <- f1
f1 <- GetRSSWST(ndata, threshold = x1, levels = levels,
type = type, filter.number = filter.number,
family = family, norm = norm, verbose = verbose,
InverseType = InverseType)
if(verbose == 2)
cat("SSQ: ", signif(f1, digits=3), "\n")
else if(verbose == 1)
cat(".")
xkeep <- c(xkeep, x1)
fkeep <- c(fkeep, f1)
if(plot.it == TRUE)
text(x1, f1, lab = as.character(cnt))
cnt <- cnt + 1
}
}
if(f1 < f2)
tmp <- x1
else tmp <- x2
x1 <- tmp/sqrt(1 - log(2)/log(length(ndata)))
if(verbose == 1)
cat("Correcting to ", x1, "\n")
else if(verbose == 1)
cat("\n")
g <- sort.list(xkeep)
xkeep <- xkeep[g]
fkeep <- fkeep[g]
if(verbose >= 1) {
cat("Reconstructing CV \n")
}
nwstT <- threshold(nwst, type = type, levels = levels, policy =
"manual", value = x1) #
#
# Now threshold the top level using universal thresholding
#
nwstT <- threshold(nwstT, type = type, levels = nlevelsWT(nwstT) - 1,
policy = "universal", dev = uvdev)
xvwr <- AvBasis.wst(nwstT)
list(ndata = ndata, xvwr = xvwr, xvwrWSTt = nwstT, uvt = uv, xvthresh
= x1, xkeep = xkeep, fkeep = fkeep)
}
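#
# Illustrative sketch (not run): wstCV chooses a single global threshold for
# the non-decimated transform by cross-validation; wstCVl (below) does the
# same level by level.  The noisy vector is a placeholder of dyadic length.
#
#   ynoise <- rnorm(512)
#   cvfit <- wstCV(ynoise, ll = 3, type = "soft")
#   yhat <- cvfit$xvwr                    # cross-validated estimate
#   cvfit$xvthresh                        # the chosen threshold
#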
"wstCVl"<-
function(ndata, ll = 3, type = "soft", filter.number = 10, family =
"DaubLeAsymm", tol = 0.01, verbose = 0, plot.it = FALSE, norm = l2norm,
InverseType = "average", uvdev = madmad)
{
nlev <- log(length(ndata))/log(2)
levels <- ll:(nlev - 2)
nwst <- wst(ndata, filter.number = filter.number, family = family)
uv <- threshold(nwst, levels = levels, type = type, policy =
"universal", dev = madmad, return.thresh = TRUE)[1]
if(verbose == 1)
cat("Now optimising cross-validated error estimate\n")
upper <- rep(uv, length(levels))
lower <- rep(0, length(levels))
start <- (lower + upper)/2
answer <- nlminb(start = start, objective = wvcvlrss, lower = lower,
upper = upper, ndata = ndata, levels = levels, type = type,
filter.number = filter.number, family = family, norm = norm,
verbose = verbose, InverseType = InverseType, control = list(rel.tol = tol))
x1 <- answer$par
if(verbose >= 2)
thverb <- TRUE
else thverb <- FALSE
xvwrWSTt <- threshold.wst(nwst, levels = levels, policy = "manual",
value = x1, verbose = thverb) #
# Now threshold the top level using universal thresholding
#
lastuvt <- threshold(xvwrWSTt, type = type, levels = nlevelsWT(xvwrWSTt) -
1, policy = "universal", dev = uvdev, return.thresh = TRUE)
xvwrWSTt <- threshold(xvwrWSTt, type = type, levels = nlevelsWT(xvwrWSTt) -
1, policy = "manual", value = lastuvt)
xvwr <- AvBasis.wst(xvwrWSTt)
list(ndata = ndata, xvwr = xvwr, xvwrWSTt = xvwrWSTt, uvt = uv,
xvthresh = c(x1, lastuvt), optres = answer)
}
"wvcvlrss"<-
function(threshold, ndata, levels, type, filter.number, family, norm, verbose,
InverseType)
{
answer <- GetRSSWST(ndata = ndata, threshold = threshold, levels =
levels, family = family, filter.number = filter.number, type =
type, norm = norm, verbose = verbose, InverseType = InverseType
)
return(answer)
}
"wvmoments"<-
function(filter.number = 10, family = "DaubLeAsymm", moment = 0,
scaling.function = FALSE)
{
WV <- draw.default(filter.number = filter.number, family = family,
plot.it = FALSE, enhance = FALSE, resolution = 32768, scaling.function =
scaling.function)
intfn <- function(x, moment, xwv, ywv)
{
x^moment * approx(x = xwv, y = ywv, xout = x, rule = 2)$y
}
plot(WV$x, intfn(WV$x, moment = moment, WV$x, WV$y), type = "l")
integrate(intfn, lower = -7, upper = 7, moment = moment, xwv = WV$x,
ywv = WV$y, subdivisions = 1000, keep.xy = TRUE)
}
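#
# Illustrative sketch (not run): wvmoments numerically integrates x^moment
# against a wavelet (or scaling function).  For any wavelet the zeroth
# moment should be essentially zero.
#
#   wvmoments(filter.number = 10, family = "DaubLeAsymm", moment = 0)
#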
"wvrelease"<-
function()
{
packageStartupMessage("WaveThresh: R wavelet software, release 4.7.2, installed\n")
packageStartupMessage("Copyright Guy Nason and others 1993-2022\n")
packageStartupMessage("Note: nlevels has been renamed to nlevelsWT\n")
}
"Chires5" <-
function(x, tau=1, J, filter.number=10, family="DaubLeAsymm", nT=20)
# data x
# fine tuning parameter tau
# resolution level J
# family and filter.number specify the scaling function to be used
# nT is the number of iterations performed in the Daubechies-Lagarias algorithm
{
# calculate support of father wavelet
sup <- support(filter.number, family)
sup <- c(sup$phi.lh, sup$phi.rh)
# extract filter coefficients
filcf <- filter.select(filter.number, family)$H
# calculate primary resolution
p <- tau * 2^J
# calculate bounds on translation
kmin <- ceiling(p*min(x)-sup[2])
kmax <- floor(p*max(x)-sup[1])
# create vector to put estimated coefficients in
chat <- rep(0, kmax-kmin+1)
# call C code!
error <- 0
ans <- .C("SFDE5",
x = as.double(x),
nx = as.integer(length(x)),
p = as.double(p),
filter = as.double(filcf),
nf = as.integer(2*filter.number - 1),
prec = as.integer(nT),
chat = as.double(chat),
kmin = as.integer(kmin),
kmax = as.integer(kmax),
philh = as.double(sup[1]),
phirh = as.double(sup[2]),
error = as.integer(error), PACKAGE = "wavethresh")
if (ans$error != 0)
stop(paste("PLDF2 function returned error code:", ans$error))
filter <- list(filter.number=filter.number, family=family)
res <- list(p=p, tau=tau, J=J)
list(coef=ans$chat, klim=c(ans$kmin, ans$kmax), p=ans$p, filter=filter,
n=length(x), res=res)
}
"Chires6" <-
function(x, tau=1, J, filter.number=10, family="DaubLeAsymm", nT=20)
# data x
# fine tuning parameter tau
# resolution level J
# family and filter.number specify the scaling function to be used
# nT is the number of iterations performed in the Daubechies-Lagarias algorithm
{
# calculate support of father wavelet
sup <- support(filter.number, family)
sup <- c(sup$phi.lh, sup$phi.rh)
# extract filter coefficients
filcf <- filter.select(filter.number, family)$H
# calculate primary resolution
p <- tau * 2^J
# calculate bounds on translation
kmin <- ceiling(p*min(x)-sup[2])
kmax <- floor(p*max(x)-sup[1])
# create output vector/matrix
ncoef <- kmax-kmin+1
chat <- rep(0, ncoef)
covar <- matrix(0, nrow=ncoef, ncol=(2*filter.number-1))
# call C code!
error <- 0
ans <- .C("SFDE6",
x = as.double(x),
nx = as.integer(length(x)),
p = as.double(p),
filter = as.double(filcf),
nf = as.integer(2*filter.number - 1),
prec = as.integer(nT),
chat = as.double(chat),
covar = as.double(covar),
kmin = as.integer(kmin),
kmax = as.integer(kmax),
philh = as.double(sup[1]),
phirh = as.double(sup[2]),
error = as.integer(error), PACKAGE = "wavethresh")
if (ans$error != 0)
stop(paste("PLDF2 function returned error code:", ans$error))
filter <- list(filter.number=filter.number, family=family)
res <- list(p=p, tau=tau, J=J)
list(coef=ans$chat, covar=matrix(ans$covar, nrow=ncoef),
klim=c(ans$kmin, ans$kmax), p=ans$p, filter=filter,
n=length(x), res=res)
}
"dclaw" <-
function(x)
{
den <- dnorm(x)/2
for(i in 0:4){
den <- den + dnorm(x, mean=(i/2-1), sd=1/10)/10
}
den
}
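#
# Illustrative sketch (not run): dclaw evaluates the "claw" density, a normal
# mixture often used as a density-estimation test case; rclaw and pclaw are
# assumed to be its companion sampling and distribution functions.
#
#   xg <- seq(-3, 3, length = 200)
#   plot(xg, dclaw(xg), type = "l")
#   x <- rclaw(500)                       # sample from the claw density
#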
"dencvwd" <-
function(hrproj, filter.number=hrproj$filter$filter.number,
family=hrproj$filter$family, type="wavelet", bc="zero", firstk=hrproj$klim,
RetFather=TRUE, verbose=FALSE)
{
image <- hrproj$covar
# Select wavelet filter
filter <- filter.select(filter.number = filter.number, family = family)
Csize <- nrow(image)
# Set-up first/last database
if(is.null(firstk))
firstk <- c(0, Csize-1)
if(verbose == TRUE)
cat("...selected\nFirst/last database...")
fl.dbase <- first.last.dh(LengthH = length(filter$H), DataLength = Csize,
bc = bc, type = type, firstk = firstk)
first.last.c <- fl.dbase$first.last.c
first.last.d <- fl.dbase$first.last.d
nlev <- nrow(first.last.d)
# Set up answer list
image.decomp <- list(nlevels = nlev, fl.dbase = fl.dbase, filter = filter,
type = type, bc = bc, date = date())
if(verbose == TRUE) cat("...built\n")
# Ok, go round loop doing decompositions
nbc <- switch(bc,
periodic = 1,
symmetric = 2,
zero = 3)
if(is.null(nbc))
stop("Unknown boundary handling")
if(type == "station" && bc == "symmetric")
stop("Cannot do stationary transform with symmetric boundary conditions"
)
ntype <- switch(type,
wavelet = 1,
station = 2)
if(is.null(ntype)) stop("Unknown type of transform")
# Load up original image
smoothed <- as.vector(image)
if(verbose == TRUE) {
cat(bc, " boundary handling\n")
cat("Decomposing...")
}
for(level in seq(nrow(first.last.d), 1, -1)) {
if(verbose == TRUE)
cat(level - 1, "")
LengthCin <- first.last.c[level+1, 2] - first.last.c[level+1, 1] + 1
LengthCout <- first.last.c[level, 2] - first.last.c[level, 1] + 1
LengthDout <- first.last.d[level, 2] - first.last.d[level, 1] + 1
ImCC <- rep(0, (LengthCout * (2*filter.number-1)))
ImDD <- rep(0, (LengthDout * (2*filter.number-1)))
error <- 0
z <- .C("StoDCDS",
C = as.double(smoothed),
Csize = as.integer(LengthCin),
firstCin = as.integer(first.last.c[level + 1, 1]),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
LengthCout = as.integer(LengthCout),
firstCout = as.integer(first.last.c[level, 1]),
lastCout = as.integer(first.last.c[level, 2]),
LengthDout = as.integer(LengthDout),
firstDout = as.integer(first.last.d[level, 1]),
lastDout = as.integer(first.last.d[level, 2]),
ImCC = as.double(ImCC),
ImDD = as.double(ImDD),
nbc = as.integer(nbc),
ntype = as.integer(ntype),
error = as.integer(error), PACKAGE = "wavethresh")
error <- z$error
if(error != 0) {
cat("Error was ", error, "\n")
stop("Error reported")
}
smoothed <- z$ImCC
if(RetFather == TRUE) {
nm <- lt.to.name(level - 1, "CC")
image.decomp[[nm]] <- matrix(z$ImCC, nrow=LengthCout)
}
nm <- lt.to.name(level - 1, "DD")
image.decomp[[nm]] <- matrix(z$ImDD, nrow=LengthDout)
}
if(verbose == TRUE)
cat("\nReturning answer...\n")
image.decomp$w0Lconstant <- smoothed
image.decomp$bc <- bc
image.decomp$date <- date()
class(image.decomp) <- "imwd"
l <- list(C=NULL, D=rep(0, fl.dbase$ntotal.d),
nlevels=nrow(fl.dbase$first.last.d), fl.dbase=fl.dbase,
filter=filter, type=type, bc=bc, date=date())
class(l) <- "wd"
for(level in 1:nlevelsWT(l)) {
covar <- image.decomp[[lt.to.name(level - 1, "DD")]]
l <- putD.wd(l, level-1, covar[,1], boundary=TRUE)
}
l
}
"denplot" <-
function(wr, coef, nT=20, lims, n=50)
# smoothed high level coefficients wr
# coef is the output from denproj for this analysis
# nT is the number of iterations performed in the Daubechies-Lagarias algorithm
# estimate is evaluated at n points between lims
{
p <- coef$res$p
filter <- coef$filter
# calculate support of father wavelet
sup <- support(filter$filter.number, filter$family)
sup <- c(sup$phi.lh, sup$phi.rh)
# extract filter coefficients
filcf <- filter.select(filter$filter.number, filter$family)$H
# create grid for drawing density estimate and vector to put values in
gx <- seq(lims[1], lims[2], length=n)
gy <- c(rep(0, length(gx)))
# find range of high resolution coefficients
kmin <- coef$klim[1]
kmax <- coef$klim[2]
# call C code!
error <- 0
ans <- .C("PLDE2",
C = as.double(wr),
p = as.double(p),
filter = as.double(filcf),
nf = as.integer(2*filter$filter.number - 1),
prec = as.integer(nT),
kmin = as.integer(kmin),
kmax = as.integer(kmax),
gx = as.double(gx),
gy = as.double(gy),
ng = as.integer(n),
philh = as.double(sup[1]),
phirh = as.double(sup[2]),
error = as.integer(error), PACKAGE = "wavethresh")
if (ans$error != 0)
stop(paste("PLDF2 function returned error code:", ans$error))
list(x=ans$gx, y=ans$gy)
}
"denproj" <-
function(x, tau=1, J, filter.number=10, family="DaubLeAsymm",
covar=FALSE, nT=20)
# data x
# fine tuning parameter tau
# resolution level J
# family and filter.number specify the scaling function to be used
# covar - logical variable indicating whether covariances should be calculated
# nT is the number of iterations performed in the Daubechies-Lagarias algorithm
{
if(covar)
ans <- Chires6(x, tau, J, filter.number, family, nT)
else
ans <- Chires5(x, tau, J, filter.number, family, nT)
ans
}
"denwd" <-
function(coef)
{
wd.dh(coef$coef, filter.number=coef$filter$filter.number,
family=coef$filter$family, bc="zero", firstk=coef$klim)
}
"denwr" <-
function(wd, start.level=0, verbose=FALSE, bc=wd$bc, return.object=FALSE,
filter.number=wd$filter$filter.number, family=wd$filter$family)
{
if(IsEarly(wd)) {
ConvertMessage()
stop()
}
if(verbose == TRUE)
cat("Argument checking...")
# Check class of wd
if(verbose == TRUE)
cat("Argument checking\n")
ctmp <- class(wd)
if(is.null(ctmp))
stop("wd has no class")
else if(ctmp != "wd")
stop("wd is not of class wd")
if(start.level < 0)
stop("start.level must be nonnegative")
if(start.level >= nlevelsWT(wd))
stop("start.level must be less than the number of levels")
if(is.null(wd$filter$filter.number))
stop("NULL filter.number for wd")
if(bc != wd$bc)
warning("Boundary handling is different to original")
if(wd$type == "station")
stop("Use convert to generate wst object and then AvBasis or InvBasis"
)
type <- wd$type
filter <- filter.select(filter.number = filter.number, family = family)
LengthH <- length(filter$H)
# Build the reconstruction first/last database
if(verbose == TRUE)
cat("...done\nFirst/last database...")
r.first.last.c <- wd$fl.dbase$first.last.c[(start.level+1):(nlevelsWT(wd)+1), ]
ntotal <- r.first.last.c[1,3] + r.first.last.c[1,2] -
r.first.last.c[1,1] + 1
names(ntotal) <- NULL
C <- accessC(wd, level = start.level, boundary = TRUE)
C <- c(rep(0, length = (ntotal - length(C))), C)
nlevels <- nlevelsWT(wd) - start.level
error <- 0
# Load object code
if(verbose == TRUE)
cat("...built\n")
if(verbose == TRUE) {
cat("Reconstruction...")
error <- 1
}
ntype <- switch(type,
wavelet = 1,
station = 2)
if(is.null(ntype))
stop("Unknown type of decomposition")
nbc <- switch(bc,
periodic = 1,
symmetric = 2,
zero = 3)
if(is.null(nbc))
stop("Unknown boundary handling")
if(!is.complex(wd$D)) {
wavelet.reconstruction <- .C("waverecons_dh",
C = as.double(C),
D = as.double(wd$D),
H = as.double(filter$H),
LengthH = as.integer(LengthH),
nlevels = as.integer(nlevels),
firstC = as.integer(r.first.last.c[, 1]),
lastC = as.integer(r.first.last.c[, 2]),
offsetC = as.integer(r.first.last.c[, 3]),
firstD = as.integer(wd$fl.dbase$first.last.d[, 1]),
lastD = as.integer(wd$fl.dbase$first.last.d[, 2]),
offsetD = as.integer(wd$fl.dbase$first.last.d[, 3]),
ntype = as.integer(ntype),
nbc = as.integer(nbc),
error = as.integer(error), PACKAGE = "wavethresh")
}
if(verbose == TRUE)
cat("done\n")
error <- wavelet.reconstruction$error
if(error != 0) {
cat("Error code returned from waverecons: ", error, "\n")
stop("waverecons returned error")
}
fl.dbase <- list(first.last.c=r.first.last.c,
ntotal=wavelet.reconstruction$LengthC,
	first.last.d=wd$fl.dbase$first.last.d, ntotal.d=wd$fl.dbase$ntotal.d)
if(!is.complex(wd$D)) {
l <- list(C=wavelet.reconstruction$C, D=wavelet.reconstruction$D,
fl.dbase=fl.dbase, nlevels=nlevelsWT(wavelet.reconstruction),
filter=filter, type=type, bc=bc, date=date())
}
class(l) <- "wd"
if(return.object == TRUE)
return(l)
else {
if(bc == "zero")
return(accessC(l, boundary = TRUE))
else return(accessC(l))
}
stop("Shouldn't get here\n")
}
"first.last.dh" <-
function(LengthH, DataLength, type = "wavelet",
bc = "periodic", firstk=c(0, DataLength-1))
{
if(type == "station" && bc != "periodic")
stop("Can only do periodic boundary conditions with station")
if(type != "station" && type != "wavelet")
stop("Type can only be wavelet or station")
if(bc=="periodic" || bc=="symmetric") {
levels <- log(DataLength)/log(2)
first.last.c <- matrix(0, nrow = levels + 1, ncol = 3,
dimnames = list(NULL, c("First", "Last", "Offset")))
first.last.d <- matrix(0, nrow = levels, ncol = 3,
dimnames = list(NULL, c("First", "Last", "Offset")))
}
if(bc == "periodic") {
# Periodic boundary correction
if(type == "wavelet") {
first.last.c[, 1] <- rep(0, levels + 1)
first.last.c[, 2] <- 2^(0:levels) - 1
first.last.c[, 3] <- rev(c(0, cumsum(rev(1 +
first.last.c[, 2]))[1:levels]))
first.last.d[, 1] <- rep(0, levels)
first.last.d[, 2] <- 2^(0:(levels - 1)) - 1
first.last.d[, 3] <- rev(c(0, cumsum(rev(1 +
first.last.d[, 2]))[1:(levels - 1)]))
ntotal <- 2 * DataLength - 1
ntotal.d <- DataLength - 1
}
else if(type == "station") {
first.last.c[, 1] <- rep(0, levels + 1)
first.last.c[, 2] <- 2^levels - 1
first.last.c[, 3] <- rev(c(0, cumsum(rev(1 +
first.last.c[, 2]))[1:levels]))
first.last.d[, 1] <- rep(0, levels)
first.last.d[, 2] <- 2^levels - 1
first.last.d[, 3] <- rev(c(0, cumsum(rev(1 +
first.last.d[, 2]))[1:(levels - 1)]))
ntotal <- (levels + 1) * 2^levels
ntotal.d <- levels * 2^levels
}
}
else if(bc == "symmetric") {
# Symmetric boundary reflection
first.last.c[levels + 1, 1] <- 0
first.last.c[levels + 1, 2] <- DataLength - 1
first.last.c[levels + 1, 3] <- 0
ntotal <- first.last.c[levels + 1, 2] - first.last.c[levels + 1,1] + 1
ntotal.d <- 0
for(i in levels:1) {
first.last.c[i, 1] <- trunc(0.5 * (1 - LengthH +
first.last.c[i + 1, 1]))
first.last.c[i, 2] <- trunc(0.5 * first.last.c[i + 1, 2])
first.last.c[i, 3] <- first.last.c[i + 1, 3] +
first.last.c[i + 1, 2] - first.last.c[i + 1, 1] + 1
first.last.d[i, 1] <- trunc(0.5 * (first.last.c[i + 1, 1] - 1))
first.last.d[i, 2] <- trunc(0.5 * (first.last.c[i + 1,
2] + LengthH - 2))
if(i != levels) {
first.last.d[i, 3] <- first.last.d[i + 1, 3] +
first.last.d[i + 1, 2] - first.last.d[i + 1, 1] + 1
}
ntotal <- ntotal + first.last.c[i, 2] - first.last.c[i,1] + 1
ntotal.d <- ntotal.d + first.last.d[i, 2] - first.last.d[i, 1] + 1
}
}
else if(bc=="zero") {
first.c <- firstk[1]
last.c <- firstk[2]
offset.c <- 0
first.d <- NULL
last.d <- NULL
offset.d <- 0
ntotal <- last.c - first.c + 1
ntotal.d <- 0
while( (first.c[1] > 2 - LengthH || first.c[1] < 1 - LengthH) ||
(last.c[1] > 0 || last.c[1] < -1) ) {
first.c <- c(ceiling(0.5*(first.c[1] - LengthH + 1)), first.c)
last.c <- c(floor(0.5*last.c[1]), last.c)
offset.c <- c(offset.c[1] + last.c[2] - first.c[2] +1, offset.c)
ntotal <- ntotal + last.c[1] - first.c[1] + 1
first.d <- c(ceiling(0.5*(first.c[2]-1)), first.d)
last.d <- c(floor(0.5*(last.c[2] + LengthH - 2)), last.d)
if(length(first.d) > 1)
offset.d <- c(offset.d[1] + last.d[2] -
first.d[2] + 1, offset.d)
ntotal.d <- ntotal.d + last.d[1] - first.d[1] +1
}
first.last.c <- matrix(c(first.c, last.c, offset.c), ncol=3,
dimnames=list(NULL, c("First", "Last", "Offset")))
first.last.d <- matrix(c(first.d, last.d, offset.d), ncol=3,
dimnames=list(NULL, c("First", "Last", "Offset")))
}
else {
stop("Unknown boundary correction method")
}
names(ntotal) <- NULL
names(ntotal.d) <- NULL
list(first.last.c = first.last.c, ntotal = ntotal, first.last.d =
first.last.d, ntotal.d = ntotal.d)
}
"pclaw" <-
function(q)
{
# Distribution function of the "claw" density: a mixture of a N(0, 1)
# component (weight 1/2) and five narrow N(i/2 - 1, 1/10) components
# (weight 1/10 each), i = 0, ..., 4
prob <- pnorm(q)/2
for(i in 0:4){
prob <- prob + pnorm(q, mean=(i/2-1), sd=1/10)/10
}
prob
}
"plotdenwd" <-
function(wd, xlabvals, xlabchars, ylabchars, first.level=0,
top.level=nlevelsWT(wd) - 1,
main="Wavelet Decomposition Coefficients", scaling="global", rhlab=FALSE,
sub, NotPlotVal=0.005, xlab="Translate",
ylab="Resolution Level", aspect="Identity", ...)
{
ctmp <- class(wd)
if(is.null(ctmp))
stop("wd has no class")
else if(ctmp != "wd")
stop("wd is not of class wd")
levels <- nlevelsWT(wd)
nlevels <- levels - first.level
cfac <- top.level - (levels-1)
sfac <- rep(2, nlevels) ^ c((nlevels-1):0)
first <- wd$fl.dbase$first.last.d[(first.level+1):levels,1]
first <- first * sfac + (sfac-1)/2
last <- wd$fl.dbase$first.last.d[(first.level+1):levels,2]
last <- last * sfac + (sfac-1)/2
xrange <- c(floor(min(first)), ceiling(max(last)))
type <- wd$type
if(type == "wavelet")
n <- 2^(levels-2)
if(missing(sub))
sub <- paste(switch(type,
wavelet = "Standard transform",
station = "Nondecimated transform"), wd$filter$name)
if(aspect != "Identity")
sub <- paste(sub, "(", aspect, ")")
plot(c(xrange[1], xrange[1], xrange[2], xrange[2]),
c(0, nlevels+1, nlevels+1, 0), type="n", xlab=xlab,
ylab=ylab, main=main, yaxt="n", xaxt="n", sub=sub, ...)
yll <- top.level:(first.level+cfac)
if(missing(ylabchars))
axis(2, at = 1:(nlevels), labels = yll)
else if(length(ylabchars) != nlevels)
stop(paste("Should have ", nlevels, " entries in ylabchars"))
else axis(2, at = 1:(nlevels), labels = ylabchars)
if(missing(xlabchars)) {
if(missing(xlabvals)) {
if(type == "wavelet") {
if(wd$bc != "zero") {
axx <- c(0, 2^(levels - 3), 2^(levels - 2),
2^(levels - 2) + 2^(levels - 3), 2^(levels - 1))
}
else {
jrange <- floor(logb(abs(xrange), 2))
xlabr <- sign(xrange) * 2^jrange
xsp <- diff(xlabr)
axx <- xlabr[1] + c(0, xsp/4, xsp/2, 3*xsp/4, xsp)
if((xlabr[2]+xsp/4) <= xrange[2])
axx <- c(axx, xlabr[2]+xsp/4)
if((xlabr[1]-xsp/4) >= xrange[1])
axx <- c(xlabr[1]-xsp/4, axx)
}
}
else axx <- c(0, 2^(levels - 2), 2^(levels - 1),
2^(levels - 1) + 2^(levels - 2), 2^levels)
axis(1, at = axx)
}
else {
lx <- pretty(xlabvals, n = 4)
cat("lx is ", lx, "\n")
if(lx[1] < min(xlabvals))
lx[1] <- min(xlabvals)
if(lx[length(lx)] > max(xlabvals))
lx[length(lx)] <- max(xlabvals)
cat("lx is ", lx, "\n")
xix <- NULL
for(i in 1:length(lx)) {
u <- (xlabvals - lx[i])^2
xix <- c(xix, (1:length(u))[u == min(u)])
}
axx <- xix
if(type == "wavelet")
axx <- xix/2
axl <- signif(lx, digits = 2)
axis(1, at = axx, labels = axl)
}
}
else axis(1, at = xlabvals, labels = xlabchars)
x <- 1:n
height <- 1
first.last.d <- wd$fl.dbase$first.last.d
axr <- NULL
if(scaling == "global") {
my <- 0
for(i in ((levels - 1):first.level)) {
y <- accessD(wd, i, boundary=TRUE, aspect = aspect)
my <- max(c(my, abs(y)))
}
}
if(scaling == "compensated") {
my <- 0
for(i in ((levels - 1):first.level)) {
y <- accessD(wd, i, boundary=TRUE, aspect = aspect) * 2^(i/2)
my <- max(c(my, abs(y)))
}
}
if(scaling == "super") {
my <- 0
for(i in ((levels - 1):first.level)) {
y <- accessD(wd, i, boundary=TRUE, aspect = aspect) * 2^i
my <- max(c(my, abs(y)))
}
}
shift <- 1
for(i in ((levels - 1):first.level)) {
y <- accessD(wd, i, boundary=TRUE, aspect = aspect)
if(type == "wavelet")
n <- first.last.d[i+1,2]-first.last.d[i+1,1]+1
else {
y <- y[c((n - shift + 1):n, 1:(n - shift))]
shift <- shift * 2
}
xplot <- seq(from=first[i-first.level+1], to=last[i-first.level+1], by=2^(nlevels-(i-first.level)-1))
ly <- length(y)
if(scaling == "by.level")
my <- max(abs(y))
if(scaling == "compensated")
y <- y * 2^(i/2)
if(scaling == "super")
y <- y * 2^i
if(my == 0) {
y <- rep(0, length(y))
}
else y <- (0.5 * y)/my
axr <- c(axr, my)
if(max(abs(y)) > NotPlotVal)
segments(xplot, height, xplot, height + y)
if(i != first.level) {
if(type == "wavelet") {
# x1 <- x[seq(1, n - 1, 2)]
# x2 <- x[seq(2, n, 2)]
# x <- (x1 + x2)/2
# x <- 1:n
}
height <- height + 1
}
}
if(rhlab == TRUE)
axis(4, at = 1:length(axr), labels = signif(axr, 3))
axr
}
"rclaw" <-
function(n)
{
# Generate n random samples from the "claw" density: with probability 1/2 a
# N(0, 1) draw is kept as is; otherwise it is rescaled into one of the five
# narrow N(i/2 - 1, 1/10) components, each chosen with equal probability
nx <- rnorm(n)
p <- runif(n)
nx[p<=0.5] <- nx[p<=0.5]/10 + (trunc(p[p<=0.5] * 10)/2 -1)
nx
}
"wd.dh" <-
function(data, filter.number = 10, family = "DaubLeAsymm",
type = "wavelet", bc = "periodic", firstk=NULL, verbose = FALSE)
{
if(verbose == TRUE)
cat("wd: Argument checking...")
if(!is.atomic(data))
stop("Data is not atomic")
DataLength <- length(data)
# Check that we have a power of 2 data elements if not using zero bcs
if(bc=="periodic" || bc=="symmetric") {
nlevels <- nlevelsWT(data)
if(is.na(nlevels)) stop("Data length is not power of two")
}
# Check for correct type
if(type != "wavelet" && type != "station")
stop("Unknown type of wavelet decomposition")
if(type == "station" && bc != "periodic")
stop("Can only do periodic boundary conditions with station")
# Select the appropriate filter
if(verbose == TRUE)
cat("...done\nFilter...")
filter <- filter.select(filter.number = filter.number, family = family)
# Build the first/last database
if(verbose == TRUE)
cat("...selected\nFirst/last database...")
fl.dbase <- first.last.dh(LengthH = length(filter$H), DataLength =
DataLength, type = type, bc = bc, firstk = firstk)
# Find number of levels in zero bc case
if(bc=="zero")
nlevels <- nrow(fl.dbase$first.last.d)
# Put in the data
C <- rep(0, fl.dbase$ntotal)
C[1:DataLength] <- data
if(verbose == TRUE)
error <- 1
else error <- 0
if(verbose == TRUE) cat("built\n")
# Compute the decomposition
if(verbose == TRUE)
cat("Decomposing...\n")
nbc <- switch(bc,
periodic = 1,
symmetric = 2,
zero = 3)
if(is.null(nbc))
stop("Unknown boundary condition")
ntype <- switch(type,
wavelet = 1,
station = 2)
if(is.null(filter$G)) {
wavelet.decomposition <- .C("wavedecomp_dh",
C = as.double(C),
D = as.double(rep(0, fl.dbase$ntotal.d)),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
nlevels = as.integer(nlevels),
firstC = as.integer(fl.dbase$first.last.c[, 1]),
lastC = as.integer(fl.dbase$first.last.c[, 2]),
offsetC = as.integer(fl.dbase$first.last.c[, 3]),
firstD = as.integer(fl.dbase$first.last.d[, 1]),
lastD = as.integer(fl.dbase$first.last.d[, 2]),
offsetD = as.integer(fl.dbase$first.last.d[, 3]),
ntype = as.integer(ntype),
nbc = as.integer(nbc),
error = as.integer(error), PACKAGE = "wavethresh")
}
if(verbose == TRUE)
cat("done\n")
error <- wavelet.decomposition$error
if(error != 0) {
		cat("Error ", error, " occurred in wavedecomp\n")
stop("Error")
}
if(is.null(filter$G)) {
l <- list(C = wavelet.decomposition$C, D =
wavelet.decomposition$D, nlevels =
nlevelsWT(wavelet.decomposition), fl.dbase = fl.dbase,
filter = filter, type = type, bc = bc, date = date())
}
class(l) <- "wd"
return(l)
}
|
/scratch/gouwar.j/cran-all/cranData/wavethresh/R/wavde.r
|
#' Approximate potential in one dimension
#'
#' @param f One-dimensional function representing the flow (right hand side of differential equation)
#' @param xs Vector of positions to evaluate
#' @param V0 (Optional) Value of V at the first element of xs. When left at the default, the potential is shifted so that its global minimum is 0
#'
#' @return The potential estimated at each point in xs
#' @export
#'
#' @author Pablo Rodríguez-Sánchez (\url{https://pabrod.github.io})
#' @references \url{https://arxiv.org/abs/1903.05615}
#'
#'
#' @seealso \code{\link{approxPot2D}, \link{deltaV}}
#'
#' @examples
#' # Flow
#' f = function(x) { sin(x) }
#'
#' # Sampling points
#' xs <- seq(0, 2*pi, length.out = 1e3)
#'
#' # Approximated potential
#' Vs <- approxPot1D(f, xs)
approxPot1D <- function(f, xs, V0 = 'auto') {
# Initialize
V <- vector(mode = 'numeric', length = length(xs))
# Assign initial value
# The algorithm is a recursion relationship. It needs an initial potential at the first integration point
if (V0 == 'auto') {
V[1] <- 0 # Assign any value, it will be overridden later
} else {
V[1] <- V0 # Assign the desired reference potential
}
# Compute
for(i in 2:length(xs)) {
temp <- deltaV(f, xs[i], xs[i-1])
V[i] <- V[i-1] + temp$dV
}
if(V0 == 'auto') {
V <- V - min(V) # Make V_min = 0
}
return(V)
}
#' Approximate potential in two dimensions
#'
#' @param f Two-dimensional function representing the flow (right hand side of differential equation)
#' @param xs Vector of x positions to evaluate
#' @param ys Vector of y positions to evaluate
#' @param V0 (Optional) Value of V at the first element of (xs,ys). When left at the default, the potential is shifted so that its global minimum is 0
#' @param mode (Optional) Integration mode. Options are horizontal, vertical and mixed (default)
#'
#' @return The potential estimated at each point (xs, ys)
#' @export
#'
#' @author Pablo Rodríguez-Sánchez (\url{https://pabrod.github.io})
#' @references \url{https://arxiv.org/abs/1903.05615}
#'
#'
#' @seealso \code{\link{approxPot1D}, \link{deltaV}}
#'
#' @examples
#' # Flow
#' f = function(x) {c(-x[1]*(x[1]^2 - 1.1), -x[2]*(x[2]^2 - 1))}
#'
#' # Sampling points
#' xs <- seq(-1.5, 1.5, length.out = 10)
#' ys <- seq(-1.5, 1.5, length.out = 15)
#'
#' # Approximated potential
#' Vs <- approxPot2D(f, xs, ys, mode = 'horizontal')
approxPot2D <- function(f, xs, ys, V0 = 'auto', mode = 'mixed') {
# Initialize
V <- matrix(0, nrow = length(xs), ncol = length(ys))
err <- matrix(0, nrow = length(xs), ncol = length(ys))
# Assign initial value
# The algorithm is a recursion relationship. It needs an initial potential at the first integration point
if (V0 == 'auto') {
V[1,1] <- 0 # Assign any value, it will be overridden later
} else {
V[1,1] <- V0 # Assign the desired reference potential
}
# Compute
# We first compute along the first column...
for(i in 2:length(xs)) {
temp <- deltaV(f, c(xs[i], ys[1]), c(xs[i-1], ys[1]))
V[i,1] <- V[i-1,1] + temp$dV
err[i,1] <- temp$err
}
# ... and then along the first row...
for(j in 2:length(ys)) {
temp <- deltaV(f, c(xs[1], ys[j]), c(xs[1], ys[j-1]))
V[1,j] <- V[1,j-1] + temp$dV
err[1,j] <- temp$err
}
# ... and last but not least, we fill the inside gaps
for(i in 2:length(xs)) {
for(j in 2:length(ys)) {
if(mode == 'horizontal') { # Sweep horizontally
temp <- deltaV(f, c(xs[i], ys[j]), c(xs[i-1], ys[j]))
V[i,j] <- V[i-1,j] + temp$dV
err[i,j] <- temp$err
} else if(mode == 'vertical') { # Sweep vertically
temp <- deltaV(f, c(xs[i], ys[j]), c(xs[i], ys[j-1]))
V[i,j] <- V[i,j-1] + temp$dV
err[i,j] <- temp$err
} else if(mode == 'mixed') { # Sweep in both directions, then take the mean
temp_hor <- deltaV(f, c(xs[i], ys[j]), c(xs[i-1], ys[j]))
V_hor <- V[i-1,j] + temp_hor$dV
temp_ver <- deltaV(f, c(xs[i], ys[j]), c(xs[i], ys[j-1]))
V_ver <- V[i,j-1] + temp_ver$dV
V[i,j] <- mean(c(V_hor, V_ver))
err[i,j] <- mean(c(temp_hor$err, temp_ver$err))
} else {
stop('Error: supported modes are horizontal, vertical and mixed (default)')
}
}
}
if(V0 == 'auto') {
V <- V - min(c(V)) # Make V_min = 0
}
return(list(V = V, err = err))
}
|
/scratch/gouwar.j/cran-all/cranData/waydown/R/approxPot.R
|
#' Approximate potential difference between two points
#'
#' @param x Position where we want to know the approximate potential
#' @param x0 Reference position (center of the Taylor expansion)
#' @param f Flow equations (right hand side of differential equation)
#' @param normType (default: 'f') Matrix norm used to compute the error
#'
#' @return A list containing the approximate potential difference between x and x0 and the estimated error
#' @export
#'
#' @author Pablo Rodríguez-Sánchez (\url{https://pabrod.github.io})
#' @references \url{https://arxiv.org/abs/1903.05615}
#'
#'
#' @seealso \code{\link{approxPot1D}, \link{approxPot2D}, \link{norm}}
#'
#' @examples
#' # One dimensional flow
#' f <- function(x) { cos(x) }
#'
#' # Evaluation points
#' x0 <- 1
#' x1 <- 1.02
#'
#' dV <- deltaV(f, x1, x0)
#'
#' # Two dimensional flow
#' f <- function(x) { c(
#' -2*x[1]*x[2],
#' -x[1]^2 - 1
#' )}
#'
#' # Evaluation points
#' x0 <- matrix(c(1,2), ncol = 1)
#' x1 <- matrix(c(0.98,2.01), ncol = 1)
#'
#' dV <- deltaV(f, x1, x0)
deltaV <- function(f, x, x0, normType='f') {
# Calculate the local Jacobian
J0 <- numDeriv::jacobian(f, x0)
# Perform the skew/symmetric decomposition
J_symm <- Matrix::symmpart(J0)
J_skew <- Matrix::skewpart(J0)
# Use J_symm to estimate the difference in potential as 2nd order Taylor expansion
#
# Detailed information available at https://arxiv.org/abs/1903.05615
dV <- as.numeric(
-f(x0) %*% (x - x0) + # Linear term
-0.5 * t(x-x0) %*% J_symm %*% (x - x0) # Quadratic term
)
# Use J_skew to estimate the relative error
#
# Detailed information available at https://arxiv.org/abs/1903.05615
rel_err <- norm(J_skew, type = normType)/(norm(J_skew, type = normType) + norm(J_symm, type = normType))
# Return
ls <- list(dV = dV, err = rel_err)
return(ls)
}
|
/scratch/gouwar.j/cran-all/cranData/waydown/R/deltaV.R
|
## ----setup, echo = FALSE, include=FALSE---------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----logo, echo=FALSE, fig.height=8.5, fig.pos="H", fig.align='center'--------
knitr::include_graphics('img/logo.png')
## ----libraries, echo=TRUE, message=FALSE--------------------------------------
library(waydown)
# To calculate some trajectories
library(deSolve)
# To plot our results
library(ggplot2)
# To arrange our plots in panels
library(latticeExtra)
library(gridExtra)
# For nicer plots
library(colorRamps)
## ----Allee-def----------------------------------------------------------------
r <- 1
A <- 0.5
K <- 1
f <- function(x) { r * x * (x/A - 1) * (1 - x/K) }
## ----Allee-points-------------------------------------------------------------
xs <- seq(0, 1.25, by = 0.01)
## ----Allee-algorithm, cache = TRUE--------------------------------------------
Vs <- approxPot1D(f, xs)
## ----Allee-plot---------------------------------------------------------------
plot(xs, Vs,
type = 'l', xlab = 'N', ylab = 'V')
## ----Four-def-----------------------------------------------------------------
f <- function(x) {c(-x[1]*(x[1]^2 - 1),
-x[2]*(x[2]^2 - 1))}
## ----Four-points--------------------------------------------------------------
xs <- seq(-1.5, 1.5, by = 0.025)
ys <- seq(-1.5, 1.5, by = 0.025)
## ----Four-algorithm, cache = TRUE---------------------------------------------
result <- approxPot2D(f, xs, ys)
## ----Four-extra, include=FALSE------------------------------------------------
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
eqPoints <- data.frame(x_eq = c(-1, -1, 0, 1, 1),
y_eq = c(-1, 1, 0, -1, 1),
equilibrium = factor(c('stable', 'stable', 'unstable', 'stable', 'stable')))
## ----Four-plot, echo=FALSE, message=FALSE, warning=FALSE----------------------
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
# geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
## ----Four-check---------------------------------------------------------------
max(result$err) == 0
## ----Curl-def-----------------------------------------------------------------
f <- function(x) {c(-x[2],
x[1])}
## ----Curl-points--------------------------------------------------------------
xs <- seq(-2, 2, by = 0.05)
ys <- seq(-2, 2, by = 0.05)
## ----Curl-algorithm, cache = TRUE---------------------------------------------
result <- approxPot2D(f, xs, ys)
## ----Curl-extra, include=FALSE------------------------------------------------
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
eqPoints <- data.frame(x_eq = c(0),
y_eq = c(0),
equilibrium = factor(c('unstable')))
## ----Curl-plot, echo=FALSE, message=FALSE, warning=FALSE----------------------
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
# geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
## ----Wadd-def-----------------------------------------------------------------
# Parameters
bx <- 0.2
ax <- 0.125
kx <- 0.0625
rx <- 1
by <- 0.05
ay <- 0.1094
ky <- 0.0625
ry <- 1
n <- 4
# Dynamics
f <- function(x) {c(bx - rx*x[1] + ax/(kx + x[2]^n),
by - ry*x[2] + ay/(ky + x[1]^n))}
## ----Wadd-points--------------------------------------------------------------
xs <- seq(0, 4, by = 0.05)
ys <- seq(0, 4, by = 0.05)
## ----Wadd-algorithm, cache = TRUE---------------------------------------------
result <- approxPot2D(f, xs, ys)
## ----Wadd-extra, include=FALSE------------------------------------------------
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
#
# Estimated with Wolfram Alpha
# Prompt: 0 = 0.2 - x + 0.125/(0.0625 + y^4); 0 = 0.05 - y + 0.1094/(0.0625 + x^4)
eqPoints <- data.frame(x_eq = c(0.213416, 0.559865, 2.19971),
y_eq = c(1.74417, 0.730558, 0.0546602),
equilibrium = factor(c('stable', 'unstable', 'stable')))
## ----Wadd-plot, echo=FALSE, message=FALSE, warning=FALSE----------------------
nbins <- 25
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
## ----Selkov-def---------------------------------------------------------------
# Parameters
a <- 0.1
b <- 0.5
# Dynamics
f <- function(x) {c(-x[1] + a*x[2] + x[1]^2*x[2],
b - a*x[2] - x[1]^2*x[2])}
## ----Selkov-solution, echo = FALSE--------------------------------------------
# Package desolve requires a slightly different syntax
f_dyn <- function(t, state, parameters) {
with(as.list(c(state, parameters)),{
# rate of change
df <- f(state)
dX <- df[1]
dY <- df[2]
# return the rate of change
list(c(dX, dY))
}) # end with(as.list ...
}
roi <- c(0, 2.5, 0, 2.5)
init_state <- c(1, .05)
ts <- seq(0, 1000, by = 0.01)
bs <- c(0.1, 0.6, 1.3)
for (b in bs) {
out <- ode(y = init_state, times = ts, func = f_dyn, parms = c(a = a, b = b))
colnames(out) <- c("time", "x", "y")
out <- as.data.frame(out)
xs <- seq(roi[1], roi[2], by = 0.05)
ys <- seq(roi[3], roi[4], by = 0.05)
result <- approxPot2D(f, xs, ys)
# Get the limit cycle attractor
attr <- dplyr::filter(as.data.frame(out), time > 0)
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle(sprintf("Error map. b = %.3f ", b)) +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
}
## ----VL-def-------------------------------------------------------------------
# Parameters
r <- 1
k <- 10
h <- 2
e <- 0.2
m <- 0.1
# Auxiliary function
g <- function(x) {1/(h + x)}
# Dynamics
f <- function(x) {c(r*x[1]*(1 - x[1]/k) -g(x[1])*x[1]*x[2],
e*g(x[1])*x[1]*x[2] - m*x[2])}
## ----VL-solution, echo = FALSE------------------------------------------------
# Package desolve requires a slightly different syntax
f_dyn <- function(t, state, parameters) {
with(as.list(c(state, parameters)),{
# rate of change
df <- f(state)
dX <- df[1]
dY <- df[2]
# return the rate of change
list(c(dX, dY))
}) # end with(as.list ...
}
parms <- c(r =r,
k = k,
h = h,
e = e,
m = m)
init_state <- c(1,2)
ts <- seq(0, 300, by = 0.01)
out <- ode(y = init_state, times = ts, func = f_dyn, parms = parms)
colnames(out) <- c("time", "x", "y")
out <- as.data.frame(out)
plot(out$x, out$y, type = 'l', asp = 1,
main = 'Trajectory', xlab = 'x (prey biomass)', ylab = 'y (predator biomass)')
## ----VL-points----------------------------------------------------------------
xs <- seq(0, 10, by = 0.05)
ys <- seq(0, 5, by = 0.05)
## ----VL-algorithm, cache = TRUE-----------------------------------------------
result <- approxPot2D(f, xs, ys)
## ----VL-extra, echo = FALSE---------------------------------------------------
# Get the limit cycle attractor
attr <- dplyr::filter(as.data.frame(out), time > 200)
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
eqPoints <- data.frame(x_eq = c(0),
y_eq = c(0),
equilibrium = factor(c('unstable')))
## ----VL-plot, echo=FALSE, message=FALSE, warning=FALSE------------------------
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
|
/scratch/gouwar.j/cran-all/cranData/waydown/inst/doc/examples.R
|
---
title: "Examples of usage"
author: "Pablo Rodríguez-Sánchez"
date: "`r Sys.Date()`"
output:
pdf_document:
html_document: default
vignette: >
%\VignetteIndexEntry{Examples of usage}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
---
```{r setup, echo = FALSE, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
```{r logo, echo=FALSE, fig.height=8.5, fig.pos="H", fig.align='center'}
knitr::include_graphics('img/logo.png')
```
# Installation
To install the package, please type
```
devtools::install_github("PabRod/waydown", ref = "master")
```
in your _R_ console.
## Libraries
In this vignette we will use the following libraries:
```{r libraries, echo=TRUE, message=FALSE}
library(waydown)
# To calculate some trajectories
library(deSolve)
# To plot our results
library(ggplot2)
# To arrange our plots in panels
library(latticeExtra)
library(gridExtra)
# For nicer plots
library(colorRamps)
```
# One dimensional examples
## Allee effect
A single-species population dynamics model with Allee effect is governed by the following differential equation:
$$
\frac{dN}{dt} = r N \left( \frac{N}{A} - 1 \right) \left( 1 - \frac{N}{K} \right)
$$
It is easy to see that this differential equation has three equilibrium points, $N = 0$, $N = K$ and $N = A$, all of them stable except the last one, $N = A$, which is unstable. We'll use the parameters $r = 1$, $A = 0.5$ and $K = 1$.
```{r Allee-def}
r <- 1
A <- 0.5
K <- 1
f <- function(x) { r * x * (x/A - 1) * (1 - x/K) }
```
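As a quick sanity check (a minimal sketch; `f` is vectorized, so it can be evaluated at all three candidate equilibria at once), the flow indeed vanishes at $N = 0$, $N = A$ and $N = K$:

```{r Allee-equilibria-check}
# The flow is zero at the three equilibria N = 0, A, K
f(c(0, A, K))
```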
We can use our method `approxPot1D` to approximate the potential function at a set of points. First, we have to create the points.
```{r Allee-points}
xs <- seq(0, 1.25, by = 0.01)
```
and then pass them to our algorithm:
```{r Allee-algorithm, cache = TRUE}
Vs <- approxPot1D(f, xs)
```
By plotting the result, we clearly see that the two stable equilibria appear at $N = 0$ and $N = K = 1$, and the unstable one at $N = A = 0.5$, as we expected.
```{r Allee-plot}
plot(xs, Vs,
type = 'l', xlab = 'N', ylab = 'V')
```
# Two dimensional examples
In this section we'll apply our method to a collection of two-dimensional systems.
## Synthetic examples
We generated some abstract, synthetic examples in order to test our method. Here we present some of them.
### A gradient system: the four well potential
In this section we'll deal with the two-dimensional differential equation given by:
$$
\begin{cases}
\frac{dx}{dt} = f(x,y) = -x(x^2-1) \\
\frac{dy}{dt} = g(x,y) = -y(y^2-1)
\end{cases}
$$
This is a gradient system (because $\frac{\partial f}{\partial y} = \frac{\partial g}{\partial x}$ everywhere). This means that the gradient - curl decomposition will have zero curl term everywhere and, thus, there exists a well-defined potential. In particular, the potential can be proven analytically to be:
$$
V(x,y) = \frac{x^2}{4}(x^2 - 2) + \frac{y^2}{4}(y^2 - 2) + V_0
$$
Let's try to compute it using our algorithm. First, we'll code our function as a vector:
```{r Four-def}
f <- function(x) {c(-x[1]*(x[1]^2 - 1),
-x[2]*(x[2]^2 - 1))}
```
Our region of interest is now two-dimensional. We need, thus, two vectors to create our grid of points:
```{r Four-points}
xs <- seq(-1.5, 1.5, by = 0.025)
ys <- seq(-1.5, 1.5, by = 0.025)
```
Now we are ready to apply `approxPot2D`:
```{r Four-algorithm, cache = TRUE}
result <- approxPot2D(f, xs, ys)
```
```{r Four-extra, include=FALSE}
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
eqPoints <- data.frame(x_eq = c(-1, -1, 0, 1, 1),
y_eq = c(-1, 1, 0, -1, 1),
equilibrium = factor(c('stable', 'stable', 'unstable', 'stable', 'stable')))
```
`result` is a list that contains two fields:
* `result$V` contains the estimated values of the potentials at each grid point
* `result$err` contains the estimated error at each grid point
By plotting them we see:
```{r Four-plot, echo=FALSE, message=FALSE, warning=FALSE}
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
# geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
```
Provided our example is a purely gradient system, it should not surprise us that the error is zero everywhere.
```{r Four-check}
max(result$err) == 0
```
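We can also compare the numerical result with the analytic potential given above, after shifting the latter so that its minimum is zero (which is what `approxPot2D` does when `V0 = 'auto'`). This is a minimal sketch; `V_analytic` is just an illustrative name introduced here, and the remaining deviation is the discretization error of the scheme:

```{r Four-analytic-check}
# Analytic potential evaluated on the same grid, shifted to have minimum zero
V_analytic <- outer(xs, ys, function(x, y) x^2/4 * (x^2 - 2) + y^2/4 * (y^2 - 2))
V_analytic <- V_analytic - min(V_analytic)

# Largest deviation between the numerical and the analytic potential
max(abs(result$V - V_analytic))
```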
### A non-gradient system
In this example we will apply our algorithm to the system given below:
$$
\begin{cases}
\frac{dx}{dt} = f(x,y) = -y \\
\frac{dy}{dt} = g(x,y) = x
\end{cases}
$$
This is an extreme case: the gradient - curl decomposition will give us zero gradient part everywhere. Let's feed this example to our algorithm and see what happens:
First, we code the dynamics in vector form:
```{r Curl-def}
f <- function(x) {c(-x[2],
x[1])}
```
Secondly, we define our region of interest:
```{r Curl-points}
xs <- seq(-2, 2, by = 0.05)
ys <- seq(-2, 2, by = 0.05)
```
And then we are ready to apply our algorithm:
```{r Curl-algorithm, cache = TRUE}
result <- approxPot2D(f, xs, ys)
```
```{r Curl-extra, include=FALSE}
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
eqPoints <- data.frame(x_eq = c(0),
y_eq = c(0),
equilibrium = factor(c('unstable')))
```
The resulting approximate potential is plotted below. Since this is a purely non-gradient system, we expect our pseudopotential not to be trustworthy. By calculating the error we can see that this is indeed the case.
```{r Curl-plot, echo=FALSE, message=FALSE, warning=FALSE}
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
# geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
```
The fact that the underlying equations are non-gradient has been captured by the algorithm.
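Indeed, for this linear flow the Jacobian is constant and purely antisymmetric, so its symmetric part, the part that generates the potential, vanishes identically. A minimal check (using `Matrix`, which `waydown` already uses internally):

```{r Curl-jacobian-check}
# Constant Jacobian of the flow (-y, x)
J <- rbind(c(0, -1),
           c(1,  0))

# Its symmetric (potential-generating) part is the zero matrix
Matrix::symmpart(J)
```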
## Biological examples
Here we apply our methods to some dynamical equations well known in biology. While the abstract equations in the previous sections can be manipulated to increase or decrease their curl to gradient ratio, equations describing natural dynamical systems don't allow such a manipulation. Once again, the error map will let us know if our system allows a pseudopotential or not.
### Simple regulatory gene network
A bistable network cell fate model can be described by the set of equations:
$$
\begin{cases}
\frac{dx}{dt} = f(x,y) = b_x - r_x x + \frac{a_x}{k_x + y^n} \\
\frac{dy}{dt} = g(x,y) = b_y - r_y y + \frac{a_y}{k_y + x^n}
\end{cases}
$$
Such a system represents two genes ($x$ and $y$) that inhibit each other. This circuit works as a toggle switch with two stable steady states, one with dominant $x$, the other with dominant $y$.
We can code it in vector form:
```{r Wadd-def}
# Parameters
bx <- 0.2
ax <- 0.125
kx <- 0.0625
rx <- 1
by <- 0.05
ay <- 0.1094
ky <- 0.0625
ry <- 1
n <- 4
# Dynamics
f <- function(x) {c(bx - rx*x[1] + ax/(kx + x[2]^n),
by - ry*x[2] + ay/(ky + x[1]^n))}
```
This set of equations is, in general, not gradient (because $\frac{\partial f}{\partial y} \neq \frac{\partial g}{\partial x}$). Nevertheless, we can use the method `approxPot2D` to compute the approximate potential.
First, we need to define our region of interest:
```{r Wadd-points}
xs <- seq(0, 4, by = 0.05)
ys <- seq(0, 4, by = 0.05)
```
And then we are ready to apply our algorithm:
```{r Wadd-algorithm, cache = TRUE}
result <- approxPot2D(f, xs, ys)
```
```{r Wadd-extra, include=FALSE}
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
#
# Estimated with Wolfram Alpha
# Prompt: 0 = 0.2 - x + 0.125/(0.0625 + y^4); 0 = 0.05 - y + 0.1094/(0.0625 + x^4)
eqPoints <- data.frame(x_eq = c(0.213416, 0.559865, 2.19971),
y_eq = c(1.74417, 0.730558, 0.0546602),
equilibrium = factor(c('stable', 'unstable', 'stable')))
```
The resulting approximate potential is plotted below. Since this is not a gradient system, it is advisable to also plot the estimated error. The areas in green represent a small approximation error, so the potential can be safely used in those regions.
```{r Wadd-plot, echo=FALSE, message=FALSE, warning=FALSE}
nbins <- 25
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
```
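The equilibrium points overlaid on these plots were computed externally. As a rough sketch (the starting guesses and the names `starts` and `eqs` are introduced here purely for illustration), they can also be recovered within R by minimizing $\|f\|^2$ from a guess near each of them:

```{r Wadd-equilibria}
# Recover the three equilibria by minimizing |f|^2 from nearby starting guesses
starts <- list(c(0.2, 1.7), c(0.6, 0.7), c(2.2, 0.1))
eqs <- lapply(starts, function(s) optim(s, function(x) sum(f(x)^2))$par)
do.call(rbind, eqs)
```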
### Selkov equation
The Selkov model for glycolysis reads like:
$$
\begin{cases}
\frac{dx}{dt} & = -x + ay + x^2 y \\
\frac{dy}{dt} & = b - a y - x^2 y
\end{cases}
$$
where $x$ and $y$ represent the concentrations of two chemicals. If we fix $a = 0.1$, such a system shows a limit cycle for $b \in [0.42, 0.79]$. A Hopf bifurcation happens at each end of this interval.
We can code it in vector form:
```{r Selkov-def}
# Parameters
a <- 0.1
b <- 0.5
# Dynamics
f <- function(x) {c(-x[1] + a*x[2] + x[1]^2*x[2],
b - a*x[2] - x[1]^2*x[2])}
```
This system is interesting because the Jacobian is independent of $b$, so our error map will remain constant along the bifurcation. Let's see what happens:
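Before running the sweep, we can check this claim numerically (a minimal sketch; the closure `f_b` and the test point `x_test` are introduced here for illustration only, and `numDeriv` is already a dependency of `waydown`):

```{r Selkov-b-check}
# b enters the flow only as an additive constant, so the Jacobian cannot depend on it
f_b <- function(b) function(x) c(-x[1] + a*x[2] + x[1]^2*x[2],
                                 b - a*x[2] - x[1]^2*x[2])
x_test <- c(1, 0.5)
all.equal(numDeriv::jacobian(f_b(0.1), x_test),
          numDeriv::jacobian(f_b(1.3), x_test))
```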
```{r Selkov-solution, echo = FALSE}
# Package desolve requires a slightly different syntax
f_dyn <- function(t, state, parameters) {
with(as.list(c(state, parameters)),{
# rate of change
df <- f(state)
dX <- df[1]
dY <- df[2]
# return the rate of change
list(c(dX, dY))
}) # end with(as.list ...
}
roi <- c(0, 2.5, 0, 2.5)
init_state <- c(1, .05)
ts <- seq(0, 1000, by = 0.01)
bs <- c(0.1, 0.6, 1.3)
for (b in bs) {
out <- ode(y = init_state, times = ts, func = f_dyn, parms = c(a = a, b = b))
colnames(out) <- c("time", "x", "y")
out <- as.data.frame(out)
xs <- seq(roi[1], roi[2], by = 0.05)
ys <- seq(roi[3], roi[4], by = 0.05)
result <- approxPot2D(f, xs, ys)
# Get the limit cycle attractor
attr <- dplyr::filter(as.data.frame(out), time > 0)
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle(sprintf("Error map. b = %.3f ", b)) +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
}
```
### Lotka-Volterra predator prey dynamics
Here we will use a variation of the classical Lotka-Volterra predator-prey model: the Rosenzweig-MacArthur model, which adds a function $g$ accounting for predator saturation and a carrying capacity $k$ for the prey growth.
The dynamics, with $x$ the prey biomass and $y$ the predator biomass, read:
$$
\begin{cases}
\frac{dx}{dt} = f(x,y) = r x (1 - \frac{x}{k}) - g(x) x y \\
\frac{dy}{dt} = g(x,y) = e g(x) x y - m y
\end{cases}
$$
with $g(x)$ being a saturation function:
$$
g(x) = \frac{1}{h + x}
$$
We can code it in vector form:
```{r VL-def}
# Parameters
r <- 1
k <- 10
h <- 2
e <- 0.2
m <- 0.1
# Auxiliary function
g <- function(x) {1/(h + x)}
# Dynamics
f <- function(x) {c(r*x[1]*(1 - x[1]/k) -g(x[1])*x[1]*x[2],
e*g(x[1])*x[1]*x[2] - m*x[2])}
```
Such a system has a limit cycle attractor, as we can see by simulating one of its trajectories (in particular, the one starting at $x = 1$ and $y = 2$):
```{r VL-solution, echo = FALSE}
# Package desolve requires a slightly different syntax
f_dyn <- function(t, state, parameters) {
with(as.list(c(state, parameters)),{
# rate of change
df <- f(state)
dX <- df[1]
dY <- df[2]
# return the rate of change
list(c(dX, dY))
}) # end with(as.list ...
}
parms <- c(r =r,
k = k,
h = h,
e = e,
m = m)
init_state <- c(1,2)
ts <- seq(0, 300, by = 0.01)
out <- ode(y = init_state, times = ts, func = f_dyn, parms = parms)
colnames(out) <- c("time", "x", "y")
out <- as.data.frame(out)
plot(out$x, out$y, type = 'l', asp = 1,
main = 'Trajectory', xlab = 'x (prey biomass)', ylab = 'y (predator biomass)')
```
For such a system, we expect our pseudopotential to have a high error over large portions of the phase plane. Let's check it. First, we need to define our region of interest:
```{r VL-points}
xs <- seq(0, 10, by = 0.05)
ys <- seq(0, 5, by = 0.05)
```
And then we are ready to apply our algorithm:
```{r VL-algorithm, cache = TRUE}
result <- approxPot2D(f, xs, ys)
```
```{r VL-extra, echo = FALSE}
# Get the limit cycle attractor
attr <- dplyr::filter(as.data.frame(out), time > 200)
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
eqPoints <- data.frame(x_eq = c(0),
y_eq = c(0),
equilibrium = factor(c('unstable')))
```
Even for highly non-gradient systems, our method will compute some pseudopotential. By plotting the estimated error, we can see that it is high almost everywhere. This means that our algorithm noticed that the system is highly non-gradient, and thus, the previously computed pseudopotential is of very limited use.
```{r VL-plot, echo=FALSE, message=FALSE, warning=FALSE}
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
```
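As a rough numerical summary of this observation (a minimal sketch), we can report the fraction of grid points whose estimated relative error exceeds one half:

```{r VL-error-fraction}
# Fraction of grid points where the estimated relative error exceeds 0.5
mean(result$err > 0.5)
```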
|
/scratch/gouwar.j/cran-all/cranData/waydown/inst/doc/examples.Rmd
|
---
title: "Examples of usage"
author: "Pablo Rodríguez-Sánchez"
date: "`r Sys.Date()`"
output:
pdf_document:
html_document: default
vignette: >
%\VignetteIndexEntry{Examples of usage}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
---
```{r setup, echo = FALSE, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
```{r logo, echo=FALSE, fig.height=8.5, fig.pos="H", fig.align='center'}
knitr::include_graphics('img/logo.png')
```
# Installation
To install the package, please type
```
devtools::install_github("PabRod/waydown", ref = "master")
```
in your _R_ console.
## Libraries
In this vignette we will use the following libraries:
```{r libraries, echo=TRUE, message=FALSE}
library(waydown)
# To calculate some trajectories
library(deSolve)
# To plot our results
library(ggplot2)
# To arrange our plots in panels
library(latticeExtra)
library(gridExtra)
# For nicer plots
library(colorRamps)
```
# One dimensional examples
## Allee effect
A single-species population dynamics model with Allee effect is governed by the following differential equation:
$$
\frac{dN}{dt} = r N \left( \frac{N}{A} - 1 \right) \left( 1 - \frac{N}{K} \right)
$$
It is easy to see that this differential equation has three equilibrium points, $N = 0$, $N = K$ and $N = A$, all of them stable except the last one, $N = A$, which is unstable. We'll use the parameters $r = 1$, $A = 0.5$ and $K = 1$.
```{r Allee-def}
r <- 1
A <- 0.5
K <- 1
f <- function(x) { r * x * (x/A - 1) * (1 - x/K) }
```
We can use our method `approxPot1D` to approximate the potential function at a set of points. First, we have to create the points.
```{r Allee-points}
xs <- seq(0, 1.25, by = 0.01)
```
and then pass them to our algorithm:
```{r Allee-algorithm, cache = TRUE}
Vs <- approxPot1D(f, xs)
```
By plotting the result, we clearly see that the two stable equilibria appear at $N = 0$ and $N = K = 1$, and the unstable one at $N = A = 0.5$, as we expected.
```{r Allee-plot}
plot(xs, Vs,
type = 'l', xlab = 'N', ylab = 'V')
```
# Two dimensional examples
In this section we'll apply our method to a collection of two-dimensional systems.
## Synthetic examples
We generated some abstract, synthetic examples in order to test our method. Here we present some of them.
### A gradient system: the four well potential
In this section we'll deal with the two-dimensional differential equation given by:
$$
\begin{cases}
\frac{dx}{dt} = f(x,y) = -x(x^2-1) \\
\frac{dy}{dt} = g(x,y) = -y(y^2-1)
\end{cases}
$$
This is a gradient system (because $\frac{\partial f}{\partial y} = \frac{\partial g}{\partial x}$ everywhere). This means that the gradient - curl decomposition will have zero curl term everywhere and, thus, there exists a well-defined potential. In particular, the potential can be proven analytically to be:
$$
V(x,y) = \frac{x^2}{4}(x^2 - 2) + \frac{y^2}{4}(y^2 - 2) + V_0
$$
Let's try to compute it using our algorithm. First, we'll code our function as a vector:
```{r Four-def}
f <- function(x) {c(-x[1]*(x[1]^2 - 1),
-x[2]*(x[2]^2 - 1))}
```
Our region of interest is now two-dimensional. We need, thus, two vectors to create our grid of points:
```{r Four-points}
xs <- seq(-1.5, 1.5, by = 0.025)
ys <- seq(-1.5, 1.5, by = 0.025)
```
Now we are ready to apply `approxPot2D`:
```{r Four-algorithm, cache = TRUE}
result <- approxPot2D(f, xs, ys)
```
```{r Four-extra, include=FALSE}
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
eqPoints <- data.frame(x_eq = c(-1, -1, 0, 1, 1),
y_eq = c(-1, 1, 0, -1, 1),
equilibrium = factor(c('stable', 'stable', 'unstable', 'stable', 'stable')))
```
`result` is a list that contains two fields:
* `result$V` contains the estimated values of the potentials at each grid point
* `result$err` contains the estimated error at each grid point
By plotting them we see:
```{r Four-plot, echo=FALSE, message=FALSE, warning=FALSE}
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
# geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
```
Provided our example is a purely gradient system, it should not surprise us that the error is zero everywhere.
```{r Four-check}
max(result$err) == 0
```
### A non-gradient system
In this example we will apply our algorithm to the system given below:
$$
\begin{cases}
\frac{dx}{dt} = f(x,y) = -y \\
\frac{dy}{dt} = g(x,y) = x
\end{cases}
$$
This is an extreme case: the gradient - curl decomposition will give us zero gradient part everywhere. Let's feed this example to our algorithm and see what happens:
First, we code the dynamics in vector form:
```{r Curl-def}
f <- function(x) {c(-x[2],
x[1])}
```
Secondly, we define our region of interest:
```{r Curl-points}
xs <- seq(-2, 2, by = 0.05)
ys <- seq(-2, 2, by = 0.05)
```
And then we are ready to apply our algorithm:
```{r Curl-algorithm, cache = TRUE}
result <- approxPot2D(f, xs, ys)
```
```{r Curl-extra, include=FALSE}
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
eqPoints <- data.frame(x_eq = c(0),
y_eq = c(0),
equilibrium = factor(c('unstable')))
```
The resulting approximate potential is plotted below. Since this is a purely non-gradient system, we expect our pseudopotential not to be trustworthy. By calculating the error we can see that this is indeed the case.
```{r Curl-plot, echo=FALSE, message=FALSE, warning=FALSE}
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
# geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
```
The fact that the underlying equations are non-gradient has been captured by the algorithm.
## Biological examples
Here we apply our methods to some dynamical equations well known in biology. While the abstract equations in the previous sections can be manipulated to increase or decrease their curl to gradient ratio, equations describing natural dynamical systems don't allow such a manipulation. Once again, the error map will let us know if our system allows a pseudopotential or not.
### Simple regulatory gene network
A bistable network cell fate model can be described by the set of equations:
$$
\begin{cases}
\frac{dx}{dt} = f(x,y) = b_x - r_x x + \frac{a_x}{k_x + y^n} \\
\frac{dy}{dt} = g(x,y) = b_y - r_y y + \frac{a_y}{k_y + x^n}
\end{cases}
$$
Such a system represents two genes ($x$ and $y$) that inhibit each other. This circuit works as a toggle switch with two stable steady states, one with dominant $x$, the other with dominant $y$.
We can code it in vector form:
```{r Wadd-def}
# Parameters
bx <- 0.2
ax <- 0.125
kx <- 0.0625
rx <- 1
by <- 0.05
ay <- 0.1094
ky <- 0.0625
ry <- 1
n <- 4
# Dynamics
f <- function(x) {c(bx - rx*x[1] + ax/(kx + x[2]^n),
by - ry*x[2] + ay/(ky + x[1]^n))}
```
This set of equations is, in general, not gradient (because $\frac{\partial f}{\partial y} \neq \frac{\partial g}{\partial x}$). Nevertheless, we can use the method `approxPot2D` to compute the approximate potential.
First, we need to define our region of interest:
```{r Wadd-points}
xs <- seq(0, 4, by = 0.05)
ys <- seq(0, 4, by = 0.05)
```
And then we are ready to apply our algorithm:
```{r Wadd-algorithm, cache = TRUE}
result <- approxPot2D(f, xs, ys)
```
```{r Wadd-extra, include=FALSE}
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
#
# Estimated with Wolfram Alpha
# Prompt: 0 = 0.2 - x + 0.125/(0.0625 + y^4); 0 = 0.05 - y + 0.1094/(0.0625 + x^4)
eqPoints <- data.frame(x_eq = c(0.213416, 0.559865, 2.19971),
y_eq = c(1.74417, 0.730558, 0.0546602),
equilibrium = factor(c('stable', 'unstable', 'stable')))
```
The resulting approximate potential is plotted below. Since this is not a gradient system, it is advisable to also plot the estimated error. The areas in green represent a small approximation error, so the potential can be safely used in those regions.
```{r Wadd-plot, echo=FALSE, message=FALSE, warning=FALSE}
nbins <- 25
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
```
### Selkov equation
The Selkov model for glycolysis reads like:
$$
\begin{cases}
\frac{dx}{dt} & = -x + ay + x^2 y \\
\frac{dy}{dt} & = b - a y - x^2 y
\end{cases}
$$
where $x$ and $y$ represent the concentrations of two chemicals. If we fix $a = 0.1$, such a system shows a limit cycle for $b \in [0.42, 0.79]$. At each end of this interval, a Hopf bifurcation occurs.
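For reference, setting both derivatives to zero shows that the system has a single fixed point,
$$
x^* = b, \qquad y^* = \frac{b}{a + b^2},
$$
whose stability changes at the Hopf bifurcations mentioned above.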
We can code it in vector form:
```{r Selkov-def}
# Parameters
a <- 0.1
b <- 0.5
# Dynamics
f <- function(x) {c(-x[1] + a*x[2] + x[1]^2*x[2],
b - a*x[2] - x[1]^2*x[2])}
```
This system is interesting because its Jacobian is independent of $b$, so our error map will remain constant across the bifurcation.
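Indeed, a direct computation gives
$$
J(x, y) =
\begin{pmatrix}
-1 + 2 x y & a + x^2 \\
-2 x y & -(a + x^2)
\end{pmatrix},
$$
which contains no $b$. Let's see what happens: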
```{r Selkov-solution, echo = FALSE}
# Package desolve requires a slightly different syntax
f_dyn <- function(t, state, parameters) {
with(as.list(c(state, parameters)),{
# rate of change
df <- f(state)
dX <- df[1]
dY <- df[2]
# return the rate of change
list(c(dX, dY))
}) # end with(as.list ...
}
roi <- c(0, 2.5, 0, 2.5)
init_state <- c(1, .05)
ts <- seq(0, 1000, by = 0.01)
bs <- c(0.1, 0.6, 1.3)
for (b in bs) {
out <- ode(y = init_state, times = ts, func = f_dyn, parms = c(a = a, b = b))
colnames(out) <- c("time", "x", "y")
out <- as.data.frame(out)
xs <- seq(roi[1], roi[2], by = 0.05)
ys <- seq(roi[3], roi[4], by = 0.05)
result <- approxPot2D(f, xs, ys)
  # Keep the computed trajectory (transient plus limit cycle)
attr <- dplyr::filter(as.data.frame(out), time > 0)
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle(sprintf("Error map. b = %.3f ", b)) +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
}
```
### Lotka-Volterra predator-prey dynamics
Here we will use a variation of the classical Lotka-Volterra predator-prey model: specifically, the Rosenzweig-MacArthur model, which adds a function $g$ accounting for predator saturation and a carrying capacity $k$ to the prey growth.
The dynamics, with $x$ the prey biomass and $y$ the predator biomass, read:
$$
\begin{cases}
\frac{dx}{dt} = r x (1 - \frac{x}{k}) - g(x) x y \\
\frac{dy}{dt} = e g(x) x y - m y
\end{cases}
$$
with $g(x)$ being a saturation function:
$$
g(x) = \frac{1}{h + x}
$$
We can code it in vector form:
```{r VL-def}
# Parameters
r <- 1
k <- 10
h <- 2
e <- 0.2
m <- 0.1
# Auxiliary function
g <- function(x) {1/(h + x)}
# Dynamics
f <- function(x) {c(r*x[1]*(1 - x[1]/k) -g(x[1])*x[1]*x[2],
e*g(x[1])*x[1]*x[2] - m*x[2])}
```
Such a system has a limit cycle attractor, as we can see by simulating one of its trajectories (specifically, the one starting at $x = 1$ and $y = 2$):
```{r VL-solution, echo = FALSE}
# Package desolve requires a slightly different syntax
f_dyn <- function(t, state, parameters) {
with(as.list(c(state, parameters)),{
# rate of change
df <- f(state)
dX <- df[1]
dY <- df[2]
# return the rate of change
list(c(dX, dY))
}) # end with(as.list ...
}
parms <- c(r =r,
k = k,
h = h,
e = e,
m = m)
init_state <- c(1,2)
ts <- seq(0, 300, by = 0.01)
out <- ode(y = init_state, times = ts, func = f_dyn, parms = parms)
colnames(out) <- c("time", "x", "y")
out <- as.data.frame(out)
plot(out$x, out$y, type = 'l', asp = 1,
main = 'Trajectory', xlab = 'x (prey biomass)', ylab = 'y (predator biomass)')
```
For such a system, we expect our pseudopotential to have a high error over large portions of the phase plane. Let's check it. First, we need to define our region of interest:
```{r VL-points}
xs <- seq(0, 10, by = 0.05)
ys <- seq(0, 5, by = 0.05)
```
And then we are ready to apply our algorithm:
```{r VL-algorithm, cache = TRUE}
result <- approxPot2D(f, xs, ys)
```
```{r VL-extra, echo = FALSE}
# Get the limit cycle attractor
attr <- dplyr::filter(as.data.frame(out), time > 200)
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
eqPoints <- data.frame(x_eq = c(0),
y_eq = c(0),
equilibrium = factor(c('unstable')))
```
Even for highly non-gradient systems, our method will compute some pseudopotential. By plotting the estimated error, we can see that it is high almost everywhere. This means that the algorithm has detected that the system is highly non-gradient, and thus the computed pseudopotential is of very limited use.
```{r VL-plot, echo=FALSE, message=FALSE, warning=FALSE}
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
```
/scratch/gouwar.j/cran-all/cranData/waydown/vignettes/examples.Rmd
#' Agreement coefficients and related methods
#'
#' These functions calculate the agreement coefficient and mean product
#' difference (MPD), as well as their systematic and unsystematic components,
#' from Ji and Gallo (2006). Agreement coefficients provides a useful
#' measurement of agreement between two data sets which is bounded, symmetrical,
#' and can be decomposed into systematic and unsystematic components;
#' however, it assumes a linear relationship between the two data sets and
#' treats both "truth" and "estimate" as being of equal quality, and as such may
#' not be a useful metric in all scenarios.
#'
#' Agreement coefficient values range from 0 to 1, with 1 indicating perfect
#' agreement. `truth` and `estimate` must be the same length. This function is
#' not explicitly spatial and as such can be applied to data with any number of
#' dimensions and any coordinate reference system.
#'
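#' In the notation of Ji and Gallo (2006), and as implemented below, the
#' agreement coefficient compares `truth` (\eqn{X}) and `estimate` (\eqn{Y}) as
#' \deqn{AC = 1 - \frac{SSD}{SPOD}}
#' where \eqn{SSD = \sum_{i}(X_i - Y_i)^2} and
#' \deqn{SPOD = \sum_{i} (|\bar{X} - \bar{Y}| + |X_i - \bar{X}|)(|\bar{X} - \bar{Y}| + |Y_i - \bar{Y}|).}
#'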
#' @inheritParams yardstick::rmse
#' @inheritParams ww_area_of_applicability
#'
#' @return
#' A tibble with columns .metric, .estimator, and .estimate and 1 row of values.
#' For grouped data frames, the number of rows returned will be the same as the number of groups.
#' For `_vec()` functions, a single value (or NA).
#'
#' @family agreement metrics
#' @family yardstick metrics
#'
#' @examples
#' # Calculated values match Ji and Gallo 2006:
#' x <- c(6, 8, 9, 10, 11, 14)
#' y <- c(2, 3, 5, 5, 6, 8)
#'
#' ww_agreement_coefficient_vec(x, y)
#' ww_systematic_agreement_coefficient_vec(x, y)
#' ww_unsystematic_agreement_coefficient_vec(x, y)
#' ww_systematic_mpd_vec(x, y)
#' ww_unsystematic_mpd_vec(x, y)
#' ww_systematic_rmpd_vec(x, y)
#' ww_unsystematic_rmpd_vec(x, y)
#'
#' example_df <- data.frame(x = x, y = y)
#' ww_agreement_coefficient(example_df, x, y)
#' ww_systematic_agreement_coefficient(example_df, x, y)
#' ww_unsystematic_agreement_coefficient(example_df, x, y)
#' ww_systematic_mpd(example_df, x, y)
#' ww_unsystematic_mpd(example_df, x, y)
#' ww_systematic_rmpd(example_df, x, y)
#' ww_unsystematic_rmpd(example_df, x, y)
#'
#' @references
#' Ji, L. and Gallo, K. 2006. "An Agreement Coefficient for Image Comparison."
#' Photogrammetric Engineering & Remote Sensing 72(7), pp 823–833,
#' doi: 10.14358/PERS.72.7.823.
#'
#' @export
ww_agreement_coefficient <- function(data, ...) {
UseMethod("ww_agreement_coefficient")
}
ww_agreement_coefficient <- new_numeric_metric(ww_agreement_coefficient, direction = "maximize")
#' @rdname ww_agreement_coefficient
#' @export
ww_agreement_coefficient.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "agreement_coefficient",
...
)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_agreement_coefficient_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
ww_agreement_coefficient_impl <- function(truth, estimate, ...) {
est_SSD <- calc_ssd(truth, estimate)
est_SPOD <- calc_spod(truth, estimate)
1 - est_SSD / est_SPOD
}
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_agreement_coefficient_impl,
...
)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_systematic_agreement_coefficient <- function(data, ...) {
UseMethod("ww_systematic_agreement_coefficient")
}
ww_systematic_agreement_coefficient <- new_numeric_metric(ww_systematic_agreement_coefficient, direction = "maximize")
#' @rdname ww_agreement_coefficient
#' @export
ww_systematic_agreement_coefficient.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "systematic_agreement_coefficient",
...
)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_systematic_agreement_coefficient_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
ww_systematic_agreement_coefficient_impl <- function(truth, estimate, ...) {
1 - (calc_spds(truth, estimate) / calc_spod(truth, estimate))
}
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_systematic_agreement_coefficient_impl,
...
)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_unsystematic_agreement_coefficient <- function(data, ...) {
UseMethod("ww_unsystematic_agreement_coefficient")
}
ww_unsystematic_agreement_coefficient <- new_numeric_metric(ww_unsystematic_agreement_coefficient, direction = "maximize")
#' @rdname ww_agreement_coefficient
#' @export
ww_unsystematic_agreement_coefficient.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "unsystematic_agreement_coefficient",
...
)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_unsystematic_agreement_coefficient_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
ww_unsystematic_agreement_coefficient_impl <- function(truth, estimate, ...) {
1 - (calc_spdu(truth, estimate) / calc_spod(truth, estimate))
}
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_unsystematic_agreement_coefficient_impl,
...
)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_unsystematic_mpd <- function(data, ...) {
UseMethod("ww_unsystematic_mpd")
}
ww_unsystematic_mpd <- new_numeric_metric(ww_unsystematic_mpd, direction = "maximize")
#' @rdname ww_agreement_coefficient
#' @export
ww_unsystematic_mpd.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "unsystematic_mpd",
...
)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_unsystematic_mpd_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_unsystematic_mpd_impl,
...
)
}
ww_unsystematic_mpd_impl <- function(truth, estimate, ...) {
calc_spdu(truth, estimate) / length(truth)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_systematic_mpd <- function(data, ...) {
UseMethod("ww_systematic_mpd")
}
ww_systematic_mpd <- new_numeric_metric(ww_systematic_mpd, direction = "maximize")
#' @rdname ww_agreement_coefficient
#' @export
ww_systematic_mpd.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "systematic_mpd",
...
)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_systematic_mpd_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_systematic_mpd_impl,
...
)
}
ww_systematic_mpd_impl <- function(truth, estimate, ...) {
calc_spds(truth, estimate) / length(truth)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_unsystematic_rmpd <- function(data, ...) {
UseMethod("ww_unsystematic_rmpd")
}
ww_unsystematic_rmpd <- new_numeric_metric(ww_unsystematic_rmpd, direction = "maximize")
#' @rdname ww_agreement_coefficient
#' @export
ww_unsystematic_rmpd.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "unsystematic_rmpd",
...
)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_unsystematic_rmpd_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
ww_unsystematic_rmpd_impl <- function(truth, estimate, ...) {
sqrt(ww_unsystematic_mpd_impl(truth, estimate, ...))
}
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_unsystematic_rmpd_impl,
...
)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_systematic_rmpd <- function(data, ...) {
UseMethod("ww_systematic_rmpd")
}
ww_systematic_rmpd <- new_numeric_metric(ww_systematic_rmpd, direction = "maximize")
#' @rdname ww_agreement_coefficient
#' @export
ww_systematic_rmpd.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "systematic_rmpd",
...
)
}
#' @rdname ww_agreement_coefficient
#' @export
ww_systematic_rmpd_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
ww_systematic_rmpd_impl <- function(truth, estimate, ...) {
sqrt(ww_systematic_mpd_impl(truth, estimate, ...))
}
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_systematic_rmpd_impl,
...
)
}
#' Return the sum of squared differences
#'
#' @inheritParams yardstick::rmse
#' @noRd
calc_ssd <- function(truth, estimate) sum((truth - estimate)^2)
#' Return Sum of Potential Difference from Ji and Gallo (2006)
#'
#' @inheritParams yardstick::rmse
#' @noRd
calc_spod <- function(truth, estimate) {
mean_truth <- mean(truth)
mean_estimate <- mean(estimate)
sum(
(abs(mean_truth - mean_estimate) + abs(truth - mean_truth)) *
(abs(mean_truth - mean_estimate) + abs(estimate - mean_estimate))
)
}
#' Return the coefficients of the GMFR line from Ji and Gallo (2006)
#'
#' @inheritParams yardstick::rmse
#' @noRd
gmfr <- function(truth, estimate) {
mean_truth <- mean(truth)
mean_estimate <- mean(estimate)
correlation_sign <- sign(stats::cor(truth, estimate))
b <- sqrt(
sum((truth - mean_truth)^2) /
sum((estimate - mean_estimate)^2)
)
b <- abs(b) * correlation_sign
a <- mean_truth - (b * mean_estimate)
list(
a = a,
b = b
)
}
#' Return the unsystematic sum product-difference from Ji and Gallo (2006)
#'
#' @inheritParams yardstick::rmse
#' @noRd
calc_spdu <- function(truth, estimate) {
mean_truth <- mean(truth)
mean_estimate <- mean(estimate)
gmfr_predict_truth <- gmfr(truth, estimate)
gmfr_predict_estimate <- gmfr(estimate, truth)
predicted_truth <- gmfr_predict_truth$a +
(gmfr_predict_truth$b * estimate)
predicted_estimate <- gmfr_predict_estimate$a +
(gmfr_predict_estimate$b * truth)
sum(
abs(estimate - predicted_estimate) *
abs(truth - predicted_truth)
)
}
#' Return the systematic sum product-difference from Ji and Gallo (2006)
#'
#' @inheritParams yardstick::rmse
#' @noRd
calc_spds <- function(truth, estimate) {
est_spdu <- calc_spdu(truth, estimate)
est_ssd <- calc_ssd(truth, estimate)
est_ssd - est_spdu
}
/scratch/gouwar.j/cran-all/cranData/waywiser/R/agreement_coefficient.R
#' Find the area of applicability
#'
#' This function calculates the "area of applicability" of a model, as
#' introduced by Meyer and Pebesma (2021). While the initial paper introducing
#' this method focused on spatial models, there is nothing inherently spatial
#' about the method; it can be used with any type of data (and, because it does
#' not care about the spatial arrangement of your data, can be used with 2D or
#' 3D spatial data, and with geographic or projected CRS).
#'
#' Predictions made on points "inside" the area of applicability should be as
#' accurate as predictions made on the data provided to `testing`.
#' That means that generally `testing` should be your final hold-out
#' set so that predictions on points inside the area of applicability are
#' accurately described by your reported model metrics.
#' When passing an `rset` object to `x`, predictions made on points "inside" the
#' area of applicability instead should be as accurate as predictions made on
#' the assessment sets during cross-validation.
#'
#' @section Differences from CAST:
#' This implementation differs from
#' Meyer and Pebesma (2021) (and therefore from CAST) when using cross-validated
#' data in order to minimize data leakage. Namely, in order to calculate
#' the dissimilarity index \eqn{DI_{k}}, CAST:
#'
#' 1. Rescales all data used for cross validation at once, lumping assessment
#' folds in with analysis data.
#' 2. Calculates a single \eqn{\bar{d}} as the mean distance between all points
#' in the rescaled data set, including between points in the same assessment
#' fold.
#' 3. For each point \eqn{k} that's used in an assessment fold, calculates
#' \eqn{d_{k}} as the minimum distance between \eqn{k} and any point in its
#' corresponding analysis fold.
#' 4. Calculates \eqn{DI_{k}} by dividing \eqn{d_{k}} by \eqn{\bar{d}} (which
#' was partially calculated as the distance between \eqn{k} and the rest of
#' the rescaled data).
#'
#' Because assessment data is used to calculate constants for rescaling analysis
#' data and \eqn{\bar{d}}, the assessment data may appear too "similar" to
#' the analysis data when calculating \eqn{DI_{k}}. As such, waywiser treats
#' each fold in an `rset` independently:
#'
#' 1. Each analysis set is rescaled independently.
#' 2. Separate \eqn{\bar{d}} are calculated for each fold, as the mean distance
#' between all points in the analysis set for that fold.
#' 3. Identically to CAST, \eqn{d_{k}} is the minimum distance between a point
#' \eqn{k} in the assessment fold and any point in the
#' corresponding analysis fold.
#' 4. \eqn{DI_{k}} is then found by dividing \eqn{d_{k}} by \eqn{\bar{d}},
#' which was calculated independently from \eqn{k}.
#'
#' Predictions are made using the full training data set, rescaled once (in
#' the same way as CAST), and the mean \eqn{\bar{d}} across folds, under the
#' assumption that the "final" model in use will be retrained using the entire
#' data set.
#'
#' In practice, this means waywiser produces very slightly higher \eqn{\bar{d}}
#' values than CAST and a slightly higher area of applicability threshold than
#' CAST when using `rset` objects.
#'
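#' As a rough illustration (a hypothetical helper, not part of waywiser's API,
#' assuming `analysis` and `assessment` are already rescaled and
#' importance-weighted numeric matrices), the per-fold calculation can be
#' sketched as:
#'
#' ```r
#' fold_di <- function(analysis, assessment) {
#'   # mean pairwise distance among analysis points (d-bar for this fold)
#'   d_bar <- mean(stats::dist(analysis))
#'   # nearest-neighbour distance from each assessment point to the analysis fold (d_k)
#'   d_k <- apply(assessment, 1, function(pt) {
#'     min(sqrt(colSums((t(analysis) - pt)^2)))
#'   })
#'   d_k / d_bar
#' }
#' ```
#'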
#' @param x Either a data frame, matrix, formula
#' (specifying predictor terms on the right-hand side), recipe
#' (from [recipes::recipe()]), or `rset` object, produced by resampling functions
#' from rsample or spatialsample.
#'
#' If `x` is a recipe, it should be the same one used to pre-process the data
#' used in your model. If the recipe used to build the area of applicability
#' doesn't match the one used to build the model, the returned area of
#' applicability won't be correct.
#'
#' @param y Optional: a recipe (from [recipes::recipe()]) or formula.
#'
#' If `y` is a recipe, it should be the same one used to pre-process the data
#' used in your model. If the recipe used to build the area of applicability
#' doesn't match the one used to build the model, the returned area of
#' applicability won't be correct.
#'
#' @param data The data frame representing your "training" data, when using the
#' formula or recipe methods.
#'
#' @param testing A data frame or matrix containing the data used to
#' validate your model. This should be the same data as used to calculate all
#' model accuracy metrics.
#'
#' If this argument is `NULL`, then this function will use the training data
#' (from `x` or `data`) to calculate within-sample distances.
#' This may result in the area of applicability threshold being set too high,
#' with the result that too many points are classed as "inside" the area of
#' applicability.
#'
#' @param importance Either:
#'
#' * A data.frame with two columns: `term`, containing the names of each
#' variable in the training and testing data, and `estimate`, containing
#' the (raw or scaled) feature importance for each variable.
#' * An object of class `vi` with at least two columns, `Variable` and
#' `Importance`.
#'
#' All variables in the training data (`x` or `data`, depending on the context)
#' must have a matching importance estimate, and all terms with importance
#' estimates must be in the training data.
#'
#' @param na_rm A logical of length 1, indicating whether observations (in both
#' training and testing) with `NA` values in predictors should be removed. Only
#' predictor variables are considered, and this value has no impact on
#' predictions (where `NA` values produce `NA` predictions). If `na_rm = FALSE`
#' and `NA` values are found, this function returns an error.
#'
#' @param ... Not currently used.
#'
#' @details
#'
#' This method assumes your model was fit using dummy variables in the place of
#' any non-numeric predictor, and that you have one importance score per
#' dummy variable. Having non-numeric predictors will cause this function to
#' fail.
#'
#' @return
#'
#' A `ww_area_of_applicability` object, which can be used with [predict()] to
#' calculate the distance of new data to the original training data, and
#' determine if new data is within a model's area of applicability.
#'
#' @family area of applicability functions
#'
#' @examplesIf rlang::is_installed("vip")
#' train <- vip::gen_friedman(1000, seed = 101) # ?vip::gen_friedman
#' test <- train[701:1000, ]
#' train <- train[1:700, ]
#' pp <- stats::ppr(y ~ ., data = train, nterms = 11)
#' metric_name <- ifelse(
#' packageVersion("vip") > package_version("0.3.2"),
#' "rsq",
#' "rsquared"
#' )
#'
#' importance <- vip::vi_permute(
#' pp,
#' target = "y",
#' metric = metric_name,
#' pred_wrapper = predict,
#' train = train
#' )
#'
#' aoa <- ww_area_of_applicability(y ~ ., train, test, importance = importance)
#' predict(aoa, test)
#'
#' # Equivalent methods for calculating AOA:
#' ww_area_of_applicability(train[2:11], test[2:11], importance)
#' ww_area_of_applicability(
#' as.matrix(train[2:11]),
#' as.matrix(test[2:11]),
#' importance
#' )
#'
#' @references
#' H. Meyer and E. Pebesma. 2021. "Predicting into unknown space? Estimating
#' the area of applicability of spatial prediction models," Methods in Ecology
#' and Evolution 12(9), pp 1620 - 1633, doi: 10.1111/2041-210X.13650.
#'
#' @export
ww_area_of_applicability <- function(x, ...) {
UseMethod("ww_area_of_applicability")
}
#' @exportS3Method
ww_area_of_applicability.default <- function(x, ...) {
cls <- class(x)[1]
rlang::abort(
c(
"`x` isn't a supported object type.",
i = "`ww_area_of_applicability()` can only handle data.frames, matrices, formulas, and recipes.",
x = glue::glue("`x` is a {cls}")
),
call = rlang::caller_env()
)
}
#' @exportS3Method
#' @rdname ww_area_of_applicability
ww_area_of_applicability.data.frame <- function(x, testing = NULL, importance, ..., na_rm = FALSE) {
rlang::check_dots_empty()
training <- hardhat::mold(x, NA_real_)
if (!is.null(testing)) testing <- hardhat::mold(testing, NA_real_)
create_aoa(training, testing, importance, ..., na_rm = na_rm)
}
#' @exportS3Method
#' @rdname ww_area_of_applicability
ww_area_of_applicability.matrix <- ww_area_of_applicability.data.frame
#' @exportS3Method
#' @rdname ww_area_of_applicability
ww_area_of_applicability.formula <- function(x, data, testing = NULL, importance, ..., na_rm = FALSE) {
rlang::check_dots_empty()
# This method is also used for recipes, which don't need blueprints:
blueprint <- NULL
if (inherits(x, "formula")) {
blueprint <- hardhat::default_formula_blueprint(indicators = "none")
}
training <- hardhat::mold(x, data, blueprint = blueprint)
processed_testing <- NULL
if (!is.null(testing)) {
processed_testing <- hardhat::mold(x, testing, blueprint = blueprint)
}
# Catch non-numeric, non-base classes
# cf https://github.com/tidymodels/hardhat/issues/219
if (any(
purrr::map_lgl(
as.data.frame(data)[names(data) %in% names(training$predictors)],
function(x) !(inherits(x, "numeric") || inherits(x, "integer"))
),
purrr::map_lgl(
as.data.frame(testing)[names(testing) %in% names(processed_testing$predictors)],
function(x) !(inherits(x, "numeric") || inherits(x, "integer"))
)
)) {
rlang::abort(
"All variables in `data` and `testing` must inherit either numeric or integer classes."
)
}
create_aoa(training, processed_testing, importance, ..., na_rm = na_rm)
}
#' @exportS3Method
#' @rdname ww_area_of_applicability
ww_area_of_applicability.recipe <- ww_area_of_applicability.formula
#' @exportS3Method
#' @rdname ww_area_of_applicability
ww_area_of_applicability.rset <- function(x, y = NULL, importance, ..., na_rm = FALSE) {
rlang::check_dots_empty()
rlang::check_installed("rsample")
if (missing(y) || identical(y, NULL) || identical(y, NA)) y <- NA_real_
aoa_calcs <- purrr::map(
x$splits,
function(rsplit) {
training <- rsample::analysis(rsplit)
testing <- rsample::assessment(rsplit)
if (identical(y, NA_real_)) {
training <- hardhat::mold(training, y)
testing <- hardhat::mold(testing, y)
} else {
training <- hardhat::mold(y, training)
testing <- hardhat::mold(y, testing)
}
create_aoa(
training,
testing,
importance,
include_di = TRUE,
na_rm = na_rm
)
}
)
aoa <- aoa_calcs[[1]]
training <- if (identical(y, NA_real_)) {
hardhat::mold(x$splits[[1]]$data, NA_real_)$predictors
} else {
hardhat::mold(y, x$splits[[1]]$data)$predictors
}
if (na_rm) {
training <- training[complete_cases(training), , drop = FALSE]
}
aoa$sds <- purrr::map_dbl(training, stats::sd)
aoa$means <- purrr::map_dbl(training, mean)
aoa$transformed_training <- standardize_and_weight(
training,
aoa$sds,
aoa$means,
aoa$importance
)
aoa$d_bar <- mean(unlist(purrr::map(aoa_calcs, purrr::chuck, "d_bar")))
di <- unlist(purrr::map(aoa_calcs, purrr::chuck, "di"))
aoa["di"] <- NULL
aoa$aoa_threshold <- calc_aoa(di)
aoa
}
# Comments reference section numbers from Meyer and Pebesma 2021
# (doi: 10.1111/2041-210X.13650)
#' Workhorse function to create AOA objects from all input types
#'
#' @inheritParams ww_area_of_applicability
#' @param include_di Boolean: include DI in the returned object? Necessary for
#' post-processing AOA objects when working with rsets.
#'
#' @return
#' A `ww_area_of_applicability` object, which can be used with [predict()] to
#' calculate the distance of new data to the original training data, and
#' determine if new data is within a model's area of applicability.
#'
#' @noRd
create_aoa <- function(training, testing, importance, na_rm, ..., include_di = FALSE) {
aoa <- list(
training = training$predictors,
class = "ww_area_of_applicability",
blueprint = training$blueprint
)
if (length(na_rm) != 1) {
rlang::abort("Only one value can be passed to `na_rm`.")
}
if (na_rm) {
aoa$training <- aoa$training[complete_cases(aoa$training), , drop = FALSE]
} else if (yardstick_any_missing(aoa$training, NULL, NULL)) {
rlang::abort(
c(
"Missing values in training data.",
i = "Either process your data to fix NA values, or set `na_rm = TRUE`."
)
)
}
aoa$testing <- check_di_testing(aoa$training, testing, na_rm)
if (nrow(aoa$training) == 0) {
rlang::abort(
"0 rows were passed as training data."
)
}
if (!is.null(testing) && nrow(aoa$testing) == 0) {
rlang::abort(
"0 rows were passed as testing data."
)
}
check_di_columns_numeric(aoa$training, aoa$testing)
aoa$importance <- check_di_importance(aoa$training, importance)
# 2.1 Standardization of predictor variables
# Store standard deviations and means of all predictors from training
# We'll save these to standardize `testing`
# Then scale & center `training`
aoa$sds <- purrr::map_dbl(aoa$training, stats::sd)
aoa$means <- purrr::map_dbl(aoa$training, mean)
aoa$transformed_training <- standardize_and_weight(
aoa$training,
aoa$sds,
aoa$means,
aoa$importance
)
if (!is.null(aoa$testing)) {
# We can freely overwrite testing here;
# we don't need the untransformed version
aoa$testing <- standardize_and_weight(
aoa$testing,
aoa$sds,
aoa$means,
aoa$importance
)
}
aoa$d_bar <- calc_d_bar(aoa$transformed_training)
aoa$di <- calc_di(aoa$transformed_training, aoa$testing, aoa$d_bar)
aoa$aoa_threshold <- calc_aoa(aoa$di)
if (isTRUE(all.equal(aoa$aoa_threshold, 0))) {
rlang::warn(
c(
"The AOA threshold was 0, which is usually unexpected.",
i = "Did you accidentally pass the same data as testing and training?"
)
)
}
aoa <- aoa[c(
"transformed_training",
"sds",
"means",
"importance",
"di",
"d_bar",
"aoa_threshold",
"blueprint",
"class"
)]
if (!include_di) aoa["di"] <- NULL
do.call(hardhat::new_model, aoa)
}
#' Validate "testing" objects and reorder columns to match training data
#'
#' @param training The data frame representing your "training" data.
#'
#' @param testing The output from [hardhat::mold()],
#' containing the data used to validate your model.
#' This should be the same data as used to calculate all model accuracy metrics.
#'
#' @return `testing`, with columns re-ordered to match `training`
#'
#' @noRd
check_di_testing <- function(training, testing, na_rm = FALSE) {
# If NULL, nothing to validate or re-order, so just return NULL
if (is.null(testing)) {
return(NULL)
}
testing <- testing$predictors
if (!is.na(na_rm) && na_rm) {
testing <- testing[complete_cases(testing), , drop = FALSE]
} else if (!is.na(na_rm) && yardstick_any_missing(testing, NULL, NULL)) {
rlang::abort(
c(
"Missing values in testing data.",
i = "Either process your data to fix NA values, or set `na_rm = TRUE`."
)
)
}
# Make sure that the testing set has the same columns, in the same order,
# as the original training data
if (!all(names(training) %in% names(testing))) {
rlang::abort(
"Some columns in `training` were not present in `testing` (or `new_data`)."
)
}
# Re-order testing so that its columns are guaranteed to be in the
# same order as those in `training`
testing[names(training)]
}
#' Validate "importance" objects
#'
#' @param training The data frame representing your "training" data.
#'
#' @param importance Any object accepted by [tidy_importance()].
#'
#' @return A standardized importance data frame, with columns "term" and
#' "importance", with terms ordered in the same order as columns in training
#' data.
#'
#' @noRd
check_di_importance <- function(training, importance) {
importance <- tidy_importance(importance)
# Make sure that all training variables have importance values
#
# Because we've already called check_di_testing, this also means all
# predictors in `testing` have importance values
all_importance <- all(names(training) %in% importance[["term"]])
if (!all_importance) {
rlang::abort(
"All predictors must have an importance value in `importance`.",
call = rlang::caller_env(2)
)
}
all_variables <- all(importance[["term"]] %in% names(training))
if (!all_variables) {
rlang::abort(
"All variables with an importance value in `importance` must be included as predictors.",
call = rlang::caller_env(2)
)
}
# Re-order `importance`'s rows
# so they match the column order of `training` and `testing`
importance_order <- purrr::map_dbl(
names(training),
~ which(importance[["term"]] == .x)
)
importance[importance_order, ]
}
#' Validate all predictor columns are numeric
#'
#' @param training The data frame representing your "training" data.
#'
#' @param testing The output from [check_di_testing()].
#'
#' @return TRUE, invisibly.
#'
#' @noRd
check_di_columns_numeric <- function(training, testing) {
col_is_numeric <- c(
purrr::map_lgl(training, is.numeric),
purrr::map_lgl(testing, is.numeric)
)
if (!all(col_is_numeric)) {
rlang::abort(
"All predictors must be numeric.",
call = rlang::caller_env(4)
)
}
return(invisible(TRUE))
}
#' Center and scale variables, and weight by variable importance
#'
#' @param dat A data frame with all numeric columns
#' @param sds The standard deviation of each variable in `dat`, in the same
#' order as `dat`
#' @param means The mean of each variable in `dat`, in the same order as `dat`
#' @param importance The output of [check_di_importance()].
#'
#' @return A data.frame in the same shape as `dat`, with standardized and
#' weighted variables.
#'
#' @noRd
standardize_and_weight <- function(dat, sds, means, importance) {
# 2.1 Standardize
dat <- sweep(dat, 2, means, "-")
dat <- sweep(dat, 2, sds, "/")
# 2.2 Weighting of variables
sweep(dat, 2, importance[["estimate"]], "*")
}
#' Calculate d_bar: The mean distance between training points.
#'
#' @param training The data frame representing your "training" data, after being
#' run through [standardize_and_weight()].
#'
#' @return A numeric of length 1.
#'
#' @noRd
calc_d_bar <- function(training) {
# 2.4 Dissimilarity index
  # Find the mean pairwise distance between all training points:
dists <- fields::rdist(training)
diag(dists) <- NA
Matrix::mean(dists, na.rm = TRUE)
}
#' Calculate di
#'
#' di is the nearest neighbor distance of each point in "testing" to the
#' training data (or, in the absence of testing data, of each point in training
#' to the rest of the training set), divided by d_bar.
#'
#' @param training The data frame representing your "training" data, after being
#' run through [standardize_and_weight()].
#'
#' @param testing The data frame representing your "testing" data, after being
#' run through [standardize_and_weight()].
#'
#' @param d_bar The output of [calc_d_bar()].
#'
#' @return A numeric of length `nrow(testing)`, or of length `nrow(training)` when `testing` is `NULL`.
#'
#' @noRd
calc_di <- function(training, testing, d_bar) {
# 2.3 Multivariate distance calculation
# Calculates the distance between each point in the `testing` set
# (or `training`, if `testing` is `NULL`)
# to the closest point in the training set
if (is.null(testing)) {
dk <- FNN::knn.dist(training, 1)[, 1]
} else {
dk <- FNN::knnx.dist(training, testing, 1)[, 1]
}
# Use d_bar to rescale dk from 2.3
dk / d_bar
}
#' Calculate the area of applicability threshold
#'
#' @param di The output of [calc_di()].
#'
#' @return A numeric of length 1.
#'
#' @noRd
calc_aoa <- function(di) {
# Section 2.5 in Meyer and Pebesma
as.vector(
stats::quantile(di, 0.75) +
(1.5 * stats::IQR(di))
)
}
#' Predict from a `ww_area_of_applicability`
#'
#' @param object A `ww_area_of_applicability` object.
#'
#' @param new_data A data frame or matrix of new samples.
#'
#' @param ... Not used.
#'
#' @details The function computes the distance indices of the new data and
#' whether or not they are "inside" the area of applicability.
#'
#' @return
#'
#' A tibble of predictions, with two columns: `di`, numeric, contains the
#' "dissimilarity index" of each point in `new_data`, while `aoa`, logical,
#' contains whether a row is inside (`TRUE`) or outside (`FALSE`) the area of
#' applicability.
#'
#' Note that this function is often called using
#' [terra::predict()], in which case `aoa` will be converted to numeric
#' implicitly; `1` values correspond to cells "inside" the area of applicability
#' and `0` corresponds to cells "outside" the AOA.
#'
#' The number of rows in the tibble is guaranteed
#' to be the same as the number of rows in `new_data`. Rows with `NA` predictor
#' values will have `NA` `di` and `aoa` values.
#'
#' @family area of applicability functions
#'
#' @examplesIf rlang::is_installed("vip")
#' library(vip)
#' train <- gen_friedman(1000, seed = 101) # ?vip::gen_friedman
#' test <- train[701:1000, ]
#' train <- train[1:700, ]
#' pp <- stats::ppr(y ~ ., data = train, nterms = 11)
#' metric_name <- ifelse(
#' packageVersion("vip") > package_version("0.3.2"),
#' "rsq",
#' "rsquared"
#' )
#'
#' importance <- vip::vi_permute(
#' pp,
#' target = "y",
#' metric = metric_name,
#' pred_wrapper = predict,
#' train = train
#' )
#'
#' aoa <- ww_area_of_applicability(y ~ ., train, test, importance = importance)
#' predict(aoa, test)
#'
#' @exportS3Method
predict.ww_area_of_applicability <- function(object, new_data, ...) {
new_data <- hardhat::forge(new_data, object$blueprint)
new_data <- check_di_testing(object$transformed_training, new_data, NA)
existing_new_data <- complete.cases(new_data)
check_di_columns_numeric(object$transformed_training, new_data)
new_data <- standardize_and_weight(
new_data,
object$sds,
object$means,
object$importance
)
predictions <- tibble::tibble(
di = NA_real_,
aoa = NA,
.rows = nrow(new_data)
)
predictions[existing_new_data, ]$di <- calc_di(
object$transformed_training,
new_data[existing_new_data, ],
object$d_bar
)
predictions$aoa <- predictions$di <= object$aoa_threshold
hardhat::validate_prediction_size(predictions, new_data)
predictions
}
#' Print number of predictors and area-of-applicability threshold
#'
#' @param x A `ww_area_of_applicability` object.
#'
#' @param digits The number of digits to print, used when rounding the AOA threshold.
#'
#' @inheritParams rlang::args_dots_empty
#'
#' @keywords internal
#'
#' @examplesIf rlang::is_installed("vip")
#' library(vip)
#' trn <- gen_friedman(500, seed = 101) # ?vip::gen_friedman
#' pp <- ppr(y ~ ., data = trn, nterms = 11)
#' metric_name <- ifelse(
#' packageVersion("vip") > package_version("0.3.2"),
#' "rsq",
#' "rsquared"
#' )
#'
#' importance <- vip::vi_permute(
#' pp,
#' target = "y",
#' metric = metric_name,
#' pred_wrapper = predict,
#' train = trn
#' )
#'
#'
#' ww_area_of_applicability(trn[2:11], importance = importance)
#'
#' @export
print.ww_area_of_applicability <- function(x, digits = getOption("digits"), ...) {
predictors_count <- ncol(x$blueprint$ptypes$predictors)
aoa_threshold <- round(x$aoa_threshold, digits)
print_output <- glue::glue(
"# Predictors:
{predictors_count}
Area-of-applicability threshold:
{aoa_threshold}"
)
cat(print_output)
invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/waywiser/R/area_of_applicability.R
#' Guerry "Moral Statistics" (1830s)
#'
#' This data and description are taken from the geodaData R package.
#' Classic social science foundational study by Andre-Michel Guerry on crime, suicide, literacy and other “moral statistics” in 1830s France. Data from the R package Guerry (Michael Friendly and Stephane Dray).
#'
#' Sf object, units in m. EPSG 27572: NTF (Paris) / Lambert zone II.
#'
#' @format An sf data frame with 85 rows, 23 variables, and a geometry column:
#' \describe{
#' \item{ dept }{ Department ID: Standard numbers for the departments }
#' \item{ Region }{ Region of France ('N'='North', 'S'='South', 'E'='East', 'W'='West', 'C'='Central'). Corsica is coded as NA. }
#' \item{ Dprtmnt }{ Department name: Departments are named according to usage in 1830, but without accents. A factor with levels *Ain* *Aisne* *Allier* ... *Vosges Yonne* }
#' \item{ Crm_prs }{ Population per Crime against persons. }
#' \item{ Crm_prp }{ Population per Crime against property. }
#' \item{ Litercy }{ Percent of military conscripts who can read and write. }
#' \item{ Donatns }{ Donations to the poor. }
#' \item{ Infants }{ Population per illegitimate birth. }
#' \item{ Suicids }{ Population per suicide. }
#' \item{ Maincty }{ Size of principal city ('1:Sm', '2:Med', '3:Lg'), used as a surrogate for population density. Large refers to the top 10, small to the bottom 10; all the rest are classed Medium. }
#' \item{ Wealth }{ Per capita tax on personal property. A ranked index based on taxes on personal and movable property per inhabitant. }
#' \item{ Commerc }{ Commerce and Industry, measured by the rank of the number of patents / population. }
#' \item{ Clergy }{ Distribution of clergy, measured by the rank of the number of Catholic priests in active service population. }
#' \item{ Crim_prn }{ Crimes against parents, measured by the rank of the ratio of crimes against parents to all crimes – Average for the years 1825-1830. }
#' \item{ Infntcd }{ Infanticides per capita. A ranked ratio of number of infanticides to population – Average for the years 1825-1830. }
#' \item{ Dntn_cl }{ Donations to the clergy. A ranked ratio of the number of bequests and donations inter vivos to population – Average for the years 1815-1824. }
#' \item{ Lottery }{ Per capita wager on Royal Lottery. Ranked ratio of the proceeds bet on the royal lottery to population — Average for the years 1822-1826. }
#' \item{ Desertn }{ Military desertion, ratio of number of young soldiers accused of desertion to the force of the military contingent, minus the deficit produced by the insufficiency of available billets – Average of the years 1825-1827. }
#' \item{ Instrct }{ Instruction. Ranks recorded from Guerry's map of Instruction. Note: this is inversely related to Literacy }
#' \item{ Prsttts }{ Number of prostitutes registered in Paris from 1816 to 1834, classified by the department of their birth }
#' \item{ Distanc }{ Distance to Paris (km). Distance of each department centroid to the centroid of the Seine (Paris) }
#' \item{ Area }{ Area (1000 km^2). }
#' \item{ Pop1831 }{ Population in 1831, in 1000s }
#' }
#' @source \itemize{
#' \item{Angeville, A. (1836). Essai sur la Statistique de la Population française Paris: F. Doufour.}
#' \item{Guerry, A.-M. (1833). Essai sur la statistique morale de la France Paris: Crochard. English translation: Hugh P. Whitt and Victor W. Reinking, Lewiston, N.Y. : Edwin Mellen Press, 2002.}
#' \item{Parent-Duchatelet, A. (1836). De la prostitution dans la ville de Paris, 3rd ed, 1857, p. 32, 36}
#' }
#' <https://geodacenter.github.io/data-and-lab/Guerry/>
#' @examples
#' if (requireNamespace("sf", quietly = TRUE)) {
#' library(sf)
#' data(guerry)
#'
#' plot(guerry["Donatns"])
#' }
"guerry"
#' Number of trees and aboveground biomass for Forest Inventory and Analysis plots in New York State
#'
#' The original data is derived from the Forest Inventory and Analysis program,
#' implemented by the US Department of Agriculture's Forest Service.
#'
#' @format An sf object using EPSG 5070: NAD83 / Conus Albers (in meters), with 5,303 rows and 5 columns:
#' \describe{
#' \item{yr}{The year measurements were taken.}
#' \item{plot}{A unique identifier signifying the plot measurements were taken at.}
#' \item{n_trees}{The number of trees present on a plot.}
#' \item{agb}{The total aboveground biomass at the plot location, in pounds.}
#' \item{geometry}{The centroid of the plot location.}
#' }
"ny_trees"
#' Simulated data based on WorldClim Bioclimatic variables
#'
#' This data is adapted from the CAST vignette
#' `vignette("cast02-AOA-tutorial", package = "CAST")`.
#' The original data is derived from the Worldclim global climate variables.
#'
#' @format An sf object with 10,000 rows and 6 columns:
#' \describe{
#' \item{bio2}{Mean Diurnal Range (Mean of monthly (max temp - min temp))}
#' \item{bio10}{Mean Temperature of Warmest Quarter}
#' \item{bio13}{Precipitation of Wettest Month}
#' \item{bio19}{Precipitation of Coldest Quarter}
#' \item{geometry}{The location of the sampled point.}
#' \item{response}{A virtual species distribution, generated using the `generateSpFromPCA()` function from the `virtualspecies` package.}
#' }
#'
#' @source <https://www.worldclim.org>
"worldclim_simulation"
/scratch/gouwar.j/cran-all/cranData/waywiser/R/data.R
#' Global Geary's C statistic
#'
#' Calculate the global Geary's C statistic for model residuals.
#' `ww_global_geary_c()` returns the statistic itself, while
#' `ww_global_geary_pvalue()` returns the associated p value.
#' These functions are meant to help assess model predictions, for instance by
#' identifying if there are clusters of higher residuals than expected. For
#' statistical testing and inference applications, use
#' [spdep::geary.test()] instead.
#'
#' These functions can be used for geographic or projected coordinate reference
#' systems and expect 2D data.
#'
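#' The statistic itself follows the standard definition (Geary 1954; Cliff and
#' Ord 1981), here applied to the residuals \eqn{e_i = y_i - \hat{y}_i}:
#' \deqn{C = \frac{(n - 1) \sum_{i} \sum_{j} w_{ij} (e_i - e_j)^2}{2 S_0 \sum_{i} (e_i - \bar{e})^2}}
#' where \eqn{w_{ij}} are the spatial weights and \eqn{S_0 = \sum_{i} \sum_{j} w_{ij}}.
#'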
#' @inheritParams yardstick::rmse
#' @inheritParams spdep::geary.test
#' @inheritParams ww_area_of_applicability
#' @param wt A `listw` object, for instance as created with [ww_build_weights()].
#' For data.frame input, may also be a function that takes `data` and returns a
#' `listw` object.
#' @param ... Additional arguments passed to [spdep::geary()] (for
#' `ww_global_geary_c()`) or [spdep::geary.test()] (for
#' `ww_global_geary_pvalue()`).
#'
#' @family autocorrelation metrics
#' @family yardstick metrics
#'
#' @return
#' A tibble with columns .metric, .estimator, and .estimate and 1 row of values.
#' For grouped data frames, the number of rows returned will be the same as the
#' number of groups.
#' For `_vec()` functions, a single value (or NA).
#'
#' @examples
#' guerry_model <- guerry
#' guerry_lm <- lm(Crm_prs ~ Litercy, guerry_model)
#' guerry_model$predictions <- predict(guerry_lm, guerry_model)
#'
#' ww_global_geary_c(guerry_model, Crm_prs, predictions)
#' ww_global_geary_pvalue(guerry_model, Crm_prs, predictions)
#'
#' wt <- ww_build_weights(guerry_model)
#'
#' ww_global_geary_c_vec(
#' guerry_model$Crm_prs,
#' guerry_model$predictions,
#' wt = wt
#' )
#' ww_global_geary_pvalue_vec(
#' guerry_model$Crm_prs,
#' guerry_model$predictions,
#' wt = wt
#' )
#'
#' @references
#' Geary, R. C. (1954). "The Contiguity Ratio and Statistical Mapping". The
#' Incorporated Statistician. 5 (3): 115–145. doi:10.2307/2986645.
#'
#' Cliff, A. D., Ord, J. K. 1981 Spatial processes, Pion, p. 17.
#'
#' @rdname global_geary_c
#' @export
ww_global_geary_c <- function(data, ...) {
UseMethod("ww_global_geary_c")
}
ww_global_geary_c <- new_numeric_metric(ww_global_geary_c, direction = "zero")
#' @export
ww_global_geary_c.data.frame <- function(data,
truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
spatial_yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
wt = wt,
na_rm = na_rm,
name = "global_geary_c",
...
)
}
#' @rdname global_geary_c
#' @export
ww_global_geary_c_vec <- function(truth, estimate, wt, na_rm = FALSE, ...) {
ww_global_geary_c_impl <- function(truth, estimate, ...) {
resid <- truth - estimate
spdep::geary(
x = resid,
listw = wt,
length(wt[["neighbours"]]),
length(wt[["neighbours"]]) - 1,
spdep::Szero(wt),
...
)$C
}
spatial_yardstick_vec(
truth = truth,
estimate = estimate,
wt = wt,
na_rm = na_rm,
impl = ww_global_geary_c_impl,
...
)
}
#' @rdname global_geary_c
#' @export
ww_global_geary_pvalue <- function(data, ...) {
UseMethod("ww_global_geary_pvalue")
}
ww_global_geary_pvalue <- new_numeric_metric(ww_global_geary_pvalue, "minimize")
#' @export
ww_global_geary_pvalue.data.frame <- function(data,
truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
spatial_yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
wt = wt,
name = "global_geary_pvalue",
na_rm = na_rm,
...
)
}
#' @rdname global_geary_c
#' @export
ww_global_geary_pvalue_vec <- function(truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
ww_global_geary_pvalue_impl <- function(truth, estimate, ...) {
resid <- truth - estimate
if (all(resid == 0)) {
return(NA_real_)
}
spdep::geary.test(
x = resid,
listw = wt,
...
)$p.value
}
spatial_yardstick_vec(
truth = truth,
estimate = estimate,
wt = wt,
na_rm = na_rm,
impl = ww_global_geary_pvalue_impl,
...
)
}
/scratch/gouwar.j/cran-all/cranData/waywiser/R/global_geary.R
#' Global Moran's I statistic
#'
#' Calculate the global Moran's I statistic for model residuals.
#' `ww_global_moran_i()` returns the statistic itself, while
#' `ww_global_moran_pvalue()` returns the associated p value.
#' These functions are meant to help assess model predictions, for instance by
#' identifying if there are clusters of higher residuals than expected. For
#' statistical testing and inference applications, use
#' [spdep::moran.test()] instead.
#'
#' These functions can be used for geographic or projected coordinate reference
#' systems and expect 2D data.
#'
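#' The statistic itself follows the standard definition (Moran 1950; Cliff and
#' Ord 1981), here applied to the residuals \eqn{e_i = y_i - \hat{y}_i}:
#' \deqn{I = \frac{n}{S_0} \frac{\sum_{i} \sum_{j} w_{ij} (e_i - \bar{e})(e_j - \bar{e})}{\sum_{i} (e_i - \bar{e})^2}}
#' where \eqn{w_{ij}} are the spatial weights and \eqn{S_0 = \sum_{i} \sum_{j} w_{ij}}.
#'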
#' @inheritParams ww_global_geary_c
#' @inheritParams spdep::moran.test
#' @param ... Additional arguments passed to [spdep::moran()] (for
#' `ww_global_moran_i()`) or [spdep::moran.test()] (for
#' `ww_global_moran_pvalue()`).
#'
#' @family autocorrelation metrics
#' @family yardstick metrics
#'
#' @inherit ww_global_geary_c return
#'
#' @examples
#' guerry_model <- guerry
#' guerry_lm <- lm(Crm_prs ~ Litercy, guerry_model)
#' guerry_model$predictions <- predict(guerry_lm, guerry_model)
#'
#' ww_global_moran_i(guerry_model, Crm_prs, predictions)
#' ww_global_moran_pvalue(guerry_model, Crm_prs, predictions)
#'
#' wt <- ww_build_weights(guerry_model)
#'
#' ww_global_moran_i_vec(
#' guerry_model$Crm_prs,
#' guerry_model$predictions,
#' wt = wt
#' )
#' ww_global_moran_pvalue_vec(
#' guerry_model$Crm_prs,
#' guerry_model$predictions,
#' wt = wt
#' )
#'
#' @references
#' Moran, P.A.P. (1950). "Notes on Continuous Stochastic Phenomena." Biometrika,
#' 37(1/2), pp 17. doi: 10.2307/2332142
#'
#' Cliff, A. D., Ord, J. K. 1981 Spatial processes, Pion, p. 17.
#'
#' @rdname global_moran_i
#' @export
ww_global_moran_i <- function(data, ...) {
UseMethod("ww_global_moran_i")
}
ww_global_moran_i <- new_numeric_metric(ww_global_moran_i, direction = "zero")
#' @export
ww_global_moran_i.data.frame <- function(data,
truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
spatial_yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
wt = wt,
na_rm = na_rm,
name = "global_moran_i",
...
)
}
#' @rdname global_moran_i
#' @export
ww_global_moran_i_vec <- function(truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
ww_global_moran_i_impl <- function(truth, estimate, ...) {
resid <- truth - estimate
spdep::moran(
x = resid,
listw = wt,
n = length(wt$neighbours),
S0 = spdep::Szero(wt),
...
)$I
}
spatial_yardstick_vec(
truth = truth,
estimate = estimate,
wt = wt,
na_rm = na_rm,
impl = ww_global_moran_i_impl,
...
)
}
#' @rdname global_moran_i
#' @export
ww_global_moran_pvalue <- function(data, ...) {
UseMethod("ww_global_moran_pvalue")
}
ww_global_moran_pvalue <- new_numeric_metric(ww_global_moran_pvalue, "minimize")
#' @export
ww_global_moran_pvalue.data.frame <- function(data,
truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
spatial_yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
wt = wt,
na_rm = na_rm,
name = "global_moran_pvalue",
...
)
}
#' @rdname global_moran_i
#' @export
ww_global_moran_pvalue_vec <- function(truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
ww_global_moran_pvalue_impl <- function(truth, estimate, ...) {
resid <- truth - estimate
if (all(resid == 0)) {
return(NA_real_)
}
spdep::moran.test(
x = resid,
listw = wt,
...
)$p.value
}
spatial_yardstick_vec(
truth = truth,
estimate = estimate,
wt = wt,
na_rm = na_rm,
impl = ww_global_moran_pvalue_impl,
...
)
}
/scratch/gouwar.j/cran-all/cranData/waywiser/R/global_moran.R
#' Local Geary's C statistic
#'
#' Calculate the local Geary's C statistic for model residuals.
#' `ww_local_geary_c()` returns the statistic itself, while
#' `ww_local_geary_pvalue()` returns the associated p value.
#' These functions are meant to help assess model predictions, for instance by
#' identifying clusters of higher residuals than expected. For statistical
#' testing and inference applications, use [spdep::localC_perm()] instead.
#'
#' These functions can be used for geographic or projected coordinate reference
#' systems and expect 2D data.
#'
#' @inheritParams ww_global_geary_c
#' @inheritParams spdep::localC_perm
#' @param ... Additional arguments passed to [spdep::localC()] (for
#' `ww_local_geary_c()`) or [spdep::localC_perm()] (for
#' `ww_local_geary_pvalue()`).
#'
#' @return
#' A tibble with columns .metric, .estimator, and .estimate and `nrow(data)`
#' rows of values.
#' For `_vec()` functions, a numeric vector of `length(truth)` (or NA).
#'
#' @family autocorrelation metrics
#' @family yardstick metrics
#'
#' @examples
#' guerry_model <- guerry
#' guerry_lm <- lm(Crm_prs ~ Litercy, guerry_model)
#' guerry_model$predictions <- predict(guerry_lm, guerry_model)
#'
#' ww_local_geary_c(guerry_model, Crm_prs, predictions)
#' ww_local_geary_pvalue(guerry_model, Crm_prs, predictions)
#'
#' wt <- ww_build_weights(guerry_model)
#'
#' ww_local_geary_c_vec(
#' guerry_model$Crm_prs,
#' guerry_model$predictions,
#' wt = wt
#' )
#' ww_local_geary_pvalue_vec(
#' guerry_model$Crm_prs,
#' guerry_model$predictions,
#' wt = wt
#' )
#'
#' @references
#' Anselin, L. 1995. Local indicators of spatial association, Geographical
#' Analysis, 27, pp 93–115. doi: 10.1111/j.1538-4632.1995.tb00338.x.
#'
#' Anselin, L. 2019. A Local Indicator of Multivariate Spatial Association:
#' Extending Geary's C. Geographical Analysis, 51, pp 133-150.
#' doi: 10.1111/gean.12164
#'
#' @rdname local_geary_c
#' @export
ww_local_geary_c <- function(data, ...) {
UseMethod("ww_local_geary_c")
}
ww_local_geary_c <- new_numeric_metric(ww_local_geary_c, direction = "zero")
#' @export
ww_local_geary_c.data.frame <- function(data,
truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
spatial_yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
wt = wt,
na_rm = na_rm,
name = "local_geary_c",
...
)
}
#' @rdname local_geary_c
#' @export
ww_local_geary_c_vec <- function(truth, estimate, wt, na_rm = FALSE, ...) {
ww_local_geary_c_impl <- function(truth, estimate, ...) {
resid <- truth - estimate
spdep::localC(
x = resid,
listw = wt,
...
)
}
spatial_yardstick_vec(
truth = truth,
estimate = estimate,
wt = wt,
na_rm = na_rm,
impl = ww_local_geary_c_impl,
...
)
}
#' @rdname local_geary_c
#' @export
ww_local_geary_pvalue <- function(data, ...) {
UseMethod("ww_local_geary_pvalue")
}
ww_local_geary_pvalue <- new_numeric_metric(ww_local_geary_pvalue, "minimize")
#' @export
ww_local_geary_pvalue.data.frame <- function(data,
truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
spatial_yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
wt = wt,
name = "local_geary_pvalue",
na_rm = na_rm,
...
)
}
#' @rdname local_geary_c
#' @export
ww_local_geary_pvalue_vec <- function(truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
ww_local_geary_pvalue_impl <- function(truth, estimate, ...) {
resid <- truth - estimate
out <- spdep::localC_perm(
x = resid,
listw = wt,
...
)
as.vector(
attr(out, "pseudo-p")[, "Pr(z != E(Ci))"]
)
}
spatial_yardstick_vec(
truth = truth,
estimate = estimate,
wt = wt,
na_rm = na_rm,
impl = ww_local_geary_pvalue_impl,
...
)
}
/scratch/gouwar.j/cran-all/cranData/waywiser/R/local_geary.R
#' Local Getis-Ord G and G* statistic
#'
#' Calculate the local Getis-Ord G and G* statistic for model residuals.
#' `ww_local_getis_ord_g()` returns the statistic itself, while
#' `ww_local_getis_ord_pvalue()` returns the associated p value.
#' These functions are meant to help assess model predictions, for instance by
#' identifying clusters of higher residuals than expected. For statistical
#' testing and inference applications, use [spdep::localG_perm()] instead.
#'
#' These functions can be used for geographic or projected coordinate reference
#' systems and expect 2D data.
#'
#' @inheritParams ww_global_geary_c
#' @inheritParams spdep::localG_perm
#' @param ... Additional arguments passed to [spdep::localG()] (for
#' `ww_local_getis_ord_g()`) or [spdep::localG_perm()] (for
#' `ww_local_getis_ord_pvalue()`).
#'
#' @inherit ww_local_geary_c return
#'
#' @family autocorrelation metrics
#' @family yardstick metrics
#'
#' @examples
#' guerry_model <- guerry
#' guerry_lm <- lm(Crm_prs ~ Litercy, guerry_model)
#' guerry_model$predictions <- predict(guerry_lm, guerry_model)
#'
#' ww_local_getis_ord_g(guerry_model, Crm_prs, predictions)
#' ww_local_getis_ord_g_pvalue(guerry_model, Crm_prs, predictions)
#'
#' wt <- ww_build_weights(guerry_model)
#'
#' ww_local_getis_ord_g_vec(
#' guerry_model$Crm_prs,
#' guerry_model$predictions,
#' wt = wt
#' )
#' ww_local_getis_ord_g_pvalue_vec(
#' guerry_model$Crm_prs,
#' guerry_model$predictions,
#' wt = wt
#' )
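#'
#' # A sketch of obtaining the G* variant: weights that include each
#' # observation as its own neighbor are treated as G* weights.
#' # `wt_star` is only an illustrative name.
#' wt_star <- ww_build_weights(guerry_model, include_self = TRUE)
#' ww_local_getis_ord_g(guerry_model, Crm_prs, predictions, wt = wt_star)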
#'
#' @references
#' Ord, J. K. and Getis, A. 1995. Local spatial autocorrelation statistics:
#' distributional issues and an application. Geographical Analysis, 27, 286–306.
#' doi: 10.1111/j.1538-4632.1995.tb00912.x
#'
#' @rdname local_getis_ord_g
#' @export
ww_local_getis_ord_g <- function(data, ...) {
UseMethod("ww_local_getis_ord_g")
}
ww_local_getis_ord_g <- new_numeric_metric(ww_local_getis_ord_g, direction = "zero")
#' @export
ww_local_getis_ord_g.data.frame <- function(data,
truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
spatial_yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
wt = wt,
na_rm = na_rm,
name = "local_getis_ord_g",
...
)
}
#' @rdname local_getis_ord_g
#' @export
ww_local_getis_ord_g_vec <- function(truth, estimate, wt, na_rm = FALSE, ...) {
ww_local_getis_ord_g_impl <- function(truth, estimate, ...) {
resid <- truth - estimate
as.vector(
spdep::localG(
x = resid,
listw = wt,
...
)
)
}
spatial_yardstick_vec(
truth = truth,
estimate = estimate,
wt = wt,
na_rm = na_rm,
impl = ww_local_getis_ord_g_impl,
...
)
}
#' @rdname local_getis_ord_g
#' @export
ww_local_getis_ord_g_pvalue <- function(data, ...) {
UseMethod("ww_local_getis_ord_g_pvalue")
}
ww_local_getis_ord_g_pvalue <- new_numeric_metric(ww_local_getis_ord_g_pvalue, "minimize")
#' @export
ww_local_getis_ord_g_pvalue.data.frame <- function(data,
truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
spatial_yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
wt = wt,
na_rm = na_rm,
name = "local_getis_ord_g_pvalue",
...
)
}
#' @rdname local_getis_ord_g
#' @export
ww_local_getis_ord_g_pvalue_vec <- function(truth, estimate, wt, na_rm = FALSE, ...) {
ww_local_getis_ord_pvalue_impl <- function(truth, estimate, ...) {
resid <- truth - estimate
out <- spdep::localG_perm(
x = resid,
listw = wt,
...
)
as.vector(attr(out, "internals")[, "Pr(z != E(Gi))"])
}
spatial_yardstick_vec(
truth = truth,
estimate = estimate,
wt = wt,
na_rm = na_rm,
impl = ww_local_getis_ord_pvalue_impl,
...
)
}
|
/scratch/gouwar.j/cran-all/cranData/waywiser/R/local_getis.R
|
#' Local Moran's I statistic
#'
#' Calculate the local Moran's I statistic for model residuals.
#' `ww_local_moran_i()` returns the statistic itself, while
#' `ww_local_moran_pvalue()` returns the associated p value.
#' These functions are meant to help assess model predictions, for instance by
#' identifying clusters of higher residuals than expected. For statistical
#' testing and inference applications, use [spdep::localmoran_perm()] instead.
#'
#' These functions can be used for geographic or projected coordinate reference
#' systems and expect 2D data.
#'
#' @inheritParams ww_global_geary_c
#' @inheritParams spdep::localmoran
#' @param ... Additional arguments passed to [spdep::localmoran()].
#'
#' @inherit ww_local_geary_c return
#'
#' @family autocorrelation metrics
#' @family yardstick metrics
#'
#' @examples
#' guerry_model <- guerry
#' guerry_lm <- lm(Crm_prs ~ Litercy, guerry_model)
#' guerry_model$predictions <- predict(guerry_lm, guerry_model)
#'
#' ww_local_moran_i(guerry_model, Crm_prs, predictions)
#' ww_local_moran_pvalue(guerry_model, Crm_prs, predictions)
#'
#' wt <- ww_build_weights(guerry_model)
#'
#' ww_local_moran_i_vec(
#' guerry_model$Crm_prs,
#' guerry_model$predictions,
#' wt = wt
#' )
#' ww_local_moran_pvalue_vec(
#' guerry_model$Crm_prs,
#' guerry_model$predictions,
#' wt = wt
#' )
#'
#' @references
#' Anselin, L. 1995. Local indicators of spatial association, Geographical
#' Analysis, 27, pp 93–115. doi: 10.1111/j.1538-4632.1995.tb00338.x.
#'
#' Sokal, R. R, Oden, N. L. and Thomson, B. A. 1998. Local Spatial
#' Autocorrelation in a Biological Model. Geographical Analysis, 30, pp 331–354.
#' doi: 10.1111/j.1538-4632.1998.tb00406.x
#'
#' @rdname local_moran_i
#' @export
ww_local_moran_i <- function(data, ...) {
UseMethod("ww_local_moran_i")
}
ww_local_moran_i <- new_numeric_metric(ww_local_moran_i, direction = "zero")
#' @export
ww_local_moran_i.data.frame <- function(data,
truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
spatial_yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
wt = wt,
na_rm = na_rm,
name = "local_moran_i",
...
)
}
#' @rdname local_moran_i
#' @export
ww_local_moran_i_vec <- function(truth, estimate, wt, na_rm = FALSE, ...) {
ww_local_moran_i_impl <- function(truth, estimate, ...) {
resid <- truth - estimate
as.vector(
spdep::localmoran(
x = resid,
listw = wt,
...
)[, 1]
)
}
spatial_yardstick_vec(
truth = truth,
estimate = estimate,
wt = wt,
na_rm = na_rm,
impl = ww_local_moran_i_impl,
...
)
}
#' @rdname local_moran_i
#' @export
ww_local_moran_pvalue <- function(data, ...) {
UseMethod("ww_local_moran_pvalue")
}
ww_local_moran_pvalue <- new_numeric_metric(ww_local_moran_pvalue, "minimize")
#' @export
ww_local_moran_pvalue.data.frame <- function(data,
truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
spatial_yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
wt = wt,
name = "local_moran_pvalue",
na_rm = na_rm,
...
)
}
#' @rdname local_moran_i
#' @export
ww_local_moran_pvalue_vec <- function(truth,
estimate,
wt = NULL,
na_rm = FALSE,
...) {
ww_local_moran_pvalue_impl <- function(truth, estimate, ...) {
resid <- truth - estimate
as.vector(
spdep::localmoran(
x = resid,
listw = wt,
...
)[, 5]
)
}
spatial_yardstick_vec(
truth = truth,
estimate = estimate,
wt = wt,
na_rm = na_rm,
impl = ww_local_moran_pvalue_impl,
...
)
}
|
/scratch/gouwar.j/cran-all/cranData/waywiser/R/local_moran.R
|
#' Make 'nb' objects from sf objects
#'
#' These functions can be used for geographic or projected coordinate reference
#' systems and expect 2D data.
#'
#' @details
#' When `nb = NULL`, the method used to create neighbors from `data` is
#' dependent on what geometry type `data` is:
#'
#' + If `nb = NULL` and `data` is a point geometry
#' (classes "sfc_POINT" or "sfc_MULTIPOINT") the "nb" object will be created
#' using [ww_make_point_neighbors()].
#' + If `nb = NULL` and `data` is a polygon geometry
#' (classes "sfc_POLYGON" or "sfc_MULTIPOLYGON") the "nb" object will be created
#' using [ww_make_polygon_neighbors()].
#' + If `nb = NULL` and `data` is any other geometry type, the "nb" object will
#' be created using the centroids of the data as points, with a warning.
#'
#' @param ... Arguments passed to the neighbor-creating function.
#' @param data An sf object (of class "sf" or "sfc").
#' @param nb An object of class "nb" (in which case it will be returned
#' unchanged), or a function to create an object of class "nb" from `data` and
#' `...`, or `NULL`. See details.
#' @inheritParams rlang::abort
#'
#' @examples
#' ww_build_neighbors(guerry)
#'
#' @return An object of class "nb".
#'
#' @export
ww_build_neighbors <- function(data, nb = NULL, ..., call = rlang::caller_env()) {
if (!(inherits(data, "sf") || inherits(data, "sfc"))) {
rlang::abort(
"`data` must be an `sf` or `sfc` object.",
call = call
)
}
data <- sf::st_geometry(data)
type <- if (any(c("sfc_MULTIPOINT", "sfc_POINT") %in% class(data))) {
"point"
} else if (any(c("sfc_MULTIPOLYGON", "sfc_POLYGON") %in% class(data))) {
"polygon"
} else if (is.null(nb)) {
rlang::abort(
c(
"Can only calculate neighbors from point or polygon geometries.",
"i" = "To avoid this, provide neighbors explicitly.",
"i" = "Or provide a neighbor-creating function."
),
call = call
)
}
if (is.null(nb)) {
nb <- switch(type,
"point" = ww_make_point_neighbors(data, ...),
"polygon" = ww_make_polygon_neighbors(data, ...)
)
}
if (rlang::is_function(nb)) {
nb <- do.call(nb, list(data, ...))
}
if (!inherits(nb, "nb")) {
rlang::abort(
"Couldn't figure out how to build an `nb` object from the provided arguments",
call = call
)
}
nb
}
#' Make 'nb' objects from point geometries
#'
#' This function uses [spdep::knearneigh()] and [spdep::knn2nb()] to
#' create a "nb" neighbors list.
#'
#' These functions can be used for geographic or projected coordinate reference
#' systems and expect 2D data.
#'
#' @param data An `sfc_POINT` or `sfc_MULTIPOINT` object.
#' @param k How many nearest neighbors to use in [spdep::knearneigh()].
#' @param sym Force the output neighbors list (from [spdep::knn2nb()]) to
#' symmetry.
#' @param ... Other arguments passed to [spdep::knearneigh()].
#'
#' @return An object of class "nb"
#'
#' @examples
#' ww_make_point_neighbors(ny_trees)
#'
#' @export
ww_make_point_neighbors <- function(data, k = 1, sym = FALSE, ...) {
if (!(inherits(data, "sf") || inherits(data, "sfc"))) {
rlang::abort(
"`data` must be an `sf` or `sfc` object."
)
}
if (length(k) > 1 || !rlang::is_integerish(k)) {
rlang::abort(
"`k` must be a single numeric integer."
)
}
knn <- spdep::knearneigh(data, k, ...)
spdep::knn2nb(knn, sym = sym)
}
#' Make 'nb' objects from polygon geometries
#'
#' This function is an extremely thin wrapper around [spdep::poly2nb()],
#' renamed to use the waywiser "ww" prefix.
#'
#' These functions can be used for geographic or projected coordinate reference
#' systems and expect 2D data.
#'
#' @param data An `sfc_POLYGON` or `sfc_MULTIPOLYGON` object.
#' @param ... Additional arguments passed to [spdep::poly2nb()].
#'
#' @return An object of class "nb"
#'
#' @examples
#' ww_make_polygon_neighbors(guerry)
#'
#' @export
ww_make_polygon_neighbors <- function(data, ...) {
spdep::poly2nb(data, ...)
}
#' Build "listw" objects of spatial weights
#'
#' These functions can be used for geographic or projected coordinate reference
#' systems and expect 2D data.
#'
#' @param x Either an sf object or a "nb" neighbors list object.
#' If an sf object, will be converted into a neighbors list via
#' [ww_build_neighbors()].
#' @param wt Either a "listw" object (which will be returned unchanged),
#' a function for creating a "listw" object from `x`, or `NULL`, in which case
#' weights will be constructed via [spdep::nb2listw()].
#' @param include_self Include each region itself in its own list of neighbors?
#' @param ... Arguments passed to the weight constructing function.
#'
#' @return A `listw` object.
#'
#' @examples
#' ww_build_weights(guerry)
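#'
#' # Arguments can be passed through to `spdep::nb2listw()`; for instance,
#' # this requests the (default) row-standardized "W" weighting style explicitly:
#' ww_build_weights(guerry, style = "W")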
#'
#' @export
ww_build_weights <- function(x, wt = NULL, include_self = FALSE, ...) {
if (!inherits(x, "nb")) {
x <- ww_build_neighbors(x, call = rlang::caller_env())
}
if (include_self) {
if (!identical(attr(x, "self.included"), TRUE)) x <- spdep::include.self(x)
}
if (is.null(wt)) wt <- spdep::nb2listw(x, ...)
if (rlang::is_function(wt)) {
wt <- do.call(wt, list(x, ...))
}
if (!inherits(wt, "listw")) {
rlang::abort(
"Couldn't figure out how to build a `listw` object from the provided arguments",
call = rlang::caller_env()
)
}
wt
}
complete_cases <- function(data_frame) {
if (!identical(data_frame, NULL) && nrow(data_frame) > 0) {
stats::complete.cases(data_frame)
} else {
logical(0)
}
}
|
/scratch/gouwar.j/cran-all/cranData/waywiser/R/misc.R
|
#' Workhorse function handling yardstick metrics for the package
#'
#' @param name The human-understandable name of the metric, to return in the
#' output data frame.
#' @param metric_fun The name of the function to use to calculate the metric.
#' @param na_rm Here for compatibility with yardstick; ignored.
#' @inheritParams yardstick::rmse
#' @inheritParams ww_area_of_applicability
#' @inheritParams rlang::args_dots_empty
#' @inheritParams rlang::args_error_context
#'
#' @return A tibble with one row and three columns: `.metric`, containing `name`,
#' `.estimator`, containing `standard`, and `.estimate`, the metric estimate.
#' sf objects may also have a `geometry` column with the unioned geometry of
#' inputs.
#'
#' @noRd
yardstick_df <- function(data, truth, estimate, na_rm, name, metric_fun, ..., case_weights = NULL,
error_call = rlang::caller_env()) {
if (missing(metric_fun)) metric_fun <- get(paste0("ww_", name, "_vec"))
out <- metric_reframer(
name = name,
fn = metric_fun,
data = data,
na_rm = na_rm,
truth = !!enquo(truth),
estimate = !!enquo(estimate),
fn_options = list(
...
),
error_call = error_call
)
if (inherits(out, "sf")) {
sf::st_geometry(out) <- NULL
}
out
}
metric_reframer <- function(name, fn, data, truth, estimate, ..., na_rm = TRUE, fn_options = list(), error_call = rlang::caller_env()) {
truth <- enquo(truth)
estimate <- enquo(estimate)
truth <- ww_eval_select(expr = truth, data = data, error_call = error_call)
estimate <- ww_eval_select(expr = estimate, data = data, error_call = error_call)
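  # Compute the metric separately within each dplyr group (ungrouped data is a
  # single implicit group), then bind the per-group results back together with
  # their grouping keys.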
group_rows <- dplyr::group_rows(data)
group_keys <- dplyr::group_keys(data)
data <- dplyr::ungroup(data)
groups <- vctrs::vec_chop(data, indices = group_rows)
out <- vector("list", length = length(groups))
for (i in seq_along(groups)) {
group <- groups[[i]]
group_truth <- group[[truth]]
group_estimate <- group[[estimate]]
elt_out <- list(
.metric = name,
.estimator = "standard",
.estimate = as.vector(
rlang::inject(
withCallingHandlers(
fn(
truth = group_truth,
estimate = group_estimate,
na_rm = na_rm,
!!!fn_options
),
error = function(cnd) {
cnd$call <- error_call
rlang::cnd_signal(cnd)
}
)
)
)
)
elt_out <- vctrs::vec_recycle_common(!!!elt_out)
out[[i]] <- tibble::new_tibble(elt_out)
}
group_keys <- vctrs::vec_rep_each(group_keys, times = vctrs::list_sizes(out))
out <- vctrs::vec_rbind(!!!out)
out <- vctrs::vec_cbind(group_keys, out)
out
}
# cribbed from yardstick 1.2.0
ww_eval_select <- function(expr, data, arg, ..., error_call = rlang::caller_env()) {
rlang::check_dots_empty()
out <- tidyselect::eval_select(
expr = expr,
data = data,
allow_predicates = FALSE,
allow_rename = FALSE,
allow_empty = FALSE,
error_call = error_call
)
out <- names(out)
if (length(out) != 1L) {
message <- paste0("`", arg, "` must select exactly 1 column from `data`.")
rlang::abort(message, call = error_call)
}
out
}
#' Workhorse function handling spatial yardstick metrics for the package
#'
#' @inheritParams ww_global_geary_c
#' @inheritParams yardstick::rmse
#' @inheritParams ww_area_of_applicability
#' @inheritParams rlang::args_dots_empty
#' @inheritParams rlang::args_error_context
#' @inheritParams yardstick_df
#'
#' @return A tibble with one row and three columns: `.metric`, containing `name`,
#' `.estimator`, containing `standard`, and `.estimate`, the metric estimate.
#' sf objects may also have a `geometry` column with the unioned geometry of
#' inputs.
#'
#' @noRd
spatial_yardstick_df <- function(data, truth, estimate, wt, na_rm, name, ..., case_weights = NULL,
error_call = rlang::caller_env()) {
if (is.null(wt)) {
wt <- ww_build_weights(data)
}
if (rlang::is_function(wt)) {
wt <- do.call(wt, list(data))
}
truth <- enquo(truth)
estimate <- enquo(estimate)
truth <- ww_eval_select(expr = truth, data = data, error_call = rlang::caller_env())
estimate <- ww_eval_select(expr = estimate, data = data, error_call = rlang::caller_env())
if (yardstick_any_missing(data[[truth]], data[[estimate]], NULL)) {
rlang::abort(
c(
"Missing values in data.",
i = "waywiser can't handle missing data for functions that use spatial weights."
)
)
}
metric_fun <- get(paste0("ww_", name, "_vec"))
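  # spdep::localG() returns the G* statistic when the supplied weights include
  # each observation as its own neighbor, so relabel the metric accordingly.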
if (grepl("getis_ord_g", name) &&
identical(attr(wt$neighbours, "self.included"), TRUE)) {
name <- gsub("ord_g", "ord_gstar", name)
}
yardstick_df(
data = as.data.frame(data),
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = name,
metric_fun = metric_fun,
wt = wt,
...,
error_call = error_call
)
}
#' Workhorse function powering yardstick metrics
#'
#' @inheritParams ww_global_geary_c
#' @inheritParams yardstick::rmse
#' @param impl The metric implementation function.
#'
#' @return A vector of metric estimates
#'
#' @noRd
yardstick_vec <- function(truth, estimate, na_rm, impl, wt = NULL, ..., case_weights = NULL) {
if (!is.vector(truth)) rlang::abort("`truth` must be a numeric vector.")
if (!is.vector(estimate)) rlang::abort("`estimate` must be a numeric vector.")
if (!is.numeric(truth)) rlang::abort("`truth` must be numeric.")
if (!is.numeric(estimate)) rlang::abort("`estimate` must be numeric.")
if (length(truth) != length(estimate)) {
rlang::abort(
glue::glue("Length of `truth` ({length(truth)}) and `estimate` ({length(estimate)}) must match.")
)
}
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, NULL)
truth <- result$truth
estimate <- result$estimate
} else if (yardstick_any_missing(truth, estimate, NULL)) {
return(NA_real_)
}
if (length(truth) == 0) rlang::abort("0 non-missing values were passed to `truth`.")
if (length(estimate) == 0) rlang::abort("0 non-missing values were passed to `estimate`.")
impl(truth = truth, estimate = estimate, ...)
}
#' Workhorse function powering spatial yardstick metrics
#'
#' @inheritParams yardstick_vec
#' @inheritParams spatial_yardstick_df
#'
#' @return A vector of metric estimates
#'
#' @noRd
spatial_yardstick_vec <- function(truth, estimate, wt, na_rm = TRUE, impl, ..., case_weights = NULL) {
if (!inherits(wt, "listw")) {
rlang::abort(
c(
"`wt` must be a 'listw' object",
"i" = "You can create 'listw' objects using `ww_build_weights()`"
)
)
}
if (yardstick_any_missing(truth, estimate, NULL)) {
rlang::abort(
c(
"Missing values in data.",
i = "waywiser can't handle missing data for functions that use spatial weights."
)
)
}
yardstick_vec(
truth,
estimate,
na_rm,
impl,
wt = wt,
...
)
}
|
/scratch/gouwar.j/cran-all/cranData/waywiser/R/misc_yardstick.R
|
#' Evaluate metrics at multiple scales of aggregation
#'
#' @section Raster inputs:
#'
#' If `data` is `NULL`, then `truth` and `estimate` should both be `SpatRaster`
#' objects, as created via [terra::rast()]. These rasters will then be
#' aggregated to each grid using [exactextractr::exact_extract()]. If `data`
#' is a `SpatRaster` object, then `truth` and `estimate` should be indices to
#' select the appropriate layers of the raster via [terra::subset()].
#'
#' Grids are calculated using the bounding box of `truth`, under the assumption
#' that you may have extrapolated into regions which do not have matching "true"
#' values. This function does not check that `truth` and `estimate` overlap at
#' all, or that they are at all contained within the grid.
#'
#' @section Creating grid blocks:
#'
#' The grid blocks can be controlled by passing arguments to
#' [sf::st_make_grid()] via `...`. Some particularly useful arguments include:
#'
#' * `cellsize`: Target cellsize, expressed as the "diameter" (shortest
#' straight-line distance between opposing sides; two times the apothem)
#' of each block, in map units.
#' * `n`: The number of grid blocks in the x and y direction (columns, rows).
#' * `square`: A logical value indicating whether to create square (`TRUE`) or
#' hexagonal (`FALSE`) cells.
#'
#' If both `cellsize` and `n` are provided, then the number of blocks requested
#' by `n` of sizes specified by `cellsize` will be returned, likely not
#' lining up with the bounding box of `data`. If only `cellsize`
#' is provided, this function will return as many blocks of size
#' `cellsize` as fit inside the bounding box of `data`. If only `n` is provided,
#' then `cellsize` will be automatically adjusted to create the requested
#' number of cells.
#'
#' Grids are created by mapping over each argument passed via `...`
#' simultaneously, in a similar manner to [mapply()] or [purrr::pmap()]. This
#' means that, for example, passing `n = list(c(1, 2))` will create a single
#' 1x2 grid, while passing `n = c(1, 2)` will create a 1x1 grid _and_ a 2x2
#' grid. It also means that arguments will be recycled using R's standard
#' vector recycling rules, so that passing `n = c(1, 2)` and `square = FALSE`
#' will create two separate grids of hexagons.
#'
#' This function can be used for geographic or projected coordinate reference
#' systems and expects 2D data.
#'
#' @param data Either: a point geometry `sf` object containing the columns
#' specified by the `truth` and `estimate` arguments; a `SpatRaster` from
#' the `terra` package containing layers specified by the `truth` and `estimate`
#' arguments; or `NULL` if `truth` and `estimate` are `SpatRaster` objects.
#' @param truth,estimate If `data` is an `sf` object, the names (optionally
#' unquoted) for the columns in `data` containing the true and predicted values,
#' respectively. If `data` is a `SpatRaster` object, either layer names or
#' indices which will select the true and predicted layers, respectively, via
#' [terra::subset()]. If `data` is `NULL`, `SpatRaster` objects with a single
#' layer containing the true and predicted values, respectively.
#' @param metrics Either a [yardstick::metric_set()] object, or a list of
#' functions which will be used to construct a [yardstick::metric_set()] object
#' specifying the performance metrics to evaluate at each scale.
#' @param grids Optionally, a list of pre-computed `sf` or `sfc` objects
#' specifying polygon boundaries to use for assessments.
#' @param ... Arguments passed to [sf::st_make_grid()].
#' **You almost certainly should provide these arguments as lists.**
#' For instance, passing `n = list(c(1, 2))` will create a single 1x2 grid;
#' passing `n = c(1, 2)` will create a 1x1 grid _and_ a 2x2 grid.
#' @param aggregation_function The function to use to aggregate predictions and
#' true values at various scales, by default [mean()]. For the `sf` method,
#' you can pass any function which takes a single vector and returns a scalar.
#' For raster methods, any function accepted by
#' [exactextractr::exact_extract()] (note that built-in function names must be
#' quoted). Note that this function does _not_ pay attention to the value of
#' `na_rm`; any NA handling you want to do during aggregation should be handled
#' by this function directly.
#' @param na_rm Boolean: Should polygons with NA values be removed before
#' calculating metrics? Note that this does _not_ impact how values are
#' aggregated to polygons: if you want to remove NA values before aggregating,
#' provide a function to `aggregation_function` which will remove NA values.
#' @param autoexpand_grid Boolean: if `data` is in geographic coordinates and
#' `grids` aren't provided, the grids generated by [sf::st_make_grid()] may not
#' contain all observations. If `TRUE`, this function will automatically expand
#' generated grids by a tiny factor to attempt to capture all observations.
#' @param progress Boolean: if `data` is `NULL`, should aggregation via
#' [exactextractr::exact_extract()] show a progress bar? Separate progress bars
#' will be shown for each time `truth` and `estimate` are aggregated.
#'
#' @return A tibble with six columns: `.metric`, with the name
#' of the metric that the row describes; `.estimator`, with the name of the
#' estimator used, `.estimate`, with the output of the metric function;
#' `.grid_args`, with the arguments passed to [sf::st_make_grid()] via `...`
#' (if any), `.grid`, containing the grids used to aggregate predictions,
#' along with the aggregated values of `truth` and `estimate` and the
#' count of non-NA values for each, and `.notes`, which (if `data` is an `sf`
#' object) will indicate any observations which were not used in a given
#' assessment.
#'
#' @examplesIf rlang::is_installed("modeldata")
#' data(ames, package = "modeldata")
#' ames_sf <- sf::st_as_sf(ames, coords = c("Longitude", "Latitude"), crs = 4326)
#' ames_model <- lm(Sale_Price ~ Lot_Area, data = ames_sf)
#' ames_sf$predictions <- predict(ames_model, ames_sf)
#'
#' ww_multi_scale(
#' ames_sf,
#' Sale_Price,
#' predictions,
#' n = list(
#' c(10, 10),
#' c(1, 1)
#' ),
#' square = FALSE
#' )
#'
#' # or, mostly equivalently
#' # (there will be a slight difference due to `autoexpand_grid = TRUE`)
#' grids <- list(
#' sf::st_make_grid(ames_sf, n = c(10, 10), square = FALSE),
#' sf::st_make_grid(ames_sf, n = c(1, 1), square = FALSE)
#' )
#' ww_multi_scale(ames_sf, Sale_Price, predictions, grids = grids)
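#'
#' # A sketch of the raster interface (not run; `truth_raster` and
#' # `estimate_raster` stand in for single-layer SpatRasters sharing a CRS):
#' # ww_multi_scale(
#' #   truth = truth_raster,
#' #   estimate = estimate_raster,
#' #   n = list(c(2, 2))
#' # )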
#'
#' @references
#' Riemann, R., Wilson, B. T., Lister, A., and Parks, S. (2010). "An effective
#' assessment protocol for continuous geospatial datasets of forest
#' characteristics using USFS Forest Inventory and Analysis (FIA) data."
#' Remote Sensing of Environment 114(10), pp 2337-2352,
#' doi: 10.1016/j.rse.2010.05.010 .
#'
#' @export
ww_multi_scale <- function(
data = NULL,
truth,
estimate,
metrics = list(yardstick::rmse, yardstick::mae),
grids = NULL,
...,
na_rm = TRUE,
aggregation_function = "mean",
autoexpand_grid = TRUE,
progress = TRUE) {
if (length(na_rm) != 1 || !is.logical(na_rm)) {
rlang::abort("Only one logical value can be passed to `na_rm`.")
}
if (missing(data) || is.null(data)) {
ww_multi_scale_raster_args(
data = data,
truth = truth,
estimate = estimate,
metrics = metrics,
grids = grids,
...,
na_rm = na_rm,
aggregation_function = aggregation_function,
autoexpand_grid = autoexpand_grid,
progress = progress
)
} else {
UseMethod("ww_multi_scale", data)
}
}
#' @exportS3Method
ww_multi_scale.SpatRaster <- function(
data = NULL,
truth,
estimate,
metrics = list(yardstick::rmse, yardstick::mae),
grids = NULL,
...,
na_rm = TRUE,
aggregation_function = "mean",
autoexpand_grid = TRUE,
progress = TRUE) {
rlang::check_installed("terra")
rlang::check_installed("exactextractr")
data <- prep_multi_scale_raster(data, truth, estimate)
metrics <- handle_metrics(metrics)
grid_list <- handle_grids(data, grids, autoexpand_grid, sf::st_crs(data), ...)
grid_list$grids <- lapply(
grid_list$grids,
spatraster_extract,
data,
aggregation_function,
progress
)
.notes <- raster_method_notes(grid_list)
raster_method_summary(grid_list, .notes, metrics, na_rm)
}
prep_multi_scale_raster <- function(data, truth, estimate) {
data <- tryCatch(
terra::subset(data, c(truth, estimate)),
error = function(e) {
rlang::abort("Couldn't select either `truth` or `estimate`. Are your indices correct?")
}
)
if (terra::nlyr(data) != 2) {
rlang::abort(c(
"`terra::subset(data, c(truth, estimate))` didn't return 2 layers as expected.",
i = "Make sure `truth` and `estimate` both select exactly one layer."
))
}
names(data) <- c("truth", "estimate")
data
}
spatraster_extract <- function(grid, data, aggregation_function, progress) {
grid <- sf::st_as_sf(grid)
sf::st_geometry(grid) <- "geometry"
exactextract_names <- c(".truth", ".estimate")
if (!rlang::is_function(aggregation_function) && aggregation_function != "count") {
exactextract_names <- c(exactextract_names, ".truth_count", ".estimate_count")
aggregation_function <- c(aggregation_function, "count")
}
grid_df <- exactextractr::exact_extract(
data,
grid,
fun = aggregation_function,
progress = progress
)
names(grid_df) <- exactextract_names
if (length(exactextract_names) == 4L) {
exactextract_names <- exactextract_names[c(1, 3, 2, 4)]
}
cbind(grid, grid_df)[c(exactextract_names, "geometry")]
}
ww_multi_scale_raster_args <- function(
data = NULL,
truth,
estimate,
metrics = list(yardstick::rmse, yardstick::mae),
grids = NULL,
...,
na_rm = TRUE,
aggregation_function = "mean",
autoexpand_grid = TRUE,
progress = TRUE) {
rlang::check_installed("terra")
rlang::check_installed("exactextractr")
if (!inherits(truth, "SpatRaster") || terra::nlyr(truth) != 1) {
rlang::abort("`truth` must be a SpatRaster with only one layer.")
}
if (!inherits(estimate, "SpatRaster") || terra::nlyr(estimate) != 1) {
rlang::abort("`estimate` must be a SpatRaster with only one layer.")
}
if (sf::st_crs(truth) != sf::st_crs(estimate)) {
rlang::abort("`truth` and `estimate` must share a CRS.")
}
metrics <- handle_metrics(metrics)
grid_list <- handle_grids(truth, grids, autoexpand_grid, sf::st_crs(data), ...)
grid_list$grids <- lapply(
grid_list$grids,
function(grid) {
grid <- sf::st_as_sf(grid)
sf::st_geometry(grid) <- "geometry"
if (rlang::is_function(aggregation_function) || aggregation_function == "count") {
grid$.truth <- exactextractr::exact_extract(
truth,
grid,
fun = aggregation_function,
progress = progress
)
grid$.estimate <- exactextractr::exact_extract(
estimate,
grid,
fun = aggregation_function,
progress = progress
)
grid[c(".truth", ".estimate", "geometry")]
} else {
truth_df <- exactextractr::exact_extract(
truth,
grid,
fun = c(aggregation_function, "count"),
progress = progress
)
names(truth_df) <- c(".truth", ".truth_count")
estimate_df <- exactextractr::exact_extract(
estimate,
grid,
fun = c(aggregation_function, "count"),
progress = progress
)
names(estimate_df) <- c(".estimate", ".estimate_count")
cbind(grid, truth_df, estimate_df)[c(
".truth",
".truth_count",
".estimate",
".estimate_count",
"geometry"
)]
}
}
)
.notes <- raster_method_notes(grid_list)
raster_method_summary(grid_list, .notes, metrics, na_rm)
}
raster_method_notes <- function(grid_list) {
lapply(
seq_along(grid_list$grids),
function(idx) {
tibble::tibble(
note = character(0),
missing_indices = list()
)
}
)
}
raster_method_summary <- function(grid_list, .notes, metrics, na_rm) {
out <- mapply(
function(grid, grid_arg, .notes) {
out <- metrics(grid, .truth, .estimate, na_rm = na_rm)
out[attr(out, "sf_column")] <- NULL
out$.grid_args <- list(grid_list$grid_args[grid_arg, ])
out$.grid <- list(grid)
out$.notes <- list(.notes)
out
},
grid = grid_list$grids,
grid_arg = grid_list$grid_arg_idx,
.notes = .notes,
SIMPLIFY = FALSE
)
do.call(dplyr::bind_rows, out)
}
#' @exportS3Method
ww_multi_scale.sf <- function(
data,
truth,
estimate,
metrics = list(yardstick::rmse, yardstick::mae),
grids = NULL,
...,
na_rm = TRUE,
aggregation_function = "mean",
autoexpand_grid = TRUE,
progress = TRUE) {
if (nrow(data) == 0) {
rlang::abort(
"0 rows were passed to `data`."
)
}
check_multi_scale_data(data)
metrics <- handle_metrics(metrics)
truth_var <- tidyselect::eval_select(rlang::expr({{ truth }}), data)
estimate_var <- tidyselect::eval_select(rlang::expr({{ estimate }}), data)
if (!is.numeric(data[[truth_var]])) {
rlang::abort("`truth` must be numeric.")
}
if (!is.numeric(data[[estimate_var]])) {
rlang::abort("`estimate` must be numeric.")
}
data_crs <- sf::st_crs(data)
grid_list <- handle_grids(data, grids, autoexpand_grid, data_crs, ...)
data$.grid_idx <- seq_len(nrow(data))
out <- mapply(
function(grid, grid_args_idx) {
grid_args <- grid_list[["grid_args"]][grid_args_idx, ]
grid <- prep_grid(data, grid, data_crs)
grid$grid_cell_idx <- seq_len(nrow(grid))
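      # Spatially join observations to the grid cells that contain them;
      # observations outside every cell are reported in the `.notes` column.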
grid_matches <- sf::st_join(
grid,
data[".grid_idx"],
left = FALSE
)
grid_matches <- sf::st_drop_geometry(grid_matches)
notes_tibble <- make_notes_tibble(data, grid_matches)
matched_data <- match_data(
data,
grid_matches,
matched_data,
truth_var,
estimate_var,
aggregation_function
)
out <- metrics(matched_data, .truth, .estimate, na_rm = na_rm)
out["grid_cell_idx"] <- NULL
out[attr(out, "sf_column")] <- NULL
out$.grid_args <- list(grid_args)
.grid <- dplyr::left_join(
grid,
matched_data,
by = dplyr::join_by(grid_cell_idx)
)
.grid["grid_cell_idx"] <- NULL
out$.grid <- list(.grid)
out$.notes <- list(notes_tibble)
out
},
grid = grid_list$grids,
grid_args_idx = grid_list$grid_arg_idx,
SIMPLIFY = FALSE
)
out <- dplyr::bind_rows(out)
if (any(vapply(out[[".notes"]], function(x) nrow(x) > 0, logical(1)))) {
rlang::warn(
c(
"Some observations were not within any grid cell, and as such were not used in any assessments.",
i = "See the `.notes` column for details."
)
)
}
out
}
handle_metrics <- function(metrics) {
if (inherits(metrics, "metric")) metrics <- list(metrics)
if (!inherits(metrics, "metric_set")) {
metrics <- do.call(yardstick::metric_set, metrics)
}
metrics
}
handle_grids <- function(data, grids, autoexpand_grid, data_crs, ...) {
if (is.null(grids)) {
grid_args <- rlang::list2(...)
if ("crs" %in% names(grid_args)) {
rlang::warn(
c(
"The `crs` argument (passed via `...`) will be ignored.",
i = "Grids will be created using the same crs as `data`."
),
call = rlang::caller_env()
)
grid_args["crs"] <- NULL
}
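    # Recycle each argument passed via `...` to the length of the longest one,
    # so grids are built by mapping over the arguments in parallel (see the
    # "Creating grid blocks" section of the documentation).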
grid_arg_idx <- max(vapply(grid_args, length, integer(1)))
grid_args <- stats::setNames(
lapply(
grid_args,
\(x) rep(x, length.out = grid_arg_idx)
),
names(grid_args)
)
grid_args <- tibble::as_tibble(grid_args)
grid_arg_idx <- seq_len(nrow(grid_args))
grid_box <- sf::st_bbox(data)
if (is_longlat(data) && autoexpand_grid) {
# cf https://github.com/ropensci/stplanr/pull/467
# basically: spherical geometry means sometimes the straight line of the
# grid will exclude points within the bounding box
#
# so here we'll expand our boundary by a small bit in order to always contain our
# points within the grid
grid_box <- expand_grid(grid_box)
}
grids <- lapply(
grid_arg_idx,
function(idx) {
arg <- lapply(
names(grid_args),
function(arg) {
grid_args[[arg]][[idx]]
}
)
names(arg) <- names(grid_args)
do.call(
sf::st_make_grid,
c(x = list(grid_box), crs = list(data_crs), arg)
)
}
)
} else {
rlang::check_dots_empty(call = rlang::caller_env())
grid_args <- tibble::tibble()
grid_arg_idx <- 0
if (!is.na(data_crs)) {
grids <- lapply(grids, sf::st_transform, sf::st_crs(data))
}
}
list(
grids = grids,
grid_args = grid_args,
grid_arg_idx = grid_arg_idx
)
}
match_data <- function(data,
grid_matches,
matched_data,
truth_var,
estimate_var,
aggregation_function) {
matched_data <- dplyr::left_join(
data,
grid_matches,
by = dplyr::join_by(.grid_idx)
)
matched_data <- sf::st_drop_geometry(matched_data)
matched_data <- matched_data[!is.na(matched_data[["grid_cell_idx"]]), ]
matched_data <- dplyr::group_by(
matched_data,
dplyr::across(dplyr::all_of(c(dplyr::group_vars(data), "grid_cell_idx")))
)
matched_data <- dplyr::summarise(
matched_data,
.truth = rlang::exec(.env[["aggregation_function"]], .data[[names(truth_var)]]),
.truth_count = sum(!is.na(.data[[names(truth_var)]])),
.estimate = rlang::exec(.env[["aggregation_function"]], .data[[names(estimate_var)]]),
.estimate_count = sum(!is.na(.data[[names(estimate_var)]])),
.groups = "drop"
)
if (dplyr::is_grouped_df(data)) {
matched_data <- dplyr::group_by(matched_data, !!!dplyr::groups(data))
}
matched_data
}
prep_grid <- function(data, grid, data_crs) {
grid <- sf::st_as_sf(grid)
grid_crs <- sf::st_crs(grid)
# If both have a CRS, reproject
if (!is.na(data_crs) && !is.na(grid_crs) && (grid_crs != data_crs)) {
grid <- sf::st_transform(grid, data_crs)
# if only data has CRS, assume grid in same
} else if (!is.na(data_crs)) {
grid <- sf::st_set_crs(grid, data_crs)
}
# if neither has a CRS, ignore (so, implicitly assume grid is in same)
grid
}
make_notes_tibble <- function(data, grid_matches) {
missing <- setdiff(data[[".grid_idx"]], grid_matches[[".grid_idx"]])
note <- character(0)
if (length(missing) > 0) {
note <- "Some observations were not within any grid cell, and as such were not used in any assessments. Their row numbers are in the `missing_indices` column."
missing <- list(missing)
} else {
missing <- list()
}
tibble::tibble(
note = note,
missing_indices = missing
)
}
check_multi_scale_data <- function(data) {
if (any(names(data) %in% c(".truth", ".estimate", ".truth_count", ".estimate_count"))) {
rlang::abort(
c(
"This function cannot work with data whose columns are named `.truth`, `.estimate`, `.truth_count`, or `estimate_count`.",
i = "Rename the relevant columns and try again."
),
call = rlang::caller_env()
)
}
geom_type <- unique(sf::st_geometry_type(data))
if (!(length(geom_type) == 1 && geom_type == "POINT")) {
rlang::abort(
c(
"ww_multi_scale is currently only implemented for point geometries.",
i = "Consider casting your data to points."
),
call = rlang::caller_env()
)
}
}
#' Expand geographic bounding boxes slightly
#'
#' Because we're drawing straight lines on spheres when working with geographic
#' coordinates, it's entirely possible to have points within a bounding box but
#' outside of the straight lines between the corners. As this is almost never
#' expected, this function adds a tiny fudge factor to bounding boxes in order
#' to "catch" points.
#'
#' @param grid_box The output from [sf::st_bbox()]
#' @param expansion The expansion factor: what fraction should each coordinate
#' be adjusted by?
#'
#' @return A very slightly buffered bounding box
#'
#' @references
#' https://github.com/ropensci/stplanr/pull/467
#'
#' @noRd
expand_grid <- function(grid_box, expansion = 0.00001) {
grid_box[1] <- grid_box[1] - abs(grid_box[1] * expansion)
grid_box[2] <- grid_box[2] - abs(grid_box[2] * expansion)
grid_box[3] <- grid_box[3] + abs(grid_box[3] * expansion)
grid_box[4] <- grid_box[4] + abs(grid_box[4] * expansion)
grid_box
}
#' Check if an sf object is in geographic coordinates
#'
#' This function adjusts [sf::st_is_longlat()] so that data without a CRS,
#' such as simulated data on arbitrary grids, is treated as non-geographic.
#'
#' @inheritParams sf::st_is_longlat
#'
#' @noRd
is_longlat <- function(x) {
!(sf::st_crs(x) == sf::NA_crs_) && sf::st_is_longlat(x)
}
|
/scratch/gouwar.j/cran-all/cranData/waywiser/R/multi_scale.R
|
#' Convert various "importance" tibbles into standardized input formats
#'
#' @param importance Either a data.frame with columns "term" and "estimate", or
#' a `vi` object from the `vip` package.
#' @inheritParams rlang::args_dots_empty
#'
#' @return A data.frame with two columns, `term` and `estimate`.
#'
#' @noRd
tidy_importance <- function(importance, ...) {
UseMethod("tidy_importance")
}
tidy_importance.vi <- function(importance, ...) {
rlang::check_dots_empty()
data.frame(
term = importance[["Variable"]],
estimate = importance[["Importance"]]
)
}
tidy_importance.data.frame <- function(importance, ...) {
rlang::check_dots_empty()
if (!all(c("term", "estimate") %in% names(importance))) {
rlang::abort(
"'term' and 'estimate' must be columns in `importance`",
call = rlang::caller_env()
)
}
importance
}
tidy_importance.default <- function(importance, ...) {
cls <- class(importance)[1]
rlang::abort(
glue::glue("Can't construct a tidy importance table from an object of class {cls}")
)
}
|
/scratch/gouwar.j/cran-all/cranData/waywiser/R/tidy_importance.R
|
## usethis namespace: start
#' @keywords internal
"_PACKAGE"
#' @importFrom rlang enquo .data .env
#' @importFrom stats predict complete.cases na.fail
#' @importFrom yardstick new_numeric_metric yardstick_any_missing
#' @importFrom yardstick yardstick_remove_missing
utils::globalVariables(c(".truth", ".estimate", ".grid_idx", "grid_cell_idx"))
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/waywiser/R/waywiser-package.R
|
#' Willmott's d and related values
#'
#' These functions calculate Willmott's d value, a proposed replacement for R2
#' which better differentiates between types and magnitudes of possible
#' covariations. Additional functions calculate systematic and unsystematic
#' components of MSE and RMSE; the sum of the systematic and unsystematic
#' components of MSE equal total MSE (though the same is not true for RMSE).
#'
#' Values of d and d1 range from 0 to 1, with 1 indicating perfect agreement.
#' Values of
#' dr range from -1 to 1, with 1 similarly indicating perfect agreement. Values
#' of RMSE are in the same units as `truth` and `estimate`, while values of MSE
#' are in squared units. `truth` and `estimate` must be the same length. These
#' functions are not explicitly spatial and as such can be applied to data with
#' any number of dimensions and any coordinate reference system.
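#'
#' As a point of reference, Willmott's d is computed here (matching the
#' implementation in `ww_willmott_d_vec()`) as
#' \deqn{d = 1 - \frac{\sum_{i=1}^{n} (O_i - P_i)^2}{\sum_{i=1}^{n} (|P_i - \bar{O}| + |O_i - \bar{O}|)^2}}
#' where \eqn{O} is `truth`, \eqn{P} is `estimate`, and \eqn{\bar{O}} is the
#' mean of `truth`.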
#'
#' @inheritParams yardstick::rmse
#' @inheritParams ww_area_of_applicability
#'
#' @return
#' A tibble with columns .metric, .estimator, and .estimate and 1 row of values.
#' For grouped data frames, the number of rows returned will be the same as the number of groups.
#' For `_vec()` functions, a single value (or NA).
#'
#' @family agreement metrics
#' @family yardstick metrics
#'
#' @examples
#' x <- c(6, 8, 9, 10, 11, 14)
#' y <- c(2, 3, 5, 5, 6, 8)
#'
#' ww_willmott_d_vec(x, y)
#' ww_willmott_d1_vec(x, y)
#' ww_willmott_dr_vec(x, y)
#' ww_systematic_mse_vec(x, y)
#' ww_unsystematic_mse_vec(x, y)
#' ww_systematic_rmse_vec(x, y)
#' ww_unsystematic_rmse_vec(x, y)
#'
#' example_df <- data.frame(x = x, y = y)
#' ww_willmott_d(example_df, x, y)
#' ww_willmott_d1(example_df, x, y)
#' ww_willmott_dr(example_df, x, y)
#' ww_systematic_mse(example_df, x, y)
#' ww_unsystematic_mse(example_df, x, y)
#' ww_systematic_rmse(example_df, x, y)
#' ww_unsystematic_rmse(example_df, x, y)
#'
#' @references
#' Willmott, C. J. 1981. "On the Validation of Models". Physical Geography 2(2),
#' pp 184-194, doi: 10.1080/02723646.1981.10642213.
#'
#' Willmott, C. J. 1982. "Some Comments on the Evaluation of Model Performance".
#' Bulletin of the American Meteorological Society 63(11), pp 1309-1313,
#' doi: 10.1175/1520-0477(1982)063<1309:SCOTEO>2.0.CO;2.
#'
#' Willmott C. J., Ackleson S. G., Davis R. E., Feddema J. J., Klink K. M.,
#' Legates D. R., O’Donnell J., Rowe C. M. 1985. "Statistics for the
#' evaluation of model performance." Journal of Geophysical Research
#' 90(C5): 8995–9005, doi: 10.1029/jc090ic05p08995
#'
#' Willmott, C. J., Robeson, S. M., and Matsuura, K. 2012. "A refined index of model
#' performance". International Journal of Climatology 32, pp 2088-2094, doi:
#' 10.1002/joc.2419.
#'
#' @export
ww_willmott_d <- function(data, ...) {
UseMethod("ww_willmott_d")
}
ww_willmott_d <- new_numeric_metric(ww_willmott_d, direction = "maximize")
#' @rdname ww_willmott_d
#' @export
ww_willmott_d.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "willmott_d",
...
)
}
#' @rdname ww_willmott_d
#' @export
ww_willmott_d_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
ww_willmott_d_impl <- function(truth, estimate, ...) {
numerator <- calc_ssd(truth, estimate)
denominator <- sum(
(abs(truth - mean(truth)) + abs(estimate - mean(truth)))^2
)
1 - (numerator / denominator)
}
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_willmott_d_impl,
...
)
}
#' @rdname ww_willmott_d
#' @export
ww_willmott_d1 <- function(data, ...) {
UseMethod("ww_willmott_d1")
}
ww_willmott_d1 <- new_numeric_metric(ww_willmott_d1, direction = "maximize")
#' @rdname ww_willmott_d
#' @export
ww_willmott_d1.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "willmott_d1",
...
)
}
#' @rdname ww_willmott_d
#' @export
ww_willmott_d1_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
ww_willmott_d1_impl <- function(truth, estimate, ...) {
numerator <- sum(abs(truth - estimate))
denominator <- sum(
(abs(truth - mean(truth)) + abs(estimate - mean(truth)))
)
1 - (numerator / denominator)
}
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_willmott_d1_impl,
...
)
}
#' @rdname ww_willmott_d
#' @export
ww_willmott_dr <- function(data, ...) {
UseMethod("ww_willmott_dr")
}
ww_willmott_dr <- new_numeric_metric(ww_willmott_dr, direction = "maximize")
#' @rdname ww_willmott_d
#' @export
ww_willmott_dr.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "willmott_dr",
...
)
}
#' @rdname ww_willmott_d
#' @export
ww_willmott_dr_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
ww_willmott_dr_impl <- function(truth, estimate, ...) {
term_1 <- sum(abs(estimate - truth))
term_2 <- sum(abs(truth - mean(truth))) * 2
if (term_1 <= term_2) {
1 - (term_1 / term_2)
} else {
(term_2 / term_1) - 1
}
}
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_willmott_dr_impl,
...
)
}
#' @rdname ww_willmott_d
#' @export
ww_systematic_mse <- function(data, ...) {
UseMethod("ww_systematic_mse")
}
ww_systematic_mse <- new_numeric_metric(ww_systematic_mse, direction = "minimize")
#' @rdname ww_willmott_d
#' @export
ww_systematic_mse.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "systematic_mse",
...
)
}
#' @rdname ww_willmott_d
#' @export
ww_systematic_mse_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_systematic_mse_impl,
...
)
}
ww_systematic_mse_impl <- function(truth, estimate, ...) {
dt <- data.frame(truth = truth, estimate = estimate)
preds <- predict(stats::lm(truth ~ estimate, dt), dt)
mean((preds - truth)^2)
}
#' @rdname ww_willmott_d
#' @export
ww_unsystematic_mse <- function(data, ...) {
UseMethod("ww_unsystematic_mse")
}
ww_unsystematic_mse <- new_numeric_metric(ww_unsystematic_mse, direction = "minimize")
#' @rdname ww_willmott_d
#' @export
ww_unsystematic_mse.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "unsystematic_mse",
...
)
}
#' @rdname ww_willmott_d
#' @export
ww_unsystematic_mse_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_unsystematic_mse_impl,
...
)
}
ww_unsystematic_mse_impl <- function(truth, estimate, ...) {
dt <- data.frame(truth = truth, estimate = estimate)
preds <- predict(stats::lm(truth ~ estimate, dt), dt)
mean((estimate - preds)^2)
}
#' @rdname ww_willmott_d
#' @export
ww_systematic_rmse <- function(data, ...) {
UseMethod("ww_systematic_rmse")
}
ww_systematic_rmse <- new_numeric_metric(ww_systematic_rmse, direction = "minimize")
#' @rdname ww_willmott_d
#' @export
ww_systematic_rmse.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "systematic_rmse",
...
)
}
#' @rdname ww_willmott_d
#' @export
ww_systematic_rmse_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
ww_systematic_rmse_impl <- function(truth, estimate, ...) {
sqrt(ww_systematic_mse_impl(truth, estimate, ...))
}
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_systematic_rmse_impl,
...
)
}
#' @rdname ww_willmott_d
#' @export
ww_unsystematic_rmse <- function(data, ...) {
UseMethod("ww_unsystematic_rmse")
}
ww_unsystematic_rmse <- new_numeric_metric(ww_unsystematic_rmse, direction = "minimize")
#' @rdname ww_willmott_d
#' @export
ww_unsystematic_rmse.data.frame <- function(data,
truth,
estimate,
na_rm = TRUE,
...) {
yardstick_df(
data = data,
truth = {{ truth }},
estimate = {{ estimate }},
na_rm = na_rm,
name = "unsystematic_rmse",
...
)
}
#' @rdname ww_willmott_d
#' @export
ww_unsystematic_rmse_vec <- function(truth,
estimate,
na_rm = TRUE,
...) {
ww_unsystematic_rmse_impl <- function(truth, estimate, ...) {
sqrt(ww_unsystematic_mse_impl(truth, estimate, ...))
}
yardstick_vec(
truth = truth,
estimate = estimate,
na_rm = na_rm,
impl = ww_unsystematic_rmse_impl,
...
)
}
|
/scratch/gouwar.j/cran-all/cranData/waywiser/R/willmott_d.R
|
release_bullets <- function() {
c(
"`cffr::cff_write()`",
"`codemetar::write_codemeta()`",
"`styler::style_pkg()`",
"`Rscript inst/generate_srr.R`"
)
}
|
/scratch/gouwar.j/cran-all/cranData/waywiser/R/zzz.R
|
delayedAssign("guerry", local({
requireNamespace("sf", quietly = TRUE)
waywiser:::guerry
}))
|
/scratch/gouwar.j/cran-all/cranData/waywiser/data/guerry.R
|
delayedAssign("ny_trees", local({
requireNamespace("sf", quietly = TRUE)
waywiser:::ny_trees
}))
|
/scratch/gouwar.j/cran-all/cranData/waywiser/data/ny_trees.R
|
delayedAssign("worldclim_simulation", local({
requireNamespace("sf", quietly = TRUE)
waywiser:::worldclim_simulation
}))
|
/scratch/gouwar.j/cran-all/cranData/waywiser/data/worldclim_simulation.R
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = rlang::is_installed("ggplot2") && rlang::is_installed("tigris")
)
## ----setup, include=FALSE-----------------------------------------------------
ggplot2::theme_set(ggplot2::theme_minimal())
## -----------------------------------------------------------------------------
library(sf)
library(tidyr)
library(dplyr)
library(waywiser)
invisible(sf_proj_network(TRUE))
## -----------------------------------------------------------------------------
library(ggplot2)
ny_trees %>%
ggplot() +
geom_sf(aes(color = agb), alpha = 0.4) +
scale_color_distiller(palette = "Greens", direction = 1)
## -----------------------------------------------------------------------------
agb_lm <- lm(agb ~ n_trees, ny_trees)
ny_trees$predicted <- predict(agb_lm, ny_trees)
## -----------------------------------------------------------------------------
cell_sizes <- seq(10, 100, 10) * 1000
ny_multi_scale <- ww_multi_scale(
ny_trees,
agb,
predicted,
cellsize = cell_sizes
)
ny_multi_scale
## -----------------------------------------------------------------------------
ny_multi_scale %>%
unnest(.grid_args) %>%
ggplot(aes(x = cellsize, y = .estimate, color = .metric)) +
geom_line()
## -----------------------------------------------------------------------------
ny_multi_scale$.grid[[9]] %>%
filter(!is.na(.estimate)) %>%
ggplot(aes(fill = .estimate)) +
geom_sf() +
scale_fill_distiller(palette = "Greens", direction = 1)
## ----message=FALSE, results='hide'--------------------------------------------
suppressPackageStartupMessages(library(tigris))
ny_block_groups <- block_groups("NY")
ny_county_subdivisions <- county_subdivisions("NY")
ny_counties <- counties("NY")
## ----warning=FALSE------------------------------------------------------------
ny_division_assessment <- ww_multi_scale(
ny_trees,
agb,
predicted,
grids = list(
ny_block_groups,
ny_county_subdivisions,
ny_counties
)
)
ny_division_assessment %>%
mutate(
division = rep(c("Block group", "County subdivision", "County"), each = 2)
) %>%
ggplot(aes(x = division, y = .estimate, fill = .metric)) +
geom_col(position = position_dodge())
|
/scratch/gouwar.j/cran-all/cranData/waywiser/inst/doc/multi-scale-assessment.R
|
---
title: "Multi-scale model assessment"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Multi-scale model assessment}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = rlang::is_installed("ggplot2") && rlang::is_installed("tigris")
)
```
```{r setup, include=FALSE}
ggplot2::theme_set(ggplot2::theme_minimal())
```
This vignette walks through how to use waywiser to assess model predictions at
multiple spatial scales, using the `ny_trees` data included in waywiser.
First things first, we'll set up our environment, loading a few packages
and telling sf to download the coordinate reference system for our data,
if needed:
```{r}
library(sf)
library(tidyr)
library(dplyr)
library(waywiser)
invisible(sf_proj_network(TRUE))
```
The data we're working with is extremely simple, reflecting the number of trees
and amount of aboveground biomass ("AGB", the total amount of aboveground woody
bits) at a number of plots across New York State. We can plot it to see that
there's some obvious spatial dependence in this data -- certain regions have
clusters of much higher AGB values, while other areas (such as the area around
New York City to the south) have clusters of much lower AGB.
```{r}
library(ggplot2)
ny_trees %>%
ggplot() +
geom_sf(aes(color = agb), alpha = 0.4) +
scale_color_distiller(palette = "Greens", direction = 1)
```
Because our focus here is on model _assessment_, not model fitting, we're going
to use an extremely simple linear regression to model AGB across the
state. We'll model AGB as a linear function of the number of trees at
each plot, then use that model to predict expected AGB:
```{r}
agb_lm <- lm(agb ~ n_trees, ny_trees)
ny_trees$predicted <- predict(agb_lm, ny_trees)
```
Now we're ready to perform our multi-scale assessments. The `ww_multi_scale()`
function supports two different methods for performing assessments: first,
you can pass arguments to `sf::st_make_grid()` (via `...`), specifying the sort
of grids that you want to make. For instance, if we wanted to make grids with
apothems (the distance from the middle of a grid cell to the middle of its
sides) ranging from 10km to 100km long, we can call the function like this:
```{r}
cell_sizes <- seq(10, 100, 10) * 1000
ny_multi_scale <- ww_multi_scale(
ny_trees,
agb,
predicted,
cellsize = cell_sizes
)
ny_multi_scale
```
We've now got a tibble with estimates for our model's RMSE and MAE at each scale
of aggregation! We can use this information to better understand how our model
does when predictions are being aggregated across larger units than a single
plot; for instance, our model _generally_ does better at larger scales of
aggregation:
```{r}
ny_multi_scale %>%
unnest(.grid_args) %>%
ggplot(aes(x = cellsize, y = .estimate, color = .metric)) +
geom_line()
```
Note that we used the `.grid_args` column, which stores the arguments we used to
make the grid, to associate our performance estimates with their corresponding
`cellsize`.
In addition to our top-level performance estimates, our `ny_multi_scale` object
also includes our true and estimated AGB, aggregated to each scale, in the
`.grid` column. This lets us easily check what our predictions look like at each
level of aggregation:
```{r}
ny_multi_scale$.grid[[9]] %>%
filter(!is.na(.estimate)) %>%
ggplot(aes(fill = .estimate)) +
geom_sf() +
scale_fill_distiller(palette = "Greens", direction = 1)
```
In addition to specifying systematic grids via `sf::st_make_grid()`,
`ww_multi_scale()` also allows you to provide your own aggregation units. For
instance, we can use the `tigris` package to download census block group
boundaries, as well as county and county subdivision boundaries, for the state
of New York:
```{r message=FALSE, results='hide'}
suppressPackageStartupMessages(library(tigris))
ny_block_groups <- block_groups("NY")
ny_county_subdivisions <- county_subdivisions("NY")
ny_counties <- counties("NY")
```
We can then provide those `sf` objects straight to `ww_multi_scale`:
```{r warning=FALSE}
ny_division_assessment <- ww_multi_scale(
ny_trees,
agb,
predicted,
grids = list(
ny_block_groups,
ny_county_subdivisions,
ny_counties
)
)
ny_division_assessment %>%
mutate(
division = rep(c("Block group", "County subdivision", "County"), each = 2)
) %>%
ggplot(aes(x = division, y = .estimate, fill = .metric)) +
geom_col(position = position_dodge())
```
By providing grids directly to `ww_multi_scale()`, we can see how well our model
performs when we aggregate predictions to more semantically meaningful levels
than the systematic grids.
|
/scratch/gouwar.j/cran-all/cranData/waywiser/inst/doc/multi-scale-assessment.Rmd
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = rlang::is_installed("ggplot2")
)
## ----message=FALSE------------------------------------------------------------
# waywiser itself, of course:
library(waywiser)
# For the %>% pipe and mutate:
library(dplyr)
## -----------------------------------------------------------------------------
guerry %>%
mutate(pred = predict(lm(Crm_prs ~ Litercy, .))) %>%
ww_local_moran_i(Crm_prs, pred)
## -----------------------------------------------------------------------------
ww_build_neighbors(guerry)
ww_build_weights(guerry)
## -----------------------------------------------------------------------------
weights <- guerry %>%
sf::st_geometry() %>%
sf::st_centroid() %>%
spdep::dnearneigh(0, 97000) %>%
spdep::nb2listw()
weights
guerry %>%
mutate(pred = predict(lm(Crm_prs ~ Litercy, .))) %>%
ww_local_moran_i(Crm_prs, pred, weights)
## -----------------------------------------------------------------------------
weights_function <- function(data) {
data %>%
sf::st_geometry() %>%
sf::st_centroid() %>%
spdep::dnearneigh(0, 97000) %>%
spdep::nb2listw()
}
guerry %>%
mutate(pred = predict(lm(Crm_prs ~ Litercy, .))) %>%
ww_local_moran_i(Crm_prs, pred, weights_function)
## ----2022_06_29-guerry, fig.width=8-------------------------------------------
library(ggplot2)
weights <- ww_build_weights(guerry)
guerry %>%
mutate(
pred = predict(lm(Crm_prs ~ Litercy, .)),
.estimate = ww_local_moran_i_vec(Crm_prs, pred, weights)
) %>%
sf::st_as_sf() %>%
ggplot(aes(fill = .estimate)) +
geom_sf() +
scale_fill_gradient2(
"Local Moran",
low = "#018571",
mid = "white",
high = "#A6611A"
)
## -----------------------------------------------------------------------------
moran <- yardstick::metric_set(
ww_global_moran_i,
ww_global_moran_pvalue
)
guerry %>%
mutate(pred = predict(lm(Crm_prs ~ Litercy, .))) %>%
moran(Crm_prs, pred)
## ----2023_02_21-guerryp, fig.width=8------------------------------------------
guerry %>%
mutate(
pred = predict(lm(Crm_prs ~ Litercy, .)),
.estimate = ww_local_moran_pvalue_vec(Crm_prs, pred, weights)
) %>%
sf::st_as_sf() %>%
ggplot(aes(fill = .estimate < 0.01)) +
geom_sf() +
scale_fill_discrete("Local Moran p-value < 0.01?") +
theme(legend.position = "bottom")
|
/scratch/gouwar.j/cran-all/cranData/waywiser/inst/doc/residual-autocorrelation.R
|
---
title: "Calculating residual spatial autocorrelation"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Calculating residual spatial autocorrelation}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = rlang::is_installed("ggplot2")
)
```
Perhaps the most famous sentence in spatial analysis is Tobler's first law of
geography, from [Tobler (1970)](https://doi.org/10.2307/143141): "Everything is
related to everything else, but near things are more related than distant
things." Spatial data often exhibits spatial autocorrelation, where variables of
interest are not distributed at random but rather exhibit spatial patterns; in
particular, spatial data is often clustered (exhibiting positive spatial
autocorrelation) such that locations near each other are _more similar_ than
you'd expect if you had just sampled two observations at random.
For some data, this makes intuitive sense. The elevation at two neighboring
points is extremely likely to be similar, as is the precipitation and
temperature; these are variables whose values depend on (among other things)
your position on the Earth. However, the first law is often over-interpreted.
[Pebesma and Bivand (2022)](https://r-spatial.org/book/15-Measures.html) present
an interesting discussion of the "first law", quoting
[Olsson (1970)](https://doi.org/10.2307/143140) who says:
> [T]he fact that the autocorrelations seem to hide systematic specification errors suggests that the elevation of this statement to the status of ‘the first law of geography’ is at best premature. At worst, the statement may represent the spatial variant of the post hoc fallacy, which would mean that coincidence has been mistaken for a causal relation.
Oftentimes, finding spatial autocorrelation in a variable is a result of that
variable depending on _other_ variables, which may or may not be spatially
dependent themselves. For instance, house prices often exhibit positive
autocorrelation, not because home prices are determined by their relative
position on Earth, but because house prices rely upon other variables -- school
zones, median income, housing availability and more -- which may themselves
be spatially autocorrelated.
For that reason, it's often worthwhile to look at the spatial autocorrelation of
model residuals, to see if your model makes more errors in certain regions than
you'd expect if errors were randomly arranged. That can help you to identify
misspecifications in your model: seeing large autocorrelations in model
residuals in an area might suggest that you're missing variables in your model,
and knowing which areas your model does worse in can help you to identify what
those variables might be. Even if you can't fix your model, it's often useful to
identify regions your model does notably worse in, so that you can communicate
that to whoever winds up using your predictions.
Let's walk through how we can use waywiser to find local indicators of spatial autocorrelation for a very simple model. First things first, let's load a few libraries:
```{r message=FALSE}
# waywiser itself, of course:
library(waywiser)
# For the %>% pipe and mutate:
library(dplyr)
```
We'll be working with the `guerry` data included in the waywiser package. We'll fit a simple linear model relating crimes against persons to literacy, and then generate predictions from that model. We can use `ww_local_moran_i()` to calculate the local spatial autocorrelation of our residuals at each data point:
```{r}
guerry %>%
mutate(pred = predict(lm(Crm_prs ~ Litercy, .))) %>%
ww_local_moran_i(Crm_prs, pred)
```
If you're familiar with spdep, you can probably guess that waywiser is doing _something_ under the hood here to calculate which of our observations are neighbors, and how to create spatial weights from those neighborhoods. And that guess would be right -- waywiser is making use of two functions, `ww_build_neighbors()` and `ww_build_weights()`, in order to automatically calculate spatial weights for calculating metrics:
```{r}
ww_build_neighbors(guerry)
ww_build_weights(guerry)
```
These functions aren't always the best way to calculate spatial weights for your data, however. As a result, waywiser also lets you specify your own weights directly:
```{r}
weights <- guerry %>%
sf::st_geometry() %>%
sf::st_centroid() %>%
spdep::dnearneigh(0, 97000) %>%
spdep::nb2listw()
weights
guerry %>%
mutate(pred = predict(lm(Crm_prs ~ Litercy, .))) %>%
ww_local_moran_i(Crm_prs, pred, weights)
```
Or as a function, which lets you use custom weights with other tidymodels functions like `fit_resamples()`:
```{r}
weights_function <- function(data) {
data %>%
sf::st_geometry() %>%
sf::st_centroid() %>%
spdep::dnearneigh(0, 97000) %>%
spdep::nb2listw()
}
guerry %>%
mutate(pred = predict(lm(Crm_prs ~ Litercy, .))) %>%
ww_local_moran_i(Crm_prs, pred, weights_function)
```
Providing custom weights also lets us use `ww_local_moran_i_vec()` to add a column to our original data frame with our statistic, which makes plotting using our original geometries easier:
```{r 2022_06_29-guerry, fig.width=8}
library(ggplot2)
weights <- ww_build_weights(guerry)
guerry %>%
mutate(
pred = predict(lm(Crm_prs ~ Litercy, .)),
.estimate = ww_local_moran_i_vec(Crm_prs, pred, weights)
) %>%
sf::st_as_sf() %>%
ggplot(aes(fill = .estimate)) +
geom_sf() +
scale_fill_gradient2(
"Local Moran",
low = "#018571",
mid = "white",
high = "#A6611A"
)
```
This makes it easy to see what areas are poorly represented by our model (which have the highest local Moran values), which might lead us to identify ways to improve our model or help us identify caveats and limitations of the models we're working with.
Other functions in waywiser will allow you to calculate the p-value associated with spatial autocorrelation metrics. You can calculate these alongside the autocorrelation metrics themselves using `yardstick::metric_set()`:
```{r}
moran <- yardstick::metric_set(
ww_global_moran_i,
ww_global_moran_pvalue
)
guerry %>%
mutate(pred = predict(lm(Crm_prs ~ Litercy, .))) %>%
moran(Crm_prs, pred)
```
These functions can also be used on their own to help qualitatively identify regions of concern, which may be poorly represented by your model:
```{r 2023_02_21-guerryp, fig.width=8}
guerry %>%
mutate(
pred = predict(lm(Crm_prs ~ Litercy, .)),
.estimate = ww_local_moran_pvalue_vec(Crm_prs, pred, weights)
) %>%
sf::st_as_sf() %>%
ggplot(aes(fill = .estimate < 0.01)) +
geom_sf() +
scale_fill_discrete("Local Moran p-value < 0.01?") +
theme(legend.position = "bottom")
```
This can help identify new predictor variables or other promising refinements to a model during the iterative process of model development. You shouldn't report p-values without other context as results of your model, but this approach can help qualitatively assess a model during the development process. To use these tests for inference, consider using functions from spdep directly; each autocorrelation function in waywiser links to the spdep function it wraps from its documentation.
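If you do want to run the underlying spdep machinery yourself, a minimal sketch might look like the following, reusing the `weights` object built earlier in this vignette and treating the model residuals as the variable of interest (this is only a sketch; check the spdep documentation before relying on it for inference):
```{r eval=FALSE}
# Sketch only: global Moran's I test on the residuals of the simple model above
resids <- guerry$Crm_prs - predict(lm(Crm_prs ~ Litercy, guerry))
spdep::moran.test(resids, weights)
```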
|
/scratch/gouwar.j/cran-all/cranData/waywiser/inst/doc/residual-autocorrelation.Rmd
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = rlang::is_installed("vip") && rlang::is_installed("ggplot2")
)
## ----setup--------------------------------------------------------------------
library(waywiser)
set.seed(1107)
worldclim_training <- sample(nrow(worldclim_simulation) * 0.8)
worldclim_testing <- worldclim_simulation[-worldclim_training, ]
worldclim_training <- worldclim_simulation[worldclim_training, ]
worldclim_model <- lm(
response ~ bio2 + bio10 + bio13 + bio19,
worldclim_training
)
worldclim_testing$predictions <- predict(
worldclim_model,
worldclim_testing
)
head(worldclim_testing)
## -----------------------------------------------------------------------------
ww_agreement_coefficient(
worldclim_testing,
truth = response,
estimate = predictions
)
ww_agreement_coefficient_vec(
truth = worldclim_testing$response,
estimate = worldclim_testing$predictions
)
## -----------------------------------------------------------------------------
ww_global_geary_c(
worldclim_testing,
truth = response,
estimate = predictions
)
## -----------------------------------------------------------------------------
ww_global_geary_c(
worldclim_testing,
truth = response,
estimate = predictions,
wt = ww_build_weights(worldclim_testing)
)
ww_global_geary_c(
worldclim_testing,
truth = response,
estimate = predictions,
wt = ww_build_weights
)
## -----------------------------------------------------------------------------
yardstick::metric_set(
ww_agreement_coefficient,
ww_global_geary_c
)(worldclim_testing,
truth = response,
estimate = predictions)
## -----------------------------------------------------------------------------
ww_multi_scale(
worldclim_testing,
truth = response,
estimate = predictions,
metrics = list(ww_agreement_coefficient, yardstick::rmse),
n = list(c(2, 4))
)
## -----------------------------------------------------------------------------
grid <- sf::st_make_grid(worldclim_testing, n = c(2, 4))
ww_multi_scale(
worldclim_testing,
truth = response,
estimate = predictions,
metrics = list(ww_agreement_coefficient, yardstick::rmse),
grids = list(grid)
)
## -----------------------------------------------------------------------------
worldclim_aoa <- ww_area_of_applicability(
response ~ bio2 + bio10 + bio13 + bio19,
worldclim_training,
importance = vip::vi_model(worldclim_model)
)
worldclim_aoa
## -----------------------------------------------------------------------------
worldclim_testing <- cbind(
worldclim_testing,
predict(worldclim_aoa, worldclim_testing)
)
head(worldclim_testing)
## -----------------------------------------------------------------------------
library(ggplot2)
ggplot(worldclim_testing, aes(di, abs(response - predictions), color = aoa)) +
geom_point(alpha = 0.6)
|
/scratch/gouwar.j/cran-all/cranData/waywiser/inst/doc/waywiser.R
|
---
title: "Assessing models with waywiser"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Assessing Models with waywiser}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = rlang::is_installed("vip") && rlang::is_installed("ggplot2")
)
```
The waywiser package aims to be an ergonomic toolbox providing a consistent user
interface for assessing spatial models. To that end, waywiser does three main
things:
1. Provides new [yardstick](https://yardstick.tidymodels.org/) extensions,
making it easier to use performance metrics from the spatial modeling
literature via a standardized API.
2. Provides a new function, `ww_multi_scale()`, which makes it easy to see how
model performance metrics change when predictions are aggregated to various
scales.
3. Provides an implementation of the
[area of applicability from Meyer and Pebesma 2021](https://doi.org/10.1111/2041-210X.13650),
extending this tool to work with tidymodels infrastructure.
This vignette will walk through each of these goals in turn. Before we do that,
let's set up the data we'll use in examples. We'll be using simulated data
based on [Worldclim](https://www.worldclim.org/) variables; our predictors here
represent temperature and precipitation values at sampled locations, while
our response represents a virtual species distribution:
```{r setup}
library(waywiser)
set.seed(1107)
worldclim_training <- sample(nrow(worldclim_simulation) * 0.8)
worldclim_testing <- worldclim_simulation[-worldclim_training, ]
worldclim_training <- worldclim_simulation[worldclim_training, ]
worldclim_model <- lm(
response ~ bio2 + bio10 + bio13 + bio19,
worldclim_training
)
worldclim_testing$predictions <- predict(
worldclim_model,
worldclim_testing
)
head(worldclim_testing)
```
## Yardstick Extensions
First and foremost, waywiser provides a host of new yardstick metrics to provide
a standardized interface for various performance metrics.
All of these functions work more or less the same way: you provide your data,
the names of your "true" values and predicted values, and get back a
standardized output format. As usual with yardstick, that output can be either
a tibble or a vector. For instance, if we want to calculate the agreement
coefficient from [Ji and Gallo 2006](https://doi.org/10.14358/PERS.72.7.823):
```{r}
ww_agreement_coefficient(
worldclim_testing,
truth = response,
estimate = predictions
)
ww_agreement_coefficient_vec(
truth = worldclim_testing$response,
estimate = worldclim_testing$predictions
)
```
Some of these additional metrics are implemented by wrapping functions from the
[spdep](https://r-spatial.github.io/spdep/) package:
```{r}
ww_global_geary_c(
worldclim_testing,
truth = response,
estimate = predictions
)
```
These functions rely on calculating the spatial neighbors of each observation.
The waywiser package will automatically use `ww_build_weights()` to calculate
spatial weights if none are provided, but this default is often not desirable.
For that reason, these
functions all have a `wt` argument, which can take either pre-calculated weights
or a function that will create spatial weights:
```{r}
ww_global_geary_c(
worldclim_testing,
truth = response,
estimate = predictions,
wt = ww_build_weights(worldclim_testing)
)
ww_global_geary_c(
worldclim_testing,
truth = response,
estimate = predictions,
wt = ww_build_weights
)
```
Because these are yardstick metrics, they can be used with
`yardstick::metric_set()` and other tidymodels infrastructure:
```{r}
yardstick::metric_set(
ww_agreement_coefficient,
ww_global_geary_c
)(worldclim_testing,
truth = response,
estimate = predictions)
```
## Multi-scale model assessment
A common pattern with spatial models is that you need to predict observation
units -- pixels of a raster or individual points -- which will be aggregated to
arbitrary scales, such as towns or parcel boundaries. Because errors can be
spatially distributed, or can either compound or counteract each other when
aggregated, [some assessment protocols](https://doi.org/10.1016/j.rse.2010.05.010)
recommend assessing model predictions aggregated to multiple scales.
The `ww_multi_scale()` function helps automate this process. The interface for
this function works similarly to that for yardstick metrics -- you provide your
data, your true values, and your estimate -- except you also must provide
instructions for how to aggregate your data. You can do this by passing
arguments that will be used by `sf::st_make_grid()`; for instance, we can use
the `n` argument to control how many polygons our grid has in the x and y
directions.
Note that each element of the argument vector is used to make a separate grid --
so, for instance, passing `n = c(2, 4)` will result in one 2-by-2 grid and one
4-by-4 grid, because `n[[1]]` is 2 and `n[[2]]` is 4. If we actually wanted to
create a single 2-by-4 grid, by passing `sf::st_make_grid()` the argument
`n = c(2, 4)`, we would need to wrap that vector in a list so that `n[[1]]`
returns `c(2, 4)`:
```{r}
ww_multi_scale(
worldclim_testing,
truth = response,
estimate = predictions,
metrics = list(ww_agreement_coefficient, yardstick::rmse),
n = list(c(2, 4))
)
```
You can also pass polygons directly, if you have pre-defined grids you'd like
to use:
```{r}
grid <- sf::st_make_grid(worldclim_testing, n = c(2, 4))
ww_multi_scale(
worldclim_testing,
truth = response,
estimate = predictions,
metrics = list(ww_agreement_coefficient, yardstick::rmse),
grids = list(grid)
)
```
## Area of Applicability
Last but not least, we can also see if there are any areas in our data that are
too different from our training data for us to safely predict on, falling
outside the "area of applicability" defined by
[Meyer and Pebesma (2021)](https://doi.org/10.1111/2041-210X.13650). This
approach looks at how similar the predictor values of new data are to the data
you used to train your model, with each predictor weighted by how important it
is to your model.
In order to calculate your area of applicability, you can pass
`ww_area_of_applicability()` information about which of your variables are used
as predictors in your model, your training data, and the importance scores for
each of your variables. Out of the box, waywiser should work with any of the
importance score-calculating functions from the vip package:
```{r}
worldclim_aoa <- ww_area_of_applicability(
response ~ bio2 + bio10 + bio13 + bio19,
worldclim_training,
importance = vip::vi_model(worldclim_model)
)
worldclim_aoa
```
You can also pass a data.frame with columns named "term" and "estimate"
(containing the name of each term, or predictor, and their estimated importance)
rather than using the vip package if that's more convenient.
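For instance, a minimal sketch with hand-specified importance scores might look like the following (the `estimate` values here are invented purely for illustration, not derived from the fitted model):
```{r eval=FALSE}
# Sketch only: importance values below are hypothetical
manual_importance <- data.frame(
  term = c("bio2", "bio10", "bio13", "bio19"),
  estimate = c(0.2, 0.4, 0.1, 0.3)
)
ww_area_of_applicability(
  response ~ bio2 + bio10 + bio13 + bio19,
  worldclim_training,
  importance = manual_importance
)
```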
The objects returned by `ww_area_of_applicability()` are models in their own
right, which can be used by functions such as `predict()` to calculate if new
observations are in the area of applicability of a model.
```{r}
worldclim_testing <- cbind(
worldclim_testing,
predict(worldclim_aoa, worldclim_testing)
)
head(worldclim_testing)
```
The predict function returns the "distance index", or "di", for each
observation: a score of how far away the observation is, in predictor space,
from your training data. Points with a "di" higher than a set threshold are
"outside" the area of applicability. We can visualize our test set here to see
that our model often, but not always, performs worse on observations with a
higher "di":
```{r}
library(ggplot2)
ggplot(worldclim_testing, aes(di, abs(response - predictions), color = aoa)) +
geom_point(alpha = 0.6)
```
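If you just want a quick tally of how many test-set observations fall inside and outside the area of applicability, the `aoa` column added by `predict()` (used as the color aesthetic above) makes that a one-liner:
```{r eval=FALSE}
table(worldclim_testing$aoa)
```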
|
/scratch/gouwar.j/cran-all/cranData/waywiser/inst/doc/waywiser.Rmd
|
# This file generates helpers for compatibility with srr
# and is not a part of the waywiser package proper
devtools::load_all()
read_utf8 <- function(x) base::readLines(x, encoding = "UTF-8", warn = FALSE)
nonspatial_yardstick <- c(
"ww_agreement_coefficient",
"ww_systematic_agreement_coefficient",
"ww_unsystematic_agreement_coefficient",
"ww_unsystematic_mpd",
"ww_systematic_mpd",
"ww_unsystematic_rmpd",
"ww_systematic_rmpd",
"ww_willmott_d",
"ww_willmott_d1",
"ww_willmott_dr",
"ww_systematic_mse",
"ww_unsystematic_mse",
"ww_systematic_rmse",
"ww_unsystematic_rmse"
)
nonspatial_yardstick_template <- read_utf8("inst/srr_template_nonspatial_yardstick.R")
for (name in nonspatial_yardstick) {
n_sims <- ""
tolerance <- ""
generated_template <- whisker::whisker.render(nonspatial_yardstick_template)
generated_template <- c(
"# This file was generated, do not edit by hand",
"# Please edit inst/srr_template_nonspatial_yardstick.R instead",
"",
generated_template
)
writeLines(
generated_template,
file.path("tests", "testthat", paste0("test-srr-", name, ".R"))
)
}
spatial_yardstick <- c(
"ww_global_moran_i",
"ww_global_moran_pvalue",
"ww_local_moran_i",
"ww_local_moran_pvalue",
"ww_global_geary_c",
"ww_global_geary_pvalue",
"ww_local_geary_c",
"ww_local_geary_pvalue",
"ww_local_getis_ord_g",
"ww_local_getis_ord_g_pvalue"
)
spatial_yardstick_template <- read_utf8("inst/srr_template_spatial_yardstick.R")
for (name in spatial_yardstick) {
n_sims <- switch(
name,
"ww_local_geary_pvalue" = ", nsim = 10000",
"ww_local_getis_ord_g_pvalue" = ", nsim = 10000",
""
)
tolerance <- switch(
name,
"ww_local_geary_pvalue" = ", tolerance = 0.1",
"ww_local_getis_ord_g_pvalue" = ", tolerance = 0.03",
""
)
generated_template <- whisker::whisker.render(spatial_yardstick_template)
generated_template <- c(
"# This file was generated, do not edit by hand",
"# Please edit inst/srr_template_spatial_yardstick.R instead",
"",
generated_template
)
writeLines(
generated_template,
file.path("tests", "testthat", paste0("test-srr-", name, ".R"))
)
styler::style_file(file.path("tests", "testthat", paste0("test-srr-", name, ".R")))
}
|
/scratch/gouwar.j/cran-all/cranData/waywiser/inst/generate_srr.R
|
test_that("srr: {{{name}}} errors if truth and estimate are different lengths", {
# Note that this test isn't applicable to data-frame input, which enforces
# constant column lengths
expect_snapshot(
{{{name}}}_vec(1:5, 1:4),
error = TRUE
)
expect_snapshot(
{{{name}}}_vec(1:4, 1:5),
error = TRUE
)
})
test_that("srr: {{{name}}} errors if truth and estimate aren't numeric", {
char_df <- tibble::tibble(x = 1:5, y = letters[1:5])
expect_snapshot(
{{{name}}}(char_df, x, y),
error = TRUE
)
expect_snapshot(
{{{name}}}(char_df, y, x),
error = TRUE
)
expect_snapshot(
{{{name}}}_vec(as.character(1:5), 1:4),
error = TRUE
)
expect_snapshot(
{{{name}}}_vec(1:5, as.character(1:4)),
error = TRUE
)
})
test_that("srr: {{{name}}} errors if truth and estimate are list columns", {
list_df <- tibble::tibble(x = 1:5, y = lapply(1:5, function(x) x))
expect_snapshot(
{{{name}}}(list_df, x, y),
error = TRUE
)
expect_snapshot(
{{{name}}}(list_df, y, x),
error = TRUE
)
})
test_that("srr: {{{name}}} removes NaN and NA when na_rm = TRUE", {
missing_df <- tibble::tibble(x = c(NaN, 2:5), y = c(1:4, NA))
expect_snapshot(
round({{{name}}}(missing_df, x, y)$.estimate, 15),
)
expect_snapshot(
round({{{name}}}(missing_df, y, x)$.estimate, 15),
)
expect_snapshot(
round({{{name}}}_vec(missing_df$y, missing_df$x), 15),
)
expect_snapshot(
round({{{name}}}_vec(missing_df$x, missing_df$y), 15),
)
})
test_that("srr: {{{name}}} returns NA when na_rm = FALSE and NA is present", {
missing_df <- tibble::tibble(x = c(NaN, 2:5), y = c(1:4, NA))
expect_identical(
{{{name}}}(missing_df, y, x, na_rm = FALSE)$.estimate,
NA_real_
)
expect_identical(
{{{name}}}(missing_df, x, y, na_rm = FALSE)$.estimate,
NA_real_
)
expect_identical(
{{{name}}}_vec(missing_df$y, missing_df$x, na_rm = FALSE),
NA_real_
)
expect_identical(
{{{name}}}_vec(missing_df$x, missing_df$y, na_rm = FALSE),
NA_real_
)
})
test_that("srr: {{{name}}} errors on zero-length data", {
expect_snapshot(
{{{name}}}_vec(numeric(), numeric()),
error = TRUE
)
empty_df <- tibble::tibble(x = numeric(), y = numeric())
expect_snapshot(
{{{name}}}(empty_df, x, y),
error = TRUE
)
expect_snapshot(
{{{name}}}(empty_df, y, x),
error = TRUE
)
})
test_that("srr: {{{name}}} errors on all-NA data", {
expect_snapshot(
{{{name}}}_vec(rep(NA_real_, 4), 4:1),
error = TRUE
)
expect_snapshot(
{{{name}}}_vec(1:4, rep(NA_real_, 4)),
error = TRUE
)
all_na <- tibble::tibble(x = rep(NA_real_, 4), y = 1:4)
expect_snapshot(
{{{name}}}(all_na, x, y),
error = TRUE
)
expect_snapshot(
{{{name}}}(all_na, y, x),
error = TRUE
)
expect_snapshot(
{{{name}}}_vec(1:4, 1:4)
)
})
test_that("srr: {{{name}}} works with all identical data", {
all_identical <- tibble::tibble(x = 1:4, y = 1:4)
expect_snapshot(
{{{name}}}(all_identical, x, y)
)
expect_snapshot(
{{{name}}}_vec(1:4, 1:4)
)
all_identical <- tibble::tibble(x = 1:4, y = 1:4)
expect_snapshot(
{{{name}}}(all_identical, x, y)
)
})
test_that("srr: {{{name}}} results don't change with trivial noise", {
skip_if_not_installed("withr")
x <- c(6, 8, 9, 10, 11, 14)
y <- c(2, 3, 5, 5, 6, 8)
df <- tibble::tibble(x = x, y = y)
noised_x <- x + rnorm(x, .Machine$double.eps, .Machine$double.eps)
noised_df <- tibble::tibble(x = noised_x, y = y)
expect_equal(
{{{name}}}(noised_df, x, y),
{{{name}}}(df, x, y)
)
expect_equal(
{{{name}}}(noised_df, y, x),
{{{name}}}(df, y, x)
)
expect_equal(
{{{name}}}_vec(noised_x, y),
{{{name}}}_vec(x, y)
)
expect_equal(
{{{name}}}_vec(y, noised_x),
{{{name}}}_vec(y, x)
)
})
test_that("srr: {{{name}}} results don't change with different seeds", {
skip_if_not_installed("withr")
x <- c(6, 8, 9, 10, 11, 14)
y <- c(2, 3, 5, 5, 6, 8)
df <- tibble::tibble(x = x, y = y)
expect_equal(
withr::with_seed(
123,
{{{name}}}(df, x, y)
),
withr::with_seed(
1107,
{{{name}}}(df, x, y)
)
)
expect_equal(
withr::with_seed(
123,
{{{name}}}(df, y, x)
),
withr::with_seed(
1107,
{{{name}}}(df, y, x)
)
)
expect_equal(
withr::with_seed(
123,
{{{name}}}_vec(x, y)
),
withr::with_seed(
1107,
{{{name}}}_vec(x, y)
)
)
expect_equal(
withr::with_seed(
123,
{{{name}}}_vec(y, x)
),
withr::with_seed(
1107,
{{{name}}}_vec(y, x)
)
)
})
|
/scratch/gouwar.j/cran-all/cranData/waywiser/inst/srr_template_nonspatial_yardstick.R
|
test_that("srr: expected failures for {{{name}}}", {
worldclim_predicted <- worldclim_simulation
worldclim_predicted$predicted <- predict(
lm(response ~ bio2 * bio10 * bio13 * bio19, data = worldclim_simulation),
worldclim_simulation
)
worldclim_weights <- ww_build_weights(worldclim_simulation)
# Note that this test isn't applicable to data-frame input, which enforces
# constant column lengths
#' @srrstats {G5.2} Testing errors
#' @srrstats {G5.2b} Testing errors
#' @srrstats {G2.0} Truth and estimate are equal in length:
expect_snapshot(
{{{name}}}_vec(
worldclim_predicted$response,
tail(worldclim_predicted$predicted, -1),
worldclim_weights
),
error = TRUE
)
#' @srrstats {G5.2} Testing errors
#' @srrstats {G5.2b} Testing errors
#' @srrstats {G2.0} Truth and estimate are equal in length:
expect_snapshot(
{{{name}}}_vec(
tail(worldclim_predicted$response, -1),
worldclim_predicted$predicted,
worldclim_weights
),
error = TRUE
)
worldclim_predicted$predicted <- as.character(worldclim_predicted$predicted)
#' @srrstats {G5.2} Testing errors
#' @srrstats {G5.2b} Testing errors
#' @srrstats {G5.8b} Data of unsupported types
#' @srrstats {G2.1} Truth and estimate are numeric:
expect_snapshot(
{{{name}}}(worldclim_predicted, predicted, response),
error = TRUE
)
#' @srrstats {G5.2} Testing errors
#' @srrstats {G5.2b} Testing errors
#' @srrstats {G5.8b} Data of unsupported types
#' @srrstats {G2.1} Truth and estimate are numeric:
expect_snapshot(
{{{name}}}(worldclim_predicted, response, predicted),
error = TRUE
)
#' @srrstats {G5.2} Testing errors
#' @srrstats {G5.2b} Testing errors
#' @srrstats {G5.8b} Data of unsupported types
#' @srrstats {G2.1} Truth and estimate are numeric:
expect_snapshot(
{{{name}}}_vec(
worldclim_predicted$response,
worldclim_predicted$predicted,
worldclim_weights
),
error = TRUE
)
#' @srrstats {G5.2} Testing errors
#' @srrstats {G5.2b} Testing errors
#' @srrstats {G5.8b} Data of unsupported types
#' @srrstats {G2.1} Truth and estimate are numeric:
expect_snapshot(
{{{name}}}_vec(
worldclim_predicted$predicted,
worldclim_predicted$response,
worldclim_weights
),
error = TRUE
)
worldclim_predicted$predicted <- lapply(
as.numeric(worldclim_predicted$predicted),
function(x) (x)
)
#' @srrstats {G5.2} Testing errors
#' @srrstats {G5.2b} Testing errors
#' @srrstats {G5.8b} Data of unsupported types
#' @srrstats {G2.12} List column inputs fail:
expect_snapshot(
{{{name}}}(worldclim_predicted, response, predicted),
error = TRUE
)
#' @srrstats {G5.2} Testing errors
#' @srrstats {G5.2b} Testing errors
#' @srrstats {G5.8b} Data of unsupported types
#' @srrstats {G2.12} List column inputs fail:
expect_snapshot(
{{{name}}}(worldclim_predicted, predicted, response),
error = TRUE
)
worldclim_predicted$predicted <- unlist(worldclim_predicted$predicted)
#' @srrstats {G2.13} Missing data is properly handled
#' @srrstats {G2.15} Missingness is checked
#' @srrstats {G2.14} Users can specify behavior with NA results
#' @srrstats {G2.16} NaN is properly handled
#' Users can error:
worldclim_predicted$response[4] <- NA_real_
expect_snapshot(
{{{name}}}(worldclim_predicted, predicted, response)$.estimate,
error = TRUE
)
#' Users can error:
expect_snapshot(
{{{name}}}(worldclim_predicted, response, predicted)$.estimate,
error = TRUE
)
#' Users can error:
expect_snapshot(
{{{name}}}_vec(worldclim_predicted$predicted, worldclim_predicted$response, worldclim_weights),
error = TRUE
)
#' Users can error:
expect_snapshot(
{{{name}}}_vec(worldclim_predicted$response, worldclim_predicted$predicted, worldclim_weights),
error = TRUE
)
#' @srrstats {G5.8} Edge condition tests
#' @srrstats {G5.8a} Zero-length data:
expect_snapshot(
{{{name}}}_vec(numeric(), numeric(), structure(list(), class = "listw")),
error = TRUE
)
empty_df <- tibble::tibble(x = numeric(), y = numeric())
#' @srrstats {G5.8} Edge condition tests
#' @srrstats {G5.8a} Zero-length data:
expect_snapshot(
{{{name}}}(head(worldclim_predicted, 0), response, predicted, structure(list(), class = "listw")),
error = TRUE
)
#' @srrstats {G5.8} Edge condition tests
#' @srrstats {G5.8a} Zero-length data:
expect_snapshot(
{{{name}}}(head(worldclim_predicted, 0), predicted, response, structure(list(), class = "listw")),
error = TRUE
)
#' @srrstats {G5.8} Edge condition tests
#' @srrstats {G5.8c} All-NA:
expect_snapshot(
{{{name}}}_vec(NA_real_, NA_real_, structure(list(neighbours = 1), class = "listw")),
error = TRUE
)
worldclim_predicted$response <- NA_real_
#' @srrstats {G5.8} Edge condition tests
#' @srrstats {G5.8c} All-NA:
expect_snapshot(
{{{name}}}(worldclim_predicted, response, predicted)$.estimate,
error = TRUE
)
#' @srrstats {G5.8} Edge condition tests
#' @srrstats {G5.8c} All-NA:
expect_snapshot(
{{{name}}}(worldclim_predicted, predicted, response)$.estimate,
error = TRUE
)
#' @srrstats {G5.8} Edge condition tests
#' @srrstats {G5.8c} All-identical:
expect_snapshot(
{{{name}}}_vec(worldclim_simulation$response, worldclim_simulation$response, worldclim_weights)
)
#' @srrstats {G5.8} Edge condition tests
#' @srrstats {G5.8c} All-identical:
expect_snapshot(
{{{name}}}(worldclim_simulation, response, response)
)
})
test_that("other generic srr standards", {
skip_if_not_installed("withr")
worldclim_predicted <- worldclim_simulation
worldclim_predicted$predicted <- predict(
lm(response ~ bio2 * bio10 * bio13 * bio19, data = worldclim_simulation),
worldclim_simulation
)
noised_worldclim <- worldclim_predicted + rnorm(
nrow(worldclim_predicted) * ncol(worldclim_predicted),
.Machine$double.eps,
.Machine$double.eps
)
noised_worldclim <- sf::st_as_sf(
noised_worldclim,
crs = sf::st_crs(worldclim_predicted)
)
worldclim_weights <- ww_build_weights(worldclim_simulation)
noised_weights <- ww_build_weights(noised_worldclim)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {G5.9} Noise susceptibility tests
#' @srrstats {G5.9a} Trivial noise doesn't change results:
#' @srrstats {SP6.2} Testing with ~global data
expect_equal(
withr::with_seed(
123,
{{{name}}}(worldclim_predicted, response, predicted)
),
withr::with_seed(
123,
{{{name}}}(noised_worldclim, response, predicted)
)
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {G5.9} Noise susceptibility tests
#' @srrstats {G5.9a} Trivial noise doesn't change results:
#' @srrstats {SP6.2} Testing with ~global data
expect_equal(
withr::with_seed(
123,
{{{name}}}(worldclim_predicted, predicted, response)
),
withr::with_seed(
123,
{{{name}}}(noised_worldclim, predicted, response)
)
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {G5.9} Noise susceptibility tests
#' @srrstats {G5.9a} Trivial noise doesn't change results:
#' @srrstats {SP6.2} Testing with ~global data
expect_equal(
withr::with_seed(
123,
{{{name}}}_vec(worldclim_predicted$predicted, worldclim_predicted$response, worldclim_weights)
),
withr::with_seed(
123,
{{{name}}}_vec(noised_worldclim$predicted, noised_worldclim$response, noised_weights)
)
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {G5.9} Noise susceptibility tests
#' @srrstats {G5.9a} Trivial noise doesn't change results:
#' @srrstats {SP6.2} Testing with ~global data
expect_equal(
withr::with_seed(
123,
{{{name}}}_vec(worldclim_predicted$response, worldclim_predicted$predicted, worldclim_weights)
),
withr::with_seed(
123,
{{{name}}}_vec(noised_worldclim$response, noised_worldclim$predicted, noised_weights)
)
)
skip_on_cran()
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {G5.9} Noise susceptibility tests
#' @srrstats {G5.9b} Different seeds are equivalent:
#' @srrstats {SP6.2} Testing with ~global data
expect_equal(
withr::with_seed(
123,
{{{name}}}(worldclim_predicted, predicted, response{{{n_sims}}})
),
withr::with_seed(
1107,
{{{name}}}(worldclim_predicted, predicted, response{{{n_sims}}})
){{{tolerance}}}
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {G5.9} Noise susceptibility tests
#' @srrstats {G5.9b} Different seeds are equivalent:
#' @srrstats {SP6.2} Testing with ~global data
expect_equal(
withr::with_seed(
123,
{{{name}}}(worldclim_predicted, response, predicted{{{n_sims}}})
),
withr::with_seed(
1107,
{{{name}}}(worldclim_predicted, response, predicted{{{n_sims}}})
){{{tolerance}}}
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {G5.9} Noise susceptibility tests
#' @srrstats {G5.9b} Different seeds are equivalent:
#' @srrstats {SP6.2} Testing with ~global data
expect_equal(
withr::with_seed(
123,
{{{name}}}_vec(worldclim_predicted$response, worldclim_predicted$predicted, worldclim_weights{{{n_sims}}})
),
withr::with_seed(
1107,
{{{name}}}_vec(worldclim_predicted$response, worldclim_predicted$predicted, worldclim_weights{{{n_sims}}})
){{{tolerance}}}
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {G5.9} Noise susceptibility tests
#' @srrstats {G5.9b} Different seeds are equivalent:
#' @srrstats {SP6.2} Testing with ~global data
expect_equal(
withr::with_seed(
123,
{{{name}}}_vec(worldclim_predicted$predicted, worldclim_predicted$response, worldclim_weights{{{n_sims}}})
),
withr::with_seed(
1107,
{{{name}}}_vec(worldclim_predicted$predicted, worldclim_predicted$response, worldclim_weights{{{n_sims}}})
){{{tolerance}}}
)
guerry_modeled <- guerry
guerry_modeled$predictions <- predict(
lm(Crm_prs ~ Litercy, guerry),
guerry
)
guerry_modeled_geo <- sf::st_transform(guerry_modeled, 4326)
guerry_weights <- ww_build_weights(guerry)
guerry_weights_geo <- ww_build_weights(guerry_modeled_geo)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {SP6.1} Testing with both projected and geographic CRS
#' @srrstats {SP6.1b} Testing with both projected and geographic CRS
#' @srrstats {SP6.2} Testing with ~global data
expect_equal(
{{{name}}}(guerry_modeled, predictions, Crm_prs{{{n_sims}}})$.estimate,
{{{name}}}(guerry_modeled_geo, predictions, Crm_prs{{{n_sims}}})$.estimate{{{tolerance}}}
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {SP6.1} Testing with both projected and geographic CRS
#' @srrstats {SP6.1b} Testing with both projected and geographic CRS
#' @srrstats {SP6.2} Testing with ~global data
expect_equal(
{{{name}}}(guerry_modeled, Crm_prs, predictions{{{n_sims}}})$.estimate,
{{{name}}}(guerry_modeled_geo, Crm_prs, predictions{{{n_sims}}})$.estimate{{{tolerance}}}
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {SP6.1} Testing with both projected and geographic CRS
#' @srrstats {SP6.1b} Testing with both projected and geographic CRS
#' @srrstats {SP6.2} Testing with ~global data
expect_equal(
{{{name}}}_vec(guerry_modeled$Crm_prs, guerry_modeled$predictions, guerry_weights{{{n_sims}}}),
{{{name}}}_vec(guerry_modeled_geo$Crm_prs, guerry_modeled_geo$predictions, guerry_weights_geo{{{n_sims}}}){{{tolerance}}}
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {SP6.1} Testing with both projected and geographic CRS
#' @srrstats {SP6.1b} Testing with both projected and geographic CRS
#' @srrstats {SP6.2} Testing with ~global data
expect_equal(
{{{name}}}_vec(guerry_modeled$predictions, guerry_modeled$Crm_prs, guerry_weights{{{n_sims}}}),
{{{name}}}_vec(guerry_modeled_geo$predictions, guerry_modeled_geo$Crm_prs, guerry_weights_geo{{{n_sims}}}){{{tolerance}}}
)
#' @srrstats {SP2.3} Testing with loaded sf objects:
worldclim_loaded <- sf::read_sf(
system.file("worldclim_simulation.gpkg", package = "waywiser")
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {SP2.3} Testing with loaded sf objects:
#' @srrstats {SP6.2} Testing with ~global data
expect_snapshot(
withr::with_seed(
123,
{{{name}}}(worldclim_loaded, bio13, bio19)
)
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {SP2.3} Testing with loaded sf objects:
#' @srrstats {SP6.2} Testing with ~global data
expect_snapshot(
withr::with_seed(
123,
{{{name}}}(worldclim_loaded, bio13, bio19)
)
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {SP2.3} Testing with loaded sf objects:
#' @srrstats {SP6.2} Testing with ~global data
expect_snapshot(
withr::with_seed(
123,
{{{name}}}_vec(worldclim_loaded$bio13, worldclim_loaded$bio19, worldclim_weights)
)
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {SP2.3} Testing with loaded sf objects:
#' @srrstats {SP6.2} Testing with ~global data
expect_snapshot(
withr::with_seed(
123,
{{{name}}}_vec(worldclim_loaded$bio13, worldclim_loaded$bio19, worldclim_weights)
)
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {SP2.3} Testing with loaded sf objects:
#' @srrstats {SP6.2} Testing with ~global data
expect_snapshot(
withr::with_seed(
123,
{{{name}}}(worldclim_loaded, bio13, bio19)
)
)
other_weights <- ww_build_weights(ww_make_point_neighbors(worldclim_loaded, k = 5))
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {SP6.3} Testing alternative weights:
expect_snapshot(
withr::with_seed(
123,
{{{name}}}(worldclim_loaded, bio13, bio19, function(data) ww_build_weights(ww_make_point_neighbors(data, k = 5)))
)
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {SP6.3} Testing alternative weights:
expect_snapshot(
withr::with_seed(
123,
{{{name}}}_vec(worldclim_loaded$bio13, worldclim_loaded$bio19, other_weights)
)
)
#' @srrstats {G3.0} Testing with appropriate tolerances.
#' @srrstats {SP6.3} Testing alternative weights:
expect_snapshot(
withr::with_seed(
123,
{{{name}}}_vec(worldclim_loaded$bio13, worldclim_loaded$bio19, other_weights)
)
)
})
|
/scratch/gouwar.j/cran-all/cranData/waywiser/inst/srr_template_spatial_yardstick.R
|
---
title: "Multi-scale model assessment"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Multi-scale model assessment}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = rlang::is_installed("ggplot2") && rlang::is_installed("tigris")
)
```
```{r setup, include=FALSE}
ggplot2::theme_set(ggplot2::theme_minimal())
```
This vignette walks through how to use waywiser to assess model predictions at
multiple spatial scales, using the `ny_trees` data included in waywiser.
First things first, we'll set up our environment, loading a few packages
and telling sf to download the coordinate reference system for our data,
if needed:
```{r}
library(sf)
library(tidyr)
library(dplyr)
library(waywiser)
invisible(sf_proj_network(TRUE))
```
The data we're working with is extremely simple, reflecting the number of trees
and amount of aboveground biomass ("AGB", the total amount of aboveground woody
bits) at a number of plots across New York State. We can plot it to see that
there's some obvious spatial dependence in this data -- certain regions have
clusters of much higher AGB values, while other areas (such as the area around
New York City to the south) have clusters of much lower AGB.
```{r}
library(ggplot2)
ny_trees %>%
ggplot() +
geom_sf(aes(color = agb), alpha = 0.4) +
scale_color_distiller(palette = "Greens", direction = 1)
```
Because our focus here is on model _assessment_, not model fitting, we're going
to use an extremely simple linear regression to try and model AGB across the
state. We'll predict AGB as being a linear function of the number of trees at
each plot, and then we're going to use this model to predict expected AGB:
```{r}
agb_lm <- lm(agb ~ n_trees, ny_trees)
ny_trees$predicted <- predict(agb_lm, ny_trees)
```
Now we're ready to perform our multi-scale assessments. The `ww_multi_scale()`
function supports two different methods for performing assessments: first,
you can pass arguments to `sf::st_make_grid()` (via `...`), specifying the sort
of grids that you want to make. For instance, if we wanted to make grids with
apothems (the distance from the middle of a grid cell to the middle of its
sides) ranging from 10km to 100km long, we can call the function like this:
```{r}
cell_sizes <- seq(10, 100, 10) * 1000
ny_multi_scale <- ww_multi_scale(
ny_trees,
agb,
predicted,
cellsize = cell_sizes
)
ny_multi_scale
```
We've now got a tibble with estimates for our model's RMSE and MAE at each scale
of aggregation! We can use this information to better understand how our model
does when predictions are being aggregated across larger units than a single
plot; for instance, our model _generally_ does better at larger scales of
aggregation:
```{r}
ny_multi_scale %>%
unnest(.grid_args) %>%
ggplot(aes(x = cellsize, y = .estimate, color = .metric)) +
geom_line()
```
Note that we used the `.grid_args` column, which stores the arguments we used to
make the grid, to associate our performance estimates with their corresponding
`cellsize`.
In addition to our top-level performance estimates, our `ny_multi_scale` object
also includes our true and estimated AGB, aggregated to each scale, in the
`.grid` column. This lets us easily check what our predictions look like at each
level of aggregation:
```{r}
ny_multi_scale$.grid[[9]] %>%
filter(!is.na(.estimate)) %>%
ggplot(aes(fill = .estimate)) +
geom_sf() +
scale_fill_distiller(palette = "Greens", direction = 1)
```
In addition to specifying systematic grids via `sf::st_make_grid()`,
`ww_multi_scale()` also allows you to provide your own aggregation units. For
instance, we can use the `tigris` package to download census block group
boundaries, as well as county and county subdivision boundaries, for the state
of New York:
```{r message=FALSE, results='hide'}
suppressPackageStartupMessages(library(tigris))
ny_block_groups <- block_groups("NY")
ny_county_subdivisions <- county_subdivisions("NY")
ny_counties <- counties("NY")
```
We can then provide those `sf` objects straight to `ww_multi_scale()`:
```{r warning=FALSE}
ny_division_assessment <- ww_multi_scale(
ny_trees,
agb,
predicted,
grids = list(
ny_block_groups,
ny_county_subdivisions,
ny_counties
)
)
ny_division_assessment %>%
mutate(
division = rep(c("Block group", "County subdivision", "County"), each = 2)
) %>%
ggplot(aes(x = division, y = .estimate, fill = .metric)) +
geom_col(position = position_dodge())
```
By providing grids directly to `ww_multi_scale()`, we can see how well our model
performs when we aggregate predictions to more semantically meaningful levels
than the systematic grids.
|
/scratch/gouwar.j/cran-all/cranData/waywiser/vignettes/multi-scale-assessment.Rmd
|
---
title: "Calculating residual spatial autocorrelation"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Calculating residual spatial autocorrelation}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = rlang::is_installed("ggplot2")
)
```
Perhaps the most famous sentence in spatial analysis is Tobler's first law of
geography, from [Tobler (1970)](https://doi.org/10.2307/143141): "Everything is
related to everything else, but near things are more related than distant
things." Spatial data often exhibits spatial autocorrelation, where variables of
interest are not distributed at random but rather exhibit spatial patterns; in
particular, spatial data is often clustered (exhibiting positive spatial
autocorrelation) such that locations near each other are _more similar_ than
you'd expect if you had just sampled two observations at random.
For some data, this makes intuitive sense. The elevation at two neighboring
points is extremely likely to be similar, as is the precipitation and
temperature; these are variables whose values depend on (among other things)
your position on the Earth. However, the first law is often over-interpreted.
[Pebesma and Bivand (2022)](https://r-spatial.org/book/15-Measures.html) present
an interesting discussion of the "first law", quoting
[Olsson (1970)](https://doi.org/10.2307/143140) who says:
> [T]he fact that the autocorrelations seem to hide systematic specification errors suggests that the elevation of this statement to the status of ‘the first law of geography’ is at best premature. At worst, the statement may represent the spatial variant of the post hoc fallacy, which would mean that coincidence has been mistaken for a causal relation.
Oftentimes, finding spatial autocorrelation in a variable is a result of that
variable depending on _other_ variables, which may or may not be spatially
dependent themselves. For instance, house prices often exhibit positive
autocorrelation, not because home prices are determined by their relative
position on Earth, but because house prices rely upon other variables -- school
zones, median income, housing availability and more -- which may themselves
be spatially autocorrelated.
For that reason, it's often worthwhile to look at the spatial autocorrelation of
model residuals, to see if your model makes more errors in certain regions than
you'd expect if errors were randomly arranged. That can help you to identify
misspecifications in your model: seeing large autocorrelations in model
residuals in an area might suggest that you're missing variables in your model,
and knowing which areas your model does worse in can help you to identify what
those variables might be. Even if you can't fix your model, it's often useful to
identify regions your model does notably worse in, so that you can communicate
that to whoever winds up using your predictions.
Let's walk through how we can use waywiser to find local indicators of spatial autocorrelation for a very simple model. First things first, let's load a few libraries:
```{r message=FALSE}
# waywiser itself, of course:
library(waywiser)
# For the %>% pipe and mutate:
library(dplyr)
```
We'll be working with the `guerry` data included in the waywiser package. We'll fit a simple linear model relating crimes against persons to literacy, and then generate predictions from that model. We can use `ww_local_moran_i()` to calculate the local spatial autocorrelation of our residuals at each data point:
```{r}
guerry %>%
mutate(pred = predict(lm(Crm_prs ~ Litercy, .))) %>%
ww_local_moran_i(Crm_prs, pred)
```
If you're familiar with spdep, you can probably guess that waywiser is doing _something_ under the hood here to calculate which of our observations are neighbors, and how to create spatial weights from those neighborhoods. And that guess would be right -- waywiser is making use of two functions, `ww_build_neighbors()` and `ww_build_weights()`, in order to automatically calculate spatial weights for calculating metrics:
```{r}
ww_build_neighbors(guerry)
ww_build_weights(guerry)
```
These functions aren't always the best way to calculate spatial weights for your data, however. As a result, waywiser also lets you specify your own weights directly:
```{r}
weights <- guerry %>%
sf::st_geometry() %>%
sf::st_centroid() %>%
spdep::dnearneigh(0, 97000) %>%
spdep::nb2listw()
weights
guerry %>%
mutate(pred = predict(lm(Crm_prs ~ Litercy, .))) %>%
ww_local_moran_i(Crm_prs, pred, weights)
```
Or as a function, which lets you use custom weights with other tidymodels functions like `fit_resamples()`:
```{r}
weights_function <- function(data) {
data %>%
sf::st_geometry() %>%
sf::st_centroid() %>%
spdep::dnearneigh(0, 97000) %>%
spdep::nb2listw()
}
guerry %>%
mutate(pred = predict(lm(Crm_prs ~ Litercy, .))) %>%
ww_local_moran_i(Crm_prs, pred, weights_function)
```
Providing custom weights also lets us use `ww_local_moran_i_vec()` to add a column to our original data frame with our statistic, which makes plotting using our original geometries easier:
```{r 2022_06_29-guerry, fig.width=8}
library(ggplot2)
weights <- ww_build_weights(guerry)
guerry %>%
mutate(
pred = predict(lm(Crm_prs ~ Litercy, .)),
.estimate = ww_local_moran_i_vec(Crm_prs, pred, weights)
) %>%
sf::st_as_sf() %>%
ggplot(aes(fill = .estimate)) +
geom_sf() +
scale_fill_gradient2(
"Local Moran",
low = "#018571",
mid = "white",
high = "#A6611A"
)
```
This makes it easy to see what areas are poorly represented by our model (which have the highest local Moran values), which might lead us to identify ways to improve our model or help us identify caveats and limitations of the models we're working with.
Other functions in waywiser will allow you to calculate the p-value associated with spatial autocorrelation metrics. You can calculate these alongside the autocorrelation metrics themselves using `yardstick::metric_set()`:
```{r}
moran <- yardstick::metric_set(
ww_global_moran_i,
ww_global_moran_pvalue
)
guerry %>%
mutate(pred = predict(lm(Crm_prs ~ Litercy, .))) %>%
moran(Crm_prs, pred)
```
These functions can also be used on their own to help qualitatively identify regions of concern, which may be poorly represented by your model:
```{r 2023_02_21-guerryp, fig.width=8}
guerry %>%
mutate(
pred = predict(lm(Crm_prs ~ Litercy, .)),
.estimate = ww_local_moran_pvalue_vec(Crm_prs, pred, weights)
) %>%
sf::st_as_sf() %>%
ggplot(aes(fill = .estimate < 0.01)) +
geom_sf() +
scale_fill_discrete("Local Moran p-value < 0.01?") +
theme(legend.position = "bottom")
```
This can help identify new predictor variables or other promising refinements to a model during the iterative process of model development. You shouldn't report p-values without other context as results of your model, but this approach can help qualitatively assess a model during the development process. To use these tests for inference, consider using functions from spdep directly; each autocorrelation function in waywiser links to the spdep function it wraps from its documentation.
|
/scratch/gouwar.j/cran-all/cranData/waywiser/vignettes/residual-autocorrelation.Rmd
|
---
title: "Assessing models with waywiser"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Assessing Models with waywiser}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = rlang::is_installed("vip") && rlang::is_installed("ggplot2")
)
```
The waywiser package aims to be an ergonomic toolbox providing a consistent user
interface for assessing spatial models. To that end, waywiser does three main
things:
1. Provides new [yardstick](https://yardstick.tidymodels.org/) extensions,
making it easier to use performance metrics from the spatial modeling
literature via a standardized API.
2. Provides a new function, `ww_multi_scale()`, which makes it easy to see how
model performance metrics change when predictions are aggregated to various
scales.
3. Provides an implementation of the
[area of applicability from Meyer and Pebesma 2021](https://doi.org/10.1111/2041-210X.13650),
extending this tool to work with tidymodels infrastructure.
This vignette will walk through each of these goals in turn. Before we do that,
let's set up the data we'll use in examples. We'll be using simulated data
based on [Worldclim](https://www.worldclim.org/) variables; our predictors here
represent temperature and precipitation values at sampled locations, while
our response represents a virtual species distribution:
```{r setup}
library(waywiser)
set.seed(1107)
worldclim_training <- sample(nrow(worldclim_simulation) * 0.8)
worldclim_testing <- worldclim_simulation[-worldclim_training, ]
worldclim_training <- worldclim_simulation[worldclim_training, ]
worldclim_model <- lm(
response ~ bio2 + bio10 + bio13 + bio19,
worldclim_training
)
worldclim_testing$predictions <- predict(
worldclim_model,
worldclim_testing
)
head(worldclim_testing)
```
## Yardstick Extensions
First and foremost, waywiser provides a host of new yardstick metrics to provide
a standardized interface for various performance metrics.
All of these functions work more or less the same way: you provide your data,
the names of your "true" values and predicted values, and get back a
standardized output format. As usual with yardstick, that output can be either
a tibble or a vector. For instance, if we want to calculate the agreement
coefficient from [Ji and Gallo 2006](https://doi.org/10.14358/PERS.72.7.823):
```{r}
ww_agreement_coefficient(
worldclim_testing,
truth = response,
estimate = predictions
)
ww_agreement_coefficient_vec(
truth = worldclim_testing$response,
estimate = worldclim_testing$predictions
)
```
Some of these additional metrics are implemented by wrapping functions from the
[spdep](https://r-spatial.github.io/spdep/) package:
```{r}
ww_global_geary_c(
worldclim_testing,
truth = response,
estimate = predictions
)
```
These functions rely on calculating the spatial neighbors of each observation.
The waywiser package will automatically use `ww_build_weights()` to calculate
spatial weights if none are provided, but this default is often not desirable.
For that reason, these
functions all have a `wt` argument, which can take either pre-calculated weights
or a function that will create spatial weights:
```{r}
ww_global_geary_c(
worldclim_testing,
truth = response,
estimate = predictions,
wt = ww_build_weights(worldclim_testing)
)
ww_global_geary_c(
worldclim_testing,
truth = response,
estimate = predictions,
wt = ww_build_weights
)
```
Because these are yardstick metrics, they can be used with
`yardstick::metric_set()` and other tidymodels infrastructure:
```{r}
yardstick::metric_set(
ww_agreement_coefficient,
ww_global_geary_c
)(worldclim_testing,
truth = response,
estimate = predictions)
```
## Multi-scale model assessment
A common pattern with spatial models is that you need to predict observation
units -- pixels of a raster or individual points -- which will be aggregated to
arbitrary scales, such as towns or parcel boundaries. Because errors can be
spatially distributed, or can either compound or counteract each other when
aggregated, [some assessment protocols](https://doi.org/10.1016/j.rse.2010.05.010)
recommend assessing model predictions aggregated to multiple scales.
The `ww_multi_scale()` function helps automate this process. The interface for
this function works similarly to that for yardstick metrics -- you provide your
data, your true values, and your estimate -- except you also must provide
instructions for how to aggregate your data. You can do this by passing
arguments that will be used by `sf::st_make_grid()`; for instance, we can use
the `n` argument to control how many polygons our grid has in the x and y
directions.
Note that each element of the argument vector is used to make a separate grid --
so, for instance, passing `n = c(2, 4)` will result in one 2-by-2 grid and one
4-by-4 grid, because `n[[1]]` is 2 and `n[[2]]` is 4. If we actually wanted to
create a single 2-by-4 grid, by passing `sf::st_make_grid()` the argument
`n = c(2, 4)`, we would need to wrap that vector in a list so that `n[[1]]`
returns `c(2, 4)`:
```{r}
ww_multi_scale(
worldclim_testing,
truth = response,
estimate = predictions,
metrics = list(ww_agreement_coefficient, yardstick::rmse),
n = list(c(2, 4))
)
```
You can also pass polygons directly, if you have pre-defined grids you'd like
to use:
```{r}
grid <- sf::st_make_grid(worldclim_testing, n = c(2, 4))
ww_multi_scale(
worldclim_testing,
truth = response,
estimate = predictions,
metrics = list(ww_agreement_coefficient, yardstick::rmse),
grids = list(grid)
)
```
## Area of Applicability
Last but not least, we can also see if there are any areas in our data that are
too different from our training data for us to safely predict on, falling
outside the "area of applicability" defined by
[Meyer and Pebesma (2021)](https://doi.org/10.1111/2041-210X.13650). This
approach looks at how similar the predictor values of new data are to the data
you used to train your model, with each predictor weighted by how important it
is to your model.
In order to calculate your area of applicability, you can pass
`ww_area_of_applicability()` information about which of your variables are used
as predictors in your model, your training data, and the importance scores for
each of your variables. Out of the box, waywiser should work with any of the
importance score-calculating functions from the vip package:
```{r}
worldclim_aoa <- ww_area_of_applicability(
response ~ bio2 + bio10 + bio13 + bio19,
worldclim_training,
importance = vip::vi_model(worldclim_model)
)
worldclim_aoa
```
You can also pass a data.frame with columns named "term" and "estimate"
(containing the name of each term, or predictor, and their estimated importance)
rather than using the vip package if that's more convenient.
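For instance, a minimal sketch with hand-made (entirely made-up) importance scores might look like this -- the column names `term` and `estimate` are the only requirement:
```{r, eval = FALSE}
importance_df <- data.frame(
  term = c("bio2", "bio10", "bio13", "bio19"),
  estimate = c(0.2, 0.4, 0.1, 0.3)
)
ww_area_of_applicability(
  response ~ bio2 + bio10 + bio13 + bio19,
  worldclim_training,
  importance = importance_df
)
```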
The objects returned by `ww_area_of_applicability()` are models in their own
right, which can be used by functions such as `predict()` to calculate if new
observations are in the area of applicability of a model.
```{r}
worldclim_testing <- cbind(
worldclim_testing,
predict(worldclim_aoa, worldclim_testing)
)
head(worldclim_testing)
```
The predict function returns the "distance index", or "di", for each
observation: a score of how far away the observation is, in predictor space,
from your training data. Points with a "di" higher than a set threshold are
"outside" the area of applicability. We can visualize our test set here to see
that our model often, but not always, performs worse on observations with a
higher "di":
```{r}
library(ggplot2)
ggplot(worldclim_testing, aes(di, abs(response - predictions), color = aoa)) +
geom_point(alpha = 0.6)
```
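As a quick numeric complement to the plot, we can tabulate the `aoa` column returned by `predict()` to see how many test-set observations fall inside and outside the area of applicability:
```{r}
table(worldclim_testing$aoa)
```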
|
/scratch/gouwar.j/cran-all/cranData/waywiser/vignettes/waywiser.Rmd
|
# Generic: flag observations nominated as potential outliers
is_outlier <- function(object, ...)
{
UseMethod("is_outlier", object)
}
# wBACON (multivariate): outliers are the observations not in the final subset
is_outlier.wbaconmv <- function(object, ...)
{
object$subset == 0
}
# wBACON_reg (regression): outliers are the observations not in the final subset
is_outlier.wbaconlm <- function(object, ...)
{
object$subset == FALSE
}
|
/scratch/gouwar.j/cran-all/cranData/wbacon/R/is_outlier.R
|
# Weighted median, i.e., the 0.5 weighted quantile
median_w <- function(x, w, na.rm = FALSE)
{
quantile_w(x, w, 0.5, na.rm)
}
|
/scratch/gouwar.j/cran-all/cranData/wbacon/R/median_w.R
|
plot.wbaconlm <- function(x, which = c(1, 2, 3, 4),
hex = FALSE, caption = c("Residuals vs Fitted", "Normal Q-Q",
"Scale-Location", "Standardized Residuals vs Robust Mahalanobis Distance"),
panel = if (add.smooth) function(x, y, ...) panel.smooth(x, y,
iter = iter.smooth, ...) else points, sub.caption = NULL, main = "",
ask = prod(par("mfcol")) < length(which) && dev.interactive(), ...,
id.n = 3, labels.id = names(residuals(x)), cex.id = 0.75, qqline = TRUE,
add.smooth = getOption("add.smooth"), iter.smooth = 3,
label.pos = c(4, 2), cex.caption = 1, cex.oma.main = 1.25)
{
dropInf <- function(x, h)
{
if (any(isInf <- h >= 1)) {
warning(gettextf("not plotting observations with leverage one:\n %s",
paste(which(isInf), collapse = ", ")),
call. = FALSE, domain = NA)
x[isInf] <- NaN
}
x
}
if (!is.numeric(which) || any(which < 1) || any(which > 4))
stop("'which' must be in 1:4")
show <- rep(FALSE, 6)
show[which] <- TRUE
subset0 <- x$subset
r <- x$residuals[subset0]
yh <- x$fitted.values[subset0]
w <- x$weights[subset0]
n <- length(r)
# regression scale
s <- sqrt(sum(w * r^2) / x$df.residual)
if (any(show[c(2L:4L)])) {
ylab5 <- ylab23 <- "Standardized residuals"
xmat <- stats::model.matrix(x$terms, x$model)[subset0, ]
R <- qr.R(qr(sqrt(w) * xmat))
A <- xmat %*% backsolve(R, diag(NCOL(R)))
hii <- rowSums(A^2) * w
rs <- dropInf(r / (s * sqrt(1 - hii)), hii)
}
if (any(show[c(1L, 3L)]))
l.fit <- "Fitted values"
if (is.null(id.n)) {
id.n <- 0
} else {
id.n <- as.integer(id.n)
if (id.n < 0L || id.n > n)
stop(gettextf("'id.n' must be in {1,..,%d}", n), domain = NA)
}
if (id.n > 0L) {
if (is.null(labels.id))
labels.id <- paste(1L:n)
iid <- 1L:id.n
show.r <- sort.list(abs(r), decreasing = TRUE)[iid]
if (any(show[2L:3L]))
show.rs <- sort.list(abs(rs), decreasing = TRUE)[iid]
text.id <- function(x, y, ind, adj.x = TRUE)
{
labpos <- if (adj.x)
label.pos[1 + as.numeric(x > mean(range(x)))]
else
3
text(x, y, labels.id[ind], cex = cex.id, xpd = TRUE, pos = labpos,
offset = 0.25)
}
}
getCaption <- function(k)
{
if (length(caption) < k)
NA_character_
else
as.graphicsAnnot(caption[[k]])
}
if (is.null(sub.caption)) {
cal <- x$call
if (!is.na(m.f <- match("formula", names(cal)))) {
cal <- cal[c(1, m.f)]
names(cal)[2L] <- ""
}
cc <- deparse(cal, 80)
nc <- nchar(cc[1L], "c")
abbr <- length(cc) > 1 || nc > 75
sub.caption <- if (abbr)
paste(substr(cc[1L], 1L, min(75L, nc)), "...")
else
cc[1L]
}
one.fig <- prod(par("mfcol")) == 1
if (ask) {
oask <- devAskNewPage(TRUE)
on.exit(devAskNewPage(oask))
}
# Residuals vs Fitted
if (show[1L]) {
ylim <- range(r, na.rm = TRUE)
if (id.n > 0)
ylim <- extendrange(r = ylim, f = 0.08)
dev.hold()
if (hex) {
requireNamespace("hexbin")
hb <- hexbin(yh, r, ybnds = ylim)
hvp <- hexbin::plot(hb, xlab = l.fit, ylab = "Residuals",
main = main)
hexVP.abline(hvp$plot, h = 0, lty = 3, col = "gray")
hexVP.loess(hb, hvp = hvp$plot, span = 2 / 3, ...)
} else {
plot(yh, r, xlab = l.fit, ylab = "Residuals", main = main,
ylim = ylim, type = "n", ...)
panel(yh, r, ...)
if (one.fig)
title(sub = sub.caption, ...)
mtext(getCaption(1), 3, 0.25, cex = cex.caption)
if (id.n > 0) {
y.id <- r[show.r]
y.id[y.id < 0] <- y.id[y.id < 0] - strheight(" ") / 3
text.id(yh[show.r], y.id, show.r)
}
abline(h = 0, lty = 3, col = "gray")
}
dev.flush()
}
# Normal Q-Q
if (show[2L]) {
ylim <- range(rs, na.rm = TRUE)
ylim[2L] <- ylim[2L] + diff(ylim) * 0.075
dev.hold()
qq <- qqnorm(rs, main = main, ylab = ylab23, ylim = ylim, ...)
if (qqline)
qqline(rs, lty = 3, col = "gray50")
if (one.fig)
title(sub = sub.caption, ...)
mtext(getCaption(2), 3, 0.25, cex = cex.caption)
if (id.n > 0)
text.id(qq$x[show.rs], qq$y[show.rs], show.rs)
dev.flush()
}
# Scale-Location
if (show[3L]) {
sqrtabsr <- sqrt(abs(rs))
ylim <- c(0, max(sqrtabsr, na.rm = TRUE))
yl <- as.expression(substitute(sqrt(abs(YL)),
list(YL = as.name(ylab23))))
yhn0 <- yh
dev.hold()
if (hex) {
hb <- hexbin(yhn0, sqrtabsr, ybnds = ylim)
hvp <- plot(hb, xlab = l.fit, ylab = yl, main = main)
hexVP.loess(hb, hvp = hvp$plot, span = 2 / 3, ...)
} else {
plot(yhn0, sqrtabsr, xlab = l.fit, ylab = yl, main = main,
ylim = ylim, type = "n", ...)
panel(yhn0, sqrtabsr, ...)
if (one.fig)
title(sub = sub.caption, ...)
mtext(getCaption(3), 3, 0.25, cex = cex.caption)
if (id.n > 0)
text.id(yhn0[show.rs], sqrtabsr[show.rs], show.rs)
}
dev.flush()
}
# Standardized residuals vs. robust Mahalanobis distances
if (show[4L]) {
xlim <- c(0, max(x$mv$dist, na.rm = TRUE))
dev.hold()
if (hex) {
hb <- hexbin(x$mv$dist, x$residuals/s, xbnds = xlim)
hvp <- plot(hb, xlab = "Robust distance",
ylab = "Standardized residuals", main = main)
hexVP.abline(hvp$plot, h = 0, lty = 3, col = "gray")
hexVP.abline(hvp$plot, v = x$mv$cutoff, h = c(-x$reg$cutoff,
x$reg$cutoff), lty = 2, col = 2)
} else {
plot(x$mv$dist, x$residuals / s, xlab = "Robust distance",
ylab = "Standardized residuals", main = main, xlim = xlim,
type = "n", ...)
points(x$mv$dist[subset0], x$residuals[subset0] / s, ...)
points(x$mv$dist[!subset0], x$residuals[!subset0] / s, pch = 19,
col = 2, ...)
abline(h = 0, lty = 3, col = "gray")
abline(v = x$mv$cutoff, h = c(-x$reg$cutoff, x$reg$cutoff),
lty = 2, col = 2)
if (one.fig)
title(sub = sub.caption, ...)
mtext(getCaption(4), 3, 0.25, cex = cex.caption)
}
dev.flush()
}
if (!one.fig && par("oma")[3L] >= 1)
mtext(sub.caption, outer = TRUE, cex = cex.oma.main)
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/wbacon/R/plot_wbaconlm.R
|
plot.wbaconmv <- function(x, which = 1:2,
caption = c("Robust distance vs. Index",
"Robust distance vs. Univariate projection"), hex = FALSE, col = 2,
pch = 19, ask = prod(par("mfcol")) < length(which) && dev.interactive(),
alpha = 0.05, maxiter = 20, tol = 1e-5, ...)
{
if (!inherits(x, "wbaconmv"))
stop("use only with 'wbaconmv' objects")
if (!is.numeric(which) || any(which < 1) || any(which > 2))
stop("'which' must be in 1:2")
show <- rep(FALSE, 2)
show[which] <- TRUE
if (ask) {
oask <- devAskNewPage(TRUE)
on.exit(devAskNewPage(oask))
}
if (show[1]) {
plot(x$dist, xlab = "Index", ylab = "Robust distance",
main = caption[1], type = "n", ...)
at <- x$subset == 1
points(which(at), x$dist[at])
points(which(!at), x$dist[!at], pch = pch, col = col)
abline(h = x$cutoff, lty = 2, col = col)
dev.flush()
}
if (show[2]) {
tmp <- SeparationIndex(x, alpha, tol, maxiter)
if (tmp$failed) {
plot(0, 0, type = "n", axes = FALSE, xlab = "", ylab = "",
main = caption[2], ...)
text(0, 1, labels = "[too few observations]")
} else {
if (!tmp$converged)
warning("Optimal projection: not converged; see arguments 'maxiter'")
if (hex) {
requireNamespace("hexbin")
hb <- hexbin(tmp$proj, x$dist)
hvp <- hexbin::plot(hb, xlab = "Univariate projection",
ylab = "Robust distance", main = caption[2],
...)
hexVP.abline(hvp$plot, h = x$cutoff, lty = 2, col = 2)
} else {
if (length(tmp$proj) > 10000)
message("Tool tip: A hexbin scatterplot is available (hex = TRUE)\n")
plot(tmp$proj, x$dist, xlab = "Univariate projection",
ylab = "Robust distance", main = caption[2], type = "n",
...)
at <- x$subset == 1
points(tmp$proj[at], x$dist[at])
points(tmp$proj[!at], x$dist[!at], pch = pch, col = col)
abline(h = x$cutoff, lty = 2, col = col)
}
}
grDevices::dev.flush()
}
invisible()
}
# Separation index of Qiu and Joe (2006): Separation index and partial
# membership for clustering, Computational Statistics and Data Analysis 50,
# pp. 585-603
SeparationIndex <- function(object, alpha = 0.05, tol = 1e-5, maxiter = 20)
{
n_outlier <- object$n - sum(object$subset)
if (n_outlier < object$p)
return(list(failed = TRUE))
stopifnot(alpha > 0, alpha < 1, maxiter > 0, tol > 0)
C1 <- object$cov
C2 <- cov(object$x[object$subset == 0, ])
L1 <- chol(C1)
L2 <- chol(C2)
delta <- colMeans(object$x[!object$subset, ]) - object$center
a0 <- delta / norm(as.matrix(delta), type = "F")
iter <- 0
repeat {
iter <- iter + 1
p1 <- sqrt(crossprod(L1 %*% a0))[1, 1]
p2 <- sqrt(crossprod(L2 %*% a0))[1, 1]
a1 <- (p1 + p2) * solve(C1 / p1 + C2 / p2) %*% delta
a1 <- a1 / norm(as.matrix(a1), type = "F")
if (norm(as.matrix(a1 - a0), type = "F") < tol || iter >= maxiter)
break
a0 <- a1
}
if (crossprod(a1, delta) < 0)
a1 <- -a1
d <- crossprod(a1, delta)
p1 <- sqrt(crossprod(L1 %*% a1))[1, 1]
p2 <- sqrt(crossprod(L2 %*% a1))[1, 1]
q_alpha <- qnorm(1 - alpha / 2)
p12 <- q_alpha * (p1 + p2)
J <- (d - p12) / (d + p12)
list(J = J, a = a1, proj = as.vector(object$x %*% a1),
converged = iter < maxiter, failed = FALSE)
}
|
/scratch/gouwar.j/cran-all/cranData/wbacon/R/plot_wbaconmv.R
|
predict.wbaconlm <- function(object, newdata, se.fit = FALSE, scale = NULL,
df = Inf, interval = c("none", "confidence", "prediction"),
level = 0.95, type = c("response", "terms"), terms = NULL,
na.action = na.pass, ...)
{
# on the subset, the weighted BACON regression works like a lm model
in_subset <- object$subset == 1
# cast 'object' to an object of class 'lm'
ans <- object
ans$residuals <- ans$residuals[in_subset]
ans$fitted.values <- ans$fitted.values[in_subset]
ans$weights <- ans$weights[in_subset]
ans$qr$qr <- ans$qr$qr[in_subset, ]
class(ans) <- "lm"
stats::predict.lm(ans, newdata, se.fit, scale, df, interval, level, type,
terms, na.action, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/wbacon/R/predict_wbaconlm.R
|
quantile_w <- function(x, w, probs, na.rm = FALSE)
{
dat <- .check(x, w, na.rm); if (is.null(dat)) return(NA)
if (any(probs < 0) | any(probs > 1))
stop("Argument 'probs' not in [0, 1]\n", call. = FALSE)
res <- NULL
for (i in 1:length(probs)) {
tmp <- .C(C_wquantile, x = as.double(dat$x), w = as.double(dat$w),
n = as.integer(dat$n), probs = as.double(probs[i]),
q = as.double(numeric(1)))
res <- c(res, tmp$q)
}
names(res) <- paste0(probs * 100, "%")
return(res)
}
# some sanity checks (univariate)
.check <- function(x, w, na.rm)
{
if (is.factor(x) || is.factor(w) || is.data.frame(x))
stop("Arguments data and weights must be numeric vectors\n")
n <- length(x); nw <- length(w)
if (nw != n)
stop("Data vector and weights are not of the same dimension\n",
call. = FALSE)
if (n == 0)
return(NULL)
# check for missing values
cc <- stats::complete.cases(x, w)
if (sum(cc) != n) {
if (na.rm) {
x <- x[cc]
w <- w[cc]
} else {
return(NULL)
}
}
n <- length(x)
# check if data vector and weights are finite
if (sum(is.finite(c(x, w))) != 2 * n) {
warning("Some observations are not finite\n", call. = FALSE,
immediate. = TRUE)
return(NULL)
}
list(x = x, w = w, n = n)
}
|
/scratch/gouwar.j/cran-all/cranData/wbacon/R/quantile_w.R
|
wBACON <- function(x, weights = NULL, alpha = 0.05, collect = 4,
version = c("V2", "V1"), na.rm = FALSE, maxiter = 50, verbose = FALSE,
n_threads = 2)
{
n <- NROW(x); p <- NCOL(x)
stopifnot(n > p, p > 0, 0 < alpha, alpha < 1, maxiter > 0, collect > 1,
n_threads > 0)
if (version[1] == "V2")
vers <- 1
else if (version[1] == "V1")
vers <- 0
else
stop(paste0("Argument '", version, "' is not defined\n"))
if (!is.matrix(x))
x <- as.matrix(x)
if (is.null(weights))
weights <- rep(1, n)
stopifnot(n == length(weights))
# NA treatment
cc <- stats::complete.cases(x, weights)
if (sum(cc) != n) {
if (na.rm) {
x <- x[cc, ]
weights <- weights[cc]
} else
stop("Data must not contain missing values; see argument 'na.rm'\n",
call. = FALSE)
}
n <- nrow(x)
# check if any element is not finite
chk <- sum(is.finite(c(x, weights))) != (1 + p) * n
if (chk)
stop("Some observations are not finite\n", call. = FALSE)
# check if collect is correctly specified
if (collect >= n / p)
stop("Argument 'collect' must be an integer smaller than ",
floor(n / p), "\n")
if (collect * p / n > 0.6 && verbose)
cat("Note: initial subset > 60% (use a smaller value for 'collect')\n")
# compute weighted BACON algorithm
tmp <- .C(C_wbacon, x = as.double(x), w = as.double(weights),
center = as.double(numeric(p)),
scatter = as.double(numeric(p * p)),
dist = as.double(numeric(n)), n = as.integer(n),
p = as.integer(p), alpha = as.double(alpha),
subset = as.integer(rep(0, n)), cutoff = as.double(numeric(1)),
maxiter = as.integer(abs(maxiter)),
verbose = as.integer(verbose), version = as.integer(vers),
collect = as.integer(collect), success = as.integer(1),
n_threads = as.integer(n_threads))
tmp$cutoff <- sqrt(tmp$cutoff)
tmp$verbose <- NULL
tmp$converged <- tmp$success == 1
tmp$success <- NULL
tmp$x <- matrix(tmp$x, ncol = p)
if (!tmp$converged) {
tmp$center <- rep(NA, p)
tmp$cov <- matrix(rep(NA, p * p), ncol = p)
tmp$dist <- rep(NA, n)
tmp$subset <- rep(NA, n)
tmp$cutoff <- NA
} else {
tmp$cov <- matrix(tmp$scatter, ncol = p)
tmp$cov <- tmp$cov + t(tmp$cov * lower.tri(tmp$cov))
}
names(tmp$center) <- colnames(x)
colnames(tmp$cov) <- colnames(x)
rownames(tmp$cov) <- colnames(x)
tmp$scatter <- NULL
tmp$call <- match.call()
class(tmp) <- "wbaconmv"
tmp
}
print.wbaconmv <- function(x, digits = max(3L, getOption("digits") - 3L), ...)
{
if (x$converged) {
cat("\nWeighted BACON: Robust location, covariance, and distances\n")
cat(paste0("Converged in ", x$maxiter, " iterations (alpha = ",
x$alpha, ")\n"))
n_outlier <- x$n - sum(x$subset)
cat(paste0("Number of potential outliers: ", n_outlier, " (",
round(100 * n_outlier / x$n, 2), "%)\n\n"))
} else
cat(paste0("Weighted BACON did not converge in ", x$maxiter,
" iterations!\n\n"))
}
summary.wbaconmv <- function(object, ...)
{
digits <- max(3L, getOption("digits") - 3L)
cat("\nWeighted BACON: Robust location, covariance, and distances\n")
cat("Initialized by method:", ifelse(object$version == 1, "V2", "V1"), "\n")
if (object$converged)
cat(paste0("Converged in ", object$maxiter, " iterations (alpha = ",
object$alpha, ")\n"))
else
cat(paste0("\nDID NOT CONVERGE in ", object$maxiter,
" iterations (alpha = ", object$alpha, ")\n"))
n <- length(object$subset)
n_outlier <- object$n - sum(object$subset)
cat(paste0("\nNumber of potential outliers: ", n_outlier, " (",
round(100 * n_outlier / n, 2), "%)\n"))
cat("\nRobust estimate of location:\n")
print(object$center, digits = digits)
cat("\nRobust estimate of covariance:\n")
print(object$cov, digits = digits)
cat(paste0("\nDistances (cutoff: ",
format(object$cutoff, digits = digits), "):\n"))
if (object$converged)
print(summary(object$dist), digits = digits)
else
print(NA)
cat("\n")
}
distance <- function(x)
{
if (!inherits(x, "wbaconmv"))
cat("not defined for this type of argument\n")
else
x$dist
}
vcov.wbaconmv <- function(object, ...)
{
object$cov
}
center <- function(object)
{
object$center
}
|
/scratch/gouwar.j/cran-all/cranData/wbacon/R/wbacon.R
|
wBACON_reg <- function(formula, weights = NULL, data, collect = 4,
na.rm = FALSE, alpha = 0.05, version = c("V2", "V1"), maxiter = 50,
verbose = FALSE, original = FALSE, n_threads = 2)
{
stopifnot(alpha < 1, alpha > 0, collect > 0, maxiter > 0, n_threads > 0)
if (!inherits(formula, "formula"))
stop("Argument '", formula, "' must be a formula\n", call. = FALSE)
# data preparation
mf <- stats::model.frame(formula, data, na.action = stats::na.pass)
mt <- stats::terms(mf)
if (any(attr(mt, "dataClasses") == "factor"))
stop("Factor variables are not allowed\n")
response <- attr(mt, "response")
y <- as.numeric(stats::model.response(mf))
n <- length(y)
yname <- names(mf)[response]
x <- stats::model.matrix(mt, mf)
if (is.null(weights))
weights <- rep(1, n)
# NA treatment
cc <- stats::complete.cases(y, x, weights)
if (sum(cc) != n) {
if (na.rm) {
mf <- mf[cc, ]
x <- x[cc, ]
y <- y[cc]
weights <- weights[cc]
} else {
stop("Data must not contain missing values; see 'na.rm'\n",
call. = FALSE)
}
}
n <- NROW(x); p <- NCOL(x)
# check if any element is not finite
if (sum(is.finite(c(x, y, weights))) != (2 + p) * n)
stop("Some observations are not finite\n", call. = FALSE)
# Algorithm 3
if (verbose)
cat("\nOutlier detection (Algorithm 3)\n---\n")
wb <- wBACON(if (attr(mt, "intercept")) x[, -1] else x, weights, alpha,
collect, version, na.rm, maxiter, verbose)
if (isFALSE(wb$converged))
stop("wBACON on the design matrix failed\n")
# Algorithms 4 and 5
if (verbose)
cat("\nRegression\n---\n")
collect <- min(collect, floor(n / p))
tmp <- .C(C_wbacon_reg, x = as.double(x), y = as.double(y),
w = as.double(weights), resid = as.double(numeric(n)),
beta = as.double(numeric(p)), subset = as.integer(wb$subset),
dist = as.double(wb$dist), n = as.integer(n), p = as.integer(p),
m = as.integer(sum(wb$subset)), verbose = as.integer(verbose),
sucess = as.integer(1), collect = as.integer(collect),
alpha = as.double(alpha), maxiter = as.integer(maxiter),
original = as.integer(original),
n_threads = as.integer(n_threads))
# cast the QR factorization as returned by LAPACK:dgeqrf to a 'qr' object
QR <- structure(
list(qr = matrix(tmp$x, ncol = p), qraux = rep(NA, p), pivot = 1L:p,
tol = NA, rank = p), class = "qr")
# return value
res <- list(coefficients = tmp$beta, residuals = tmp$resid, rank = p,
fitted.values = y - tmp$resid,
df.residual = sum(weights[tmp$subset == 1]) - p,
call = match.call(), terms = mt, model = mf, weights = weights,
qr = QR, subset = (tmp$subset == 1),
reg = list(converged = as.logical(tmp$sucess),
collect = collect, version = version, alpha = alpha,
maxiter = tmp$maxiter, dist = tmp$dist,
cutoff = qt(alpha / (2 * (tmp$m + 1)),
tmp$m - p, lower.tail = FALSE)),
mv = list(center = wb$center, cov = wb$cov, dist = wb$dist,
cutoff = wb$cutoff))
names(res$coefficients) <- colnames(x)
class(res) <- "wbaconlm"
res
}
print.wbaconlm <- function(x, digits = max(3L, getOption("digits") - 3L), ...)
{
if (x$reg$converged){
n <- length(x$residuals)
cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"),
"\n", sep = "")
n_subset <- sum(x$subset)
cat(paste0("\nRegression on the subset of ", n_subset, " out of ", n,
" observations (", round(100 * n_subset / n, 1), "%)\n"))
cat("\nCoefficients:\n")
print.default(format(x$coefficients, digits = digits), print.gap = 2L,
quote = FALSE)
} else {
cat(paste0("Algorithm did not converge in ", x$reg$maxiter,
" iterations!\n\n"))
}
invisible(x)
}
summary.wbaconlm <- function (object, ...)
{
in_subset <- object$subset
r <- object$residuals[in_subset]
w <- object$weights[in_subset]
f <- object$fitted.values[in_subset]
qr <- object$qr
p <- object$rank
rdf <- sum(w) - p
n <- sum(w)
mss <- if (attr(object$terms, "intercept")) {
m <- sum(w * f / sum(w))
sum(w * (f - m)^2)
} else {
sum(w * f^2)
}
rss <- sum(w * r^2)
resvar <- rss / rdf
if (is.finite(resvar) && resvar < (mean(f)^2 + stats::var(c(f))) * 1e-30)
warning("essentially perfect fit: summary may be unreliable")
p1 <- 1L:p
R <- chol2inv(qr$qr[p1, p1, drop = FALSE])
se <- sqrt(diag(R) * resvar)
est <- object$coefficients[qr$pivot[p1]]
tval <- est / se
ans <- object[c("call", "terms", if (!is.null(object$weights)) "weights")]
ans$residuals <- r
ans$coefficients <- cbind(Estimate = est, `Std. Error` = se,
`t value` = tval,
`Pr(>|t|)` = 2 * stats::pt(abs(tval), rdf,
lower.tail = FALSE))
ans$aliased <- is.na(object$coefficients)
ans$sigma <- sqrt(resvar)
ans$df <- c(p, rdf, NCOL(qr$qr))
if (p != attr(object$terms, "intercept")) {
df.int <- if (attr(object$terms, "intercept")) 1L else 0L
ans$r.squared <- mss / (mss + rss)
ans$adj.r.squared <- 1 - (1 - ans$r.squared) * ((n - df.int) / rdf)
ans$fstatistic <- c(value = (mss / (p - df.int)) / resvar,
numdf = p - df.int, dendf = rdf)
} else {
ans$r.squared <- ans$adj.r.squared <- 0
}
ans$cov.unscaled <- R
dimnames(ans$cov.unscaled) <- dimnames(ans$coefficients)[c(1, 1)]
if (!is.null(object$na.action))
ans$na.action <- object$na.action
class(ans) <- c("summary.lm", "summary.wbaconlm")
ans
}
fitted.wbaconlm <- function(object, ...)
{
object$fitted.values
}
residuals.wbaconlm <- function(object, ...)
{
object$residuals
}
coef.wbaconlm <- function(object, ...)
{
object$coefficients
}
vcov.wbaconlm <- function(object, ...)
{
tmp <- summary.wbaconlm(object, ...)
tmp$sigma^2 * tmp$cov.unscaled
}
|
/scratch/gouwar.j/cran-all/cranData/wbacon/R/wbacon_reg.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "",
prompt = TRUE
)
## -----------------------------------------------------------------------------
library("wbacon")
## -----------------------------------------------------------------------------
data(bushfire, package = "modi")
## -----------------------------------------------------------------------------
head(bushfire)
## -----------------------------------------------------------------------------
data(bushfire.weights, package = "modi")
## -----------------------------------------------------------------------------
fit <- wBACON(bushfire, w = bushfire.weights, alpha = 0.05)
fit
## -----------------------------------------------------------------------------
summary(fit)
## -----------------------------------------------------------------------------
plot(fit, 1)
## -----------------------------------------------------------------------------
which(is_outlier(fit))
## -----------------------------------------------------------------------------
center(fit)
## -----------------------------------------------------------------------------
data(philips)
head(philips)
## -----------------------------------------------------------------------------
fit <- wBACON(philips, alpha = 0.05, version = "V1")
fit
## -----------------------------------------------------------------------------
plot(fit, which = 2)
## -----------------------------------------------------------------------------
plot(fit, which = 2, hex = TRUE)
## -----------------------------------------------------------------------------
data(education, package = "robustbase")
## -----------------------------------------------------------------------------
names(education)[3:6] <- c("RES", "INC", "YOUNG", "EXP")
head(education)
## -----------------------------------------------------------------------------
reg <- wBACON_reg(EXP ~ RES + INC + YOUNG, data = education)
reg
## -----------------------------------------------------------------------------
summary(reg)
## -----------------------------------------------------------------------------
summary(lm(EXP ~ RES + INC + YOUNG, data = education[!is_outlier(reg), ]))
## -----------------------------------------------------------------------------
plot(reg, 4)
|
/scratch/gouwar.j/cran-all/cranData/wbacon/inst/doc/wbacon.R
|
---
title: "Vignette: Weighted BACON algorithms"
author: "Tobias Schoch"
output:
html_document:
css: "fluent.css"
highlight: tango
vignette: >
%\VignetteIndexEntry{Weighted BACON algorithms}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "",
prompt = TRUE
)
```
```{css, echo = FALSE}
.my-callout {
padding: 0.25rem;
padding-left: 1rem;
margin-top: 0.25rem;
margin-bottom: 0.25rem;
border: 1px solid #eee;
border-left-width: 0.75rem;
border-left-color: #df536b;
border-radius: .25rem
}
```
## 1 Introduction
The package `wbacon` implements a weighted variant of the BACON (blocked
adaptive computationally-efficient outlier nominators) algorithms [Billor et
al.](#biblio) (2000) for multivariate outlier detection and robust linear
regression. The extension of the BACON algorithm for outlier detection to allow
for weighting is due to [Béguin and Hulliger](#biblio) (2008).
The details of the package are discussed in the accompanying paper; see
[Schoch](#biblio) (2021).
First, we attach the package to the search path.
```{r}
library("wbacon")
```
### 1.1 Available methods
* `wBACON()` is for multivariate outlier nomination and robust estimation of
location/ center and covariance matrix
* `wBACON_reg()` is for robust linear regression (the method is robust against
outliers in the response variable and the model's design matrix)
### 1.2 Assumptions
The BACON algorithms assume that the underlying model is an appropriate
description of the non-outlying observations; [Billor et al.](#biblio) (2000).
More precisely,
* the outlier nomination method assumes that the "good" data have (roughly) an
*elliptically contoured* distribution (this includes the Gaussian
distribution as a special case);
* the regression method assumes that the non-outlying ("good") data are
described by a *linear* (homoscedastic) regression model and that the
independent variables (having removed the regression intercept/constant, if
there is a constant) follow (roughly) an elliptically contoured distribution.
<div class="my-callout">
<p>"Although the algorithms will often do something reasonable even when
these assumptions are violated, it is hard to say what the results mean."
[Billor et al.](#biblio) (2000, p. 290)</p>
</div>
It is strongly recommended to examine the structure of the data and to check
whether the assumptions made about the "good" observations are reasonable.
### 1.3 The role of the data analyst
In line with [Billor et al.](#biblio) (2000, p. 290), we use the term outlier
"nomination" rather than "detection" to highlight that algorithms should not go
beyond nominating observations as *potential* outliers; see also [Béguin and
Hulliger](#biblio) (2008). It is left to the analyst to finally label outlying
observations as such.
The software provides the analyst with tools and measures to study potentially
outlying observations. It is strongly recommended to use the tools.
### 1.4 Additional information
Additional information on the BACON algorithms and the implementation can be
found in the documents:
* `methods.pdf`: A mathematical description of the algorithms and their
implementation;
* `doc_c_functions.pdf`: A documentation of the `C` functions.
Both documents can be found in the package folder `doc`.
## 2 Multivariate outlier detection
In this section, we study multivariate outlier detection for the two datasets
* bushfire data (with sampling weights),
* philips data (without sampling weights).
### 2.1 Bushfire data
The bushfire dataset is on satellite remote sensing. These data were used by
[Campbell](#biblio) (1984) to locate bushfire scars. The data are radiometer
readings from polar-orbiting satellites of the National Oceanic and Atmospheric
Administration (NOAA) which have been collected continuously since 1981. The
measurements are taken on five frequency bands or channels. In the near
infrared band, it is possible to distinguish vegetation types from burned
surface. At visible wavelengths, the vegetation spectra are similar to burned
surface. The spatial resolution is rather low (1.1 km per pixel).
#### 2.1.1 Data preparation
The bushfire data contain radiometer readings for 38 pixels and have been
studied in [Maronna and Yohai](#biblio) (1995), [Béguin and Hulliger](#biblio)
(2002), [Béguin and Hulliger](#biblio) (2008), and [Hulliger and
Schoch](#biblio) (2009). The data can be obtained from the `R` package
`modi` ([Hulliger](#biblio), 2023).<sup>[1](#notes)</sup>
```{r}
data(bushfire, package = "modi")
```
The first 6 readings on the five frequency bands (variables) are
```{r}
head(bushfire)
```
[Béguin and Hulliger](#biblio) (2008) generated a set of sampling weights. The
weights can be attached to the current session by
```{r}
data(bushfire.weights, package = "modi")
```
#### 2.1.2 Outlier detection
```{r}
fit <- wBACON(bushfire, w = bushfire.weights, alpha = 0.05)
fit
```
The argument `alpha` determines the $(1-\alpha)$-quantile $\chi_{\alpha,d}^2$
of the chi-square distribution with $d$ degrees of
freedom.<sup>[2](#notes)</sup> All observations whose squared Mahalanobis
distances are smaller than the quantile (times a correction factor) are
selected into the subset of outlier-free data. It is recommended to choose
`alpha` on grounds of an educated guess of the share of "good" observations in
the data. Here, we suppose that 95\% of the observations are not outliers.
By default, the initial subset is determined by the Euclidean norm
(initialization method: `version = "V2"`).
* This initialization method is robust because it is based on the
coordinate-wise (weighted) median. The resulting estimators of center and
scatter are *not affine equivariant*. Let $T(\cdot)$ denote an estimator of a
parameter of interest (e.g., covariance matrix) and let $X$ denote the $(n
\times p)$ data matrix. An estimator $T$ is affine equivariant if and only if
$T(A X + b) = A T(X) + b$, for any nonsingular $(p \times p)$ matrix $A$ and
any $p$-vector $b$. Although version `"V2"` of the BACON method yields an
estimator that is not affine equivariant in the above sense, [Billor et
al.](#biblio) (2000) point out that the method is nearly affine equivariant.
* There exists an alternative initialization method (`"version = V1"`) which is
based on the coordinate-wise (weighted) means; therefore, it is affine
equivariant but *not robust*.
From the above output, we see that the algorithm converged in three iterations.
In case the algorithm does not converge, we may increase the maximum number of
iterations (default: `maxiter = 50`) and toggle `verbose = TRUE` to (hopefully)
learn more why the method did not converge.
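For illustration only (not evaluated here), such a call could look as follows; the values are placeholders rather than recommendations:
```{r, eval = FALSE}
wBACON(bushfire, weights = bushfire.weights, alpha = 0.05, maxiter = 100,
    verbose = TRUE)
```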
In the next step, we want to study the result in more detail. In particular, we
are interested in the estimated center and scatter (or covariance) matrix. To
this end, we can call the `summary()` method on the object `fit`.
```{r}
summary(fit)
```
#### 2.1.3 Diagnostics
The method has detected `r sum(is_outlier(fit))` *potential* outliers. It is
important to study the diagnostic plot to learn more about the potential
outliers. The robust (Mahalanobis) distances vs. the index of the observations
(`1:n`) can be plotted as follows.
```{r}
plot(fit, 1)
```
The dashed horizontal line shows the cutoff threshold on the robust distances.
Observations above the line are nominated as potential outliers by the BACON
algorithm. It is left to the analyst to finally label outlying observations as
such. In the next section, we introduce an alternative plotting method (see
below).
The method `is_outlier()` returns a vector of logicals whether an observation
has been flagged as a potential outlier.
```{r}
which(is_outlier(fit))
```
The (robust) center and covariance (scatter) matrix can be extracted with the
auxiliary functions, respectively, `center()` and `cov()`.
```{r}
center(fit)
```
The robust Mahalanobis distances can be extracted with the `distance()` method.
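For completeness, a quick (purely illustrative) look at the first few distances; `distance()` simply returns the `dist` component of the fitted object:
```{r}
head(distance(fit))
```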
### 2.2 Philips data
Old television sets had a cathode ray tube with an electron gun. The emitted
beam runs through a diaphragm that lets pass only a partial beam to the screen.
The diaphragm consists of 9 components. The Philips data set contains $n = 667$
measurements on the $p = 9$ components (variables); see [Rousseeuw and van
Driessen](#biblio) (1999).<sup>[3](#notes)</sup> These data do not have
sampling weights.
```{r}
data(philips)
head(philips)
```
We compute the BACON algorithm but this time with the initialization method
`version = "V1"`.
```{r}
fit <- wBACON(philips, alpha = 0.05, version = "V1")
fit
```
The BACON algorithm detected `r sum(is_outlier(fit))` potential outliers. The
robust (Mahalanobis) distances can be plotted against the univariate projection
of the data, which maximizes the separation criterion of [Qiu and Joe](#biblio)
(2006). This kind of diagnostic graph attempts to separate outlying from
non-outlying observations as much as possible; see [Willems et al.](#biblio)
(2009). It is helpful if the outliers are clustered. The graph is generated as
follows.
```{r}
plot(fit, which = 2)
```
From the visual display, we see a cluster of potential outliers in the top
right corner. The dashed horizontal line indicates the cutoff threshold on the
distances as imposed by the BACON algorithm.
For very large datasets, the plot method can be called with the (additional)
argument `hex = TRUE` to show a hexagonally binned scatter plot; see below.
This plot method uses the functionality of the R package `hexbin` ([Carr et
al.](#biblio), 2023).
```{r}
plot(fit, which = 2, hex = TRUE)
```
## 3 Robust linear regression
The education data is on education expenditures in 50 US states in 1975
([Chatterjee and Hadi](#biblio), 2012, Chap. 5.7). The data can be loaded from
the `robustbase` package.
```{r}
data(education, package = "robustbase")
```
It is convenient to rename the variables.
```{r}
names(education)[3:6] <- c("RES", "INC", "YOUNG", "EXP")
head(education)
```
The measured variables for the 50 states are:
* `State`: State
* `Region`: group variable with outcomes: 1=Northeastern, 2=North central,
3=Southern, and 4=Western
* `RES`: Number of residents per thousand residing in urban areas in 1970
* `INC`: Per capita personal income in 1973 (\$US)
* `YOUNG`: Number of residents per thousand under 18 years of age in 1974
* `EXP`: Per capita expenditure on public education in a state (\$US),
projected for 1975
### 3.1 Model fit
We want to regress education expenditures (`EXP`) on the variables `RES`,
`INC`, and `YOUNG` by the BACON algorithm, and obtain
```{r}
reg <- wBACON_reg(EXP ~ RES + INC + YOUNG, data = education)
reg
```
The instance `reg` is an object of the class `wbaconlm`. The printed output of
`wBACON_reg` is identical with the one of the `lm` function. In addition, we
are told the size of the subset on which the regression has been computed. The
observations not in the subset are considered outliers (here 1 out of 50
observations).
The `summary()` method can be used to obtain a summary of the estimated model.
```{r}
summary(reg)
```
The summary output of `wBACON_reg` is identical with the output of the `lm`
estimate on the subset of outlier-free data,
```{r}
summary(lm(EXP ~ RES + INC + YOUNG, data = education[!is_outlier(reg), ]))
```
where we have used `is_outlier()` to extract the set of declared outliers from
`reg` (the summary output of the `lm` estimate is not shown).
### 3.2 Tuning
By default, `wBACON_reg` uses the parametrization $\alpha = 0.05$, `collect =
4`, and `version = "V2"`. These parameters are used to call the `wBACON`
algorithm on the design matrix. Then, the same parameters are used to compute
the robust regression.
To ensure a high breakdown point, `version = "V2"` should not be changed to
`version = "V1"` unless you have good reasons. The main "turning knob" to tune
the algorithm is `alpha`, which defines the $(1-$`alpha`$)$ quantile of the
Student $t$-distribution. All observations whose distances/discrepancies [See
document `methods.pdf` in the folder `doc` of the package.] are smaller (in
absolute value) than the quantile are selected into the subset of "good" data.
By choosing smaller values for `alpha` (e.g., 0.2), more observations are
selected (ceteris paribus) into the subset of "good" data (and vice versa).
The parameter `collect` specifies the initial subset size, which is defined as
$m = p \cdot collect$. It can be modified but should be chosen such that $m$ is
considerably smaller than the number of observations $n$. Otherwise there is a
high risk of selecting too many "bad" observations into the initial subset,
which will eventually bias the regression estimates.
In case the algorithm does not converge, we may increase the maximum number of
iterations (default: `maxiter = 50`) and toggle `verbose = TRUE` to (hopefully)
learn more why the method did not converge.
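As a sketch only (not evaluated), a call with modified tuning parameters might look as follows; the particular values are illustrative, not recommendations:
```{r, eval = FALSE}
wBACON_reg(EXP ~ RES + INC + YOUNG, data = education, alpha = 0.05,
    collect = 4, maxiter = 100, verbose = TRUE)
```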
### 3.3 Model diagnostics
The methods `coef()`, `vcov()`, and `predict()` work exactly the same as their
`lm` counterparts (a short illustration follows the list below). This is also
true for the first three `plot` types, that is
* `which = 1`: Residuals vs Fitted,
* `which = 2`: Normal Q-Q,
* `which = 3`: Scale-Location
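For instance (a brief illustration; any data frame containing the predictor columns can serve as `newdata`):
```{r}
coef(reg)
predict(reg, newdata = head(education))
```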
The plot types `4:6` of `plot.lm` are not implemented for objects of the class
`wbaconlm` because it is not sensible to study the standard regression
influence diagnostics in the presence of outliers in the model's design space.
Instead, type four (`which = 4`) plots the robust Mahalanobis distances with
respect to the non-constant design variables against the standardized residual.
This plot has been proposed by [Rousseeuw and van Zomeren](#biblio) (1990).
```{r}
plot(reg, 4)
```
The *filled* circle(s) represent the outliers nominated by the BACON algorithm.
The outlier in the top right corner is both a residual outlier and an outlier
in the model's design space.
* Observations with a robust Mahalanobis distance larger than 4.57 (see the
abscissa) are flagged as outliers in the model's design space (leverage
observations).
* Observations whose standardized residual falls outside the interval spanned
by $\pm \, t_{\alpha/(2m+2),\, m - p}$ are flagged as residual outliers, where
$t_{\alpha, m - p}$ denotes the $(1-\alpha)$ quantile of the Student
$t$-distribution with $m-p$ degrees of freedom and $m$ is the size of the
final subset of outlier-free data. Here, we have $m=49$ and $\alpha = 0.05$
(see argument `alpha` of `wBACON_reg`); thus the interval is
$[-3.52, \; 3.52]$. Both cutoffs can be read off the fitted object, as shown
below.
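A minimal sketch of how to retrieve the two cutoff values (the component names follow the list returned by `wBACON_reg()`):
```{r}
c(design_space = reg$mv$cutoff, residuals = reg$reg$cutoff)
```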
---
## References {#biblio}
Béguin, C. and B. Hulliger (2002). Robust Multivariate Outlier Detection and
Imputation with Incomplete Survey Data, Deliverable D4/5.2.1/2 Part C: EUREDIT
project, https://www.cs.york.ac.uk/euredit/euredit-main.html, research project
funded by the European Commission, IST-1999-10226.
Béguin, C. and B. Hulliger (2008). The BACON-EEM Algorithm for Multivariate
Outlier Detection in Incomplete Survey Data, *Survey Methodology* **34**,
91--103.
Billor, N., A. S. Hadi, and P. F. Velleman (2000). BACON: Blocked Adaptive
Computationally-efficient Outlier Nominators, *Computational Statistics and
Data Analysis* **34**, 279--298.
[DOI 10.1016/S0167-9473(99)00101-2](https://doi.org/10.1016/S0167-9473(99)00101-2)
Campbell, N. A. (1989). Bushfire Mapping using NOAA AVHRR Data. Technical
Report. Commonwealth Scientific and Industrial Research Organisation, North
Ryde.
Carr, D., N. Lewin-Koh, and M. Maechler (2023). hexbin: Hexagonal Binning
Routines. R package version 1.28.3. (The package contains copies of lattice
functions written by Deepayan Sarkar). URL
https://CRAN.R-project.org/package=hexbin
Chatterjee, S. and A. S. Hadi (2012). *Regression Analysis by Example*, 5th
ed., Hoboken (NJ): John Wiley \& Sons.
Hulliger, B. and T. Schoch (2009). Robust multivariate imputation with survey
data, in *Proceedings of the 57th Session of the International Statistical
Institute*, Durban.
Hulliger, B. (2023). modi: Multivariate Outlier Detection and Imputation for
Incomplete Survey Data, R package version 0.1-2. URL
https://CRAN.R-project.org/package=modi
Maechler, M., P. Rousseeuw, C. Croux, V. Todorov, A. Ruckstuhl, M.
Salibian-Barrera, T. Verbeke, M. Koller, E. L. T. Conceicao, and M. Anna di
Palma (2024). robustbase: Basic Robust Statistics, R package version 0.99-2.
URL https://CRAN.R-project.org/package=robustbase
Maronna, R. A. and V. J. Yohai (1995). The Behavior of the Stahel-Donoho Robust
Multivariate Estimator, *Journal of the American Statistical Association*
**90** 330--341. [DOI 10.2307/2291158](https://doi.org/10.2307/2291158)
Qiu, W. and H. Joe (2006). Separation index and partial membership for
clustering, *Computational Statistics and Data Analysis* **50**, 585--603.
[DOI 10.1016/j.csda.2004.09.009](https://doi.org/10.1016/j.csda.2004.09.009)
Raymaekers, J. and P. Rousseeuw (2023). cellWise: Analyzing Data with Cellwise
Outliers, R package version 2.5.3. URL
https://CRAN.R-project.org/package=cellWise
Rousseeuw, P. J. and K. van Driessen (1999). A fast algorithm for the Minimum
Covariance Determinant estimator, *Technometrics* **41**, 212--223.
[DOI 10.2307/1270566](https://doi.org/10.2307/1270566)
Rousseeuw, P. J. and K. van Zomeren (1990). Unmasking Multivariate Outliers and
Leverage Points, *Journal of the American Statistical Association* **411**,
633--639. [DOI 10.2307/2289995](https://doi.org/10.2307/2289995)
Schoch, T. (2021) wbacon: Weighted BACON algorithms for multivariate outlier
nomination (detection) and robust linear regression, *Journal of Open Source
Software* **6**, 323.
[DOI 10.21105/joss.03238](https://doi.org/10.21105/joss.03238)
Willems, G., H. Joe, and R. Zamar (2009). Diagnosing Multivariate Outliers
Detected by Robust Estimators, *Journal of Computational and Graphical
Statistics* **18**, 73--91.
[DOI 10.1198/jcgs.2009.0005](https://doi.org/10.1198/jcgs.2009.0005)
## Notes {#notes}
<sup>1</sup> The data are also distributed with the `R` package `robustbase`
([Maechler et al.](#biblio), 2023).
<sup>2</sup> The degrees of freedom $d$ is a function of the number of
variables $p$, the number of observations $n$, and the size of the current
subset $m$; see `methods.pdf` in the `inst/doc` folder of the package.
<sup>3</sup> The philips data has been published in the `R` package `cellWise`
([Raymaekers and Rousseeuw](#biblio), 2023).
|
/scratch/gouwar.j/cran-all/cranData/wbacon/inst/doc/wbacon.Rmd
|
#!/usr/bin/env Rscript
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
mode <- "help"
} else {
mode <- switch(args[1],
fast = "fast",
check = "check",
full = "full",
tar = "tar",
h = "help",
help = "help",
pdf = "pdf",
"help") # default: unrecognized arguments show the help text
}
if (mode == "help") {
cat("\nOptions:\n")
cat(" fast (install only x64, w/o html)\n")
cat(" check\n")
cat(" tar (generate tar ball)\n")
cat(" full (standard installation)\n\n")
q(save = "no")
}
#-------------------------------------------------------------------------------
PKG <- "wbacon"
PKG_SOURCE <- "/home/tobias/Code"
PKG_ROOT <- "/home/tobias/tmp"
setwd(PKG_ROOT)
# delete package folder if it already exists
if (dir.exists(PKG))
unlink(PKG, recursive = TRUE)
# copy entire directory (excl. files/folders with a leading dot, e.g. '.git')
dir.create(PKG)
pkg_files <- list.files(paste0(PKG_SOURCE, "/", PKG), full.names = TRUE)
file.copy(pkg_files, paste0(PKG_ROOT, "/", PKG), recursive = TRUE)
# copy .Rbuildignore and .Rinstignore
f_R_build_ignore <- paste0(PKG_SOURCE, "/", PKG, "/.Rbuildignore")
if (file.exists(f_R_build_ignore))
file.copy(f_R_build_ignore, paste0(PKG_ROOT, "/", PKG))
f_R_inst_ignore <- paste0(PKG_SOURCE, "/", PKG, "/.Rinstignore")
if (file.exists(f_R_inst_ignore))
file.copy(f_R_inst_ignore, paste0(PKG_ROOT, "/", PKG))
# clean src folder (remove binary files)
binary_files <- list.files(paste0(PKG_ROOT, "/", PKG, "/src"),
pattern = "\\.o$|\\.dll$|\\.so$")
if (length(binary_files) > 0)
file.remove(paste0(PKG_ROOT, "/", PKG, "/src/", binary_files))
#-------------------------------------------------------------------------------
setwd(PKG_ROOT)
# fast install (only x64 arch; without html help files and vignette)
if (mode == "fast")
system(paste0("R CMD INSTALL ", PKG, " --no-html --no-multiarch"))
# build the tar ball
if (mode == "tar") {
system(paste0("R CMD build ", PKG))
}
# build the tar ball and check
if (mode == "check") {
vers <- trimws(strsplit(readLines(paste0(PKG,
"/DESCRIPTION"))[4], ":")[[1]][2])
pkg_tar <- paste0(PKG, "_", vers, ".tar.gz")
if (file.exists(pkg_tar))
file.remove(pkg_tar)
system(paste0("R CMD build ", PKG))
system(paste0("R CMD check ", pkg_tar))
}
# full build and install (incl. html, vignette, etc)
if (mode == "full") {
vers <- trimws(strsplit(readLines(paste0(PKG,
"/DESCRIPTION"))[4], ":")[[1]][2])
pkg_tar <- paste0(PKG, "_", vers, ".tar.gz")
if (file.exists(pkg_tar))
file.remove(pkg_tar)
system(paste0("R CMD build ", PKG))
system(paste0("R CMD INSTALL ", pkg_tar))
}
# render pdf manual
#system("R CMD Rd2pdf PKG")
|
/scratch/gouwar.j/cran-all/cranData/wbacon/inst/varia/_make_pkg.R
|
---
title: "Vignette: Weighted BACON algorithms"
author: "Tobias Schoch"
output:
html_document:
css: "fluent.css"
highlight: tango
vignette: >
%\VignetteIndexEntry{Weighted BACON algorithms}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "",
prompt = TRUE
)
```
```{css, echo = FALSE}
.my-callout {
padding: 0.25rem;
padding-left: 1rem;
margin-top: 0.25rem;
margin-bottom: 0.25rem;
border: 1px solid #eee;
border-left-width: 0.75rem;
border-left-color: #df536b;
border-radius: .25rem
}
```
## 1 Introduction
The package `wbacon` implements a weighted variant of the BACON (blocked
adaptive computationally-efficient outlier nominators) algorithms [Billor et
al.](#biblio) (2000) for multivariate outlier detection and robust linear
regression. The extension of the BACON algorithm for outlier detection to allow
for weighting is due to [Béguin and Hulliger](#biblio) (2008).
The details of the package are discussed in the accompanying paper; see
[Schoch](#biblio) (2021).
First, we attach the package to the search path.
```{r}
library("wbacon")
```
### 1.1 Available methods
* `wBACON()` is for multivariate outlier nomination and robust estimation of
location/ center and covariance matrix
* `wBACON_reg()` is for robust linear regression (the method is robust against
outliers in the response variable and the model's design matrix)
### 1.2 Assumptions
The BACON algorithms assume that the underlying model is an appropriate
description of the non-outlying observations; [Billor et al.](#biblio) (2000).
More precisely,
* the outlier nomination method assumes that the "good" data have (roughly) an
*elliptically contoured* distribution (this includes the Gaussian
distribution as a special case);
* the regression method assumes that the non-outlying ("good") data are
described by a *linear* (homoscedastic) regression model and that the
independent variables (having removed the regression intercept/constant, if
there is a constant) follow (roughly) an elliptically contoured distribution.
<div class="my-callout">
<p>"Although the algorithms will often do something reasonable even when
these assumptions are violated, it is hard to say what the results mean."
[Billor et al.](#biblio) (2000, p. 290)</p>
</div>
It is strongly recommended to examine the structure of the data and to check
whether the assumptions made about the "good" observations are reasonable.
### 1.3 The role of the data analyst
In line with [Billor et al.](#biblio) (2000, p. 290), we use the term outlier
"nomination" rather than "detection" to highlight that algorithms should not go
beyond nominating observations as *potential* outliers; see also [Béguin and
Hulliger](#biblio) (2008). It is left to the analyst to finally label outlying
observations as such.
The software provides the analyst with tools and measures to study potentially
outlying observations. It is strongly recommended to use the tools.
### 1.4 Additional information
Additional information on the BACON algorithms and the implementation can be
found in the documents:
* `methods.pdf`: A mathematical description of the algorithms and their
implementation;
* `doc_c_functions.pdf`: A documentation of the `C` functions.
Both documents can be found in the package folder `doc`.
## 2 Multivariate outlier detection
In this section, we study multivariate outlier detection for the two datasets
* bushfire data (with sampling weights),
* philips data (without sampling weights).
### 2.1 Bushfire data
The bushfire dataset is on satellite remote sensing. These data were used by
[Campbell](#biblio) (1989) to locate bushfire scars. The data are radiometer
readings from polar-orbiting satellites of the National Oceanic and Atmospheric
Administration (NOAA) which have been collected continuously since 1981. The
measurements are taken on five frequency bands or channels. In the near
infrared band, it is possible to distinguish vegetation types from burned
surface. At visible wavelengths, the vegetation spectra are similar to burned
surface. The spatial resolution is rather low (1.1 km per pixel).
#### 2.1.1 Data preparation
The bushfire data contain radiometer readings for 38 pixels and have been
studied in [Maronna and Yohai](#biblio) (1995), [Béguin and Hulliger](#biblio)
(2002), [Béguin and Hulliger](#biblio) (2008), and [Hulliger and
Schoch](#biblio) (2009). The data can be obtained from the `R` package
`modi` ([Hulliger](#biblio), 2023).<sup>[1](#notes)</sup>
```{r}
data(bushfire, package = "modi")
```
The first 6 readings on the five frequency bands (variables) are
```{r}
head(bushfire)
```
[Béguin and Hulliger](#biblio) (2008) generated a set of sampling weights. The
weights can be attached to the current session by
```{r}
data(bushfire.weights, package = "modi")
```
#### 2.1.2 Outlier detection
```{r}
fit <- wBACON(bushfire, w = bushfire.weights, alpha = 0.05)
fit
```
The argument `alpha` determines the $(1-\alpha)$-quantile $\chi_{\alpha,d}^2$
of the chi-square distribution with $d$ degrees of
freedom.<sup>[2](#notes)</sup> All observations whose squared Mahalanobis
distances are smaller than the quantile (times a correction factor) are
selected into the subset of outlier-free data. It is recommended to choose
`alpha` on grounds of an educated guess of the share of "good" observations in
the data. Here, we suppose that 95\% of the observations are not outliers.
By default, the initial subset is determined by the Euclidean norm
(initialization method: `version = "V2"`).
* This initialization method is robust because it is based on the
coordinate-wise (weighted) median. The resulting estimators of center and
scatter are *not affine equivariant*. Let $T(\cdot)$ denote an estimator of a
parameter of interest (e.g., covariance matrix) and let $X$ denote the $(n
\times p)$ data matrix. An estimator $T$ is affine equivariant if and only if
$T(A X + b) = A T(X) + b$, for any nonsingular $(p \times p)$ matrix $A$ and
any $p$-vector $b$. Although version `"V2"` of the BACON method yields an
estimator that is not affine equivariant in the above sense, [Billor et
al.](#biblio) (2000) point out that the method is nearly affine equivariant.
* There exists an alternative initialization method (`"version = V1"`) which is
based on the coordinate-wise (weighted) means; therefore, it is affine
equivariant but *not robust*.
From the above output, we see that the algorithm converged in three iterations.
In case the algorithm does not converge, we may increase the maximum number of
iterations (default: `maxiter = 50`) and toggle `verbose = TRUE` to (hopefully)
learn more why the method did not converge.
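For illustration only (not evaluated here), such a call could look as follows; the values are placeholders rather than recommendations:
```{r, eval = FALSE}
wBACON(bushfire, weights = bushfire.weights, alpha = 0.05, maxiter = 100,
    verbose = TRUE)
```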
In the next step, we want to study the result in more detail. In particular, we
are interested in the estimated center and scatter (or covariance) matrix. To
this end, we can call the `summary()` method on the object `fit`.
```{r}
summary(fit)
```
#### 2.1.3 Diagnostics
The method has detected `r sum(is_outlier(fit))` *potential* outliers. It is
important to study the diagnostic plot to learn more about the potential
outliers. The robust (Mahalanobis) distances vs. the index of the observations
(`1:n`) can be plotted as follows.
```{r}
plot(fit, 1)
```
The dashed horizontal line shows the cutoff threshold on the robust distances.
Observations above the line are nominated as potential outliers by the BACON
algorithm. It is left to the analyst to finally label outlying observations as
such. In the next section, we introduce an alternative plotting method (see
below).
The method `is_outlier()` returns a vector of logicals whether an observation
has been flagged as a potential outlier.
```{r}
which(is_outlier(fit))
```
The (robust) center and covariance (scatter) matrix can be extracted with the
auxiliary functions, respectively, `center()` and `cov()`.
```{r}
center(fit)
```
The robust Mahalanobis distances can be extracted with the `distance()` method.
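For completeness, a quick (purely illustrative) look at the first few distances; `distance()` simply returns the `dist` component of the fitted object:
```{r}
head(distance(fit))
```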
### 2.2 Philips data
Old television sets had a cathode ray tube with an electron gun. The emitted
beam runs through a diaphragm that lets pass only a partial beam to the screen.
The diaphragm consists of 9 components. The Philips data set contains $n = 667$
measurements on the $p = 9$ components (variables); see [Rousseeuw and van
Driessen](#biblio) (1999).<sup>[3](#notes)</sup> These data do not have
sampling weights.
```{r}
data(philips)
head(philips)
```
We compute the BACON algorithm but this time with the initialization method
`version = "V1"`.
```{r}
fit <- wBACON(philips, alpha = 0.05, version = "V1")
fit
```
The BACON algorithm detected `r sum(is_outlier(fit))` potential outliers. The
robust (Mahalanobis) distances can be plotted against the univariate projection
of the data, which maximizes the separation criterion of [Qiu and Joe](#biblio)
(2006). This kind of diagnostic graph attempts to separate outlying from
non-outlying observations as much as possible; see [Willems et al.](#biblio)
(2009). It is helpful if the outliers are clustered. The graph is generated as
follows.
```{r}
plot(fit, which = 2)
```
From the visual display, we see a cluster of potential outliers in the top
right corner. The dashed horizontal line indicates the cutoff threshold on the
distances as imposed by the BACON algorithm.
For very large datasets, the plot method can be called with the (additional)
argument `hex = TRUE` to show a hexagonally binned scatter plot; see below.
This plot method uses the functionality of the R package `hexbin` ([Carr et
al.](#biblio), 2023).
```{r}
plot(fit, which = 2, hex = TRUE)
```
## 3 Robust linear regression
The education data is on education expenditures in 50 US states in 1975
([Chatterjee and Hadi](#biblio), 2012, Chap. 5.7). The data can be loaded from
the `robustbase` package.
```{r}
data(education, package = "robustbase")
```
It is convenient to rename the variables.
```{r}
names(education)[3:6] <- c("RES", "INC", "YOUNG", "EXP")
head(education)
```
The measured variables for the 50 states are:
* `State`: State
* `Region`: group variable with outcomes: 1=Northeastern, 2=North central,
3=Southern, and 4=Western
* `RES`: Number of residents per thousand residing in urban areas in 1970
* `INC`: Per capita personal income in 1973 (\$US)
* `YOUNG`: Number of residents per thousand under 18 years of age in 1974
* `EXP`: Per capita expenditure on public education in a state (\$US),
projected for 1975
### 3.1 Model fit
We want to regress education expenditures (`EXP`) on the variables `RES`,
`INC`, and `YOUNG` by the BACON algorithm, and obtain
```{r}
reg <- wBACON_reg(EXP ~ RES + INC + YOUNG, data = education)
reg
```
The instance `reg` is an object of the class `wbaconlm`. The printed output of
`wBACON_reg` is identical with the one of the `lm` function. In addition, we
are told the size of the subset on which the regression has been computed. The
observations not in the subset are considered outliers (here 1 out of 50
observations).
The `summary()` method can be used to obtain a summary of the estimated model.
```{r}
summary(reg)
```
The summary output of `wBACON_reg` is identical with the output of the `lm`
estimate on the subset of outlier-free data,
```{r}
summary(lm(EXP ~ RES + INC + YOUNG, data = education[!is_outlier(reg), ]))
```
where we have used `is_outlier()` to extract the set of declared outliers from
`reg` (the summary output of the `lm` estimate is not shown).
### 3.2 Tuning
By default, `wBACON_reg` uses the parametrization $\alpha = 0.05$, `collect =
4`, and `version = "V2"`. These parameters are used to call the `wBACON`
algorithm on the design matrix. Then, the same parameters are used to compute
the robust regression.
To ensure a high breakdown point, `version = "V2"` should not be changed to
`version = "V1"` unless you have good reasons. The main "turning knob" to tune
the algorithm is `alpha`, which defines the $(1-$`alpha`$)$ quantile of the
Student $t$-distribution. All observations whose distances/discrepancies [See
document `methods.pdf` in the folder `doc` of the package.] are smaller (in
absolute value) than the quantile are selected into the subset of "good" data.
By choosing smaller values for `alpha` (e.g., 0.2), more observations are
selected (ceteris paribus) into the subset of "good" data (and vice versa).
The parameter `collect` specifies the initial subset size, which is defined as
$m = p \cdot \mathrm{collect}$. It can be modified but should be chosen such
that $m$ is considerably smaller than the number of observations $n$.
Otherwise, there is a high risk of selecting too many "bad" observations into
the initial subset, which will eventually bias the regression estimates.
If the algorithm does not converge, we may increase the maximum number of
iterations (default: `maxiter = 50`) and set `verbose = TRUE` to (hopefully)
learn more about why the method did not converge.
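For illustration, a call with these tuning parameters written out explicitly
might look as follows (the values shown are the defaults introduced above, not
a recommendation; adjust them as needed):
```{r, eval = FALSE}
# Explicit tuning parameters (defaults shown) -- illustration only; change
# alpha, collect, maxiter, or verbose as discussed above
wBACON_reg(EXP ~ RES + INC + YOUNG, data = education,
           alpha = 0.05, collect = 4, version = "V2",
           maxiter = 50, verbose = FALSE)
```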
### 3.3 Model diagnostics
The methods `coef()`, `vcov()`, and `predict()` work exactly the same as their
`lm` counterparts (a short illustration follows the list below). This is also
true for the first three `plot` types, that is:
* `which = 1`: Residuals vs Fitted,
* `which = 2`: Normal Q-Q,
* `which = 3`: Scale-Location
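As a quick illustration of the accessor methods mentioned above (the calls
mirror their `lm` counterparts):
```{r}
# Regression coefficients, their covariance matrix, and the first few
# predicted values of the BACON regression fit (same interface as predict.lm)
coef(reg)
vcov(reg)
head(predict(reg, newdata = education))
```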
The plot types `4:6` of `plot.lm` are not implemented for objects of the class
`wbaconlm` because it is not sensible to study the standard regression
influence diagnostics in the presence of outliers in the model's design space.
Instead, type four (`which = 4`) plots the robust Mahalanobis distances with
respect to the non-constant design variables against the standardized residuals.
This plot has been proposed by [Rousseeuw and van Zomeren](#biblio) (1990).
```{r}
plot(reg, 4)
```
The *filled* circle(s) represent the outliers nominated by the BACON algorithm.
The outlier in the top right corner is both a residual outlier and an outlier
in the model's design space.
* Observations with robust Mahalanobis distances larger than 4.57 (see the
  horizontal axis) are flagged as outliers in the model's design space
  (leverage observations).
* Observations whose standardized residual falls outside the interval spanned
  by $\pm \, t_{\alpha/(2m+2), m - p}$ are flagged as residual outliers, where
  $t_{\alpha, m - p}$ is the $(1-\alpha)$ quantile of the Student
  $t$-distribution with $m-p$ degrees of freedom and $m$ denotes the size of
  the final subset of outlier-free data. Here, we have $m=49$ and $\alpha =
  0.05$ (see argument `alpha` of `wBACON_reg`), thus the interval is
  $[-3.52, \; 3.52]$; this cutoff can be verified numerically (see below).
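As a small check of the reported cutoff (assuming $p = 4$, i.e., the intercept
plus the three covariates), the quantile can be computed directly:
```{r}
# (1 - alpha/(2m + 2)) quantile of the t-distribution with m - p degrees of
# freedom; m = 49 (final subset size), p = 4, alpha = 0.05 -- this reproduces
# the cutoff of approximately 3.52
qt(1 - 0.05 / (2 * 49 + 2), df = 49 - 4)
```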
---
## References {#biblio}
Béguin, C. and B. Hulliger (2002). Robust Multivariate Outlier Detection and
Imputation with Incomplete Survey Data, Deliverable D4/5.2.1/2 Part C: EUREDIT
project, https://www.cs.york.ac.uk/euredit/euredit-main.html, research project
funded by the European Commission, IST-1999-10226.
Béguin, C. and B. Hulliger (2008). The BACON-EEM Algorithm for Multivariate
Outlier Detection in Incomplete Survey Data, *Survey Methodology* **34**,
91--103.
Billor, N., A. S. Hadi, and P. F. Velleman (2000). BACON: Blocked Adaptive
Computationally-efficient Outlier Nominators, *Computational Statistics and
Data Analysis* **34**, 279--298.
[DOI 10.1016/S0167-9473(99)00101-2](https://doi.org/10.1016/S0167-9473(99)00101-2)
Campbell, N. A. (1989). Bushfire Mapping using NOAA AVHRR Data. Technical
Report. Commonwealth Scientific and Industrial Research Organisation, North
Ryde.
Carr, D., N. Lewin-Koh, and M. Maechler (2023). hexbin: Hexagonal Binning
Routines. R package version 1.28.3. (The package contains copies of lattice
functions written by Deepayan Sarkar). URL
https://CRAN.R-project.org/package=hexbin
Chatterjee, S. and A. S. Hadi (2012). *Regression Analysis by Example*, 5th
ed., Hoboken (NJ): John Wiley \& Sons.
Hulliger, B. and T. Schoch (2009). Robust multivariate imputation with survey
data, in *Proceedings of the 57th Session of the International Statistical
Institute*, Durban.
Hulliger, B. (2023). modi: Multivariate Outlier Detection and Imputation for
Incomplete Survey Data, R package version 0.1-2. URL
https://CRAN.R-project.org/package=modi
Maechler, M., P. Rousseeuw, C. Croux, V. Todorov, A. Ruckstuhl, M.
Salibian-Barrera, T. Verbeke, M. Koller, E. L. T. Conceicao, and M. Anna di
Palma (2024). robustbase: Basic Robust Statistics, R package version 0.99-2.
URL https://CRAN.R-project.org/package=robustbase
Maronna, R. A. and V. J. Yohai (1995). The Behavior of the Stahel-Donoho Robust
Multivariate Estimator, *Journal of the American Statistical Association*
**90**, 330--341. [DOI 10.2307/2291158](https://doi.org/10.2307/2291158)
Qiu, W. and H. Joe (2006). Separation index and partial membership for
clustering, *Computational Statistics and Data Analysis* **50**, 585--603.
[DOI 10.1016/j.csda.2004.09.009](https://doi.org/10.1016/j.csda.2004.09.009)
Raymaekers, J. and P. Rousseeuw (2023). cellWise: Analyzing Data with Cellwise
Outliers, R package version 2.5.3. URL
https://CRAN.R-project.org/package=cellWise
Rousseeuw, P. J. and K. van Driessen (1999). A fast algorithm for the Minimum
Covariance Determinant estimator, *Technometrics* **41**, 212--223.
[DOI 10.2307/1270566](https://doi.org/10.2307/1270566)
Rousseeuw, P. J. and B. C. van Zomeren (1990). Unmasking Multivariate Outliers
and Leverage Points, *Journal of the American Statistical Association* **85**,
633--639. [DOI 10.2307/2289995](https://doi.org/10.2307/2289995)
Schoch, T. (2021). wbacon: Weighted BACON algorithms for multivariate outlier
nomination (detection) and robust linear regression, *Journal of Open Source
Software* **6**, 3238.
[DOI 10.21105/joss.03238](https://doi.org/10.21105/joss.03238)
Willems, G., H. Joe, and R. Zamar (2009). Diagnosing Multivariate Outliers
Detected by Robust Estimators, *Journal of Computational and Graphical
Statistics* **18**, 73--91.
[DOI 10.1198/jcgs.2009.0005](https://doi.org/10.1198/jcgs.2009.0005)
## Notes {#notes}
<sup>1</sup> The data are also distributed with the `R` package `robustbase`
([Maechler et al.](#biblio), 2024).
<sup>2</sup> The degrees of freedom $d$ is a function of the number of
variables $p$, the number of observations $n$, and the size of the current
subset $m$; see `methods.pdf` in the `inst/doc` folder of the package.
<sup>3</sup> The philips data has been published in the `R` package `cellWise`
([Raymaekers and Rousseeuw](#biblio), 2023).