sback <-
function(formula, data, offset = NULL, weights = NULL, kernel = c("Gaussian", "Epanechnikov"), bw.grid = seq(0.01, 0.99, length = 30), c.bw.factor = FALSE, KfoldCV = 5, kbin = 30, family = c("gaussian", "binomial", "poisson")) {
family <- match.arg(family)
if(missing(formula)) {
stop("Argument \"formula\" is missing, with no default")
}
if(missing(data)) {
stop("Argument \"data\" is missing, with no default")
}
data[,"ONE"] <- 1.0
fsb <- interpret.sbformula(formula)
if(is.null(fsb$response)) {
stop("Response variable should be specified in argument \"formula\"")
}
z.varnames <- fsb$II[1,]
x.varnames <- fsb$II[2,]
if(any(is.na(match(c(fsb$response, c(x.varnames, z.varnames)), names(data))))) {
stop("Not all needed variables are supplied in data")
}
data <- na.omit(data[,c(fsb$response, unique(c(x.varnames, z.varnames)))])
n <- nrow(data)
if(is.null(weights)) {
weights <- rep(1, n)
} else {
if(sum(weights) <= 0 || any(weights < 0) || length(weights) != n)
stop("The specified weights are not correct")
}
if(is.null(offset)) {
offset <- rep(0, n)
}
# Smooth effects (either varying or not)
x.varnames.s <- x.varnames[fsb$h != 0]
z.varnames.s <- z.varnames[fsb$h != 0]
if(length(x.varnames.s) == 0) {
stop("No smooth functions have been specified")
}
mode <- lapply(c(x.varnames.s, z.varnames.s), function(x,data) class(data[, x, drop = TRUE]), data = data)
if(any(mode %in% "factor")) {
stop("Only continuos covariates are allowed for non parametric effects and varying coefficient components. Factors are not allowed")
}
if(any(fsb$h == -1)) {
if(c.bw.factor) {
stop("For the alpha correction the user needs to specify bandwidth parameters for all nonparametric functions.")
}
optband <- search.bandwidth(formula = formula, data = data, offset = offset, weights = weights, kernel = kernel, bandwidth = bw.grid, KfoldCV = KfoldCV, kbin = kbin, family = family)
res <- sback.fit(formula = optband$formula, data = data, offset = offset, weights = weights, kernel = kernel, kbin = kbin, family = family, newdata = data, newoffset = offset, call = match.call())
if(res$fit$err == 1) {
stop("There has been an error during the fitting process. Most likely, the error is due to bandwidth parameters being too small.")
}
res$err.CV <- optband$err.CV
} else {
if(!c.bw.factor) {
res <- sback.fit(formula = formula, data = data, offset = offset, weights = weights, kernel = kernel, kbin = kbin, family = family, newdata = data, newoffset = offset, call = match.call())
if(res$fit$err == 1) {
stop("There has been an error during the fitting process. Most likely, the error is due to bandwidth parameters being too small.")
}
} else {
optband <- search.alpha(formula = formula, data = data, offset = offset, weights = weights, kernel = kernel, alpha = seq(0.5, 1.5, length = 10), KfoldCV = KfoldCV, kbin = kbin, family = family)
res <- sback.fit(formula = optband$formula, data = data, offset = offset, weights = weights, kernel = kernel, kbin = kbin, family = family, newdata = data, newoffset = offset, call = match.call())
if(res$fit$err == 1) {
stop("There has been an error during the fitting process. Most likely, the error is due to bandwidth parameters being too small.")
}
res$err.CV <- optband$err.CV
}
}
class(res) <- "sback"
res
} | /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/sback.R |
sback.fit <-
function(formula, data, offset = NULL, weights = NULL, kernel = c("Gaussian", "Epanechnikov"), kbin = 15, family = c("gaussian", "binomial", "poisson"), newdata = NULL, newoffset = NULL, call = NULL, pred = FALSE) {
family <- match.arg(family)
kernel <- match.arg(kernel)
family_fortran <- switch(family, "gaussian" = 2, "binomial" = 1, "poisson" = 3)
kernel_fortran <- switch(kernel, "Gaussian" = 1, "Epanechnikov" = 2)
if(missing(formula)) {
stop("Argument \"formula\" is missing, with no default")
}
if(missing(data)) {
stop("Argument \"data\" is missing, with no default")
}
data[,"ONE"] <- 1.0
fsb <- interpret.sbformula(formula)
if(is.null(fsb$response)) {
stop("Response variable should be specified in argument \"formula\"")
}
z.varnames <- fsb$II[1,]
x.varnames <- fsb$II[2,]
if(any(is.na(match(c(fsb$response, c(x.varnames, z.varnames)), names(data))))) {
stop("Not all needed variables are supplied in data")
}
if(!is.null(newdata)) {
newdata[,"ONE"] <- 1.0
if(any(is.na(match(c(x.varnames, z.varnames), names(newdata))))) {
stop("Not all needed variables are supplied in newdata")
}
} else {
newdata <- data
}
data <- na.omit(data[,c(fsb$response, unique(c(x.varnames, z.varnames)))])
newdata <- na.omit(newdata[,unique(c(x.varnames, z.varnames))])
n <- nrow(data)
n0 <- nrow(newdata)
if(is.null(weights)) {
weights <- rep(1, n)
} else {
if(sum(weights) <= 0 || any(weights < 0) || length(weights) != n)
stop("The specified weights are not correct")
}
if(is.null(offset)) {
offset <- rep(0, n)
}
if(is.null(newoffset)) {
newoffset <- rep(0, n0)
}
# Smooth effects (either varying or not)
x.varnames.s <- x.varnames[fsb$h != 0]
z.varnames.s <- z.varnames[fsb$h != 0]
if(length(x.varnames.s) == 0) {
stop("No smooth functions have been specified")
}
mode <- lapply(c(x.varnames.s, z.varnames.s), function(x,data) class(data[, x, drop = TRUE]), data = data)
if(any(mode %in% "factor")) {
stop("Only continuos covariates are allowed for non parametric effects and varying coefficients. Factors are not allowed")
}
# Parametric effects
# Included as linear or parametric (i.e. categorical covariates)
x.varnames.p <- x.varnames[fsb$h == 0]
# Varying (if any)
z.varnames.p <- vector()
aux <- fsb$II[,fsb$h != 0, drop = FALSE]
for(i in 1:ncol(aux)) {
if(aux[1,i] == "ONE") {
z.varnames.p <- c(z.varnames.p, aux[2,i])
#z.varnames.p <- c(z.varnames.p) # Without linear part
} else {
z.varnames.p <- c(z.varnames.p, aux[1,i], paste(aux[1,i], ":", aux[2,i], sep = ""))
#z.varnames.p <- c(z.varnames.p, aux[1,i]) # Without linear part
}
}
# Remove those that are repeated
z.varnames.p <- unique(c(z.varnames.p, x.varnames.p))
if(length(z.varnames.p) == 0) {
Xl <- double(0)
Xpl <- double(0)
nparl <- 0
names.param <- NULL
} else {
# Construct parametric design matrix
formula.p <- paste("~", paste(z.varnames.p, collapse = "+"))
# Fit
MMp <- construct.fixed.part(formula.p, data)
Xl <- MMp$X
# Prediction
Xpl <- construct.fixed.prediction.matrix(MMp, newdata)
nparl <- ncol(Xl)
names.param <- colnames(MMp$X)
}
if(all(z.varnames.s %in% "ONE")) {
fit <- .Fortran("dllsback",
x = matrix(as.double(as.matrix(data[,x.varnames.s])), ncol = length(x.varnames.s)),
y = as.double(data[,fsb$response]),
offset = as.double(offset),
w = as.double(weights),
n = as.integer(n),
npar = as.integer(length(x.varnames.s)),
xl = matrix(as.double(Xl), ncol = nparl),
nparl = as.integer(nparl),
kbin = as.integer(kbin),
h = as.double(fsb$h[fsb$h != 0]),
m = matrix(as.double(rep(0.0,n*length(x.varnames.s))), nrow = n, ncol = length(x.varnames.s)),
muhat = as.double(rep(0.0,n)),
family = as.double(family_fortran),
x0 = matrix(as.double(as.matrix(newdata[,x.varnames.s])), ncol = length(x.varnames.s)),
x0l = matrix(as.double(Xpl), ncol = nparl),
offset0 = as.double(newoffset),
m0 = matrix(as.double(rep(0.0, n0*length(x.varnames.s))), nrow = n0, ncol = length(x.varnames.s)),
muhat0 = as.double(rep(0.0,n0)),
n0 = as.integer(n0),
B = as.double(rep(0.0, as.integer(nparl + 1))),
err = as.integer(0),
ikernel = as.double(kernel_fortran),
PACKAGE = "wsbackfit")
peffects <- fit$m0
effects <- fit$m
} else {
fit <- .Fortran("dllvcoef",
x = matrix(as.double(as.matrix(data[,x.varnames.s])), ncol = length(x.varnames.s)),
z = matrix(as.double(as.matrix(data[,z.varnames.s])), ncol = length(z.varnames.s)),
offset = as.double(offset),
y = as.double(data[,fsb$response]),
w = as.double(weights),
n = as.integer(n),
npar = as.integer(length(x.varnames.s)),
zl = matrix(as.double(Xl), ncol = nparl),
nparl = as.integer(nparl),
kbin = as.integer(kbin),
h = as.double(fsb$h[fsb$h != 0]),
m = matrix(as.double(rep(0.0, n*length(z.varnames.s))), nrow = n, ncol = length(z.varnames.s)),
mx = matrix(as.double(rep(0.0, n*length(x.varnames.s))), nrow = n, ncol = length(x.varnames.s)),
muhat = as.double(rep(0.0, n)),
family = as.double(family_fortran),
x0 = matrix(as.double(as.matrix(newdata[,x.varnames.s])), ncol = length(x.varnames.s)),
z0 = matrix(as.double(as.matrix(newdata[,z.varnames.s])), ncol = length(z.varnames.s)),
z0l = matrix(as.double(Xpl), ncol = nparl),
offset0 = as.double(newoffset),
mx0 = matrix(as.double(rep(0.0, n0*length(x.varnames.s))), nrow = n0, ncol = length(x.varnames.s)),
muhat0 = as.double(rep(0.0, n0)),
n0 = as.integer(n0),
B = as.double(rep(0.0, as.integer(nparl + 1))),
err = as.integer(0),
ikernel = as.double(kernel_fortran),
PACKAGE = "wsbackfit")
effects <- fit$mx
peffects <- fit$mx0
}
colnames(effects) <- colnames(peffects) <- fsb$partial[fsb$h != 0]
names(fit$B) <- c("Intercept", names.param)
residuals <- dev.residuals(data[,fsb$response], fit$muhat, weights, family = family)
res <- list(call = call, formula = formula, data = data, weights = weights, offset = offset, kernel = kernel, kbin = kbin, family = family, pdata = newdata, poffset = newoffset, effects = effects, peffects = peffects, fitted.values = fit$muhat, pfitted.values = fit$muhat0, residuals = residuals, h = fit$h, fit = fit, coeff = fit$B)
if(!pred) {
res$pdata <- NULL
res$poffset <- NULL
res$peffects <- NULL
res$pfitted.values <- NULL
}
res
} | /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/sback.fit.R |
search.alpha <- function (formula, data, offset = NULL, weights = NULL, kernel = c("Gaussian", "Epanechnikov"), alpha = seq(0.5, 1.5, length = 10), KfoldCV = 5, kbin = 25, family = c("gaussian", "binomial", "poisson")) {
family <- match.arg(family)
kernel <- match.arg(kernel)
h <- interpret.sbformula(formula)$h
if (max(h) < 0) {
stop("For the alpha correction the user needs to specify bandwidth parameters for all nonparametric functions.")
} else {
err <- NULL
for (alpha0 in alpha) {
err <- c(err, mean(calculate.CV(formula = create.formula.alpha(formula = formula, alpha0 = alpha0, data = data), data = data, offset = offset, weights = weights, kernel = kernel, kbin = kbin, family = family, KfoldCV = KfoldCV), na.rm = TRUE))
}
err.CV <- data.frame(alpha, err)
alpha0 <- alpha[which.min(err)]
res <- list(formula = create.formula.alpha(formula = formula, alpha0 = alpha0, data = data), err.CV = err.CV)
}
res
} | /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/search.alpha.R |
search.bandwidth <- function (formula, data, offset = NULL, weights = NULL, kernel = c("Gaussian", "Epanechnikov"), bandwidth = seq(0.01, 0.99, length = 30), KfoldCV = 5, kbin = 25, family = c("gaussian", "binomial", "poisson")) {
family <- match.arg(family)
kernel <- match.arg(kernel)
h <- interpret.sbformula(formula)$h
if (min(h) >= 0) {
res <- list(formula = formula)
} else {
err <- NULL
for (h0 in bandwidth) {
err <- c(err, mean(calculate.CV(formula = create.formula(formula = formula, h0 = h0, data = data), data = data, offset = offset, weights = weights, kernel = kernel, kbin = kbin, family = family, KfoldCV = KfoldCV)))
}
err.CV <- data.frame(bandwidth, err)
h0 <- bandwidth[which.min(err)]
res <- list(formula = create.formula(formula = formula, h0 = h0, data = data), err.CV = err.CV)
}
res
} | /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/search.bandwidth.R |
summary.sback <-
function(object, ...) {
class(object) <- "summary.sback"
object
}
| /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/summary.sback.R |
#' Label plots like the Wall Street Journal
#' i.e., display the units only on the top tick of the graph
#'
#' @param prefix character, the unit label to prefix on the max number of the y-axis
#' @param suffix character, the unit label to append on the max number of the y-axis
#' @param rm.bottom logical, remove the lowest number?
#' @param accuracy double, the precision for labels e.g. 1, 0.1, or 0.01
#' @param ... args passed to scales::label_comma(...)
#'
#' @examples
#' library(ggplot2)
#' `%>%` <- magrittr::`%>%`
#'
#' plt <- economics_long %>%
#' dplyr::filter(variable %in% c("psavert", "uempmed")) %>%
#' ggplot(aes(date, value, color = variable)) +
#' geom_line() +
#' scale_y_continuous(
#' labels = label_wsj(prefix = "$", suffix = " %")
#' ) +
#' theme_wsj() +
#' labs(
#' title = "Some Economics Plot",
#' caption = "Source: Top secret."
#' )
#'
#' @export
label_wsj <- function(
prefix = "$", suffix = '', rm.bottom = TRUE, accuracy = NA, ...
) {
function(x) do_wsj_label(
x, prefix, suffix, rm.bottom, accuracy, ...
)
}
# internal calling function
do_wsj_label <- function(
breaks, prefix, suffix, rm.bottom, accuracy, ...
) {
# get info on the input breaks
#
# we can't use length because sometimes the last break mark is "NA"
max_idx <- which.max(breaks)
max_num <- breaks[ max_idx ]
if (rm.bottom) {
nbreaks <- max_idx - 1 # if we remove the first tick
} else {
nbreaks <- max_idx # if we keep the first tick
}
# format breaks with commas
acc <- check_accuracy(accuracy, max_num, nbreaks)
breaks_with_commas <- scales::label_comma(accuracy = acc, ...)(breaks)
# remove bottom tick
if (rm.bottom) {
breaks_with_commas[ 1] <- ''
}
# add prefix and suffix with appropriate padding
nchar_suffix <- stringr::str_length(suffix)
breaks_with_commas <- paste0(
breaks_with_commas, stringr::str_dup(" ", nchar_suffix)
)
breaks_with_commas[ max_idx] <- paste0(prefix, max_num, suffix)
# return new labels
wsj_labels <- breaks_with_commas
return(wsj_labels)
}
check_accuracy <- function(accuracy, maxnum, nbreaks) {
accuracy <- as.double(accuracy)
# if user DIDN'T specify, make "nice" defaults
# if user DID specify, make sure it's a number and return
if (is.na(accuracy)) {
accuracy <- 1
brk_lines <- nbreaks
while (maxnum < brk_lines) {
accuracy <- accuracy / 10
brk_lines <- nbreaks * accuracy
}
}
return(accuracy)
}
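# Illustrative sketch (not part of the package; shown for clarity): how the
# labeller formats a typical set of breaks with the defaults rm.bottom = TRUE
# and suffix = '':
#   label_wsj(prefix = "$")(c(0, 25, 50, 75, 100))
#   #> ""  "25"  "50"  "75"  "$100"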
| /scratch/gouwar.j/cran-all/cranData/wsjplot/R/label_wsj.R |
#' Make timeseries graphs look like the Wall Street Journal
#'
#' @importFrom ggplot2 %+replace% theme_minimal theme element_blank element_text margin element_line unit
#'
#' @examples
#' library(ggplot2)
#' `%>%` <- magrittr::`%>%`
#'
#' plt <- economics_long %>%
#' dplyr::filter(variable %in% c("psavert", "uempmed")) %>%
#' ggplot(aes(date, value, color = variable)) +
#' geom_line() +
#' scale_y_continuous(
#' labels = label_wsj(suffix = " M")
#' ) +
#' scale_color_discrete(
#' labels = c("Series 1", "Series 2")
#' ) +
#' theme_wsj() +
#' labs(
#' title = "Some Economics Plot",
#' caption = "Source: Top secret.",
#' y = ""
#' )
#'
#' @export
theme_wsj <- function() {
theme_minimal() %+replace%
theme(
# axis
# x
axis.title.x = element_blank(),
axis.text.x = element_text( angle = 0),
axis.ticks.x = element_line(),
axis.ticks.length.x = unit(4, 'pt'),
# y
axis.title.y = element_text(
angle = 0,
margin = margin(t = 0, r = 20, b = 0, l = 0)
),
axis.text.y = element_text(
hjust = 1
),
axis.ticks.y = element_blank(),
# panel
panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank(),
panel.spacing = unit(.5, "lines"),
# plot
plot.title = element_text(
face = "bold",
size = 12,
hjust = 0.5,
margin = margin(t = 0, r = 0, b = 15, l = 0)
),
plot.subtitle = element_text(
hjust = 0.5,
size = 9
),
plot.caption = element_text(
hjust = 0,
size = 9,
color = "darkgrey",
margin = margin(
t = 15, r = 0, b = 0, l = 0
),
),
# facet grid strips
strip.text.y = element_text(
size = 8,
margin = margin(t = 0, r = 0, b = 0, l = 0),
angle = -90
),
# legend
legend.title = element_blank()
)
}
| /scratch/gouwar.j/cran-all/cranData/wsjplot/R/theme_wsj.R |
ewkm <- function(x, centers, lambda=1, maxiter=100, delta=0.00001, maxrestart=10)
{
if (missing(centers))
stop("the number or initial clusters 'centers' must be provided")
vars <- colnames(x)
nr <- as.integer(nrow(x))
nc <- as.integer(ncol(x))
if (is.data.frame(centers) || is.matrix(centers))
{
init <- TRUE
k <- nrow(centers)
}
else
{
init <- FALSE
k <- centers
centers <- double(k * nc)
}
k <- as.integer(k)
Z <- .C(WSKM_ewkm,
x=as.double(as.matrix(x)), # needs to accept a data.frame
nr=nr,
nc=nc,
k=k,
lambda=as.double(lambda),
maxiter=as.integer(maxiter),
delta=as.double(delta),
maxrestart=as.integer(maxrestart),
as.logical(init),
iterations=integer(1),
cluster=integer(nr),
centers=as.double(as.matrix(centers)),
weights=double(k * nc),
restarts=integer(1),
totiters=integer(1),
totss=double(1),
withinss=double(k))
centers <- matrix(Z$centers, ncol=ncol(x))
colnames(centers) <- vars
weights <- matrix(Z$weights, ncol=ncol(x))
colnames(weights) <- vars
# Identify missing clusters to be removed. 110804 Deal with the
# case whereby a single cluster is returned. Previous version did it
# properly in that centers[-ignore,] returns a matrix. But now it
# returns a vector. So need to use drop=FALSE
ignore <- which(rowSums(centers==0) == ncol(centers))
if (length(ignore))
{
centers <- centers[-ignore,, drop=FALSE]
weights <- weights[-ignore,, drop=FALSE]
}
# Give the rows names.
rownames(centers) <- 1:nrow(centers)
rownames(weights) <- 1:nrow(weights)
cluster <- Z$cluster + 1
size <- aggregate(cluster, list(cluster=cluster), length)[[2]]
result <- list(cluster=cluster,
centers=centers,
totss=Z$totss,
withinss=Z$withinss,
tot.withinss=sum(Z$withinss),
betweenss=Z$totss-sum(Z$withinss),
size=size,
iterations=Z$iterations,
total.iterations=Z$totiters,
restarts=Z$restarts,
weights=weights)
class(result) <- c("ewkm", "kmeans")
return(result)
}
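# Illustrative usage sketch (not package code; the object name `m` is
# hypothetical). Cluster the iris measurements into 3 groups with
# entropy-based variable weighting:
#   m <- ewkm(iris[, 1:4], centers = 3, lambda = 1)
#   m$weights                      # per-cluster variable weights
#   table(m$cluster, iris$Species)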
| /scratch/gouwar.j/cran-all/cranData/wskm/R/ewkm.R |
fgkm <- function(x, centers, groups, lambda, eta, maxiter=100, delta=0.000001, maxrestart=10,seed=-1)
{
if (missing(centers))
stop("the number or initial clusters 'centers' must be provided")
if(seed<=0){
seed <-runif(1,0,10000000)[1]
}
vars <- colnames(x)
nr <- nrow(x) # nrow() returns an integer
nc <- ncol(x) # integer
if (is.data.frame(centers) || is.matrix(centers))
{
init <- TRUE
k <- nrow(centers)
}
else
{
init <- FALSE
k <- centers
centers <- double(k * nc)
}
# get the setting of feature group
if (is.character(groups) && length(groups) == 1) {
G <- .C(WSKM_parseGroup,as.character(groups),numGroups=integer(1), groupInfo=integer(nc))
} else if (is.vector(groups) && length(groups) == nc) {
G <- list()
grps <- as.factor(groups)
groupNames <- levels(grps)
G$numGroups <- nlevels(grps)
G$groupInfo <- as.integer(as.integer(grps) - 1)
}
set.seed(seed)
Z <- .C(WSKM_fgkm,
x = as.double(as.matrix(x)),
nr,
nc,
k = as.integer(k),
lambda = as.double(lambda),
eta = as.double(eta),
G$numGroups,
G$groupInfo,
delta = as.double(delta),
maxIterations = as.integer(maxiter),
maxRestarts = as.integer(maxrestart),
as.logical(init),
# seed,
cluster = integer(nr),
centers=as.double(as.matrix(centers)),
featureWeight = double(k * nc),
groupWeight = double(k * G$numGroups),
iterations = integer(1),
restarts = integer(1),
totiters = integer(1),
totalCost = double(1),
totss = double(1),
withinss = double(k))
centers <- matrix( Z$centers)
dim(centers) <- c(k, nc)
colnames(centers) <- vars
featureWeight <- matrix(Z$featureWeight)
dim(featureWeight) <- c(k, nc)
colnames(featureWeight) <- vars
groupWeight <- matrix(Z$groupWeight)
dim(groupWeight) <- c(k, G$numGroups )
colnames(groupWeight) <- 1:ncol(groupWeight)
ignore <- which(rowSums(centers==0) == ncol(centers))
if (length(ignore)) {
centers <- centers[-ignore,, drop=FALSE]
featureWeight <- featureWeight[-ignore,, drop=FALSE]
}
rownames(centers) <- 1:nrow(centers)
rownames(featureWeight) <- 1:nrow(featureWeight)
rownames(groupWeight) <- 1:nrow(groupWeight)
cluster <- Z$cluster + 1
size <- aggregate(cluster, list(cluster=cluster), length)[[2]]
result <- list(cluster = cluster,
centers = Z$centers,
totss = Z$totss,
withinss = Z$withinss,
tot.withinss = sum(Z$withinss),
betweenss = Z$totss-sum(Z$withinss),
size = size,
iterations = Z$iterations,
restarts = Z$restarts,
totiters=Z$totiters,
featureWeight = Z$featureWeight,
groupWeight = Z$groupWeight)
dim(result$centers) <- c(k, nc)
dim(result$featureWeight) <- c(k, nc)
dim(result$groupWeight) <- c(k, G$numGroups)
class(result) <- c("kmeans", "fgkm")
return(result)
}
| /scratch/gouwar.j/cran-all/cranData/wskm/R/fgkm.R |
levelplot.ewkm <- function(x, ...)
{
x <- x$weights
dd.row <- as.dendrogram(hclust(dist(x)))
row.ord <- order.dendrogram(dd.row)
dd.col <- as.dendrogram(hclust(dist(t(x))))
col.ord <- order.dendrogram(dd.col)
my.colors <- function(n) rev(heat.colors(n))
levelplot(x[row.ord, col.ord],
aspect = "fill", pretty=TRUE,
xlab="Cluster", ylab="",
colorkey = list(space = "left", col=my.colors),
col.regions=my.colors,
legend =
list(right =
list(fun = dendrogramGrob,
args =
list(x = dd.col, ord = col.ord,
side = "right",
size = 10)),
top =
list(fun = dendrogramGrob,
args =
list(x = dd.row,
side = "top"))),
...)
}
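# Illustrative usage sketch (assumes a fitted ewkm model `m`; requires the
# lattice and latticeExtra packages for dendrogramGrob). Draws a heatmap of
# the per-cluster variable weights, with dendrograms ordering the clusters
# and variables:
#   levelplot(m)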
| /scratch/gouwar.j/cran-all/cranData/wskm/R/levelplot.ewkm.R |
plot.ewkm <- function(x, ...)
{
x <- t(x$weights)
rc <- rainbow(nrow(x), start=0, end=.3)
cc <- rainbow(ncol(x), start=0, end=.3)
# 120219 An alternative is to use pheatmap. Is there a ggplot2
# alternative?
hv <- heatmap(x, col = cm.colors(256), scale="column",
RowSideColors = rc, ColSideColors = cc, margins=c(5,10),
xlab = "Cluster", ...)
}
| /scratch/gouwar.j/cran-all/cranData/wskm/R/plot.ewkm.R |
predict.ewkm <- function (object, data, ...)
{
cluster.names <- rownames(object$centers)
cluster.vars <- colnames(object$centers)
out <- apply(data[cluster.vars], 1,
function(d) cluster.names[which.min(lapply(1:nrow(object$centers),
function(i) sqrt(sum(object$weights[i,] * abs(d - object$centers[i,])^2))))])
out <- sapply(out, function(x) ifelse(length(x), x, NA))
return(out)
}
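# Illustrative usage sketch (assumes a fitted ewkm model `m`): each new
# observation is assigned to the cluster whose centre is nearest in the
# weighted Euclidean distance defined by m$weights:
#   predict(m, iris[1:5, 1:4])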
| /scratch/gouwar.j/cran-all/cranData/wskm/R/predict.ewkm.R |
twkm <- function(x, centers, groups, lambda, eta, maxiter=100, delta=0.000001, maxrestart=10,seed=-1)
{
if (missing(centers))
stop("the number or initial clusters 'centers' must be provided")
if(seed<=0){
seed <-runif(1,0,10000000)[1]
}
vars <- colnames(x)
nr <- nrow(x) # nrow() returns an integer
nc <- ncol(x) # integer
if (is.data.frame(centers) || is.matrix(centers))
{
init <- TRUE
k <- nrow(centers)
}
else
{
init <- FALSE
k <- centers
centers <- double(k * nc)
}
# get the setting of feature group
if (is.character(groups) && length(groups) == 1) {
G <- .C(WSKM_parseGroup,as.character(groups),numGroups=integer(1), groupInfo=integer(nc))
} else if (is.vector(groups) && length(groups) == nc) {
G <- list()
grps <- as.factor(groups)
groupNames <- levels(grps)
G$numGroups <- nlevels(grps)
G$groupInfo <- as.integer(as.integer(grps) - 1)
}
set.seed(seed)
Z <- .C(WSKM_twkm,
x = as.double(as.matrix(x)),
nr,
nc,
k = as.integer(k),
lambda = as.double(lambda),
eta = as.double(eta),
G$numGroups,
G$groupInfo,
delta = as.double(delta),
maxIterations = as.integer(maxiter),
maxRestarts = as.integer(maxrestart),
as.logical(init),
# seed,
cluster = integer(nr),
centers=as.double(as.matrix(centers)),
featureWeight = double( nc),
groupWeight = double( G$numGroups),
iterations = integer(1),
restarts = integer(1),
totiters = integer(1),
totalCost = double(1),
totss = double(1),
withinss = double(k))
centers <- matrix( Z$centers)
dim(centers) <- c(k, nc)
colnames(centers) <- vars
featureWeight <- Z$featureWeight
groupWeight <- Z$groupWeight
ignore <- which(rowSums(centers==0) == ncol(centers))
if (length(ignore)) {
centers <- centers[-ignore,, drop=FALSE]
featureWeight <- featureWeight[-ignore,, drop=FALSE]
}
rownames(centers) <- 1:nrow(centers)
cluster <- Z$cluster + 1
size <- aggregate(cluster, list(cluster=cluster), length)[[2]]
result <- list(cluster = cluster,
centers = Z$centers,
totss = Z$totss,
withinss = Z$withinss,
tot.withinss = sum(Z$withinss),
betweenss = Z$totss-sum(Z$withinss),
size = size,
iterations = Z$iterations,
restarts = Z$restarts,
totiters=Z$totiters,
featureWeight = Z$featureWeight,
groupWeight = Z$groupWeight)
dim(result$centers) <- c(k, nc)
class(result) <- c("kmeans", "twkm")
return(result)
}
| /scratch/gouwar.j/cran-all/cranData/wskm/R/twkm.R |
#'@importFrom stats median quantile
#'@importFrom glmnet glmnet
df_lambda <- function(x, y,
alpha,
family,type.multinomial,seq.df,
penalty.factor=NULL,standardize){
x <- as.matrix(x)
n=as.integer(nrow(x))
p=as.integer(ncol(x))
if(length(y)!=n) stop('x and y have different number of rows.')
get_range_glmnet <- function(glmnet_fit,DFDF){
df.loss <- abs(glmnet_fit$df-DFDF)
out.df <- glmnet_fit$df[which.min(df.loss)]
out.lambda <- glmnet_fit$lambda[which.min(df.loss)]
if(sum(df.loss==0)>0){
lambda_upper <- glmnet_fit$lambda[median(which(df.loss==0))]
lambda_lower <- glmnet_fit$lambda[median(which(df.loss==0))]
}else{
lambda_minmax <- glmnet_fit$lambda[order(df.loss,decreasing=FALSE)[1:2]]
lambda_upper <- max(lambda_minmax)
lambda_lower <- min(lambda_minmax)
}
list(df=out.df,lambda=out.lambda,minmax=c(lower=lambda_lower,upper=lambda_upper))
}
fit.lambda_range <- glmnet::glmnet(x=x,y=y,alpha=alpha,nlambda=nrow(x)*15,
family=family,dfmax=nrow(x)*3,
type.multinomial=type.multinomial,
penalty.factor=penalty.factor,standardize=standardize)
lambda.sequence <- fit.lambda_range$lambda
df.sequence <- fit.lambda_range$df
out.lambda.seq <- NULL
for(DFDF in seq.df){
fit.lambda_minmax <- get_range_glmnet(fit.lambda_range,DFDF)
avg.lambda.minmax <- mean(fit.lambda_minmax$minmax)
out.lambda.seq <- c(out.lambda.seq,avg.lambda.minmax)
}
tmp_glmnet_fit <- glmnet::glmnet(x=x,y=y,
alpha=alpha,lambda=out.lambda.seq,family=family,
type.multinomial=type.multinomial,
penalty.factor=penalty.factor,
standardize=standardize)
list(DF=seq.df,df=tmp_glmnet_fit$df,lambda=out.lambda.seq)
}
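# Illustrative sketch of this internal helper (inputs are hypothetical):
# find lambda values whose glmnet fits have roughly the requested degrees
# of freedom, assuming a gaussian response:
#   x <- matrix(rnorm(100 * 20), 100, 20)
#   y <- rnorm(100)
#   df_lambda(x, y, alpha = 1, family = "gaussian",
#             type.multinomial = "grouped", seq.df = 5,
#             penalty.factor = rep(1, 20), standardize = TRUE)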
| /scratch/gouwar.j/cran-all/cranData/wsprv/R/DF.R |
#'A weighted selection probability is developed to locate individual rare variants associated with multiple phenotypes.
#'@name wsprv
#'@description Recently, rare variant association studies with multiple phenotypes have drawn a lot of attention because association signals can be boosted when rare variants are associated with more than one phenotype. Most existing statistical methods for identifying rare variants associated with multiple phenotypes are based on a group test, where a gene or a genetic region is tested one at a time. However, these methods are not designed to locate individual rare variants within a gene or a genetic region. We propose a weighted selection probability to locate individual rare variants within a group after a multiple-phenotype based group test finds significance.
#'@param x A \eqn{n \times (m+p)} matrix with \eqn{n} samples, \eqn{m} covariates and \eqn{p} rare variants where \eqn{m} can be zero, i.e., there does not exist covariates.
#'@param y A \eqn{n \times Q} phenotype matrix with \eqn{n} samples and \eqn{Q} phenotypes where \eqn{Q>1}.
#'@param alpha The mixing parameter of elastic-net, \code{alpha=1} is the lasso, and \code{alpha=0} is the ridge. Default value is 1.
#'@param penalty.factor Separate penalty factors factors can be applied to each coefficient. Can be \code{0} for some variables, which implies no shrinkage, and that variable is always included in the model.
#'@param standardize Genotype standardization. Default is \code{TRUE}.
#'@param type.multinomial A group lasso penalty is used on the multinomial coefficients for a variable when 'grouped'. It ensures the multinomial coefficents are all in or out. Default is 'grouped'.
#'@param rep The number of bootstrap replications. We recommend to use 100 or more to compute weighted selection probability. Default value is 100.
#'@param rate A tuning parameter representing the ratio of the degrees of freedom to the number of rare variants. Default value is 0.05.
#'@param gamma The upper \code{gamma} quantile of selection frequencies of individual variants for each phenotype, used to compute the threshold. Default value is 0.01.
#'@details The penalty function of \code{elastic-net} is defined as \deqn{\lambda(\alpha||\beta||_1+\frac{(1-\alpha)}{2}||\beta||_2^2),} where \eqn{\alpha} is a mixing proportion of ridge and the lasso, and \eqn{\beta} is the vector of regression coefficients. This penalty is equivalent to the lasso penalty if \code{alpha=1}. \cr \cr Let \eqn{\eta} be the degrees of freedom, which depend on the tuning parameter \eqn{\lambda}, and \code{rate} is computed as \deqn{rate=\frac{\eta}{p}.} Note that \eqn{\eta \leq n} is enforced in the \code{weight_sp} function. \cr \cr Let \eqn{\delta_{\gamma}} be a threshold of \eqn{SF}, defined as the upper \eqn{\gamma^{th}} quantile value of \eqn{SF}, where \eqn{SF=\left\{SF_{11}(\eta),SF_{21}(\eta),\cdots,SF_{pQ}(\eta) \right\}} is the set of selection frequencies of individual rare variants for each phenotype.
#'@importFrom glmnet glmnet
#'@importFrom mnormt rmnorm
#'@importFrom stats median quantile rnorm
#'@returns
#' \item{res}{A matrix containing the order of weighted selection probabilities from the largest to the smallest and the corresponding weighted selection probabilities.}
#' \item{eta}{eta used.}
#' \item{bootstrap.rep}{The number of bootstrap replications used.}
#' \item{rate}{The tuning parameter \code{rate} used.}
#' \item{gamma}{The upper \code{gamma} quantile of selection frequencies of individual rare variants for each phenotype used.}
#'@examples
#'
#' # Generate simulation data
#' n <- 400
#' p <- 100
#' q <- 5
#' MAF <- 0.01
#' geno.prob <- rbind((1-MAF)^2,2*(1-MAF)*MAF,MAF^2)
#' x <- matrix(NA,n,p)
#' set.seed(1)
#' for(i in 1:p) x[,i] <- sample(0:2,n,prob=geno.prob,replace=TRUE)
#' beta <- c(rep(3.0,10),rep(0,(p-10)))
#' cova <- matrix(0.75,q,q)
#' diag(cova) <- 1
#' require(mnormt)
#' err.mat <- rmnorm(n,rep(0,q),cova)
#'
#' y1 <- x %*% beta+err.mat[,1]
#' y2 <- x %*% beta+err.mat[,2]
#' y <- cbind(y1,y2,err.mat[,3:5])
#' # Weighted selection probabilities for individual rare variants without covariates.
#' #If rep=100, time consuming.
#' wsp.rv1 <- weight_sp(x,y,rep=5) # continuous phenotypes
#'
#' # Weighted selection probabilities for individual rare variants with covariates.
#' #If rep=100, time consuming.
#' cx <- cbind(rnorm(n),sample(0:1,n,replace=TRUE))
#' x <- cbind(cx,x)
#' penalty.factor <- c(rep(0,2),rep(1,p))
#' colnames(x) <- c('Age','Gender',paste0('V',3:102))
#' wsp.rv2 <- weight_sp(x,y,penalty.factor=penalty.factor,rep=5) # continuous phenotypes
#'
#'
#'@export
weight_sp <- function(x,y,
alpha=1,
penalty.factor = NULL,standardize=TRUE,type.multinomial=c('grouped','ungrouped'),
rep=100,rate=0.05,gamma=0.01){
x <- as.matrix(x)
n <- as.integer(nrow(x))
p <- as.integer(ncol(x))
if(floor(p*rate) >= n){
seq.df <- n
}else{
seq.df <- floor(p*rate)
}
if (is.null(colnames(x))){
var.names <- paste('V',1:ncol(x),sep='')
}else{
var.names <- colnames(x)
}
if(!is.matrix(y)) stop('y should be a matrix.')
if (nrow(y)!=n) stop('x and y have different number of rows.')
if(is.null(penalty.factor)) penalty.factor <- rep(1, ncol(x))
if(sum(penalty.factor==0)!=0) seq.df <- seq.df + sum(penalty.factor==0)
type.multinomial <- match.arg(type.multinomial)
q <- as.integer(ncol(y))
boot.n <- n
boot.rep <- rep
boot.mat <- matrix(0,p,q)
boot.app <- rep(0,p)
for(i in 1:boot.rep){
set.seed(125*i)
wt.bt <- sample(n,boot.n,replace=TRUE)
x.boot <- x[wt.bt,]
boot.sum <- apply(x.boot,2,sum)
if(min(boot.sum)==0){
wh <- which(boot.sum==0)
wh.n <- which(boot.sum!=0)
x.boot <- x.boot[,-wh]
}else{
wh.n <- 1:ncol(x.boot)
}
y.boot <- y[wt.bt,]
boot.app[wh.n] <- boot.app[wh.n] + 1
families <- rep('gaussian',q)
families.check <- apply(y,2,function(x) length(table(x)))
if(sum(families.check==2)!=0) families[which(families.check==2)] <- 'binomial'
if(sum(families.check==3|families.check==4|families.check==5)!=0) families[which(families.check==3|families.check==4|families.check==5)] <- 'multinomial'
penalty.factor.temp <- penalty.factor[wh.n]
for(j in 1:q){
if(families[j]=='multinomial'){
opt.lambda <- df_lambda(x=x.boot,y=y.boot[,j],penalty.factor=penalty.factor.temp,alpha=alpha,family=families[j],
seq.df=seq.df,standardize=standardize,type.multinomial=type.multinomial)
fit <- glmnet(x=x.boot,y=y.boot[,j],penalty.factor=penalty.factor.temp,family=families[j],alpha=alpha,
lambda=opt.lambda$lambda,standardize=standardize,type.multinomial=type.multinomial)
boot.mat[,j][wh.n] <- boot.mat[,j][wh.n]+as.numeric(fit$beta[[1]]!=0)
}else{
opt.lambda <- df_lambda(x=x.boot,y=y.boot[,j],penalty.factor=penalty.factor.temp,alpha=alpha,family=families[j],
seq.df=seq.df,standardize=standardize)
fit <- glmnet(x=x.boot,y=y.boot[,j],penalty.factor=penalty.factor.temp,family=families[j],alpha=alpha,
lambda=opt.lambda$lambda,standardize=standardize)
boot.mat[,j][wh.n] <- boot.mat[,j][wh.n]+as.numeric(fit$beta!=0)
}
}
}
if(!is.numeric(penalty.factor)) stop('penalty.factor should be a numeric vector.')
if(sum(penalty.factor==0)==0){
counts.qtl <- quantile(boot.mat,(1-gamma))
boot.sp.mat <- boot.mat/boot.app
wts.temp <- NA
for(k in 1:q){
wts.temp[k] <- sum(boot.mat[,k][boot.mat[,k] > counts.qtl])
}
wts <- wts.temp/sum(wts.temp)
wts[is.na(wts)] <- 0
wsp <- apply(boot.sp.mat%*%diag(wts),1,max)
wsp <- wsp/max(wsp,na.rm=TRUE)
wsp.order <- order(wsp,decreasing=TRUE)
mat <- cbind(wsp.order,wsp[wsp.order])
colnames(mat) <- c('variable','wsp')
rownames(mat) <- var.names[wsp.order]
}else{
cov.loc <- which(penalty.factor==0)
boot.mat.temp <- boot.mat[-cov.loc,]
counts.qtl <- quantile(boot.mat.temp,(1-gamma))
boot.sp.mat.temp <- boot.mat.temp/boot.app[-cov.loc]
wts.temp <- NA
for(k in 1:q){
wts.temp[k] <- sum(boot.mat.temp[,k][boot.mat.temp[,k] > counts.qtl])
}
wts <- wts.temp/sum(wts.temp)
wts[is.na(wts)] <- 0
wsp <- apply(boot.sp.mat.temp%*%diag(wts),1,max)
wsp <- wsp/max(wsp,na.rm=TRUE)
wsp.order <- order(wsp,decreasing=TRUE)
wsp.order.mat <- wsp.order + sum(penalty.factor==0)
mat <- cbind(wsp.order.mat,wsp[wsp.order])
colnames(mat) <- c('variable','wsp')
var.names.mat <- var.names[-cov.loc]
rownames(mat) <- var.names.mat[wsp.order]
}
return(list(res=mat,eta=seq.df,bootstrap.rep=boot.rep,rate=rate,gamma=gamma))
}
| /scratch/gouwar.j/cran-all/cranData/wsprv/R/WSP.R |
.reduce.wsrf <- function(xs, ...)
{
## Reduce multiple models of wsrf into one.
# xs should be a list of objects of wsrf.
tags <- c(.TREES_IDX, .TREE_OOB_ERROR_RATES_IDX, .OOB_SETS_IDX, .OOB_PREDICT_LABELS_IDX, .TREE_IGR_IMPORTANCE_IDX, .WEIGHTS_IDX, .MTRY_IDX, .NODESIZE_IDX)
res <- vector("list", .WSRF_MODEL_SIZE)
names(res) <- .WSRF_MODEL_NAMES
for (tag in tags)
res[[tag]] <- unlist(lapply(xs, function(x, tg) { x[[tg]] }, tag), recursive=FALSE, use.names=FALSE)
for (tag in c(.WEIGHTS_IDX, .MTRY_IDX, .NODESIZE_IDX)) {
if (!is.null(res[[tag]]) && length(unique(res[[tag]]))==1) res[[tag]] <- res[[tag]][1]
else res[tag] <- list(NULL)
}
if (!is.null(xs[[1]][[.META_IDX]]))
res[[.META_IDX]] <- xs[[1]][[.META_IDX]]
if (!is.null(xs[[1]][[.TARGET_DATA_IDX]]))
res[[.TARGET_DATA_IDX]] <- xs[[1]][[.TARGET_DATA_IDX]]
return(res)
}
combine <- function(...) UseMethod("combine")
combine.wsrf <- function(...)
{
## Merge ... into one bigger model of wsrf.
res <- list(...)
areWsrfObjects <- sapply(res, function(x) inherits(x, "wsrf"))
if (any(!areWsrfObjects)) stop("Argument must be a list of wsrf objects")
res <- .reduce.wsrf(res)
.Call(WSRF_afterMergeOrSubset, res)
class(res) <- "wsrf"
return(res)
}
| /scratch/gouwar.j/cran-all/cranData/wsrf/R/combine.wsrf.R |
correlation <- function(object, ...) UseMethod("correlation")
correlation.wsrf <- function(object, ...) {
object[[.CORRELATION_IDX]]
} | /scratch/gouwar.j/cran-all/cranData/wsrf/R/correlation.wsrf.R |
importance <- function(x, ...) UseMethod("importance")
importance.wsrf <- function(x, type=NULL, class=NULL, scale=TRUE, ...) {
imp <- x[[.IMPORTANCE_IDX]]
impSD <- x[[.IMPORTANCESD_IDX]]
hasPermImp <- !is.null(impSD)
igrIdx <- ncol(imp)
hasType <- !is.null(type)
hasClass <- !is.null(class)
## For unmached arguments.
if (!hasPermImp && ((hasType && type == 1) || (!hasType && hasClass)))
stop("That measure has not been computed.") # No Perm-based measures, but required.
if (hasType) {
if (type != 1 && type != 2) stop("Wrong type specified.")
if (type == 2 && hasClass) stop("No class-specific measure for that type.")
}
## When arguments matched and result needs to be converted.
if (hasType && type == 2) # Only MeanDecreaseIGR
imp <- imp[, igrIdx, drop=FALSE]
if (hasType && type == 1 && !hasClass) { # Only MeanDecreaseAccuracy
if (scale) imp <- imp[, -igrIdx] / impSD
else imp <- imp[, -igrIdx]
}
if (!hasType && !hasClass && scale) # Scaled MeanDecreaseAccuracy and MeanDecreaseIGR
imp <- cbind(imp[, -igrIdx] / impSD, imp[, igrIdx, drop=FALSE])
if ((!hasType || type == 1) && hasClass) { # Class-specific measures
whichCol <- match(class, colnames(imp)[-igrIdx])
whichNA <- which(is.na(whichCol))
if (length(whichNA) != 0) {
outmessage <- paste("Class", paste(class[whichNA], collapse=", "), "not found.")
if (length(whichNA) != length(whichCol))
warning(outmessage) # Some classes matched.
else
stop(outmessage) # No class matched.
}
if (length(whichNA)!=0) whichCol <- whichCol[-whichNA]
if (scale) imp <- imp[, whichCol, drop=FALSE] / impSD[, whichCol, drop=FALSE]
else imp <- imp[, whichCol, drop=FALSE]
}
imp
}
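# Illustrative usage sketch (assumes a model `m` built with
# wsrf(..., importance=TRUE) so permutation-based measures exist; the class
# name "setosa" is only an example and depends on the target's levels):
#   importance(m)                    # scaled accuracy and IGR measures
#   importance(m, type=2)            # MeanDecreaseIGR only
#   importance(m, class="setosa")    # class-specific accuracy measure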
| /scratch/gouwar.j/cran-all/cranData/wsrf/R/importance.wsrf.R |
oob.error.rate <- function(object, ...) UseMethod("oob.error.rate")
oob.error.rate.wsrf <- function(object, tree, ...)
{
if (missing(tree) || (is.logical(tree) && length(tree) == 1 && !tree))
{
# return out-of-bag error rate for the forest, length of 1
return(object[[.RF_OOB_ERROR_RATE_IDX]])
}
else
{
# return out-of-bag error rates for specific trees
return(object[[.TREE_OOB_ERROR_RATES_IDX]][tree])
}
}
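# Illustrative usage sketch (assumes a wsrf model `m`):
#   oob.error.rate(m)              # OOB error rate of the whole forest
#   oob.error.rate(m, tree=1:5)    # OOB error rates of the first 5 trees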
| /scratch/gouwar.j/cran-all/cranData/wsrf/R/oob.error.rate.wsrf.R |
predict.wsrf <- function(object,
newdata,
type=c("response",
"class",
"vote",
"prob",
"aprob",
"waprob"),
...)
{
if (!inherits(object, "wsrf"))
stop("Not a legitimate wsrf object")
# "class" is the default type.
if (missing(type)) type <- "class"
# Several types are allowed.
type <- match.arg(type, several.ok = TRUE)
# type "response" is the same as "class"
hasResponseType <- "response" %in% type
hasClassType <- "class" %in% type
if (hasClassType && hasResponseType) {
type <- type[-which(type == "response")]
} else if (!hasClassType && hasResponseType) {
type[which(type == "response")] <- "class"
}
# Convert string type into integer flag.
type <- sum(sapply(type, function(x) {
switch(x, class=1, vote=2, prob=4, aprob=8, waprob=16)
}))
# The C++ code for prediction does not handle missing values, so handle
# them here by removing them from the dataset and then adding NAs back
# into the correct places in the prediction results.
complete <- complete.cases(newdata)
rnames <- rownames(newdata)
newdata <- newdata[complete,]
hasmissing <- !all(complete)
nobs <- length(complete)
res <- .Call(WSRF_predict, object, newdata, type)
names(res) <- c("class", "vote", "prob", "aprob", "waprob")
# Deal with names and observations with missing values.
res <- sapply(names(res), function(ty) {
pred <- res[[ty]]
if (is.null(pred)) return(pred)
if (ty == "class") {
if (hasmissing) {
temp <- factor(rep(NA, nobs), levels=levels(pred))
temp[complete] <- pred
pred <- temp
}
names(pred) <- rnames
return(pred)
} else {
if (hasmissing) {
temp <- matrix(NA_real_, nrow=nobs, ncol=ncol(pred))
temp[complete, ] <- pred
colnames(temp) <- colnames(pred)
pred <- temp
}
rownames(pred) <- rnames
return(pred)
}
}, simplify=FALSE)
# In case users aren't aware that type "response" is the same as "class".
if (hasResponseType) res[["response"]] <- res[["class"]]
return(res)
}
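# Illustrative usage sketch (assumes a wsrf model `m` and a data frame
# `newds` containing the model's input variables):
#   res <- predict(m, newds, type=c("class", "prob"))
#   res$class   # factor of predicted labels; NA where inputs had NAs
#   res$prob    # matrix of class probabilities, one column per class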
| /scratch/gouwar.j/cran-all/cranData/wsrf/R/predict.wsrf.R |
print.wsrf <- function(x, trees, ...)
{
ntree <- length(x[[.TREES_IDX]])
if (missing(trees))
{
cat("A Weighted Subspace Random Forest model with ", ntree,
" tree", ifelse(ntree == 1, "", "s"), ".\n\n", sep="")
cat(sprintf("%38s: %d\n", "No. of variables tried at each split", x[[.MTRY_IDX]]))
cat(sprintf("%38s: %d\n", "Minimum size of terminal nodes", x[[.NODESIZE_IDX]]))
cat(sprintf("%38s: %.2f\n", "Out-of-Bag Error Rate", x[[.RF_OOB_ERROR_RATE_IDX]]))
cat(sprintf("%38s: %.2f\n", "Strength", x[[.STRENGTH_IDX]]))
cat(sprintf("%38s: %.2f\n\n", "Correlation", x[[.CORRELATION_IDX]]))
cat("Confusion matrix:\n")
print(round(x[[.CONFUSION_IDX]], 2))
}
else
{
if (is.logical(trees))
trees <- seq(x[[.TREES_IDX]])[trees]
.Call(WSRF_print, x, trees)
}
# return invisible NULL
invisible()
}
| /scratch/gouwar.j/cran-all/cranData/wsrf/R/print.wsrf.R |
strength <- function(object, ...) UseMethod("strength")
strength.wsrf <- function (object, ...) {
object[[.STRENGTH_IDX]]
} | /scratch/gouwar.j/cran-all/cranData/wsrf/R/strength.wsrf.R |
subset.wsrf <- function(x, trees, ...)
{
## Get a subset of a wsrf model.
# x is a object of wsrf.
# i is a subset of trees indexes.
if (!inherits(x, "wsrf"))
stop("Not a legitimate wsrf object")
tags <- c(.TREES_IDX, .TREE_OOB_ERROR_RATES_IDX, .OOB_SETS_IDX, .OOB_PREDICT_LABELS_IDX, .TREE_IGR_IMPORTANCE_IDX)
res <- vector("list", .WSRF_MODEL_SIZE)
names(res) <- .WSRF_MODEL_NAMES
for (tag in tags)
res[[tag]] <- x[[tag]][trees]
res[[.META_IDX]] <- x[[.META_IDX]]
res[[.TARGET_DATA_IDX]] <- x[[.TARGET_DATA_IDX]]
for (tag in c(.WEIGHTS_IDX, .MTRY_IDX, .NODESIZE_IDX)) {
if (!is.null(x[[tag]])) res[[tag]] <- x[[tag]]
else res[tag] <- list(NULL)
}
.Call(WSRF_afterMergeOrSubset, res)
class(res) <- "wsrf"
return(res)
}
| /scratch/gouwar.j/cran-all/cranData/wsrf/R/subset.wsrf.R |
varCounts.wsrf <- function(object) {
# Return the number of times each variable is selected as a split variable.
# For evaluating the bias of wsrf towards attribute types (categorical and
# numerical) and the number of values each attribute has.
varnames <- object[[.META_IDX]][["varnames"]]
trees <- object[[.TREES_IDX]]
counts <- vector("integer", length(varnames))
names(counts) <- varnames
for (i in 1:length(trees))
{
tree <- trees[[i]]
for (j in 1:length(tree))
{
node <- tree[[j]]
if (as.integer(node[1]) == 1)
{
varidx <- as.integer(node[4]) + 1
counts[varidx] <- counts[varidx] + 1
}
}
}
return(counts)
}
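# Illustrative usage sketch (assumes a wsrf model `m`): returns a named
# integer vector of split counts, e.g. to inspect bias towards variables
# with many values:
#   sort(varCounts.wsrf(m), decreasing=TRUE)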
| /scratch/gouwar.j/cran-all/cranData/wsrf/R/varCounts.wsrf.R |
wsrf <- function(x, ...)
UseMethod("wsrf") | /scratch/gouwar.j/cran-all/cranData/wsrf/R/wsrf.R |
wsrf.default <- function(
x,
y,
mtry=floor(log2(length(x))+1),
ntree=500,
weights=TRUE,
parallel=TRUE,
na.action=na.fail,
importance=FALSE,
nodesize=2,
clusterlogfile,
...) {
# Perform the required na.action, which defaults to failing if there is missing
# data in the dataset.
if (!is.null(na.action)) {
data <- as.data.frame(na.action(cbind(x, y)))
x <- data[-length(data)]
y <- data[[length(data)]]
rm(data)
}
# Prepare to pass execution over to the suitable helper.
if (!is.factor(y))
y <- as.factor(y)
mtry <- as.integer(mtry); if (mtry <= 0) stop("mtry should be at least 1.")
nodesize <- as.integer(nodesize); if (nodesize <= 0) stop("nodesize should be at least 1.")
ntree <- as.integer(ntree); if (ntree <= 0) stop("ntree should be at least 1.")
seeds <- as.integer(runif(ntree) * 10000000)
# Determine what kind of parallel to perform. By default, when
# parallel=TRUE, use 2 less than the number of cores available, or 1
# core if there are only 2 cores.
if (is.logical(parallel) || is.numeric(parallel))
{
if (is.logical(parallel) && parallel)
{
parallel <- detectCores()-2
if (is.na(parallel) || parallel < 1) parallel <- 1
}
model <- .wsrf(x, y, ntree, mtry, nodesize, weights, parallel, seeds, importance, FALSE)
}
else if (is.vector(parallel))
{
model <- .clwsrf(x, y, ntree, mtry, nodesize, weights, serverargs=parallel, seeds, importance, clusterlogfile)
}
else
stop ("Parallel must be logical, character, or numeric.")
class(model) <- "wsrf"
return(model)
}
.wsrf <- function(x, y, ntree, mtry, nodesize, weights, parallel, seeds, importance, ispart)
{
model <- .Call(WSRF_wsrf, x, y, ntree, mtry, nodesize,
weights, parallel, seeds, importance, ispart)
names(model) <- .WSRF_MODEL_NAMES
return(model)
}
.localwsrf <- function(serverargs, x, y, mtry, nodesize, weights, importance)
{
ntree <- serverargs[1][[1]]
parallel <- serverargs[2][[1]]
seeds <- serverargs[3][[1]]
model <- .wsrf(x, y, ntree, mtry, nodesize, weights, parallel, seeds, importance, TRUE)
return(model)
}
.clwsrf <- function(x, y, ntree, mtry, nodesize, weights, serverargs, seeds, importance, clusterlogfile)
{
# Multiple cores on multiple servers.
# where serverargs like c("apollo9", "apollo10", "apollo11", "apollo12")
# or c(apollo9=5, apollo10=8, apollo11=-1)
determineCores <- function()
{
if (.Platform$OS.type == "windows") return(1)
nthreads <- detectCores() - 2
if (nthreads > 0)
return(nthreads)
else
return(1)
}
if (is.vector(serverargs, "character"))
{
nodes <- serverargs
if (missing(clusterlogfile)) cl <- makeCluster(nodes)
else cl <- makeCluster(nodes, outfile=clusterlogfile)
clusterEvalQ(cl, require(wsrf))
parallels <- unlist(clusterCall(cl, determineCores))
}
else if (is.vector(serverargs, "numeric"))
{
nodes <- names(serverargs)
if (missing(clusterlogfile)) cl <- makeCluster(nodes)
else cl <- makeCluster(nodes, outfile=clusterlogfile)
clusterEvalQ(cl, require(wsrf))
parallels <- unlist(clusterCall(cl, determineCores))
parallels <- ifelse(serverargs > 0, serverargs, parallels)
}
else
stop ("Parallel must be a vector of mode character/numeric.")
nservers <- length(nodes)
# just make sure each node has different RNGs in C code, time is
# part of the seed, so this call won't make a reproducible result
clusterSetRNGStream(cl)
# follow specification in "serverargs", calculate corresponding tree
# number for each node
nTreesPerNode <- floor(ntree / sum(parallels)) * parallels
nTreesLeft <- ntree %% sum(parallels)
# cumsumParallels <- cumsum(parallels)
# leftPerNode <- ifelse(nTreesLeft >= cumsumParallels, parallels, 0)
# if (!(nTreesLeft %in% cumsumParallels)) {
# index <- which(nTreesLeft < cumsumParallels)[1]
# if (index == 1)
# leftPerNode[index] <- nTreesLeft
# else
# leftPerNode[index] <- nTreesLeft - cumsumParallels[index - 1]
# }
ones <- rep(1, length(parallels))
leftPerNode <- floor(nTreesLeft / sum(ones)) * ones
left <- nTreesLeft %% sum(ones)
leftPerNode <- leftPerNode + c(rep(1, left), rep(0, length(parallels) - left))
nTreesPerNode <- nTreesPerNode + leftPerNode
parallels <- parallels[which(nTreesPerNode > 0)]
parallels <- as.integer(parallels)
nTreesPerNode <- nTreesPerNode[which(nTreesPerNode > 0)]
nTreesPerNode <- as.integer(nTreesPerNode)
seedsPerNode <- split(seeds, rep(1:nservers, nTreesPerNode))
forests <- parRapply(cl, cbind(nTreesPerNode, parallels, seedsPerNode),
.localwsrf, x, y, mtry, nodesize, weights, importance)
stopCluster(cl)
model <- .reduce.wsrf(forests)
# "afterReduceForCluster" is used for statistics.
.Call(WSRF_afterReduceForCluster, model, x, y)
class(model) <- "wsrf"
return(model)
}
.onAttach <- function(libname, pkgname) {
wsrfDescription <- "wsrf: An R Package for Scalable Weighted Subspace Random Forests."
wsrfVersion <- read.dcf(file=system.file("DESCRIPTION", package=pkgname),
fields="Version")
# Use packageStartupMessage() instead of message() to give startup messages.
packageStartupMessage(wsrfDescription)
packageStartupMessage(paste("Version", wsrfVersion))
packageStartupMessage("Use C++ standard thread library for parallel computing")
# packageStartupMessage("With parallel computing disabled")
# packageStartupMessage("Type wsrfNews() to see new features/changes/bug fixes.")
}
## All the names and indexes of the elements of the model returned by wsrf.
.META <- "meta"; .META_IDX <- 1;
.TARGET_DATA <- "targetData"; .TARGET_DATA_IDX <- 2;
.TREES <- "trees"; .TREES_IDX <- 3;
.TREE_OOB_ERROR_RATES <- "treeOOBErrorRates"; .TREE_OOB_ERROR_RATES_IDX <- 4;
.OOB_SETS <- "OOBSets"; .OOB_SETS_IDX <- 5;
.OOB_PREDICT_LABELS <- "OOBPredictLabels"; .OOB_PREDICT_LABELS_IDX <- 6;
.TREE_IGR_IMPORTANCE <- "treeIgrImportance"; .TREE_IGR_IMPORTANCE_IDX <- 7;
.PREDICTED <- "predicted"; .PREDICTED_IDX <- 8;
.OOB_TIMES <- "oob.times"; .OOB_TIMES_IDX <- 9;
.CONFUSION <- "confusion"; .CONFUSION_IDX <- 10;
.IMPORTANCE <- "importance"; .IMPORTANCE_IDX <- 11;
.IMPORTANCESD <- "importanceSD"; .IMPORTANCESD_IDX <- 12;
.RF_OOB_ERROR_RATE <- "RFOOBErrorRate"; .RF_OOB_ERROR_RATE_IDX <- 13;
.STRENGTH <- "strength"; .STRENGTH_IDX <- 14;
.CORRELATION <- "correlation"; .CORRELATION_IDX <- 15;
.C_S2 <- "c_s2"; .C_S2_IDX <- 16;
.WEIGHTS <- "useweights"; .WEIGHTS_IDX <- 17;
.MTRY <- "mtry"; .MTRY_IDX <- 18;
.NODESIZE <- "nodesize"; .NODESIZE_IDX <- 19;
.WSRF_MODEL_SIZE <- 19
.WSRF_MODEL_NAMES <- c(
.META,
.TARGET_DATA,
.TREES,
.TREE_OOB_ERROR_RATES,
.OOB_SETS,
.OOB_PREDICT_LABELS,
.TREE_IGR_IMPORTANCE,
.PREDICTED,
.OOB_TIMES,
.CONFUSION,
.IMPORTANCE,
.IMPORTANCESD,
.RF_OOB_ERROR_RATE,
.STRENGTH,
.CORRELATION,
.C_S2,
.WEIGHTS,
.MTRY,
.NODESIZE)
| /scratch/gouwar.j/cran-all/cranData/wsrf/R/wsrf.default.R |
wsrf.formula <- function(formula, data, ...)
{
# Determine the information provided by the formula.
target <- as.character(formula[[2]]) # Assumes it is a two sided formula.
target <- sub("^`(.*)`$", "\\1", target) # Remove backticks
target.ind <- which(colnames(data) == target)
if (length(target.ind) == 0) stop("The named target must be included in the dataset.")
if (formula[[3]] == ".") {
inputs <- (1:ncol(data))[-target.ind]
vars <- union(inputs, target.ind)
} else {
inputs <- attr(terms.formula(formula, data=data), "term.labels")
inputs <- sub("^`(.*)`$", "\\1", inputs) # Remove backticks if variable names are non-syntactic
vars <- union(inputs, target)
}
# Retain just the dataset required.
data <- as.data.frame(data[vars])
# Split data into inputs x and response y.
x <- data[-length(data)]
y <- data[[length(data)]]
# rm(data)
model <- wsrf.default(x, y, ...)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/wsrf/R/wsrf.formula.R |
## ----eval=FALSE---------------------------------------------------------------
# install.packages("wsrf")
## ----eval=FALSE---------------------------------------------------------------
# devtools::install_github("simonyansenzhao/wsrf")
## ----usage_load, message=FALSE------------------------------------------------
ds <- iris
dim(ds)
names(ds)
## ----usage_prepare------------------------------------------------------------
target <- "Species"
vars <- names(ds)
## ----message=FALSE------------------------------------------------------------
library("randomForest")
if (sum(is.na(ds[vars]))) ds[vars] <- na.roughfix(ds[vars])
ds[target] <- as.factor(ds[[target]])
(tt <- table(ds[target]))
## -----------------------------------------------------------------------------
(form <- as.formula(paste(target, "~ .")))
## -----------------------------------------------------------------------------
seed <- 42
set.seed(seed)
length(train <- sample(nrow(ds), 0.7*nrow(ds)))
length(test <- setdiff(seq_len(nrow(ds)), train))
## ----eval=FALSE---------------------------------------------------------------
# wsrf(formula, data, ...)
## ----eval=FALSE---------------------------------------------------------------
# wsrf(x,
# y,
# mtry=floor(log2(length(x))+1),
# ntree=500,
# weights=TRUE,
# parallel=TRUE,
# na.action=na.fail,
# importance=FALSE,
# nodesize=2,
# clusterlogfile,
# ...)
## ----usage_build_by_default, message=FALSE------------------------------------
library("wsrf")
model.wsrf.1 <- wsrf(form, data=ds[train, vars], parallel=FALSE)
print(model.wsrf.1)
print(model.wsrf.1, 1) # Print tree 1.
## ----usage_evaluate-----------------------------------------------------------
cl <- predict(model.wsrf.1, newdata=ds[test, vars], type="class")$class
actual <- ds[test, target]
(accuracy.wsrf <- mean(cl == actual, na.rm=TRUE))
## ----usage_build_another, message=FALSE---------------------------------------
set.seed(seed+1)
# Here we build another model without weighting.
model.wsrf.2 <- wsrf(form, data=ds[train, vars], weights=FALSE, parallel=FALSE)
print(model.wsrf.2)
## ----usage_subset_combine-----------------------------------------------------
submodel.wsrf <- subset.wsrf(model.wsrf.1, 1:150)
print(submodel.wsrf)
bigmodel.wsrf <- combine.wsrf(model.wsrf.1, model.wsrf.2)
print(bigmodel.wsrf)
## ----usage_build_on_cluster, eval=FALSE---------------------------------------
# servers <- paste0("node", 31:40)
# model.wsrf.3 <- wsrf(form, data=ds[train, vars], parallel=servers)
| /scratch/gouwar.j/cran-all/cranData/wsrf/inst/doc/wsrf-guide.R |
---
title: "A Quick Start Guide for wsrf"
author: "He Zhao, Graham Williams"
date: "`r Sys.Date()`"
bibliography:
./wsrf-guide.bib
vignette: >
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{A Quick Start Guide for wsrf}
output:
knitr:::html_vignette:
toc: yes
---
## Introduction ##
The [**wsrf**](https://cran.r-project.org/package=wsrf) package is a
parallel implementation of the Weighted Subspace Random Forest
algorithm (wsrf) of @xu2012classifying. A novel variable weighting
method is used for variable subspace selection in place of the
traditional approach of random variable sampling. This new approach
is particularly useful in building models for high dimensional data
--- often consisting of thousands of variables. Parallel computation
is used to take advantage of multi-core machines and clusters of
machines to build random forest models from high dimensional data with
reduced elapsed times.
## Requirements and Installation Notes ##
Currently, **wsrf** requires R (>= 3.3.0) and
[**Rcpp**](https://cran.r-project.org/package=Rcpp) (>= 0.10.2)
[@dirk2011rcpp; @dirk2013seamless]. For the use of multi-threading, a
C++ compiler with [C++11](https://en.wikipedia.org/wiki/C%2B%2B11)
standard support of threads is required. To install the latest stable
version of the package, from within R run:
```{r eval=FALSE}
install.packages("wsrf")
```
or the latest development version:
```{r eval=FALSE}
devtools::install_github("simonyansenzhao/wsrf")
```
Versions of R before 3.3.0 do not fully support C++11, so we previously
provided other options for installing wsrf. As of 1.6.0, those options
have been dropped. Their usage can be found in the documentation of
previous versions if needed.
## Usage ##
This section demonstrates how to use **wsrf**, especially on a cluster
of machines.
The example uses a small dataset *iris* from R. See the help page in
R (`?iris`) for more details of *iris*. Below is some basic
information about it.
```{r usage_load, message=FALSE}
ds <- iris
dim(ds)
names(ds)
```
Before building the model we need to prepare the training dataset.
First we specify the target variable.
```{r usage_prepare}
target <- "Species"
vars <- names(ds)
```
Next we deal with missing values, using `na.roughfix()` from
**randomForest** to take care of them.
```{r message=FALSE}
library("randomForest")
if (sum(is.na(ds[vars]))) ds[vars] <- na.roughfix(ds[vars])
ds[target] <- as.factor(ds[[target]])
(tt <- table(ds[target]))
```
We construct the formula that describes the model which will predict
the target based on all other variables.
```{r}
(form <- as.formula(paste(target, "~ .")))
```
Finally we create the randomly selected training and test datasets,
setting a seed so that the results can be exactly replicated.
```{r}
seed <- 42
set.seed(seed)
length(train <- sample(nrow(ds), 0.7*nrow(ds)))
length(test <- setdiff(seq_len(nrow(ds)), train))
```
The function to build a weighted random forest model in **wsrf** is:
```{r eval=FALSE}
wsrf(formula, data, ...)
```
and
```{r eval=FALSE}
wsrf(x,
y,
mtry=floor(log2(length(x))+1),
ntree=500,
weights=TRUE,
parallel=TRUE,
na.action=na.fail,
importance=FALSE,
nodesize=2,
clusterlogfile,
...)
```
We use the training dataset to build a random forest model. All
parameters, except `formula` and `data`, use their default values:
`500` for `ntree` --- the number of trees; `TRUE` for `weights` ---
weighted subspace random forest rather than plain random forest; `TRUE`
for `parallel` --- use multi-threading or other options, etc.
```{r usage_build_by_default, message=FALSE}
library("wsrf")
model.wsrf.1 <- wsrf(form, data=ds[train, vars], parallel=FALSE)
print(model.wsrf.1)
print(model.wsrf.1, 1) # Print tree 1.
```
Then, we `predict` the classes of the test data.
```{r usage_evaluate}
cl <- predict(model.wsrf.1, newdata=ds[test, vars], type="class")$class
actual <- ds[test, target]
(accuracy.wsrf <- mean(cl == actual, na.rm=TRUE))
```
Thus, we have built a model that is around `r round(100*accuracy.wsrf,
0)`% accurate on unseen testing data.
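Besides class labels, `predict()` can also return vote proportions and
class probabilities (see `?predict.wsrf`). The chunk below is an
illustrative sketch and is not evaluated here:
```{r usage_predict_prob, eval=FALSE}
probs <- predict(model.wsrf.1, newdata=ds[test, vars], type="prob")$prob
head(probs)
```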
Using a different random seed, we obtain another model.
```{r usage_build_another, message=FALSE}
set.seed(seed+1)
# Here we build another model without weighting.
model.wsrf.2 <- wsrf(form, data=ds[train, vars], weights=FALSE, parallel=FALSE)
print(model.wsrf.2)
```
We can also derive a subset of the forest from the model or a
combination of multiple forests.
```{r usage_subset_combine}
submodel.wsrf <- subset.wsrf(model.wsrf.1, 1:150)
print(submodel.wsrf)
bigmodel.wsrf <- combine.wsrf(model.wsrf.1, model.wsrf.2)
print(bigmodel.wsrf)
```
Next, we will specify building the model on a cluster of servers.
```{r usage_build_on_cluster, eval=FALSE}
servers <- paste0("node", 31:40)
model.wsrf.3 <- wsrf(form, data=ds[train, vars], parallel=servers)
```
All we need is a character vector specifying the hostnames of the
nodes to use, or a named integer vector whose values give how many
threads to use on each node for model building, in other words, how
many trees are built simultaneously; a hypothetical example is
sketched below. More detailed descriptions of **wsrf** are presented
in the
[manual](https://cran.r-project.org/package=wsrf/wsrf.pdf).
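For instance, a named-vector specification might look like the
following (the node names and thread counts here are only
illustrative):
```{r usage_build_named_threads, eval=FALSE}
# Use 4 threads on node31 and 8 threads on node32,
# so up to 12 trees are built at the same time.
servers <- c(node31=4L, node32=8L)
model.wsrf.4 <- wsrf(form, data=ds[train, vars], parallel=servers)
```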
## References ##
| /scratch/gouwar.j/cran-all/cranData/wsrf/inst/doc/wsrf-guide.Rmd |
#' Adds rank information to a \code{coh} or \code{wlmtest} object
#'
#' When a \code{coh} or \code{wlmtest} object is created, the \code{ranks} slot is NA.
#' This function fills it in.
#'
#' @param obj An object of class \code{coh} or \code{wlmtest}
#'
#' @return \code{addranks} returns another \code{coh} or \code{wlmtest} object with ranks
#' slot now included. If \code{obj$ranks} was not NA, the object is returned as is.
#'
#' @author Thomas Anderson, \email{anderstl@@gmail.com}, Jon Walter, \email{jaw3es@@virginia.edu}; Lawrence
#' Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @note Internal function, no error checking performed
#'
#' @seealso \code{\link{coh}}, \code{\link{wlmtest}}, \code{\link{bandtest}}, \code{browseVignettes("wsyn")}
addranks<-function(obj)
{
if (!any(is.na(obj$ranks)))
{
return(obj)
}
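  #x holds the empirical coherence magnitudes (one per timescale) and y the
  #surrogate magnitudes (rows are surrogates, columns are timescales)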
x<-Mod(obj$signif$coher)
y<-Mod(obj$signif$scoher)
nr<-nrow(y)
nc<-ncol(y)
coher<-NA*numeric(nc)
scoher<-matrix(NA,nr,nc)
for (counter in 1:nc)
{
coher[counter]<-sum(x[counter]>y[,counter])/nr
scoher[,counter]<-(rank(y[,counter])-1)/(nr-1)
}
obj$ranks<-list(coher=coher,scoher=scoher)
return(obj)
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/addranks.R |
#' Adds wavelet mean field information to a \code{clust} object
#'
#' When a \code{clust} object is created, the \code{wmfs} slot is NA. This function fills it in.
#'
#' @param obj An object of class \code{clust}
#'
#' @return \code{addwmfs} returns another \code{clust} object with \code{wmfs} slot now included.
#' If \code{obj$wmfs} was not NA, the object is returned as is.
#'
#' @details This function uses the values of \code{scale.min}, \code{scale.max.input},
#' \code{sigma} and \code{f0} stored in \code{obj$methodspecs}. It is possible to create
#' a \code{clust} object with bad values for these slots. This function throws an error in that
#' case. You can use a correlation-based method for calculating the synchrony matrix and
#' still pass values of \code{scale.min}, \code{scale.max.input}, \code{sigma} and \code{f0}
#' to \code{clust} (in fact, this happens by default) - they won't be used by \code{clust},
#' but they will be there for later use by \code{addwmfs} and \code{addwpmfs}.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{clust}}, \code{\link{addwpmfs}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' sig<-matrix(.8,5,5)
#' diag(sig)<-1
#' lents<-50
#' if (requireNamespace("mvtnorm",quietly=TRUE))
#' {
#' dat1<-t(mvtnorm::rmvnorm(lents,mean=rep(0,5),sigma=sig))
#' dat2<-t(mvtnorm::rmvnorm(lents,mean=rep(0,5),sigma=sig))
#' }else
#' {
#' dat1<-t(matrix(rep(rnorm(lents),times=5),lents,5))
#' dat2<-t(matrix(rep(rnorm(lents),times=5),lents,5))
#' }
#' dat<-rbind(dat1,dat2)
#' times<-1:lents
#' dat<-cleandat(dat,times,clev=1)$cdat
#' coords<-data.frame(Y=rep(0,10),X=1:10)
#' method<-"coh.sig.fast"
#' clustobj<-clust(dat,times,coords,method,nsurrogs = 100)
#' res<-addwmfs(clustobj)
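#' #wavelet mean fields for each module are stored in res$wmfs; e.g.,
#' #res$wmfs[[2]][[1]] is the wmf for the first module at the second split level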
#'
#' @export
addwmfs<-function(obj)
{
#error checking
if (!inherits(obj,"clust"))
{
stop("Error in addwmfs: obj must be a clust object")
}
h<-obj$methodspecs
errcheck_wavparam(h$scale.min,h$scale.max.input,h$sigma,h$f0,obj$times,"addwmfs")
#if there are NAs in wmfs, proceed, otherwise don't overwrite
if (!any(is.na(obj$wmfs)))
{
return(obj)
}
#compute the wmfs
wmfs<-list()
for (levcount in 1:length(obj$clusters))
  { #for each clustering level, produce a list of wmf objects which are the wmfs
    #for the clusters at that level of clustering
thiswmfs<-list()
thisclust<-obj$clusters[[levcount]]
for (clustcount in 1:max(thisclust))
{ #do each cluster
inds<-which(thisclust==clustcount)
if (length(inds)==1)
{ #for clusters with one node, no wmf
thiswmfs[[clustcount]]<-NA
}else
{
thisdat<-obj$dat[thisclust==clustcount,]
thiswmfs[[clustcount]]<-wmf(dat=thisdat,times=obj$times,
scale.min=obj$methodspecs$scale.min,
scale.max.input=obj$methodspecs$scale.max.input,
sigma=obj$methodspecs$sigma,
f0=obj$methodspecs$f0)
}
}
wmfs[[levcount]]<-thiswmfs
}
#put result into the object and return
obj$wmfs<-wmfs
return(obj)
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/addwmfs.R |
#' Adds wavelet phasor mean field information to a \code{clust} object
#'
#' When a \code{clust} object is created, the \code{wpmfs} slot is NA. This function fills it in,
#' or adds to it.
#'
#' @param obj An object of class \code{clust}
#' @param level The clustering level(s) to use. 1 corresponds to no clustering. The default is all
#' levels of clustering.
#' @param sigmethod Method for significance testing the \code{wpmf}, one of \code{quick}, \code{fft},
#' \code{aaft} (see details of the \code{wpmf} function)
#' @param nrand The number of randomizations to be used for significance testing
#'
#' @return \code{addwpmfs} returns another \code{clust} object with \code{wpmfs} slot now included,
#' or more filled in than it was previously. With values of \code{sigmethod} other than
#' \code{"quick"}, this function can be slow, particularly with large \code{nrand}. So in that
#' case the user may want to set \code{level} equal only to one clustering level of interest.
#' Unlike \code{addwmfs}, old values in \code{obj$wpmfs} are overwritten.
#'
#' @details This function uses the values of \code{scale.min}, \code{scale.max.input},
#' \code{sigma} and \code{f0} stored in \code{obj$methodspecs}. It is possible to create
#' a clust object with bad values for these slots. This function throws an error in that
#' case. You can use a correlation-based method for calculating the synchrony matrix and
#' still pass values of \code{scale.min}, \code{scale.max.input}, \code{sigma} and \code{f0}
#' to \code{clust} (in fact, this happens by default) - they won't be used by \code{clust},
#' but they will be there for later use by \code{addwmfs} and \code{addwpmfs}.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{clust}}, \code{\link{addwmfs}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' sig<-matrix(.8,5,5)
#' diag(sig)<-1
#' lents<-50
#' if (requireNamespace("mvtnorm",quietly=TRUE))
#' {
#' dat1<-t(mvtnorm::rmvnorm(lents,mean=rep(0,5),sigma=sig))
#' dat2<-t(mvtnorm::rmvnorm(lents,mean=rep(0,5),sigma=sig))
#' }else
#' {
#' dat1<-t(matrix(rep(rnorm(lents),times=5),lents,5))
#' dat2<-t(matrix(rep(rnorm(lents),times=5),lents,5))
#' }
#' dat<-rbind(dat1,dat2)
#' times<-1:lents
#' dat<-cleandat(dat,times,clev=1)$cdat
#' coords<-data.frame(Y=rep(0,10),X=1:10)
#' method<-"coh.sig.fast"
#' clustobj<-clust(dat,times,coords,method,nsurrogs = 100)
#' res<-addwpmfs(clustobj)
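#' #wavelet phasor mean fields for each module are stored in res$wpmfs; e.g.,
#' #res$wpmfs[[2]][[1]] is the wpmf for the first module at the second clustering level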
#'
#' @export
addwpmfs<-function(obj,level=1:length(obj$clusters),sigmethod="quick",nrand=1000)
{
#error checking
if (!inherits(obj,"clust"))
{
stop("Error in addwpmfs: obj must be a clust object")
}
h<-obj$methodspecs
errcheck_wavparam(h$scale.min,h$scale.max.input,h$sigma,h$f0,obj$times,"addwpmfs")
#compute the wpmfs
wpmfs<-obj$wpmfs
if (length(wpmfs)==1 && is.na(wpmfs))
{
wpmfs<-list()
}
for (levcount in 1:length(level))
{ #for each clustering level, produce a list of wpmf objects which are the wpmfs
    #for the clusters at that level of clustering
thiswpmfs<-list()
thislev<-level[levcount]
thisclust<-obj$clusters[[thislev]]
for (clustcount in 1:max(thisclust))
{ #do each cluster
inds<-which(thisclust==clustcount)
if (length(inds)==1)
{ #for clusters with one node, no wpmf
thiswpmfs[[clustcount]]<-NA
}else
{
thisdat<-obj$dat[thisclust==clustcount,]
thiswpmfs[[clustcount]]<-wpmf(dat=thisdat,times=obj$times,
scale.min=obj$methodspecs$scale.min,
scale.max.input=obj$methodspecs$scale.max.input,
sigma=obj$methodspecs$sigma,
f0=obj$methodspecs$f0,
sigmethod=sigmethod,
nrand=nrand)
}
}
wpmfs[[levcount]]<-thiswpmfs
}
#put result into the object and return
obj$wpmfs<-wpmfs
return(obj)
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/addwpmfs.R |
#' Aggregate significance across a timescale band
#'
#' Computes the aggregate significance of coherence (\code{coh}) or of a wavelet linear model test object
#' (\code{wlmtest}) across a timescale band, accounting for non-independence of timescales. Also gets the
#' average phase across the band, in the case of coherence.
#'
#' @param object An object of class \code{coh} or \code{wlmtest}, must have a non-\code{NA}
#' \code{signif} slot
#' @param band A length-two numeric vector indicating a timescale band
#' @param ... Passed from the generic to specific methods. Not currently used.
#'
#' @return \code{bandtest} returns an object of the same class as its first input but with a
#' \code{bandp} slot added. Or if there was already a \code{bandp} slot, the output has a
#' \code{bandp} slot with an additional row. For a \code{coh} object, the \code{bandp} slot
#' is a data frame with four columns, the first two indicating the timescale band and the third
#' an associated p-value for the test of coherence over that band. The fourth column is the
#' average phase over the band. For a \code{wlmtest} object, the result is only the first three
#' of the above columns.
#'
#' @author Thomas Anderson, \email{anderstl@@gmail.com}, Jon Walter, \email{jaw3es@@virginia.edu}; Lawrence
#' Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Sheppard, L.W., et al. (2016) Changes in large-scale climate alter spatial synchrony of aphid
#' pests. Nature Climate Change. DOI: 10.1038/nclimate2881
#'
#' @seealso \code{\link{coh}}, \code{\link{wlm}}, \code{\link{wlmtest}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' #Example for a coh object
#' times<-(-3:100)
#' ts1<-sin(2*pi*times/10)
#' ts2<-5*sin(2*pi*times/3)
#' artsig_x<-matrix(NA,11,length(times)) #the driver
#' for (counter in 1:11)
#' {
#' artsig_x[counter,]=ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
#' }
#' times<-0:100
#' artsig_y<-matrix(NA,11,length(times)) #the driven
#' for (counter1 in 1:11)
#' {
#' for (counter2 in 1:101)
#' {
#' artsig_y[counter1,counter2]<-mean(artsig_x[counter1,counter2:(counter2+2)])
#' }
#' }
#' artsig_y<-artsig_y+matrix(rnorm(length(times)*11,mean=0,sd=3),11,length(times))
#' artsig_x<-artsig_x[,4:104]
#' artsig_x<-cleandat(artsig_x,times,1)$cdat
#' artsig_y<-cleandat(artsig_y,times,1)$cdat
#' cohobj<-coh(dat1=artsig_x,dat2=artsig_y,times=times,norm="powall",sigmethod="fast",nrand=1000,
#' f0=0.5,scale.max.input=28)
#' cohobj<-bandtest(cohobj,c(2,4))
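#' cohobj$bandp #the timescale band, its p-value, and the mean phase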
#'
#' #Example for a wlmtest object - see vignette
#'
#' @export
bandtest<-function(object,...)
{
UseMethod("bandtest",object)
}
#' @rdname bandtest
#' @export
bandtest.default<-function(object,...)
{
stop("Error in bandtest: method not defined for this class")
}
#' @rdname bandtest
#' @export
bandtest.coh<-function(object,band,...)
{
#error checking
if (any(is.na(object$signif)))
{
stop("Error in bandtest.coh: signif cannot be NA")
}
if (!is.numeric(band))
{
stop("Error in bandtest.coh: band must be numeric")
}
if (!is.vector(band))
{
stop("Error in bandtest.coh: band must be a length-two numeric vector")
}
if (length(band)!=2)
{
stop("Error in bandtest.coh: band must be a length-two numeric vector")
}
band<-sort(band)
timescales<-get_timescales(object)
if (band[1]>max(timescales) || band[2]<min(timescales))
{
stop("Error in bandtest.coh: band must include some of the timescales")
}
#add ranks if necessary
if (any(is.na(object$ranks)))
{
object<-addranks(object)
}
#get the p-value
x<-mean(object$ranks$coher[timescales>=band[1] & timescales<=band[2]]) #mean rank across timescales of interest, data
sx<-apply(FUN=mean,
X=object$ranks$scoher[,timescales>=band[1] & timescales<=band[2],drop=FALSE],
MARGIN=1) #mean ranks, surrogates
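  #one-sided p-value: fraction of surrogates whose mean rank is at least that
  #of the data, with the +1 correction so the p-value is never exactly zero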
pval<-(sum(sx>=x)+1)/(length(sx)+1)
#get the average phase
x<-object$coher[timescales>=band[1] & timescales<=band[2]]
mnphs<-mnphase(x)
#form the result and return it
if (any(is.na(object$bandp)))
{
bandp<-data.frame(ts_low_bd=band[1],ts_hi_bd=band[2],p_val=pval,mn_phs=mnphs)
object$bandp<-bandp
return(object)
}else
{
object$bandp[dim(object$bandp)[1]+1,]<-c(band,pval,mnphs)
return(object)
}
}
#' @rdname bandtest
#' @export
bandtest.wlmtest<-function(object,band,...)
{
#error checking
if (!is.numeric(band))
{
stop("Error in bandtest.wlmtest: band must be numeric")
}
if (!is.vector(band))
{
stop("Error in bandtest.wlmtest: band must be a length-two numeric vector")
}
if (length(band)!=2)
{
stop("Error in bandtest.wlmtest: band must be a length-two numeric vector")
}
band<-sort(band)
timescales<-get_timescales(object$wlmobj)
if (band[1]>max(timescales) || band[2]<min(timescales))
{
stop("Error in bandtest.wlmtest: band must include some of the timescales")
}
#add ranks if necessary
if (any(is.na(object$ranks)))
{
object<-addranks(object)
}
#get the p-value
x<-mean(object$ranks$coher[timescales>=band[1] & timescales<=band[2]]) #mean rank across timescales of interest, data
sx<-apply(FUN=mean,
X=object$ranks$scoher[,timescales>=band[1] & timescales<=band[2],drop=FALSE],
MARGIN=1) #mean ranks, surrogates
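  #one-sided p-value with the +1 correction, as in bandtest.coh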
pval<-(sum(sx>=x)+1)/(length(sx)+1)
#form the result and return it
if (any(is.na(object$bandp)))
{
bandp<-data.frame(ts_low_bd=band[1],ts_hi_bd=band[2],p_val=pval)
object$bandp<-bandp
return(object)
}else
{
object$bandp[dim(object$bandp)[1]+1,]<-c(band,pval)
return(object)
}
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/bandtest.R |
#' The one-parameter family of Box-Cox transformations
#'
#' @param y A numeric, positive values assumed
#' @param lambda The Box-Cox parameter
#'
#' @return \code{bctrans} gives \code{((y^lambda)-1)/lambda} for \code{lambda} not 0 or \code{ln(y)} for \code{lambda} equal to 0.
#'
#' @details Internal function. No error checking done. It is assumed the entries of y are positive.
#'
#' @references
#' Box, GEP and Cox, DR (1964) An analysis of transformations (with discussion). Journal of the Royal Statistical Society B, 26, 211–252.
#'
#' Venables, WN and Ripley, BD (2002) Modern Applied Statistics with S. Fourth edition. Springer.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{cleandat}}, \code{browseVignettes("wsyn")}
bctrans<-function(y,lambda)
{
if (isTRUE(all.equal(lambda,0)))
{
return(log(y))
}else
{
return(((y^lambda)-1)/lambda)
}
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/bctrans.R |
#' Clean (spatio)temporal data matrices to make them ready for analyses using the \code{wsyn} package
#'
#' A data cleaning function for optimal Box-Cox transformation, detrending, standardizing variance,
#' de-meaning
#'
#' @param dat A locations x time data matrix, or a time series vector (for 1 location)
#' @param times The times of measurement, spacing 1
#' @param clev The level of cleaning to do, 1 through 5. See details.
#' @param lambdas A vector of lambdas to test for optimal Box-Cox transformation, if Box-Cox is
#' performed. Ignored for \code{clev<4}. Defaults to seq(-10,10, by=0.01). See details.
#' @param mints If \code{clev} is 4 or 5, then time series are shifted to have this minimum value
#' before Box-Cox transformation. Default NA means use the smallest difference between consecutive,
#' distinct sorted values. NaN means perform no shift.
#'
#' @return \code{cleandat} returns a list containing the cleaned data, \code{clev}, and the optimal
#' lambdas from the Box-Cox procedure (\code{NA} for \code{clev<4}, see details).
#'
#' @details NAs, Infs, etc. in \code{dat} trigger an error. If \code{clev==1}, time series are (individually)
#' de-meaned. If \code{clev==2}, time series are (individually) linearly detrended and de-meaned. If \code{clev==3},
#' time series are (individually) linearly detrended and de-meaned, and variances are standardized to 1. If
#' \code{clev==4}, an optimal Box-Cox normalization procedure is applied jointly to all time series (so the same
#' Box-Cox transformation is applied to all time series after they are individually shifted depending on the value
#' of \code{mints}). Transformed time series are then individually linearly detrended, de-meaned, and variances are
#' standardized to 1. If \code{clev==5}, an optimal Box-Cox normalization procedure is applied to each time series
#' individually (again after individually shifting according to \code{mints}), and transformed time series are then
#' individually linearly detrended, de-meaned, and variances are standardized to 1. Constant time series and perfect
#' linear trends trigger an error for \code{clev>=3}. If \code{clev>=4} and the optimal \code{lambda} for one or
#' more time series is a boundary case or if there is more than one optimal lambda, it triggers a warning. A wider
#' range of \code{lambda} should be considered in the former case.
#'
#' @author Jonathan Walter, \email{jaw3es@@virginia.edu}; Lawrence Sheppard, \email{lwsheppard@@ku.edu};
#' Daniel Reuman, \email{reuman@@ku.edu}; Lei Zhao, \email{lei.zhao@@cau.edu.cn}
#'
#' @references
#' Box, GEP and Cox, DR (1964) An analysis of transformations (with discussion). Journal of the Royal Statistical Society B, 26, 211–252.
#'
#' Venables, WN and Ripley, BD (2002) Modern Applied Statistics with S. Fourth edition. Springer.
#'
#' Sheppard, LW, et al. (2016) Changes in large-scale climate alter spatial synchrony of aphid pests. Nature Climate Change. DOI: 10.1038/nclimate2881
#'
#' @seealso \code{\link{wt}}, \code{\link{wmf}}, \code{\link{wpmf}}, \code{\link{coh}}, \code{\link{wlm}},
#' \code{\link{wlmtest}}, \code{\link{clust}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' times<-1:100
#' dat<-rnorm(100)
#' res1<-cleandat(dat,times,1) #this removes the mean
#' res2<-cleandat(dat,times,2) #detrends and removes the mean
#' res3<-cleandat(dat,times,3) #variances also standardized
#' res4<-cleandat(dat,times,4) #also joint Box-Cox applied
#' res5<-cleandat(dat,times,5) #1-3, also indiv Box-Cox
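#' #cleandat also accepts a locations x time matrix:
#' mdat<-matrix(rnorm(500),5,100)
#' res6<-cleandat(mdat,times,3)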
#'
#' @export
#' @importFrom stats lm residuals sd
#' @importFrom MASS boxcox
cleandat<-function(dat,times,clev,lambdas=seq(-10,10,by=0.01),mints=NA)
{
#error checking
if (!(clev %in% 1:5))
{
stop("Error in cleandat: clev must be 1, 2, 3, 4, or 5")
}
if (clev %in% 4:5 && !(is.na(mints) || is.nan(mints) || (is.finite(mints) && mints>0)))
{
stop("Error in cleandat: if clev is 4 or 5, mints must be NA, NaN, or a positive number")
}
errcheck_times(times,"cleandat")
if (inherits(dat,"data.frame"))
{
stop("Error in cleandat: dat must be a vector or matrix, not a dataframe")
}
if (!is.numeric(dat))
{
stop("Error in cleandat: dat must be numeric")
}
wasvect<-FALSE
if (!is.matrix(dat))
{
wasvect<-TRUE
dat<-matrix(dat,1,length(dat))
}
if (length(times)!=dim(dat)[2])
{
stop("Error in cleandat: length of dat and times must be equal")
}
if (!all(is.finite(dat)))
{
stop("Error in cleandat: dat must not contain NAs, NaNs, Infs")
}
#error check for perfect linear trends
if (clev>=3)
{
for (counter in 1:dim(dat)[1])
{
thisrow<-dat[counter,]
if (isTRUE(all.equal(sd(stats::residuals(stats::lm(thisrow~times))),0)))
{
stop("Error in cleandat: cannot perform clev 3 or greater cleaning on time series that are constant or a perfect linear trend")
}
}
}
cdat<-dat
optlambdas<-NA*numeric(1)
#de-mean only
if (clev==1)
{
for (crow in 1:dim(cdat)[1])
{
cdat[crow,]<-cdat[crow,]-mean(cdat[crow,])
}
}
#optimal Box-Cox done jointly on all time series
if (clev==4)
{
#set minimum value for first time series
thisrow<-cdat[1,]
thisrow<-setmints(thisrow,mints)
#do Box-Cox for first time series
bxcxres<-MASS::boxcox(thisrow~times,lambda=lambdas,plotit=FALSE,interp=FALSE)
xfin<-bxcxres$x
yfin<-bxcxres$y
#now go through the rest of the time series
if (dim(cdat)[1]>1)
{
for (crow in 2:dim(cdat)[1])
{
#set minimum value for this row
thisrow<-cdat[crow,]
thisrow<-setmints(thisrow,mints)
#do Box-Cox for this row
bxcxres<-MASS::boxcox(thisrow~times,lambda=lambdas,plotit=FALSE,interp=FALSE)
if (!isTRUE(all.equal(xfin,bxcxres$x))){stop("Error in cleandat: boxcox problem with independent variable")}
yfin<-yfin+bxcxres$y
}
}
#warnings from badly behaved log likelihoods, then set the optimal lambda
inds<-which(yfin==max(yfin))
if (length(inds)>1)
{
warning("Warning from cleandat: more than one optimal value of lambda, the first was used")
inds<-inds[1]
}
if (inds==1 || inds==length(lambdas))
{
warning("Warning from cleandat: boundary optimal lambda, use wider range")
}
optlambdas<-xfin[inds]
#now do the transformations
for (crow in 1:dim(cdat)[1])
{
thisrow<-cdat[crow,]
thisrow<-setmints(thisrow,mints)
cdat[crow,]<-bctrans(thisrow,optlambdas)
}
}
#optimal Box-Cox done individually on time series
if (clev==5)
{
optlambdas<-NA*numeric(dim(cdat)[1])
for (crow in 1:dim(cdat)[1])
{
thisrow<-cdat[crow,]
#set minimum value
thisrow<-setmints(thisrow,mints)
bxcxres<-MASS::boxcox(thisrow~times,lambda=lambdas,plotit=FALSE,interp=FALSE)
inds<-which(bxcxres$y==max(bxcxres$y))
if (length(inds)>1)
{
warning("Warning from cleandat: more than one optimal value of lambda, the first was used")
inds<-inds[1]
}
if (inds==1 || inds==length(lambdas))
{
warning("Warning from cleandat: boundary optimal lambda, use wider range")
}
optlambdas[crow]<-bxcxres$x[inds]
cdat[crow,]<-bctrans(thisrow,optlambdas[crow])
}
}
#detrend and de-mean
if (clev>=2)
{
for (crow in 1:dim(cdat)[1])
{
thisrow<-cdat[crow,]
cdat[crow,]<-stats::residuals(stats::lm(thisrow~times))
}
}
#standardize variance to 1
if (clev>=3)
{
    for (crow in 1:dim(cdat)[1])
{
cdat[crow,]<-cdat[crow,]/sd(cdat[crow,])
}
}
if (wasvect)
{
cdat<-as.vector(cdat)
}
return(list(cdat=cdat,clev=clev,optlambdas=optlambdas))
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/cleandat.R |
#' Community structure detection in networks
#'
#' Community structure detection in networks based on the leading eigenvector of the
#' community matrix
#'
#' @param adj An adjacency matrix. Should be symmetric with diagonal containing zeros.
#'
#' @return \code{cluseigen} returns a list with one element for each of the splits
#' performed by the clustering algorithm. Each element is a vector with entries
#' corresponding to rows and columns of adj and indicating the module membership
#' of the node, following the split. The last element of the list is the final
#' clustering determined by the algorithm when its halting condition is satisfied.
#' The first element is always a vector of all 1s (corresponding to before any
#' splits are performed).
#'
#' @author Lei Zhao, \email{lei.zhao@@cau.edu.cn}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @details The difference between this function and the algorithm described
#' by Newman is that this function can be used on an adjacency matrix with
#' negative elements, which is very common for correlation matrices and other
#' measures of pairwise synchrony of time series.
#'
#' @references Gomez S., Jensen P. & Arenas A. (2009). Analysis of community structure
#' in networks of correlated data. Phys Rev E, 80, 016114.
#'
#' Newman M.E.J. (2006). Finding community structure in networks using the eigenvectors of
#' matrices. Phys Rev E, 74, 036104.
#'
#' Newman M.E.J. (2006) Modularity and community structure in networks. PNAS 103, 8577-8582.
#'
#' @seealso \code{\link{clust}}, \code{\link{modularity}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' adj<-matrix(0, 10, 10) # create a fake adjacency matrix
#' adj[lower.tri(adj)]<-runif(10*9/2, -1, 1)
#' adj<-adj+t(adj)
#' colnames(adj)<-letters[1:10]
#' z<-cluseigen(adj)
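#' z[[length(z)]] #the final module memberships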
#'
#' @export
cluseigen<-function(adj)
{
#error checking
if (!is.numeric(adj))
{
stop("Error in cluseigen: input must be a numeric matrix")
}
if (!is.matrix(adj))
{
stop("Error in cluseigen: input must be a numeric matrix")
}
if (dim(adj)[1]!=dim(adj)[2])
{
stop("Error in cluseigen: input must be a square matrix")
}
if (dim(adj)[1]<2)
{
stop("Error in cluseigen: input matrix must have dimensions at least 2")
}
if(!isSymmetric(unname(adj)))
{
stop("Error in cluseigen: input matrix must be symmetric")
}
if(any(diag(adj)!=0))
{
stop("Error in cluseigen: diagonal of input matrix must contain only zeros")
}
#now do the algorithm
A0<-adj
n<-nrow(A0)
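  #build the modularity matrix for a signed network by splitting the adjacency
  #matrix into its positive and negative parts (Gomez et al. 2009)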
A0.pos<-A0; A0.pos[A0.pos<0]<-0
A0.neg<-A0; A0.neg[A0.neg>0]<-0
A0.neg<-(-A0.neg)
k.pos<-colSums(A0.pos)
m.pos<-sum(k.pos)/2
k.neg<-colSums(A0.neg)
m.neg<-sum(k.neg)/2
if(m.pos==0){tmp1<-0}else{tmp1<-k.pos %o% k.pos/2/m.pos}
if(m.neg==0){tmp2<-0}else{tmp2<-k.neg %o% k.neg/2/m.neg}
B0<- A0 - tmp1 + tmp2
modules<-rep(1, n)
  Queue<-vector("list", n) #record the successive divisions
Queue[[1]]<-modules
a<-1
r<-2 # index of loops
#main function
while(a<=max(modules)){ # while there is always a divisible subgraph
# compute modularity matrix
i.remain<-which(modules==a) # nodes in current (sub)graph to partition
n1<-length(i.remain)
#if a single node, terminate and check the next module
if (n1==1)
{
a<-a+1
next
}
A1<-A0[i.remain,i.remain]
B<-B0[i.remain,i.remain] #first part of Eq. 51 in Newman 2006
diag(B)<-diag(B)-colSums(B) #minus second part of Eq. 51 in Newman 2006
E<-eigen(B,symmetric=TRUE)
#if indivisible, terminate and check the next queue
if(max(E$values)<=1e-5){
a<-a+1
next
}
#if delta_Q < 0, terminate and check the next one
i.max<-which.max(E$values)
v1<-E$vectors[,i.max]
i.pos<-which(v1>0)
i.neg<-which(v1<0)
if(sum(B[i.pos,i.pos])+sum(B[i.neg,i.neg])<=0){
a<-a+1
next
}
A1[A1>0]<-1
A1[A1<0]<-0
if(!is.connected(A1[i.pos,i.pos,drop=FALSE]) | !is.connected(A1[i.neg,i.neg,drop=FALSE])){
a<-a+1
next
}
modules[modules>=a]<-modules[modules>=a]+1
modules[i.remain[i.pos]]<-modules[i.remain[i.pos]]-1
Queue[[r]]<-modules
r<-r+1
}
Queue<-Queue[1:(r-1)]
return(Queue)
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/cluseigen.R |
#' Detection and description of clusters of synchronous locations
#'
#' Generator function for the \code{clust} S3 class, which supports tools for detecting clusters
#' (aka, modules, sub-networks, communities, etc.) of especially synchronous locations.
#'
#' @param dat A locations (rows) x time (columns) matrix of measurements
#' @param times The times at which measurements were made, spacing 1
#' @param coords A data frame containing X,Y coordinates of locations in \code{data}, with column
#' names either \code{X} and \code{Y} or \code{lon} and \code{lat} or \code{longitude} and
#' \code{latitude}. The data frame may contain other columns with additional metainformation
#' about the sites.
#' @param method Method for synchrony calculation. See details.
#' @param tsrange A vector containing the min and max of the focal timescale range. Defaults
#' to all timescales that are valid given choices for scale.min, scale.max.input, f0, sigma.
#' Only used for wavelet-based methods.
#' @param nsurrogs Number of surrogates for significance test. Defaults to 1000. Only used
#' for surrogate-based methods.
#' @param scale.min The smallest scale of fluctuation that will be examined. At least 2. Used
#' only for wavelet-based methods.
#' @param scale.max.input The largest scale of fluctuation guaranteed to be examined. Only used
#' for wavelet-based methods.
#' @param sigma The ratio of each time scale examined relative to the next timescale. Should be
#' greater than 1. Only used for wavelet-based methods.
#' @param f0 The ratio of the period of fluctuation to the width of the envelope. Only used for
#' wavelet-based methods.
#' @param weighted If \code{TRUE}, create a weighted network. If \code{FALSE}, create a binary
#' network using statistical significance. Binary networks are only allowed for networks based
#' on significance.
#' @param sigthresh Significance threshold needed, if \code{weighted} is false, for a network
#' link to be realized. Typically 0.95, 0.99, or 0.999, etc. Only used if \code{weighted} is
#' \code{FALSE}.
#'
#' @return \code{clust} returns an object of class \code{clust}. Slots are:
#' \item{dat}{The input}
#' \item{times}{The input}
#' \item{coords}{The input}
#' \item{methodspecs}{A list with elements specifying the method used, and methodological
#' parameters that were in the input.}
#' \item{adj}{The adjacency matrix that defines the synchrony network}
#' \item{clusters}{A list with one element for each successive split of the networks into
#' subcomponents carried out by the clustering algorithm. Each element is a vector of length equal
#' to the number of nodes in the original network, giving cluster membership of the nodes. The
#' first element is a vector of all 1s, corresponding to before the first clustering split was
#' performed.}
#' \item{modres}{A list of the same length as \code{clusters}, with each element containing the
#' results of calling \code{modularity} on the network split to that level.}
#' \item{mns}{Mean time series for modules. A list of the same length as \code{clusters}.}
#' \item{wmfs}{Wavelet mean fields for modules. \code{NA} when \code{clust} is first called, but
#' \code{addwmfs} causes this entry to be added. It is a list. See documentation for the method
#' \code{addwmfs}.}
#' \item{wpmfs}{Wavelet phasor mean fields for modules. \code{NA} when \code{clust} is first
#' called, but \code{addwpmfs} causes this entry to be added. It is a list. See documentation for
#' the method \code{addwpmfs}.}
#'
#' @details The following values are valid for \code{method}:
#' \code{"pearson"}, \code{"pearson.sig.std"}, \code{"pearson.sig.fft"},
#' \code{"pearson.sig.aaft"},
#' \code{"spearman"}, \code{"spearman.sig.std"}, \code{"spearman.sig.fft"},
#' \code{"spearman.sig.aaft"},
#' \code{"kendall"}, \code{"kendall.sig.std"}, \code{"kendall.sig.fft"},
#' \code{"kendall.sig.aaft"},
#' \code{"ReXWT"}, \code{"ReXWT.sig.fft"}, \code{"ReXWT.sig.aaft"}, \code{"ReXWT.sig.fast"},
#' \code{"coh"}, \code{"coh.sig.fft"}, \code{"coh.sig.aaft"}, \code{"coh.sig.fast"},
#' \code{"phasecoh"}, \code{"phasecoh.sig.fft"}, and \code{"phasecoh.sig.aaft"}.
#' The first portions of these identifiers correspond to the Pearson, Spearman, and Kendall
#' correlations, the real part of the cross-wavelet transform, the wavelet coherence, and the
#' wavelet phase coherence. The second portions of these identifiers, when present, indicate
#' that significance of the measure specified in the first portion of the identifier is to
#' be used for establishing the synchrony matrix. Otherwise the value itself is used. The
#' third part of the \code{method} identifier indicates what type of significance is used.
#'
#' Significance testing is performed using standard approaches (\code{method} flag containing
#' \code{std}; available only for the correlation coefficients,
#' although these standard approaches are inappropriate for autocorrelated data), or surrogates generated using the
#' Fourier (\code{method} flag containing \code{"fft"}) or amplitude adjusted Fourier
#' surrogates (\code{"aaft"}). For
#' \code{"coh"} and \code{"ReXWT"}, the fast testing algorithm of Sheppard et al. (2017) is also
#' implemented (\code{"fast"}). That method uses implicit Fourier surrogates. The choice of
#' wavelet coherence (method flag containing \code{"coh"}) or the real part of
#' the cross-wavelet
#' transform (method flag containing \code{"ReXWT"}) depends mainly
#' on treatment of out-of-phase
#' relationships. The \code{"ReXWT"} is more akin to a correlation coefficient in that
#' strong in-phase relationships approach 1 and strong antiphase relationships approach -1.
#' Wavelet coherence allows any phase relationship and ranges from 0 to 1. Power normalization
#' is applied for \code{"coh"} and for \code{"ReXWT"}. All significance tests are one-tailed.
#' Synchrony matrices for significance-based methods when \code{weighted} is \code{TRUE}
#' contain 1 minus the p-values.
#'
#' Clustering is performed using the eigenvector-based modularity method of
#' Newman (2006).
#'
#' @author Jonathan Walter, \email{jaw3es@@virginia.edu}; Daniel Reuman, \email{reuman@@ku.edu};
#' Lei Zhao, \email{lei.zhao@@cau.edu.cn}
#'
#' @references Walter, J. A., et al. (2017) The geography of spatial synchrony. Ecology Letters.
#' doi: 10.1111/ele.12782
#'
#' Newman M.E.J. (2006). Finding community structure in networks using the eigenvectors of
#' matrices. Phys Rev E, 74, 036104.
#'
#' Newman M.E.J. (2006) Modularity and community structure in networks. PNAS 103, 8577-8582.
#'
#' @seealso \code{\link{cluseigen}}, \code{\link{modularity}}, \code{\link{addwmfs}},
#' \code{\link{addwpmfs}},\code{\link{clust_methods}}, \code{\link{synmat}}, \code{\link{plotmap}},
#' \code{browseVignettes("wsyn")}
#'
#' @examples
#' sig<-matrix(.8,5,5)
#' diag(sig)<-1
#' lents<-50
#' if (requireNamespace("mvtnorm",quietly=TRUE))
#' {
#' dat1<-t(mvtnorm::rmvnorm(lents,mean=rep(0,5),sigma=sig))
#' dat2<-t(mvtnorm::rmvnorm(lents,mean=rep(0,5),sigma=sig))
#' }else
#' {
#' dat1<-t(matrix(rep(rnorm(lents),times=5),lents,5))
#' dat2<-t(matrix(rep(rnorm(lents),times=5),lents,5))
#' }
#' dat<-rbind(dat1,dat2)
#' times<-1:lents
#' dat<-cleandat(dat,times,clev=1)$cdat
#' coords<-data.frame(Y=rep(0,10),X=1:10)
#' method<-"coh.sig.fast"
#' res<-clust(dat,times,coords,method,nsurrogs = 50)
#' #nsurrogs should be much higher for a real application
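#' #res$clusters[[length(res$clusters)]] gives the final module memberships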
#'
#' @export
clust<-function(dat,times,coords,method,tsrange=c(0,Inf),nsurrogs=1000,
scale.min=2,scale.max.input=NULL,sigma=1.05,f0=1,weighted=TRUE,sigthresh=0.95)
{
#error checking
errcheck_stdat(times,dat,"clust")
if (!inherits(coords,"data.frame"))
{
stop("Error in clust: coords must be a data frame")
}
if (dim(coords)[1]!=dim(dat)[1])
{
stop("Error in clust: coords must have one row for each row of dat")
}
if (!(all(c("X","Y") %in% names(coords))) &&
!(all(c("lat","lon") %in% names(coords))) &&
!(all(c("latitude","longitude") %in% names(coords))))
{
stop("Error in clust: coords must have columns X and Y, or lon and lat, or longitude and latitude")
}
if (!(method %in% c("pearson","pearson.sig.std","pearson.sig.fft","pearson.sig.aaft",
"spearman","spearman.sig.std","spearman.sig.fft","spearman.sig.aaft",
"kendall","kendall.sig.std","kendall.sig.fft","kendall.sig.aaft",
"ReXWT","ReXWT.sig.fft","ReXWT.sig.aaft","ReXWT.sig.fast",
"coh","coh.sig.fft","coh.sig.aaft","coh.sig.fast",
"phasecoh","phasecoh.sig.fft","phasecoh.sig.aaft")))
{
stop("Error in clust: bad value of method")
}
if ((!weighted) && (!grepl("sig", method)))
  { #if they use a non-significance method and weighted is false, throw an error
stop("Error in clust: unweighted networks available only if method involves a significance test")
}
errcheck_wavparam(scale.min,scale.max.input,sigma,f0,times,"clust")
if (sigthresh<=0 || sigthresh>=1)
{
stop("Error in clust: inappropriate value for sigthresh")
}
#make methodspecs
methodspecs<-list(method=method,tsrange=tsrange,nsurrogs=nsurrogs,
scale.min=scale.min,scale.max.input=scale.max.input,sigma=sigma,f0=f0,
weighted=weighted,sigthresh=sigthresh)
#get the synchrony matrix
adj<-synmat(dat,times,method,tsrange,nsurrogs,
scale.min,scale.max.input,sigma,f0,
weighted,sigthresh)
#do the clustering
adjd<-adj
diag(adjd)<-0
clusters<-cluseigen(adjd)
#get the modularities
modres<-list()
for (counter in 1:length(clusters))
{
modres[[counter]]<-modularity(adj=adjd,membership=clusters[[counter]],decomp=TRUE)
}
#make mean time series
mns<-list()
for (lcount in 1:length(clusters))
{
mem<-clusters[[lcount]]
mns[[lcount]]<-matrix(NA,max(mem),length(times))
for (ccount in 1:(max(mem)))
{
mns[[lcount]][ccount,]<-apply(FUN=mean,MARGIN=2,X=dat[mem==ccount,,drop=F])
}
}
#construct the object
result<-list(dat=dat,times=times,coords=coords,methodspecs=methodspecs,
adj=adj,clusters=clusters,modres=modres,mns=mns,
wmfs=NA,wpmfs=NA)
class(result)<-c("clust","list")
return(result)
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/clust.R |
#' Basic methods for the \code{clust} class
#'
#' Set, get, summary, and print methods for the \code{clust} class.
#'
#' @param object,x,obj An object of class \code{clust}
#' @param newval A new value, for the \code{set_*} methods
#' @param ... Not currently used. Included for argument consistency
#' with existing generics.
#'
#' @return \code{summary.clust} produces a summary of a \code{clust} object.
#' A \code{print.clust} method is also available. For \code{clust} objects,
#' \code{set_*} and \code{get_*} methods are available for all slots (see
#' the documentation for \code{clust} for a list). The \code{set_*} methods
#' just throw an error, to prevent breaking the consistency between the
#' slots of a \code{clust} object.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{clust}}
#'
#' @examples
#' sig<-matrix(.8,5,5)
#' diag(sig)<-1
#' lents<-50
#' if (requireNamespace("mvtnorm",quietly=TRUE))
#' {
#' dat1<-t(mvtnorm::rmvnorm(lents,mean=rep(0,5),sigma=sig))
#' dat2<-t(mvtnorm::rmvnorm(lents,mean=rep(0,5),sigma=sig))
#' }else
#' {
#' dat1<-t(matrix(rep(rnorm(lents),times=5),lents,5))
#' dat2<-t(matrix(rep(rnorm(lents),times=5),lents,5))
#' }
#' dat<-rbind(dat1,dat2)
#' times<-1:lents
#' dat<-cleandat(dat,times,clev=1)$cdat
#' coords<-data.frame(Y=rep(0,10),X=1:10)
#' method<-"coh.sig.fast"
#' h<-clust(dat,times,coords,method,nsurrogs = 50)
#' #nsurrogs should be much higher for a real application
#' get_times(h)
#' summary(h)
#' print(h)
#'
#' @name clust_methods
NULL
#> NULL
#' @rdname clust_methods
#' @export
summary.clust<-function(object,...)
{
x<-object
ms<-x$methodspecs
h<-ms$scale.max.input
if (is.null(h)){h<-"NULL"}
#whether the wmfs slot is empty or filled
if (inherits(x$wmfs,"list"))
{
h2<-"filled"
}else
{
h2<-"empty"
}
#same for wpmfs slot
if (inherits(x$wpmfs,"list"))
{
h3<-"filled"
}else
{
h3<-"empty"
}
res<-list(class="clust",
times_start=x$times[1],
times_end=x$times[length(x$times)],
times_increment=x$times[2]-x$times[1],
sampling_locs=dim(x$dat)[1],
method=ms$method,
tsrange1=ms$tsrange[1],
tsrange2=ms$tsrange[2],
nsurrogs=ms$nsurrogs,
weighted=ms$weighted,
sigthresh=ms$sigthresh,
scale.min=ms$scale.min,
scale.max.input=h,
sigma=ms$sigma,
f0=ms$f0,
num_split_steps=length(x$clusters)-1,
num_final_modules=max(x$clusters[[length(x$clusters)]]),
final_modularity=x$modres[[length(x$modres)]]$totQ,
wmf_slot_is=h2,
wpmf_slot_is=h3)
#a summary_wsyn object inherits from the list class, but has its own print method
class(res)<-c("summary_wsyn","list")
return(res)
}
#' @rdname clust_methods
#' @export
print.clust<-function(x,...)
{
cat("clust object:\n")
#info on times
cat("times, a length",length(x$times),"numeric vector:\n")
if (length(x$times)<12)
{
cat(paste(x$times),"\n")
}else
{
cat(paste(x$times[1:5]),"...",paste(x$times[(length(x$times)-4):(length(x$times))]),"\n")
}
#number of sampling locations
cat("Number of sampling locations:",dim(x$dat)[1],"\n")
#summary of methodspecs
cat("methodspecs:\n")
w<-x$methodspecs
if (is.null(w$scale.max.input)){w$scale.max.input<-"NULL"}
cat("method=",w$method,"; tsrange=",w$tsrange[1]," to ",w$tsrange[2],"; nsurrogs=",w$nsurrogs,"; weighted=",w$weighted,"; sigthresh=",w$sigthresh,
";\nscale.min=",w$scale.min,"; scale.max.input=",w$scale.max.input,"; sigma=",w$sigma,"; f0=",w$f0,
"\n",sep="")
#number of non-zeros in adj and range of values
cat("adj has",sum(x$adj!=0,na.rm=TRUE),"of",prod(dim(x$adj))-dim(x$adj)[1],
"off-diagonal entries differing from 0; values range from",min(x$adj,na.rm=T),"to",max(x$adj,na.rm=T),"\n")
#number of splits done, number of clusters in final decomp
cat("Number of splitting steps done:",length(x$clusters)-1,"\n")
cat("Number of modules in final decomposition:",length(unique(x$clusters[[length(x$clusters)]])),"\n")
#modularity values for each level
res<-c()
for (counter in 1:length(x$modres))
{
res<-c(res,round(x$modres[[counter]]$totQ,4))
}
cat("Modularity values for each step:",paste(res),"\n")
#whether the wmfs slot is empty or filled
if (inherits(x$wmfs,"list"))
{
cat("The wmfs slot is: filled\n")
}else
{
cat("The wmfs slot is: empty\n")
}
#same for wpmfs slot
if (inherits(x$wpmfs,"list"))
{
cat("The wpmfs slot is: filled\n")
}else
{
cat("The wpmfs slot is: empty\n")
}
}
#' @rdname clust_methods
#' @export
set_times.clust<-function(obj,newval)
{
stop("Error in set_times: times should not be altered for a clust object")
}
#' @rdname setget_methods
#' @export
set_adj<-function(obj,newval)
{
UseMethod("set_adj",obj)
}
#' @rdname setget_methods
#' @export
set_adj.default<-function(obj,newval)
{
stop("Error in set_adj: set_adj not defined for this class")
}
#' @rdname clust_methods
#' @export
set_adj.clust<-function(obj,newval)
{
stop("Error in set_adj: adj should not be altered for a clust object")
}
#' @rdname setget_methods
#' @export
set_clusters<-function(obj,newval)
{
UseMethod("set_clusters",obj)
}
#' @rdname setget_methods
#' @export
set_clusters.default<-function(obj,newval)
{
stop("Error in set_clusters: set_clusters not defined for this class")
}
#' @rdname clust_methods
#' @export
set_clusters.clust<-function(obj,newval)
{
stop("Error in set_clusters: clusters should not be altered for a clust object")
}
#' @rdname setget_methods
#' @export
set_modres<-function(obj,newval)
{
UseMethod("set_modres",obj)
}
#' @rdname setget_methods
#' @export
set_modres.default<-function(obj,newval)
{
stop("Error in set_modres: set_modres not defined for this class")
}
#' @rdname clust_methods
#' @export
set_modres.clust<-function(obj,newval)
{
stop("Error in set_modres: modres should not be altered for a clust object")
}
#' @rdname setget_methods
#' @export
set_mns<-function(obj,newval)
{
UseMethod("set_mns",obj)
}
#' @rdname setget_methods
#' @export
set_mns.default<-function(obj,newval)
{
stop("Error in set_mns: set_mns not defined for this class")
}
#' @rdname clust_methods
#' @export
set_mns.clust<-function(obj,newval)
{
stop("Error in set_mns: mns should not be altered for a clust object")
}
#' @rdname clust_methods
#' @export
set_dat.clust<-function(obj,newval)
{
stop("Error in set_dat: dat should not be altered for a clust object")
}
#' @rdname setget_methods
#' @export
set_coords<-function(obj,newval)
{
UseMethod("set_coords",obj)
}
#' @rdname setget_methods
#' @export
set_coords.default<-function(obj,newval)
{
stop("Error in set_coords: set_coords not defined for this class")
}
#' @rdname clust_methods
#' @export
set_coords.clust<-function(obj,newval)
{
stop("Error in set_coords: coords should not be altered for a clust object")
}
#' @rdname setget_methods
#' @export
set_methodspecs<-function(obj,newval)
{
UseMethod("set_methodspecs",obj)
}
#' @rdname setget_methods
#' @export
set_methodspecs.default<-function(obj,newval)
{
stop("Error in set_methodspecss: set_methodspecs not defined for this class")
}
#' @rdname clust_methods
#' @export
set_methodspecs.clust<-function(obj,newval)
{
stop("Error in set_methodspecs: methodspecs should not be altered for a clust object")
}
#' @rdname setget_methods
#' @export
set_wmfs<-function(obj,newval)
{
UseMethod("set_wmfs",obj)
}
#' @rdname setget_methods
#' @export
set_wmfs.default<-function(obj,newval)
{
stop("Error in set_wmfs: set_wmfs not defined for this class")
}
#' @rdname clust_methods
#' @export
set_wmfs.clust<-function(obj,newval)
{
stop("Error in set_wmfs: wmfs should not be altered for a clust object")
}
#' @rdname setget_methods
#' @export
set_wpmfs<-function(obj,newval)
{
UseMethod("set_wpmfs",obj)
}
#' @rdname setget_methods
#' @export
set_wpmfs.default<-function(obj,newval)
{
stop("Error in set_wpmfs: set_wpmfs not defined for this class")
}
#' @rdname clust_methods
#' @export
set_wpmfs.clust<-function(obj,newval)
{
stop("Error in set_wpmfs: wpmfs should not be altered for a clust object")
}
#' @rdname clust_methods
#' @export
get_times.clust<-function(obj)
{
return(obj$times)
}
#' @rdname setget_methods
#' @export
get_adj<-function(obj)
{
UseMethod("get_adj",obj)
}
#' @rdname setget_methods
#' @export
get_adj.default<-function(obj)
{
stop("Error in get_adj: get_adj not defined for this class")
}
#' @rdname clust_methods
#' @export
get_adj.clust<-function(obj)
{
return(obj$adj)
}
#' @rdname setget_methods
#' @export
get_clusters<-function(obj)
{
UseMethod("get_clusters",obj)
}
#' @rdname setget_methods
#' @export
get_clusters.default<-function(obj)
{
stop("Error in get_clusters: get_clusters not defined for this class")
}
#' @rdname clust_methods
#' @export
get_clusters.clust<-function(obj)
{
return(obj$clusters)
}
#' @rdname setget_methods
#' @export
get_modres<-function(obj)
{
UseMethod("get_modres",obj)
}
#' @rdname setget_methods
#' @export
get_modres.default<-function(obj)
{
stop("Error in get_modres: get_modres not defined for this class")
}
#' @rdname clust_methods
#' @export
get_modres.clust<-function(obj)
{
return(obj$modres)
}
#' @rdname setget_methods
#' @export
get_mns<-function(obj)
{
UseMethod("get_mns",obj)
}
#' @rdname setget_methods
#' @export
get_mns.default<-function(obj)
{
stop("Error in get_mns: get_mns not defined for this class")
}
#' @rdname clust_methods
#' @export
get_mns.clust<-function(obj)
{
return(obj$mns)
}
#' @rdname clust_methods
#' @export
get_dat.clust<-function(obj)
{
return(obj$dat)
}
#' @rdname setget_methods
#' @export
get_coords<-function(obj)
{
UseMethod("get_coords",obj)
}
#' @rdname setget_methods
#' @export
get_coords.default<-function(obj)
{
stop("Error in get_coords: get_coords not defined for this class")
}
#' @rdname clust_methods
#' @export
get_coords.clust<-function(obj)
{
return(obj$coords)
}
#' @rdname setget_methods
#' @export
get_methodspec<-function(obj)
{
UseMethod("get_methodspec",obj)
}
#' @rdname setget_methods
#' @export
get_methodspec.default<-function(obj)
{
stop("Error in get_methodspec: get_methodspec not defined for this class")
}
#' @rdname clust_methods
#' @export
get_methodspec.clust<-function(obj)
{
  return(obj$methodspecs)
}
#' @rdname setget_methods
#' @export
get_wmfs<-function(obj)
{
UseMethod("get_wmfs",obj)
}
#' @rdname setget_methods
#' @export
get_wmfs.default<-function(obj)
{
stop("Error in get_wmfs: get_wmfs not defined for this class")
}
#' @rdname clust_methods
#' @export
get_wmfs.clust<-function(obj)
{
return(obj$wmfs)
}
#' @rdname setget_methods
#' @export
get_wpmfs<-function(obj)
{
UseMethod("get_wpmfs",obj)
}
#' @rdname setget_methods
#' @export
get_wpmfs.default<-function(obj)
{
stop("Error in get_wpmfs: get_wpmfs not defined for this class")
}
#' @rdname clust_methods
#' @export
get_wpmfs.clust<-function(obj)
{
return(obj$wpmfs)
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/clust_methods.R |
#' Coherence
#'
#' Wavelet coherence and wavelet phase coherence, spatial or for single time series.
#' Also the generator function for the \code{coh} class, which inherits from the \code{list}
#' class.
#'
#' @param dat1 A locations (rows) x time (columns) matrix (for spatial coherence), or a single time series
#' @param dat2 Same format as dat1, same locations and times
#' @param times The times at which measurements were made, spacing 1
#' @param norm The normalization of wavelet transforms to use. Controls the version of the coherence that is
#' performed. One of "none", "phase", "powall", "powind". See details.
#' @param sigmethod The method for significance testing. One of "none", "fftsurrog1", "fftsurrog2", "fftsurrog12",
#' "aaftsurrog1", "aaftsurrog2", "aaftsurrog12", "fast". See details.
#' @param nrand Number of surrogate randomizations to use for significance testing.
#' @param scale.min The smallest scale of fluctuation that will be examined. At least 2.
#' @param scale.max.input The largest scale of fluctuation guaranteed to be examined
#' @param sigma The ratio of each time scale examined relative to the next timescale. Should be greater than 1.
#' @param f0 The ratio of the period of fluctuation to the width of the envelope
#'
#' @return \code{coh} returns an object of class \code{coh}. Slots are:
#' \item{dat1, dat2}{The input data}
#' \item{times}{The times associated with the data}
#' \item{sigmethod}{The method for significance testing, as inputted.}
#' \item{norm}{The normalization of the wavelet transforms that will be used in computing the coherence. Different
#' values result in different versions of the coherence. One of "none", "phase", "powall", "powind". See details.}
#' \item{wtopt}{The inputted wavelet transform options scale.min, scale.max.input, sigma, f0 in a list}
#' \item{timescales}{The timescales associated with the coherence}
#' \item{coher}{A complex vector; the modulus of this quantity is the coherence, calculated in the usual way (which depends
#' on \code{norm}, see details), and with scalloping of the transforms.}
#' \item{signif}{A list with information from the significance testing. Elements are \code{coher} and \code{scoher}.
#' See details.}
#' \item{ranks}{A list with ranking information for \code{signif}. \code{NA} until \code{plotrank} is called, see
#' documentation for \code{plotrank}.}
#' \item{bandp}{A data frame containing results of computing significances of the coherence across timescale bands.
#' Empty on an initial call to \code{coh}, filled in by the function \code{bandtest}. See details.}
#'
#' @details If the dimensions of \code{dat1} and \code{dat2} are \eqn{N} by \eqn{T}
#' (\eqn{N} is 1 for
#' vector \code{dat1} and \code{dat2}), and if the wavelet transform of the \eqn{n}th row
#' of \code{dati} is denoted \eqn{W_{i,n,\sigma}(t)}, then the coherence is the
#' average, over all
#' locations \eqn{n} and times \eqn{t} for which wavelet transforms are
#' available, of the quantity
#' \eqn{w_{1,n,\sigma}(t)w_{2,n,\sigma}(t)^{*}}, where the \eqn{*} represents
#' complex conjugation and
#' \eqn{w_{i,n,\sigma}(t)} is a normalization of the wavelet
#' transform. The normalization used depends
#' on \code{norm}. If \code{norm} is "\code{none}" then raw wavelet transforms are used.
#' If \code{norm} is "\code{phase}" then
#' \eqn{w_{i,n,\sigma}(t)=W_{i,n,\sigma}(t)/|W_{i,n,\sigma}(t)|},
#' which gives the wavelet phase coherence, or the spatial wavelet phase coherence if \eqn{N>1}.
#' If \code{norm} is "\code{powall}" then the normalization is that descibed in the "Wavelet
#' mean field" section of the Methods of Sheppard et al. (2016), giving the version of the
#' coherence that was there called simply the wavelet coherence, or the spatial wavelet
#' coherence if \eqn{N>1}. If \code{norm} is "\code{powind}",
#' then \eqn{w_{i,n,\sigma}(t)} is obtained
#' by dividing \eqn{W_{i,n,\sigma}(t)} by the square root of the average of
#' \eqn{W_{i,n,\sigma}(t)W_{i,n,\sigma}(t)^{*}} over the times for
#' which it is defined; this is done
#' separately for each \eqn{i} and \eqn{n}.
#'
#' The slot \code{signif} is \code{NA} if \code{sigmethod} is "\code{none}". Otherwise, and
#' if \code{sigmethod} is not "\code{fast}", then \code{signif$coher} is the same as
#' \code{coher}, and \code{signif$scoher} is a matrix of dimensions \code{nrand} by
#' \code{length(coher)} with rows with magnitudes equal to coherences of surrogate
#' datasets, computed using
#' the normalization specified by \code{norm}. The type of surrogate used (Fourier surrogates
#' or amplitude adjusted Fourier surrogates, see \code{surrog}), as well as which of the
#' datasets surrogates are computed on (\code{dat1}, \code{dat2}, or both) is determined by
#' \code{sigmethod}. The first part of the value of \code{sigmethod} specifies the
#' type of surrogate used, and the numbers in the second part (1, 2, or 12) specify
#' whether surrogates are applied to \code{dat1}, \code{dat2}, or both, respectively.
#' Synchrony-preserving surrogates are used. A variety of
#' statements of significance (or lack thereof) can be made
#' by comparing \code{signif$coher} with \code{signif$scoher} (see the \code{plotmag},
#' \code{plotrank}, and \code{bandtest} methods
#' for the \code{coh} class). If \code{sigmethod} is
#' "\code{fast}", the fast algorithm of Sheppard et al. (2017) is used. In that case
#' \code{signif$coher} can be compared to \code{signif$scoher} to make significance
#' statements about the coherence in exactly the same way, but \code{signif$coher} will no
#' longer precisely equal \code{coher}, and \code{coher} should not be compared
#' directly to \code{signif$scoher}. Statements about significance of the coherence
#' should be made using \code{signif$coher} and \code{signif$scoher}, whereas \code{coher}
#' should be used whenever the actual value of the coherence is needed. No fast algorithm
#' exists for \code{norm} equal to "\code{phase}" (the phase coherence; Sheppard et al, 2017),
#' so if \code{norm} is "\code{phase}" and \code{sigmethod} is "\code{fast}", the function
#' throws an error.
#'
#' The slots \code{ranks} and \code{bandp} are empty on an initial call to \code{coh}.
#' They exist to hold aggregate significance results over any timescale band of choice,
#' and are filled in when needed by other methods; see \code{plotrank} and \code{bandtest}.
#'
#' Regardless of what the variables represent, the normalized transform of \code{dat1} is multiplied
#' by the conjugate of the normalized transform of \code{dat2}. Thus, a positive phase of the
#' coherence indicates that \code{dat1} leads \code{dat2}.
#'
#' @author Thomas Anderson, \email{anderstl@@gmail.com}, Jon Walter, \email{jaw3es@@virginia.edu}; Lawrence
#' Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Sheppard, L.W., et al. (2016) Changes in large-scale climate alter spatial synchrony of aphid
#' pests. Nature Climate Change. DOI: 10.1038/nclimate2881
#'
#' Sheppard, L.W., et al. (2017) Rapid surrogate testing of wavelet coherences. European Physical
#' Journal, Nonlinear and Biomedical Physics, 5, 1. DOI: 10.1051/epjnbp/2017000
#'
#' @seealso \code{\link{cleandat}}, \code{\link{coh_methods}}, \code{\link{bandtest}}, \code{\link{plotmag}},
#' \code{\link{plotphase}}, \code{\link{plotrank}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' times<-1:100
#' dat1<-matrix(rnorm(1000),10,100)
#' dat2<-matrix(rnorm(1000),10,100)
#' dat1<-cleandat(dat1,times,1)$cdat
#' dat2<-cleandat(dat2,times,1)$cdat
#' norm<-"powall"
#' sigmethod<-"fast"
#' nrand<-10
#' res<-coh(dat1,dat2,times,norm,sigmethod,nrand)
#' #for real applications, use a much bigger nrand
#'
#' @export
coh<-function(dat1,dat2,times,norm,sigmethod="none",nrand=1000,scale.min=2,scale.max.input=NULL,sigma=1.05,f0=1)
{
#**error checking
errcheck_times(times,"coh")
errcheck_wavparam(scale.min,scale.max.input,sigma,f0,times,"coh")
wasvect1<-FALSE
if (is.matrix(dat1) && dim(dat1)[1]>1)
{
errcheck_stdat(1:dim(dat1)[2],dat1,"coh")
}else
{
if (!is.matrix(dat1)){wasvect1<-TRUE}
errcheck_tsdat(1:length(dat1),dat1,"coh")
dat1<-matrix(dat1, nrow=1, ncol=length(dat1))
}
wasvect2<-FALSE
if (is.matrix(dat2) && dim(dat2)[1]>1)
{
errcheck_stdat(1:dim(dat2)[2],dat2,"coh")
}else
{
if (!is.matrix(dat2)){wasvect2<-TRUE}
errcheck_tsdat(1:length(dat2),dat2,"coh")
dat2<-matrix(dat2, nrow=1, ncol=length(dat2))
}
if (!isTRUE(all.equal(dim(dat1),dim(dat2))))
{
stop("Error in coh: dimensions of dat1 and dat2 must agree")
}
if (!(norm %in% c("none","phase","powall","powind")))
{
stop("Error in coh: bad value for norm")
}
if (!(sigmethod %in% c("none","fftsurrog1","fftsurrog2","fftsurrog12",
"aaftsurrog1","aaftsurrog2","aaftsurrog12","fast")))
{
stop("Error in coh: bad value for sigmethod")
}
if (sigmethod=="fast" && norm=="phase")
{
stop("Error in coh: no fast significance algorithm for phase coherence")
}
#**get wavelet transforms
h<-warray(dat1,times,scale.min,scale.max.input,sigma,f0)
W1<-h$wavarray
timescales<-h$timescales
h<-warray(dat2,times,scale.min,scale.max.input,sigma,f0)
W2<-h$wavarray
#**normalize
W1<-normforcoh(W1,norm)
W2<-normforcoh(W2,norm)
#**compute coherence
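  #W1*Conj(W2) is locations by times by timescales; averaging over the first two
  #margins (locations and times) yields one complex coherence value per timescale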
coher<-apply(X=W1*Conj(W2),FUN=mean,MARGIN=3,na.rm=T)
#**for return
wtopt<-list(scale.min=scale.min,scale.max.input=scale.max.input,
sigma=sigma,f0=f0)
#**now do the different cases for how significance is computed
#*no significance requested by user - just return
if (sigmethod=="none")
{
#prepare result
if (wasvect1){dat1<-as.vector(dat1)}
if (wasvect2){dat2<-as.vector(dat2)}
result<-list(dat1=dat1,dat2=dat2,times=times,sigmethod=sigmethod,norm=norm,wtopt=wtopt,
timescales=timescales,coher=coher,signif=NA,ranks=NA,bandp=NA)
class(result)<-c("coh","list")
return(result)
}
#*fast algorithm case
if (sigmethod=="fast")
{
randnums<-runif(nrand*floor((ncol(dat1)-1)/2))
if (dim(dat1)[2] %% 2 == 0)
{
randbits<-sample.int(2,2*nrand,replace=TRUE)-1
}else
{
randbits<-sample.int(2,nrand,replace=TRUE)-1
}
fcres<-fastcohtest(dat1,dat2,scale.min,scale.max.input,sigma,f0,nrand,randnums,randbits,norm)
signif<-list(coher=fcres$coher,scoher=fcres$scoher)
#prepare result
if (wasvect1){dat1<-as.vector(dat1)}
if (wasvect2){dat2<-as.vector(dat2)}
result<-list(dat1=dat1,dat2=dat2,times=times,sigmethod=sigmethod,norm=norm,wtopt=wtopt,
timescales=timescales,coher=coher,signif=signif,ranks=NA,bandp=NA)
class(result)<-c("coh","list")
return(result)
}
#*otherwise sigmethod is one of "fftsurrog1", "fftsurrog2",
#"fftsurrog12", "aaftsurrog1", "aaftsurrog2", "aaftsurrog12",
#all handled below
#figure out what kind of surrogates to use
if (sigmethod %in% c("fftsurrog1","fftsurrog2","fftsurrog12"))
{
surr<-"fft"
}else
{
surr<-"aaft"
}
#surrogate the specified time series and take transforms and normalize
f<-function(x,times,scale.min,scale.max.input,sigma,f0)
{
return(warray(x,times,scale.min,scale.max.input,sigma,f0)$wavarray)
}
sW1<-rep(list(W1),times=nrand)
sW2<-rep(list(W2),times=nrand)
if (sigmethod %in% c("fftsurrog1","fftsurrog12","aaftsurrog1","aaftsurrog12"))
{
sdat1<-surrog(dat1,nrand,surrtype=surr,syncpres=TRUE)
sW1<-lapply(FUN=f,X=sdat1,times=times,scale.min=scale.min,scale.max.input=scale.max.input,sigma=sigma,f0=f0) #take transforms
sW1<-lapply(X=sW1,FUN=normforcoh,norm=norm) #normalize
}
if (sigmethod %in% c("fftsurrog2","fftsurrog12","aaftsurrog2","aaftsurrog12"))
{
sdat2<-surrog(dat2,nrand,surrtype=surr,syncpres=TRUE)
sW2<-lapply(FUN=f,X=sdat2,times=times,scale.min=scale.min,scale.max.input=scale.max.input,sigma=sigma,f0=f0) #take transforms
sW2<-lapply(X=sW2,FUN=normforcoh,norm=norm) #normalize
}
#now compute coherences
scoher<-matrix(complex(real=NA,imaginary=NA),nrand,length(timescales))
for (counter in 1:nrand)
{
scoher[counter,]<-apply(X=sW1[[counter]]*Conj(sW2[[counter]]),FUN=mean,MARGIN=3,na.rm=T)
}
#assemble the significance results
signif<-list(coher=coher,scoher=scoher)
#prepare result
if (wasvect1){dat1<-as.vector(dat1)}
if (wasvect2){dat2<-as.vector(dat2)}
result<-list(dat1=dat1,dat2=dat2,times=times,sigmethod=sigmethod,
norm=norm,wtopt=wtopt,timescales=timescales,coher=coher,
signif=signif,ranks=NA,bandp=NA)
class(result)<-c("coh","list")
return(result)
}
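
#A minimal usage sketch (assuming a coh object "res" computed with sigmethod
#other than "none", as in the examples above); not run:
#q95<-apply(X=Mod(res$signif$scoher),FUN=stats::quantile,MARGIN=2,prob=0.95)
#exceeds95<-Mod(res$signif$coher)>q95 #TRUE at timescales where the coherence
#exceeds the 95th percentile of the surrogate distribution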
#' Basic methods for the \code{coh} class
#'
#' Set, get, summary, and print methods for the \code{coh} class.
#'
#' @param object,x,obj An object of class \code{coh}
#' @param newval A new value, for the \code{set_*} methods
#' @param ... Not currently used. Included for argument consistency
#' with existing generics.
#'
#' @return \code{summary.coh} produces a summary of a \code{coh} object.
#' A \code{print.coh} method is also available. For \code{coh} objects,
#' \code{set_*} and \code{get_*} methods are available for all slots (see
#' the documentation for \code{coh} for a list). The \code{set_*} methods
#' just throw an error, to prevent breaking the consistency between the
#' slots of a \code{coh} object.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{coh}}
#'
#' @examples
#' times<-1:100
#' dat1<-matrix(rnorm(1000),10,100)
#' dat2<-matrix(rnorm(1000),10,100)
#' dat1<-cleandat(dat1,times,1)$cdat
#' dat2<-cleandat(dat2,times,1)$cdat
#' norm<-"powall"
#' sigmethod<-"fast"
#' nrand<-10
#' h<-coh(dat1,dat2,times,norm,sigmethod,nrand)
#' get_times(h)
#' summary(h)
#' print(h)
#'
#' @name coh_methods
NULL
#' @rdname coh_methods
#' @export
summary.coh<-function(object,...)
{
x<-object
h<-x$wtopt$scale.max.input
if (is.null(h)){h<-"NULL"}
#whether the ranks slot is full
if (inherits(x$ranks,"list"))
{
h2<-"filled"
}else
{
h2<-"empty"
}
res<-list(class="coh",
times_start=x$times[1],
times_end=x$times[length(x$times)],
times_increment=x$times[2]-x$times[1],
sampling_locs=dim(x$dat1)[1],
timescale_start=x$timescales[1],
timescale_end=x$timescales[length(x$timescales)],
timescale_length=length(x$timescales),
normalization=x$norm,
scale.min=x$wtopt$scale.min,
scale.max.input=h,
sigma=x$wtopt$sigma,
f0=x$wtopt$f0,
sigmethod=x$sigmethod,
ranks_slot_is=h2)
  #a summary_wsyn object inherits from the list class, but has its own print method
class(res)<-c("summary_wsyn","list")
return(res)
}
#' @rdname coh_methods
#' @export
print.coh<-function(x,...)
{
cat("coh object:\n")
cat("times, a length",length(x$times),"numeric vector:\n")
if (length(x$times)<12)
{
cat(paste(x$times),"\n")
}else
{
cat(paste(x$times[1:5]),"...",paste(x$times[(length(x$times)-4):(length(x$times))]),"\n")
}
cat("Number of sampling locations:",dim(x$dat1)[1],"\n")
cat("timescales, a length",length(x$timescales),"numeric vector:\n")
if (length(x$timescales)<12)
{
cat(paste(x$timescales),"\n")
}else
{
cat(paste(x$timescales[1:5]),"...",paste(x$timescales[(length(x$timescales)-4):(length(x$timescales))]),"\n")
}
cat("norm, the normalization used:",x$norm,"\n")
w<-x$wtopt
if (is.null(w$scale.max.input)){w$scale.max.input<-"NULL"}
cat("wtopt: scale.min=",w$scale.min,"; scale.max.input=",w$scale.max.input,"; sigma=",w$sigma,"; f0=",w$f0,"\n",sep="")
cat("sigmethod, the type of significance testing used:",x$sigmethod,"\n")
if (inherits(x$signif,"list"))
{
cat("Number of surrogates:",dim(x$signif$scoher)[1],"\n")
}else
{
cat("Number of surrogates: NA\n")
}
if (inherits(x$ranks,"list"))
{
cat("The ranks slot is: filled\n")
}else
{
cat("The ranks slot is: empty\n")
}
if (inherits(x$bandp,"data.frame"))
{
cat("Timescale bands tested in bandp slot:\n")
    print(x$bandp[,c(1,2)])
}else
{
cat("Timescale bands tested in bandp slot: none")
}
}
#' @rdname coh_methods
#' @export
set_times.coh<-function(obj,newval)
{
stop("Error in set_times: times should not be altered for a coh object")
}
#' @rdname coh_methods
#' @export
set_timescales.coh<-function(obj,newval)
{
stop("Error in set_timescales: timescales should not be altered for a coh object")
}
#' @rdname setget_methods
#' @export
set_coher<-function(obj,newval)
{
UseMethod("set_coher",obj)
}
#' @rdname setget_methods
#' @export
set_coher.default<-function(obj,newval)
{
stop("Error in set_coher: set_coher not defined for this class")
}
#' @rdname coh_methods
#' @export
set_coher.coh<-function(obj,newval)
{
stop("Error in set_coher: coher should not be altered for a coh object")
}
#' @rdname setget_methods
#' @export
set_dat1<-function(obj,newval)
{
UseMethod("set_dat1",obj)
}
#' @rdname setget_methods
#' @export
set_dat1.default<-function(obj,newval)
{
stop("Error in set_dat1: set_dat1 not defined for this class")
}
#' @rdname coh_methods
#' @export
set_dat1.coh<-function(obj,newval)
{
stop("Error in set_dat1: dat1 should not be altered for a coh object")
}
#' @rdname setget_methods
#' @export
set_dat2<-function(obj,newval)
{
UseMethod("set_dat2",obj)
}
#' @rdname setget_methods
#' @export
set_dat2.default<-function(obj,newval)
{
stop("Error in set_dat2: set_dat2 not defined for this class")
}
#' @rdname coh_methods
#' @export
set_dat2.coh<-function(obj,newval)
{
stop("Error in set_dat2: dat2 should not be altered for a coh object")
}
#' @rdname coh_methods
#' @export
set_wtopt.coh<-function(obj,newval)
{
stop("Error in set_wtopt: wtopt should not be altered for a coh object")
}
#' @rdname setget_methods
#' @export
set_norm<-function(obj,newval)
{
UseMethod("set_norm",obj)
}
#' @rdname setget_methods
#' @export
set_norm.default<-function(obj,newval)
{
stop("Error in set_norm: set_norm not defined for this class")
}
#' @rdname coh_methods
#' @export
set_norm.coh<-function(obj,newval)
{
stop("Error in set_norm: norm should not be altered for a coh object")
}
#' @rdname setget_methods
#' @export
set_sigmethod<-function(obj,newval)
{
UseMethod("set_sigmethod",obj)
}
#' @rdname setget_methods
#' @export
set_sigmethod.default<-function(obj,newval)
{
stop("Error in set_sigmethod: set_sigmethod not defined for this class")
}
#' @rdname coh_methods
#' @export
set_sigmethod.coh<-function(obj,newval)
{
stop("Error in set_sigmethod: sigmethod should not be altered for a coh object")
}
#' @rdname coh_methods
#' @export
set_signif.coh<-function(obj,newval)
{
stop("Error in set_signif: signif should not be altered for a coh object")
}
#' @rdname setget_methods
#' @export
set_ranks<-function(obj,newval)
{
UseMethod("set_ranks",obj)
}
#' @rdname setget_methods
#' @export
set_ranks.default<-function(obj,newval)
{
stop("Error in set_ranks: set_ranks not defined for this class")
}
#' @rdname coh_methods
#' @export
set_ranks.coh<-function(obj,newval)
{
stop("Error in set_ranks: ranks should not be altered for a coh object")
}
#' @rdname setget_methods
#' @export
set_bandp<-function(obj,newval)
{
UseMethod("set_bandp",obj)
}
#' @rdname setget_methods
#' @export
set_bandp.default<-function(obj,newval)
{
stop("Error in set_bandp: set_bandp not defined for this class")
}
#' @rdname coh_methods
#' @export
set_bandp.coh<-function(obj,newval)
{
stop("Error in set_bandp: bandp should not be altered for a coh object")
}
#' @rdname coh_methods
#' @export
get_times.coh<-function(obj)
{
return(obj$times)
}
#' @rdname coh_methods
#' @export
get_timescales.coh<-function(obj)
{
return(obj$timescales)
}
#' @rdname setget_methods
#' @export
get_coher<-function(obj)
{
UseMethod("get_coher",obj)
}
#' @rdname setget_methods
#' @export
get_coher.default<-function(obj)
{
stop("Error in get_coher: get_coher not defined for this class")
}
#' @rdname coh_methods
#' @export
get_coher.coh<-function(obj)
{
return(obj$coher)
}
#' @rdname setget_methods
#' @export
get_dat1<-function(obj)
{
UseMethod("get_dat1",obj)
}
#' @rdname setget_methods
#' @export
get_dat1.default<-function(obj)
{
stop("Error in get_dat1: get_dat1 not defined for this class")
}
#' @rdname coh_methods
#' @export
get_dat1.coh<-function(obj)
{
return(obj$dat1)
}
#' @rdname setget_methods
#' @export
get_dat2<-function(obj)
{
UseMethod("get_dat2",obj)
}
#' @rdname setget_methods
#' @export
get_dat2.default<-function(obj)
{
stop("Error in get_dat2: get_dat2 not defined for this class")
}
#' @rdname coh_methods
#' @export
get_dat2.coh<-function(obj)
{
return(obj$dat2)
}
#' @rdname coh_methods
#' @export
get_wtopt.coh<-function(obj)
{
return(obj$wtopt)
}
#' @rdname setget_methods
#' @export
get_norm<-function(obj)
{
UseMethod("get_norm",obj)
}
#' @rdname setget_methods
#' @export
get_norm.default<-function(obj)
{
stop("Error in get_norm: get_norm not defined for this class")
}
#' @rdname coh_methods
#' @export
get_norm.coh<-function(obj)
{
return(obj$norm)
}
#' @rdname setget_methods
#' @export
get_sigmethod<-function(obj)
{
UseMethod("get_sigmethod",obj)
}
#' @rdname setget_methods
#' @export
get_sigmethod.default<-function(obj)
{
stop("Error in get_sigmethod: get_sigmethod not defined for this class")
}
#' @rdname coh_methods
#' @export
get_sigmethod.coh<-function(obj)
{
return(obj$sigmethod)
}
#' @rdname coh_methods
#' @export
get_signif.coh<-function(obj)
{
return(obj$signif)
}
#' @rdname setget_methods
#' @export
get_ranks<-function(obj)
{
UseMethod("get_ranks",obj)
}
#' @rdname setget_methods
#' @export
get_ranks.default<-function(obj)
{
stop("Error in get_ranks: get_ranks not defined for this class")
}
#' @rdname coh_methods
#' @export
get_ranks.coh<-function(obj)
{
return(obj$ranks)
}
#' @rdname setget_methods
#' @export
get_bandp<-function(obj)
{
UseMethod("get_bandp",obj)
}
#' @rdname setget_methods
#' @export
get_bandp.default<-function(obj)
{
stop("Error in get_bandp: get_bandp not defined for this class")
}
#' @rdname coh_methods
#' @export
get_bandp.coh<-function(obj)
{
return(obj$bandp)
}
#' Error check for appropriate spatio-temporal data
#'
#' Error checking whether a times vector and a matrix with each row a time
#' series make a legitimate spatio-temporal data set for wavelet analysis
#'
#' @param times the times of measurement, spacing 1
#' @param dat each row is a time series - must have at least two rows
#' @param callfunc the function calling this one, for error tracking
#'
#' @return \code{errcheck_stdat} returns nothing but throws an error if inputs not appropriate
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
errcheck_stdat<-function(times,dat,callfunc)
{
errcheck_times(times,callfunc)
if (!is.numeric(dat))
{
stop(paste0("Error in errcheck_stdat called by ",callfunc,": dat must be numeric"))
}
if (!is.matrix(dat))
{
stop(paste0("Error in errcheck_stdat called by ",callfunc,": dat must be a matrix"))
}
if (dim(dat)[1]==1)
{
stop(paste0("Error in errcheck_stdat called by ",callfunc,": dat must have at least two rows"))
}
if (length(times)!=dim(dat)[2])
{
stop(paste0("Error in errcheck_stdat called by ",callfunc,": second dimension of dat must equal length of times"))
}
if (!all(is.finite(dat)))
{
stop(paste0("Error in errcheck_stdat called by ",callfunc,": dat must not contain NAs, NaNs, Infs"))
}
for (counter in 1:dim(dat)[1])
{
errcheck_tsdat(times,dat[counter,],callfunc)
}
}
#' Error check \code{times}
#'
#' Error check whether a vector can represent times at which data suitable
#' for wavelet transforms were measured
#'
#' @param times The vector to be tested: should be numeric with unit-spaced, increasing values
#' @param callfunc Function calling this one, for better error messaging
#'
#' @return \code{errcheck_times} returns nothing but throws an error if the conditions are not met
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
errcheck_times<-function(times,callfunc)
{
if (!is.numeric(times))
{
stop(paste0("Error in errcheck_times called by ",callfunc,": times must be numeric"))
}
if (length(times)<2)
{
stop(paste0("Error in errcheck_times called by ",callfunc,": times must be a vector"))
}
if (!all(is.finite(times)))
{
stop(paste0("Error in errcheck_times called by ",callfunc,": times must not contain NAs, NaNs, Infs"))
}
d<-diff(times)
if (!isTRUE(all.equal(rep(1,length(d)),d)))
{
stop(paste0("Error in errcheck_times called by ",callfunc,": times must be unit spaced; output timescales in units of cycles per sampling interval"))
}
}
#' Error check for appropriate temporal data
#'
#' Error checking whether a times vector and t.series vector make a
#' legitimate time series for wavelet analysis
#'
#' @param times times of measurement, spacing 1
#' @param t.series the measurements
#' @param callfunc the function from which this one was called, for error tracking
#'
#' @return \code{errcheck_tsdat} returns nothing but throws an error if inputs not appropriate
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
errcheck_tsdat<-function(times,t.series,callfunc)
{
errcheck_times(times,callfunc)
if (!is.numeric(t.series))
{
stop(paste0("Error in errcheck_tsdat called by ",callfunc,": t.series not numeric"))
}
if (is.matrix(t.series))
{
if (dim(t.series)[1]!=1)
{
stop(paste0("Error in errcheck_tsdat called by ",callfunc,": t.series can either be a vector or matrix with one row"))
}
t.series<-as.vector(t.series)
}
if (length(times)!=length(t.series))
{
stop(paste0("Error in errcheck_tsdat called by ",callfunc,": times and t.series must be the same length"))
}
if (!all(is.finite(t.series)))
{
stop(paste0("Error in errcheck_times called by ",callfunc,": t.series must not contain NAs, NaNs, Infs"))
}
if (!isTRUE(all.equal(mean(t.series),0)))
{
stop(paste0("Error in errcheck_tsdat called by ",callfunc,": t.series must have zero mean"))
}
}
#' Error check whether inputs are suitable for a tts object
#'
#' @param times times of measurement, spacing 1
#' @param timescales timescales of analysis
#' @param values a times by timescales matrix
#' @param callfunc the function from which this one was called, for error tracking
#'
#' @return \code{errcheck_tts} returns nothing but throws an error if inputs not appropriate
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
errcheck_tts<-function(times,timescales,values,callfunc)
{
errcheck_times(times,callfunc)
#timescale should be a numeric vector with positive entries
if (!is.numeric(timescales))
{
stop(paste0("Error in errcheck_tts called by ",callfunc,": timescales must be numeric"))
}
if (length(timescales)<2)
{
stop(paste0("Error in errcheck_tts called by ",callfunc,": timescales must be a vector"))
}
if (!all(is.finite(timescales)))
{
stop(paste0("Error in errcheck_tts called by ",callfunc,": timescales must not contain NAs, NaNs, Infs"))
}
if (any(timescales<=0))
{
stop(paste0("Error in errcheck_tts called by ",callfunc,": timescales must be positive"))
}
  #TODO: decide whether timescales should additionally be required to be increasing
  #or decreasing, and add that check here
#values a numeric or complex-valued matrix, length(times) by length(timescales)
if (!is.numeric(values) && !is.complex(values))
{
stop(paste0("Error in errcheck_tts called by ",callfunc,": values must be numeric or complex"))
}
if (!is.matrix(values))
{
stop(paste0("Error in errcheck_tts called by ",callfunc,": values must be a matrix"))
}
if (dim(values)[1]!=length(times) || dim(values)[2]!=length(timescales))
{
stop(paste0("Error in errcheck_tts called by ",callfunc,": dimensions of values should agree with lengths of times and timescales"))
}
}
#' Error check wavelet transform parameters
#'
#' Error check the parameters \code{scale.min}, \code{scale.max.input}, \code{sigma}, \code{f0}
#'
#' @param scale.min The smallest scale of fluctuation that will be examined. At least 2.
#' @param scale.max.input The largest scale of fluctuation that is guaranteed to be examined
#' @param sigma The ratio of each time scale examined relative to the next timescale. Should be greater than 1.
#' @param f0 The ratio of the period of fluctuation to the width of the envelope. Defaults to 1.
#' @param times The times data were measured at, spacing 1
#' @param callfunc Function calling this one, for better error messaging
#'
#' @return \code{errcheck_wavparam} returns nothing but throws an error if the conditions are not met
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
errcheck_wavparam<-function(scale.min,scale.max.input,sigma,f0,times,callfunc)
{
if (!(is.numeric(scale.min) && is.numeric(sigma) && is.numeric(f0)))
{
stop(paste0("Error in errcheck_wavparams called by ",callfunc,": non-numeric scale.min, sigma, or f0"))
}
if (!(is.numeric(scale.max.input) || is.null(scale.max.input)))
{
stop(paste0("Error in errcheck_wavparams called by ",callfunc,": scale.max.input must be numeric or NULL"))
}
if (!(length(scale.min)==1 && length(sigma)==1 && length(f0)==1))
{
stop(paste0("Error in errcheck_wavparams called by ",callfunc,": scale.min, sigma, and f0 must have length 1"))
}
if (!(length(scale.max.input) %in% c(0,1)))
{
stop(paste0("Error in errcheck_wavparams called by ",callfunc,": scale.max.input must be NULL or of length 1"))
}
if (scale.min<2)
{
stop(paste0("Error in errcheck_wavparams called by ",callfunc,": scale.min must be at least 2"))
}
if (sigma<=1)
{
stop(paste0("Error in errcheck_wavparams called by ",callfunc,": sigma must be greater than 1"))
}
if (is.null(scale.max.input)){scale.max.input<-length(times)}
m.max<-floor(log(scale.max.input/scale.min)/log(sigma))+1
if (m.max<5)
{
stop(paste0("Error in errcheck_wavparams called by ",callfunc,": your wavelet parameters indicate you only have ",m.max," timescales, that is not very many"))
}
}
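
#A worked sketch of the timescale count checked above (hypothetical values):
#with scale.min=2, scale.max.input=64, and sigma=1.05,
#m.max = floor(log(64/2)/log(1.05))+1 = 71+1 = 72 timescales,
#comfortably passing the m.max>=5 requirement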
#' Fast algorithm for significance testing coherence using Fourier surrogates
#'
#' This is the algorithm of Sheppard et al. (2017) (see references).
#'
#' @param dat1 A locations (rows) x time (columns) matrix (for spatial coherence), or a single time series
#' @param dat2 Same format as \code{dat1}, same locations and times
#' @param scale.min The smallest scale of fluctuation that will be examined. At least 2.
#' @param scale.max.input The largest scale of fluctuation guaranteed to be examined
#' @param sigma The ratio of each time scale examined relative to the next timescale. Should be greater than 1.
#' @param f0 The ratio of the period of fluctuation to the width of the envelope
#' @param nrand Number of surrogate randomizations to use for significance testing
#' @param randnums A vector of independent random numbers uniformly distributed on (0,1).
#' There must be \code{nrand*floor((dim(dat1)[2]-1)/2)} of these.
#' @param randbits A vector of random bits (0 or 1). There must be \code{nrand} of these if time
#' series are of odd length and \code{2*nrand} if even length. You may pass more than this, so,
#' in particular, you may pass \code{2*nrand} for even or odd length.
#' @param norm The normalization of wavelet transforms to use. Controls the version of the
#' coherence that is performed. One of "none", "powall", "powind". See details in
#' the documentation of \code{coh}.
#'
#' @return \code{fastcohtest} returns a list with these elements:
#' \item{timescales}{The timescales used}
#' \item{coher}{The magnitude of this is the fast-algorithm version of the coherence between
#' the two datasets, for comparison with \code{scoher}}
#' \item{scoher}{A matrix with \code{nrand} rows, the magnitude of each one is the
#' fast-algorithm version of the coherence for a surrogate}
#'
#' @author Lawrence Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @note Internal function, minimal error checking.
#'
#' @references
#' Sheppard, L.W., et al. (2017) Rapid surrogate testing of wavelet coherences. European Physical
#' Journal, Nonlinear and Biomedical Physics, 5, 1. DOI: 10.1051/epjnbp/2017000
#'
#' @importFrom stats fft
fastcohtest<-function(dat1,dat2,scale.min,scale.max.input,sigma,f0,nrand,randnums,randbits,norm)
{
#deal with vector datasets
if (!is.matrix(dat1))
{
dat1<-matrix(dat1,1,length(dat1))
dat2<-matrix(dat2,1,length(dat2))
}
#setup - parallels wt.R
n<-nrow(dat1)
tt<-ncol(dat1)
if(is.null(scale.max.input)){scale.max<-tt}else{scale.max<-scale.max.input}
scale.min <- f0*scale.min
scale.max <- f0*scale.max
m.max.wt <- floor(log(scale.max/scale.min)/log(sigma))+1 #number of timescales minus 1
s2 <- scale.min*sigma^seq(from=0, by=1, to=m.max.wt) #widths of wavelet envelopes
margin2 <- ceiling(sqrt(-(2*s2*s2)*log(0.5)))
m.last <- max(which(margin2<0.5*tt))
if (is.null(scale.max.input))
{
s2<-s2[1:m.last]
}
  m.max<-length(s2) #now it's the number of timescales; differs by one from how m.max is used in wt.R
if (length(randnums)!=nrand*floor((tt-1)/2))
{
stop("Error in fastcohtest: wrong length of randnums")
}
if (any(randnums<0 | randnums>1))
{
stop("Error in fastcohtest: randnums must be between 0 and 1")
}
if (length(randbits)<nrand)
{
stop("Error in fastcohtest: randbits not long enough")
}
if ((length(randbits)<2*nrand) && (dim(dat1)[2] %% 2 == 0))
{
stop("Error in fastcohtest: randbits not long enough")
}
if (!(all(randbits %in% c(0,1))))
{
stop("Error in fastcohtest: randbits can only contain 0s and 1s")
}
#Generate random phases for surrogates with correct symmetry properties
rrr<-2*pi*(matrix(randnums,nrow=nrand,ncol=floor((tt-1)/2))-0.5)
if(tt%%2==0)
{ # timeseries has even length
ts1surrang<-cbind(pi*(randbits[1:nrand]),rrr,
pi*(randbits[(nrand+1):(2*nrand)]),-rrr[,ncol(rrr):1])
}
if(tt%%2!=0)
{ # timeseries has odd length
ts1surrang<-cbind(pi*(randbits[1:nrand]),rrr,-rrr[,ncol(rrr):1])
}
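  #the phase offsets above have the conjugate symmetry of the DFT of a real series
  #(offsets at frequencies k and tt-k are negatives; DC/Nyquist offsets are 0 or pi),
  #so the phase-randomized surrogates remain real-valued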
if ((norm %in% c("powall","none")) || (norm=="powind" && n==1))
{
if(n==1)
{ # One location wavelet coherence - in this case, powind is the same as powall
fft1<-stats::fft(dat1) #fft signals 1 and 2
fft2<-stats::fft(dat2)
xfft<-fft1*Conj(fft2) #get cross-spectrum and spectra
xfft1<-fft1*Conj(fft1)
xfft2<-fft2*Conj(fft2)
freqs<-seq(from=0, to=1-(1/tt), by=1/tt)
filt.crosspec<-matrix(NA, nrow=m.max, ncol=tt) #initialize
filt.pow1<-matrix(NA, nrow=m.max, ncol=tt)
filt.pow2<-matrix(NA, nrow=m.max, ncol=tt)
for(stage in 1:m.max)
{
s<-s2[stage]
#find coherence by filtering cross-spectrum
xx<-sqrt(2*pi*s)*(exp(-s^2*(2*pi*(freqs-(1-(f0/s))))^2/2) - exp(-s^2*(2*pi*freqs)^2/2)*exp(-0.5*(2*pi*f0)^2))
m2xx<-xx*Conj(xx)/tt
filt.crosspec[stage,]<-m2xx*xfft
filt.pow1[stage,]<-m2xx*xfft1
filt.pow2[stage,]<-m2xx*xfft2
}
altpow1<-rowMeans(filt.pow1)
altpow2<-rowMeans(filt.pow2)
altcoh<-rowMeans(filt.crosspec)
if (norm %in% c("powall","powind")){altcoh.norm<-altcoh/sqrt(altpow1*altpow2)}
if (norm=="none"){altcoh.norm<-altcoh}
surrcoh<-matrix(NA, nrow=nrand, ncol=m.max)
for(rep in 1:nrand)
{
ts1surrangmat<-matrix(ts1surrang[rep,], nrow=m.max, ncol=tt, byrow=T) #make surrogates
filt.crosspec.surr<-filt.crosspec*exp(complex(imaginary=ts1surrangmat))
surrcoh[rep,]<-rowMeans(filt.crosspec.surr)
}
if (norm %in% c("powall","powind")){surrcoh.norm<-surrcoh/matrix(rep(sqrt(altpow1*altpow2),each=nrow(surrcoh)),nrow(surrcoh),ncol(surrcoh))}
if (norm=="none"){surrcoh.norm<-surrcoh}
}
## Spatial coherence (multiple locations) - done separately from n=1 for speed reasons
if(n>1)
{
fft1<-t(apply(FUN=stats::fft,MARGIN=1,X=dat1)) #fft signals 1 and 2
fft2<-t(apply(FUN=stats::fft,MARGIN=1,X=dat2))
xfft<-fft1*Conj(fft2) #get cross-spectra and spectra
xfft1<-fft1*Conj(fft1)
xfft2<-fft2*Conj(fft2)
sxfft<-apply(xfft, 2, mean) #average cross-spectra across locations
sxfft1<-apply(xfft1, 2, mean)
sxfft2<-apply(xfft2, 2, mean)
freqs<-seq(from=0, to=1-(1/tt), by=1/tt)
filt.crosspec<-matrix(NA, nrow=m.max, ncol=tt) #initialize
filt.pow1<-matrix(NA, nrow=m.max, ncol=tt)
filt.pow2<-matrix(NA, nrow=m.max, ncol=tt)
for(stage in 1:m.max)
{
s<-s2[stage]
#find coherence by filtering cross-spectrum
xx<-sqrt(2*pi*s)*(exp(-s^2*(2*pi*(freqs-(1-(f0/s))))^2/2) - exp(-s^2*(2*pi*freqs)^2/2)*exp(-0.5*(2*pi*f0)^2))
m2xx<-xx*Conj(xx)/tt
filt.crosspec[stage,]<-m2xx*sxfft
filt.pow1[stage,]<-m2xx*sxfft1
filt.pow2[stage,]<-m2xx*sxfft2
}
altpow1<-rowMeans(filt.pow1)
altpow2<-rowMeans(filt.pow2)
altcoh<-rowMeans(filt.crosspec)
if (norm=="powall"){altcoh.norm<-altcoh/sqrt(altpow1*altpow2)}
if (norm=="none"){altcoh.norm<-altcoh}
surrcoh<-matrix(NA, nrow=nrand, ncol=m.max)
for(rep in 1:nrand)
{
ts1surrangmat<-matrix(ts1surrang[rep,], nrow=m.max, ncol=tt, byrow=T) #make surrogates
filt.crosspec.surr<-filt.crosspec*exp(complex(imaginary=ts1surrangmat))
surrcoh[rep,]<-rowMeans(filt.crosspec.surr)
}
if (norm=="powall"){surrcoh.norm<-surrcoh/matrix(rep(sqrt(altpow1*altpow2),each=nrow(surrcoh)),nrow(surrcoh),ncol(surrcoh))}
if (norm=="none"){surrcoh.norm<-surrcoh}
}
res<-list(timescales=s2/f0,coher=altcoh.norm,scoher=surrcoh.norm)
}
if (norm=="powind" && n>1)
{
fft1<-t(apply(FUN=stats::fft,MARGIN=1,X=dat1)) #fft signals 1 and 2
fft2<-t(apply(FUN=stats::fft,MARGIN=1,X=dat2))
xfft1<-fft1*Conj(fft1) #get spectra
xfft2<-fft2*Conj(fft2)
freqs<-seq(from=0, to=1-(1/tt), by=1/tt)
filt.crosspec<-matrix(NA, nrow=m.max, ncol=tt) #initialize
altcoh.norm<-NA*numeric(m.max)
for(stage in 1:m.max)
{
#filter the ffts
s<-s2[stage]
xx<-sqrt(2*pi*s)*(exp(-s^2*(2*pi*(freqs-(1-(f0/s))))^2/2) - exp(-s^2*(2*pi*freqs)^2/2)*exp(-0.5*(2*pi*f0)^2))
xxn<-matrix(xx,n,tt,byrow=TRUE)
filtxfft1<-xxn*Conj(xxn)*xfft1/tt
filtxfft2<-xxn*Conj(xxn)*xfft2/tt
#wavelet power for each n
powfiltxfft1<-rowMeans(filtxfft1)
powfiltxfft2<-rowMeans(filtxfft2)
#normalize ffts
nfft1<-fft1/sqrt(matrix(powfiltxfft1,n,tt))
nfft2<-fft2/sqrt(matrix(powfiltxfft2,n,tt))
#cross spectrum with the normalization
nxfft<-nfft1*Conj(nfft2)
sxfft<-colMeans(nxfft) #1 by tt object, average cross spectrum appropriate to this scale, normalization incorporated
filt.crosspec[stage,]<-xx*Conj(xx)*sxfft/tt
}
#normalized coherence for this stage
altcoh.norm<-rowMeans(filt.crosspec)
surrcoh.norm<-matrix(NA, nrow=nrand, ncol=m.max)
for(rep in 1:nrand)
{
ts1surrangmat<-matrix(ts1surrang[rep,], nrow=m.max, ncol=tt, byrow=T) #make surrogates
filt.crosspec.surr<-filt.crosspec*exp(complex(imaginary=ts1surrangmat))
surrcoh.norm[rep,]<-rowMeans(filt.crosspec.surr)
}
res<-list(timescales=s2/f0,coher=altcoh.norm,scoher=surrcoh.norm)
}
#The algorithm above was developed by Lawrence Sheppard, using a Lancaster
#convention for phase that is opposite the ordinary convention. So switch
#phase, for phase consistency with coherence calculated in the usual way
res$coher<-Conj(res$coher)
res$scoher<-Conj(res$scoher)
return(res)
}
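
#A minimal internal-usage sketch (hypothetical inputs, mirroring how coh calls
#this function when sigmethod="fast"); not run:
#dat1<-matrix(rnorm(300),3,100); dat1<-dat1-rowMeans(dat1)
#dat2<-matrix(rnorm(300),3,100); dat2<-dat2-rowMeans(dat2)
#nrand<-10
#randnums<-runif(nrand*floor((ncol(dat1)-1)/2))
#randbits<-sample.int(2,2*nrand,replace=TRUE)-1 #2*nrand works for odd or even length
#h<-fastcohtest(dat1,dat2,2,NULL,1.05,1,nrand,randnums,randbits,"powall")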
#' Surrogate time series using Fourier surrogates
#'
#' Creates surrogate time series using Fourier surrogates
#'
#' @param dat A locations x time matrix of observations
#' @param nsurrogs The number of surrogates to produce
#' @param syncpres Logical. TRUE for "synchrony preserving" surrogates (same phase randomizations used for all
#' time series). FALSE leads to independent phase randomizations for all time series.
#'
#' @return \code{fftsurrog} returns a list of nsurrogs surrogate datasets
#'
#' @author Jonathan Walter, \email{jaw3es@@virginia.edu}; Lawrence Sheppard, \email{lwsheppard@@ku.edu};
#' Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Sheppard, LW, et al. (2016) Changes in large-scale climate alter spatial synchrony of aphid pests. Nature Climate Change. DOI: 10.1038/nclimate2881
#'
#' Schreiber, T and Schmitz, A (2000) Surrogate time series. Physica D 142, 346-382.
#'
#' Prichard, D and Theiler, J (1994) Generating surrogate data for time series with several simultaneously measured variables. Physical Review Letters 73, 951-954.
#'
#' @note For internal use, no error checking
#'
#' @importFrom stats fft rnorm
fftsurrog<-function(dat,nsurrogs,syncpres)
{
#get ffts of all time series
fftdat<-matrix(complex(real=NA, imaginary=NA), nrow=nrow(dat), ncol=ncol(dat))
for(row in 1:nrow(dat))
{
fftdat[row,]<-stats::fft(dat[row,])
}
fftmod<-Mod(fftdat)
fftarg<-Arg(fftdat)
#now get random phases for each desired surrogate and
#inverse transform to get the surrogates
res<-list()
for(n in 1:nsurrogs)
{
# get and apply random phases
if (syncpres)
{
      #synchrony preserving surrogates only need one set of phase perturbations, used for all time series
h<-Arg(stats::fft(stats::rnorm(ncol(dat))))
randomizedphases<-(matrix(rep(h, times=nrow(dat)), nrow(dat), ncol(dat), byrow=TRUE)+fftarg) %% (2*pi)
}else
{
#need separate independent phase perturbations for each time series
h<-matrix(stats::rnorm(ncol(dat)*nrow(dat)),nrow(dat),ncol(dat))
randomizedphases<-(fftarg+t(apply(X=h,MARGIN=1,FUN=function(x){Arg(stats::fft(x))}))) %% (2*pi)
}
fftsurrog<-matrix(complex(modulus=fftmod, argument=randomizedphases),nrow(dat), ncol(dat))
# inverse transform
invmat<-matrix(NA, nrow(dat), ncol(dat))
for(i in 1:nrow(dat)){
invmat[i,]<-stats::fft(fftsurrog[i,], inverse=T)/(ncol(fftsurrog))
}
res[[n]]<-Re(invmat)
}
return(res)
}
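
#A minimal usage sketch (hypothetical data); not run:
#dat<-matrix(rnorm(500),5,100)
#dat<-dat-rowMeans(dat) #zero-mean rows, as assumed elsewhere in the package
#surrs<-fftsurrog(dat,nsurrogs=3,syncpres=TRUE)
#length(surrs) #a list of 3 surrogate datasets, each 5 by 100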
#' Tests if a graph is connected
#'
#' Tests if a graph represented by an adjacency matrix is connected.
#'
#' @param adj An adjacency matrix. Must be a numeric matrix with non-negative entries.
#'
#' @return \code{is.connected} returns \code{TRUE} or \code{FALSE} depending on whether
#' the graph represented in \code{adj} is a connected graph.
#'
#' @details Idea by Ed Scheinerman, circa 2006.
#' Source: http://www.ams.jhu.edu/~ers/matgraph/; routine:
#' matgraph/@graph/isconnected.m
#'
#' @author Lei Zhao, \email{lei.zhao@@cau.edu.cn}
#'
#' @seealso \code{\link{cluseigen}}, \code{\link{clust}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' g1<-matrix(c(0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,0),4,4)
#' is.connected(g1)
#' g2<-matrix(c(0,1,0,0,1,0,0,0,0,0,0,1,0,0,1,0),4,4)
#' is.connected(g2)
#'
#' @export
is.connected<-function(adj)
{
#error checking
if (!is.numeric(adj))
{
stop("Error in is.connected: input must be a numeric matrix")
}
if (!is.matrix(adj))
{
stop("Error in is.connected: input must be a numeric matrix")
}
if (dim(adj)[1]!=dim(adj)[2])
{
stop("Error in is.connected: input must be a square matrix")
}
if (any(adj<0))
{
stop("Error in is.connected: input matrix cannot have negative entries")
}
#screen for 1x1 matrices, which are automatically connected
if (isTRUE(all.equal(dim(adj),c(1,1))))
{
return(TRUE)
}
#now do the algorithm
if(length(which(colSums(adj)==0))>0)
{ #check for isolated nodes
return(FALSE)
}else
{
x<-c(1,rep(0, nrow(adj)-1))
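    #x flags nodes currently known to be linked to node 1; each iteration
    #expands this set along edges until it stops growing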
while(1)
{ #evolve x until a steady state
y<-x
x<-adj%*%x + x
x1<-rep(0,length(x))
x1[x>0]<-1
x<-x1
if(all(x==y)){break}
}
if(sum(x)<length(x))
{
return(FALSE)
}else
{
return(TRUE)
}
}
}
#' For converting certain synchrony matrices to unweighted versions
#'
#' Convenience function for converting certain synchrony matrices to unweighted versions
#'
#' @param mat A synchrony matrix based on significance testing
#' @param sigthresh Significance threshold to use
#'
#' @return \code{makeunweighted} converts to an unweighted version of the input. Entries of
#' \code{mat} less than \code{sigthresh} become a 1, other entries become a 0. The diagonal
#' is \code{NA}.
#'
#' @author Lei Zhao, \email{lei.zhao@@cau.edu.cn}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @note Internal function, no error checking
makeunweighted<-function(mat,sigthresh)
{
nlocs<-dim(mat)[1]
newmat<-matrix(0,nlocs,nlocs)
diag(mat)<-0
newmat[mat<sigthresh]<-1
newmat[mat>=sigthresh]<-0
diag(newmat)<-NA
return(newmat)
}
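
#A minimal sketch (hypothetical 3 by 3 matrix of p-values); not run:
#m<-matrix(c(NA,0.01,0.2,0.01,NA,0.6,0.2,0.6,NA),3,3)
#makeunweighted(m,0.05) #off-diagonal entries become 1 where p<0.05, else 0; diagonal is NA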
#' Mean phase of coherence
#'
#' Gets the mean phase of a bunch of complex numbers
#'
#' @param nums A vector of complex numbers
#'
#' @return \code{mnphase} returns the mean phase
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @note Internal function, no error catching
mnphase<-function(nums)
{
mns<-mean(nums/Mod(nums))
res<-Arg(mns)
res[mns==0]<-NA
return(res)
}
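
#A minimal sketch (hypothetical values); not run:
#mnphase(complex(modulus=c(1,2,4),argument=c(0.2,0.3,0.4))) #gives 0.3: moduli are
#discarded, and by symmetry the mean phase of 0.2, 0.3, 0.4 radians is 0.3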
#' Modularity of a community structure of a graph
#'
#' Computes the modularity of partitioning of a graph into sub-graphs. Similar to the
#' \code{modularity} function in the \code{igraph} package, but allows negative
#' edge weights.
#'
#' @param adj An adjacency matrix, which should be symmetric with zeros on the diagonal.
#' @param membership Vector of length equal to the number of graph nodes (columns/rows
#' of \code{adj}) indicating the cluster/sub-graph each nodes belongs to.
#' @param decomp Logical. If \code{TRUE}, calculate the decomposition of modularity
#' by modules and nodes. Default \code{FALSE}.
#'
#' @return \code{modularity} returns a list containing the following:
#' \item{totQ}{The total modularity. This is the only output if \code{decomp=FALSE}}
#' \item{modQ}{The contribution of each module to the total modularity}
#' \item{nodeQ}{The contribution of each node to the total modularity}
#'
#' @details The difference between this function and the function \code{modularity}
#' in the package \code{igraph} is that this function can be used with an adjacency
#' matrix with negative elements. This is a common case for matrices arrising from a
#' for correlation matrix or another synchrony matrix. If the matrix is non-negative,
#' the result of this function should be exactly the same as the result from
#' \code{modularity} in the \code{igraph} package.
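#'
#' As an illustrative restatement of the computation (following Gomez et al. 2009), the
#' total modularity is
#' \eqn{Q=\frac{1}{2(m^{+}+m^{-})}\sum_{ij}[A_{ij}-(\frac{k_{i}^{+}k_{j}^{+}}{2m^{+}}-\frac{k_{i}^{-}k_{j}^{-}}{2m^{-}})]\delta(c_{i},c_{j})},
#' where \eqn{+} and \eqn{-} superscripts denote quantities computed from the positive and
#' negative parts of \code{adj}, \eqn{k_{i}} is the strength of node \eqn{i}, \eqn{m} is
#' total edge weight, and \eqn{\delta(c_{i},c_{j})} is 1 when nodes \eqn{i} and \eqn{j}
#' share a module and 0 otherwise.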
#'
#' @note Adapted from code developed by Robert J. Fletcher, Jr.
#'
#' @author Jonathan Walter, \email{jonathan.walter@@ku.edu}; Lei Zhao,
#' \email{lei.zhao@@cau.edu.cn}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Fletcher Jr., R.J., et al. (2013) Network modularity reveals critical scales
#' for connectivity in ecology and evolution. Nature Communications. doi: 10.1038/ncomms3572.
#'
#' Gomez S., Jensen P. & Arenas A. (2009). Analysis of community structure in networks
#' of correlated data. Phys Rev E, 80, 016114.
#'
#' Newman M.E. (2006). Finding community structure in networks using the eigenvectors
#' of matrices. Phys Rev E, 74, 036104.
#'
#' @seealso \code{\link{clust}}, \code{\link{cluseigen}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' adj<-matrix(0, 10, 10) # create a fake adjacency matrix
#' adj[lower.tri(adj)]<-runif(10*9/2, -1, 1)
#' adj<-adj+t(adj)
#' colnames(adj)<-letters[1:10]
#' m<-cluseigen(adj)
#' z<-modularity(adj, m[[length(m)]], decomp=TRUE)
#'
#' @export
modularity<-function(adj,membership,decomp=FALSE)
{
#error checking
if (!is.numeric(adj))
{
stop("Error in modularity: adj must be a numeric matrix")
}
if (!is.matrix(adj))
{
stop("Error in modularity: adj must be a numeric matrix")
}
if (dim(adj)[1]!=dim(adj)[2])
{
stop("Error in modularity: adj must be a square matrix")
}
if (dim(adj)[1]<2)
{
stop("Error in modularity: adj must have dimensions at least 2")
}
if(!isSymmetric(unname(adj)))
{
stop("Error in modularity: adj must be symmetric")
}
if(any(diag(adj)!=0))
{
stop("Error in modularity: diagonal of adj must contain only zeros")
}
if (!is.numeric(membership))
{
stop("Error in modularity: membership must be a numeric vector")
}
if (length(membership)!=dim(adj)[1])
{
stop("Error in modularity: membership must have length equal to the dimension of adj")
}
if (any(diff(sort(unique(membership)))!=1))
{
stop("Error in modularity: entries of membership must be the first n whole numbers")
}
#the algorithm
n<-nrow(adj)
A0<-adj
k<-colSums(A0)
m<-sum(k)/2
n.m<-length(unique(membership))
delta<-matrix(0, n, n)
for(i in 1:n.m){
tmp<-which(membership==i)
delta[tmp,tmp]<-1
}
A0.pos<-A0; A0.pos[A0.pos<0]=0
A0.neg<-A0; A0.neg[A0.neg>0]=0
A0.neg<-(-A0.neg)
k.pos<-colSums(A0.pos)
m.pos<-sum(k.pos)/2
k.neg<-colSums(A0.neg)
m.neg<-sum(k.neg)/2
if(m.pos==0){x1<-0 }else{ x1<-k.pos%o%k.pos/2/m.pos}
if(m.neg==0){x2<-0 }else{ x2<-k.neg%o%k.neg/2/m.neg}
Q<-(A0-x1+x2)*delta
if(decomp==F)
{
return(sum(Q)/2/(m.pos+m.neg))
}else
{
Q.decomp.mod<-rep(NA, n.m)
for(i in 1:n.m){
tmp<-which(membership==i)
Q.decomp.mod[i]<-sum(Q[tmp,tmp])/2/(m.pos+m.neg)
}
Q.decomp.node<-(rowSums(Q)+colSums(Q))/4/(m.pos+m.neg)
#Q.decomp.node.rescale<-(Q.decomp.node-min(Q.decomp.node))/diff(range(Q.decomp.node))
return(list(totQ=sum(Q)/2/(m.pos+m.neg), modQ=Q.decomp.mod,
nodeQ=Q.decomp.node))#, nodeQrs=Q.decomp.node.rescale))
}
}
#' Normalization for the \code{coh} function
#'
#' A convenience function for performing the normalization step for the \code{coh} function.
#'
#' @param W An array of wavelet transforms, locations by times by timescales
#' @param norm The normalization of wavelet transforms to use. Controls the version of the coherence that is performed. One of "none", "phase", "powall", "powind". See details section of the documentation for \code{coh}.
#'
#' @return \code{normforcoh} returns an array the same dimensions as W of normalized transforms
#'
#' @note Internal function, no error checking
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
normforcoh<-function(W,norm)
{
if (norm=="none")
{
return(W)
}
if (norm=="phase")
{
return(W/Mod(W))
}
if (norm=="powall")
{
normdenom<-sqrt(apply(X=(Mod(W))^2,MARGIN=3,FUN=mean,na.rm=T))
for (i in 1:dim(W)[3])
{
W[,,i]<-W[,,i]/normdenom[i]
}
return(W)
}
if (norm=="powind")
{
normdenom<-sqrt(apply(X=(Mod(W))^2,MARGIN=c(1,3),FUN=mean,na.rm=T))
for (i in 1:dim(W)[2])
{
W[,i,]<-W[,i,]/normdenom
}
return(W)
}
}
#' For plotting the magnitude of values in \code{tts}, \code{coh} and \code{wlmtest} objects
#'
#' For plotting the magnitude of values in \code{tts} objects (and derived classes)
#' against time and timescale, and \code{coh} and \code{wlmtest} objects against timescale
#'
#' @param object An object of class \code{tts} or some class that inherits from \code{tts} or
#' of class \code{coh} or \code{wlmtest}
#' @param zlims z axis limits. If specified, must encompass the range of
#' \code{Mod(get_values(object))}. Default NULL uses this range.
#' @param neat Logical. Should timescales with no values be trimmed?
#' @param colorfill Color spectrum to use, set through colorRampPalette. Default value NULL
#' produces jet colors from Matlab.
#' @param sigthresh Significance threshold(s). Numeric vector with values between 0 and 1.
#' Typically 0.95, 0.99, 0.999, etc. For \code{wpmf} objects, contours are plotted at these
#' values; for \code{coh} and \code{wlmtest} objects the threshholds are plotted on coherence
#' plots.
#' @param colorbar Logical. Should a colorbar legend be plotted?
#' @param title Title for the top of the plot.
#' @param filename Filename (without extension), for saving as pdf. Default value NA saves no
#' file and uses the default graphics device.
#' @param bandprows The rows of \code{object$bandp} for which to display results in \code{coh} plots
#' @param ... Additional graphics parameters passed to \code{image} (\code{graphics} package)
#' if \code{colorbar==FALSE}, or to \code{image.plot} (\code{fields} package) if
#' \code{colorbar==TRUE} (for \code{tts} objects)
#'
#' @details For \code{coh} (respectively, \code{wlmtest}) objects, the modulus of
#' object$coher (respectively, object$wlmobj$coher) is plotted using a solid red line, and
#' the modulus of object$signif$coher is plotted using a dashed red line. The two coherences
#' agree except for \code{sigmethod="fast"}, for which they are close. The dashed line is what
#' should be compared to the distribution of surrogate coherences (black lines, which only appear
#' for \code{coh} objects if \code{signif} is not \code{NA}). Horizontal axis ticks are labeled
#' as timescales, but are spaced on the axis as log(1/timescale), i.e., log frequencies.
#'
#' @author Thomas Anderson, \email{anderstl@@gmail.com}, Jon Walter, \email{jaw3es@@virginia.edu}; Lawrence
#' Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Sheppard, L.W., et al. (2016) Changes in large-scale climate alter spatial synchrony of aphid
#' pests. Nature Climate Change. DOI: 10.1038/nclimate2881
#'
#' Sheppard, LW et al. (2019) Synchrony is more than its top-down and climatic parts: interacting
#' Moran effects on phytoplankton in British seas. Plos Computational Biology 15, e1006744. doi: 10.1371/journal.pcbi.1006744
#'
#' @seealso \code{\link{tts}}, \code{\link{wt}}, \code{\link{wmf}}, \code{\link{wpmf}}, \code{\link{coh}},
#' \code{\link{wlmtest}}, \code{\link{plotphase}}, \code{\link{bandtest}}, \code{\link{plotrank}},
#' \code{browseVignettes("wsyn")}
#'
#' @examples
#' #For a wt object
#' time1<-1:100
#' time2<-101:200
#' ts1p1<-sin(2*pi*time1/15)
#' ts1p2<-0*time1
#' ts2p1<-0*time2
#' ts2p2<-sin(2*pi*time2/8)
#' ts1<-ts1p1+ts1p2
#' ts2<-ts2p1+ts2p2
#' ts<-c(ts1,ts2)
#' ra<-rnorm(200,mean=0,sd=0.5)
#' t.series<-ts+ra
#' t.series<-t.series-mean(t.series)
#' times<-c(time1,time2)
#' res<-wt(t.series, times)
#' plotmag(res)
#'
#' #For a wmf object
#' x1<-0:50
#' x2<-51:100
#' x<-c(x1,x2)
#' ts1<-c(sin(2*pi*x1/10),sin(2*pi*x2/5))+1.1
#' dat<-matrix(NA,11,length(x))
#' for (counter in 1:dim(dat)[1])
#' {
#' ts2<-3*sin(2*pi*x/3+2*pi*runif(1))+3.1
#' ts3<-rnorm(length(x),0,1.5)
#' dat[counter,]<-ts1+ts2+ts3
#' dat[counter,]<-dat[counter,]-mean(dat[counter,])
#' }
#' times<-x
#' res<-wmf(dat,times)
#' plotmag(res)
#'
#' #similar calls for wpmf, coh, wlm, wlmtest objects
#' #see documentation
#'
#' @export
#' @importFrom fields image.plot
#' @importFrom graphics image axis par plot lines text
#' @importFrom grDevices colorRampPalette pdf dev.off
#' @importFrom stats quantile
plotmag<-function(object,...)
{
UseMethod("plotmag",object)
}
#' @rdname plotmag
#' @export
plotmag.tts<-function(object,zlims=NULL,neat=TRUE,colorfill=NULL,colorbar=TRUE,title=NULL,filename=NA,...)
{
wav<-Mod(get_values(object))
times<-get_times(object)
timescales<-get_timescales(object)
if(is.null(zlims)){
zlims<-range(wav,na.rm=T)
}else
{
rg<-range(wav,na.rm=T)
if (rg[1]<zlims[1] || rg[2]>zlims[2])
{
stop("Error in plotmag.tts: zlims must encompass the z axis range of what is being plotted")
}
}
if(neat){
inds<-which(!is.na(colMeans(wav,na.rm=T)))
wav<-wav[,inds]
timescales<-timescales[inds]
}
if(is.null(colorfill)){
jetcolors <- c("#00007F", "blue", "#007FFF", "cyan",
"#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")
colorfill<-grDevices::colorRampPalette(jetcolors)
}
ylocs<-pretty(timescales,n=8)
xlocs<-pretty(times,n=8)
if (!is.na(filename))
{
grDevices::pdf(paste0(filename,".pdf"))
}
if (!colorbar)
{
graphics::image(x=times,y=log2(timescales),z=wav,xlab="Time",zlim=zlims,
ylab="Timescale",axes=F,col=colorfill(100),main=title,...)
graphics::axis(1,at = xlocs,labels=xlocs)
graphics::axis(2,at = log2(ylocs),labels = ylocs)
}else
{
fields::image.plot(x=times,y=log2(timescales),z=wav,xlab="Time",zlim=zlims,
ylab="Timescale",axes=F,col=colorfill(100),main=title,...)
graphics::axis(1,at = xlocs,labels=xlocs)
graphics::axis(2,at = log2(ylocs),labels = ylocs)
}
if (!is.na(filename))
{
grDevices::dev.off()
}
}
#' @rdname plotmag
#' @export
#plotmag.wt just the same as tts, we define it explicitly instead of inheriting for the sake of the help files
plotmag.wt<-function(object,zlims=NULL,neat=TRUE,colorfill=NULL,colorbar=TRUE,title=NULL,filename=NA,...)
{
return(plotmag.tts(object,zlims,neat,colorfill,colorbar,title,filename,...))
}
#' @rdname plotmag
#' @export
#plotmag.wmf just the same as tts, we define it explicitly instead of inheriting for the sake of the help files
plotmag.wmf<-function(object,zlims=NULL,neat=TRUE,colorfill=NULL,colorbar=TRUE,title=NULL,filename=NA,...)
{
return(plotmag.tts(object,zlims,neat,colorfill,colorbar,title,filename,...))
}
#' @rdname plotmag
#' @export
plotmag.wpmf<-function(object,zlims=NULL,neat=TRUE,colorfill=NULL,sigthresh=0.95,colorbar=TRUE,title=NULL,filename=NA,...)
{
wav<-Mod(get_values(object))
times<-get_times(object)
timescales<-get_timescales(object)
signif<-get_signif(object)
if (any(sigthresh>=1 | sigthresh<=0))
{
stop("Error in plotmag.wpmf: inappropriate value for sigthresh")
}
if(is.null(zlims)){
zlims<-range(wav,na.rm=T)
}else
{
rg<-range(wav,na.rm=T)
if (rg[1]<zlims[1] || rg[2]>zlims[2])
{
stop("Error in plotmag.wpmf: zlims must encompass the z axis range of what is being plotted")
}
}
if(neat){
inds<-which(!is.na(colMeans(wav,na.rm=T)))
wav<-wav[,inds]
timescales<-timescales[inds]
if (!identical(signif,NA) && (signif[[1]] %in% c("fft","aaft")))
{
signif[[3]]<-signif[[3]][,inds]
}
}
if(is.null(colorfill)){
jetcolors <- c("#00007F", "blue", "#007FFF", "cyan",
"#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")
colorfill<-grDevices::colorRampPalette(jetcolors)
}
ylocs<-pretty(timescales,n=8)
xlocs<-pretty(times,n=8)
if (!is.na(filename))
{
grDevices::pdf(paste0(filename,".pdf"))
}
if (!colorbar)
{
graphics::image(x=times,y=log2(timescales),z=wav,xlab="Time",zlim=zlims,
ylab="Timescale",axes=F,col=colorfill(100),main=title,...)
graphics::axis(1,at = xlocs,labels=xlocs)
graphics::axis(2,at = log2(ylocs),labels = ylocs)
}else
{
fields::image.plot(x=times,y=log2(timescales),z=wav,xlab="Time",zlim=zlims,
ylab="Timescale",axes=F,col=colorfill(100),main=title,...)
graphics::axis(1,at = xlocs,labels=xlocs)
graphics::axis(2,at = log2(ylocs),labels = ylocs)
}
if (!all(is.na(signif)))
{
graphics::par(new=T)
if (signif[[1]]=="quick")
{
q<-stats::quantile(signif[[2]],sigthresh)
graphics::contour(x=times,y=log2(timescales),z=wav,levels=q,drawlabels=F,lwd=2,
xaxs="i",xaxt="n",yaxt="n",xaxp=c(0,1,5),las = 1,frame=F)
}
if (signif[[1]] %in% c("fft","aaft"))
{
graphics::contour(x=times,y=log2(timescales),z=signif[[3]],levels=sigthresh,
drawlabels=F,lwd=2,xaxs="i",xaxt="n",yaxt="n",xaxp=c(0,1,5),
las = 1,frame=F)
}
}
if (!is.na(filename))
{
grDevices::dev.off()
}
}
#' @rdname plotmag
#' @export
plotmag.coh<-function(object,sigthresh=c(0.95,.99),bandprows="all",filename=NA,...)
{
#extract the needed slots
timescales<-get_timescales(object)
coher<-get_coher(object)
signif<-get_signif(object)
bandp<-get_bandp(object)
#error catch
if (any(sigthresh>=1 | sigthresh<=0))
{
stop("Error in plotmag.coh: inappropriate value for sigthresh")
}
if (!identical(bandprows,"all") && !any(is.na(bandp)))
{
if (!is.numeric(bandprows))
{
stop("Error in plotmag.coh: non-numeric value for bandprows")
}
if (!all(bandprows %in% 1:dim(bandp)[1]))
{
stop("Error in plotmag.coh: bandprows must contain row numbers for bandp")
}
}
if (!is.na(filename))
{
grDevices::pdf(paste0(filename,".pdf"))
}
#if signif is absent, then just plot coher v timescales
if (any(is.na(signif)))
{
plot(log(1/timescales),Mod(coher),type="l",lty="solid",xaxt="n",col="red",
xlab="Timescales",ylab="Coherence")
xlocs<-c(min(timescales),pretty(timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
if (!is.na(filename))
{
grDevices::dev.off()
}
return(NULL)
}
#from here on is if signif is present
#get quantiles for surrogate coherences
qs<-apply(X=Mod(signif$scoher),FUN=stats::quantile,MARGIN=2,prob=sigthresh)
if (length(sigthresh)==1){qs<-matrix(qs,1,length(qs))}
#if bandp is absent, just plot the lines, no p-values
if (any(is.na(bandp)))
{
rg<-range(Mod(coher),Mod(signif$coher),qs,na.rm=T)
plot(log(1/timescales),Mod(coher),type="l",lty="solid",xaxt="n",col="red",
ylim=rg,xlab="Timescales",ylab="Coherence")
xlocs<-c(min(timescales),pretty(timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
lines(log(1/timescales),Mod(signif$coher),type="l",lty="dashed",col="red")
for (counter in 1:dim(qs)[1])
{
lines(log(1/timescales),qs[counter,])
}
if (!is.na(filename))
{
grDevices::dev.off()
}
return(NULL)
}
#from here on is if signif and bandp are both present
rg<-range(Mod(coher),Mod(signif$coher),qs,na.rm=T)
prc<-0.15
drg<-diff(rg)
rg[2]<-rg[2]+dim(bandp)[1]*prc*drg
plot(log(1/timescales),Mod(coher),type="l",lty="solid",xaxt="n",col="red",
ylim=rg,xlab="Timescales",ylab="Coherence")
xlocs<-c(min(timescales),pretty(timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
lines(log(1/timescales),Mod(signif$coher),type="l",lty="dashed",col="red")
for (counter in 1:dim(qs)[1])
{
lines(log(1/timescales),qs[counter,])
}
if (bandprows!="all")
{
bandp<-bandp[bandprows,]
}
for (counter in 1:dim(bandp)[1])
{
b1<-unname(bandp[counter,1])
if (b1<min(timescales)){b1<-min(timescales)}
b2<-unname(bandp[counter,2])
if (b2>max(timescales)){b2<-max(timescales)}
p<-unname(bandp[counter,3])
htl<-rg[2]-(counter-1/4-.1)*prc*drg
wwd<-.07*prc*drg
lines(log(1/c(b1,b2)),c(htl,htl))
lines(log(1/c(b1,b1)),c(htl-wwd,htl+wwd))
lines(log(1/c(b2,b2)),c(htl-wwd,htl+wwd))
htt<-rg[2]-(counter-1.2/2-.1)*prc*drg
text(mean(log(1/c(b1,b2))),htt,paste0("p=",round(p,4)),cex=0.66)
}
if (!is.na(filename))
{
grDevices::dev.off()
}
}
#' @rdname plotmag
#' @export
plotmag.wlmtest<-function(object,sigthresh=c(0.95,.99),bandprows="all",filename=NA,...)
{
#extract the needed slots
timescales<-get_timescales(get_wlmobj(object))
coher<-get_coher(get_wlmobj(object))
signif<-get_signif(object)
bandp<-get_bandp(object)
#error catch
if (any(sigthresh>=1 | sigthresh<=0))
{
stop("Error in plotmag.wlmtest: inappropriate value for sigthresh")
}
if (bandprows!="all" && !any(is.na(bandp)))
{
if (!is.numeric(bandprows))
{
stop("Error in plotmag.wlmtest: non-numeric value for bandprows")
}
if (!all(bandprows %in% 1:dim(bandp)[1]))
{
stop("Error in plotmag.wlmtest: bandprows must contain row numbers for bandp")
}
}
if (!is.na(filename))
{
grDevices::pdf(paste0(filename,".pdf"))
}
#get quantiles for surrogate coherences
qs<-apply(X=Mod(signif$scoher),FUN=stats::quantile,MARGIN=2,prob=sigthresh)
if (length(sigthresh)==1){qs<-matrix(qs,1,length(qs))}
#if bandp is absent, just plot the lines, no p-values
if (any(is.na(bandp)))
{
rg<-range(Mod(coher),Mod(signif$coher),qs,na.rm=T)
plot(log(1/timescales),Mod(coher),type="l",lty="solid",xaxt="n",col="red",
ylim=rg,xlab="Timescales",ylab="Coherence")
xlocs<-c(min(timescales),pretty(timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
lines(log(1/timescales),Mod(signif$coher),type="l",lty="dashed",col="red")
for (counter in 1:dim(qs)[1])
{
lines(log(1/timescales),qs[counter,])
}
if (!is.na(filename))
{
grDevices::dev.off()
}
return(NULL)
}
#from here on is if signif and bandp are both present
rg<-range(Mod(coher),Mod(signif$coher),qs,na.rm=T)
prc<-0.15
drg<-diff(rg)
rg[2]<-rg[2]+dim(bandp)[1]*prc*drg
plot(log(1/timescales),Mod(coher),type="l",lty="solid",xaxt="n",col="red",
ylim=rg,xlab="Timescales",ylab="Coherence")
xlocs<-c(min(timescales),pretty(timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
lines(log(1/timescales),Mod(signif$coher),type="l",lty="dashed",col="red")
for (counter in 1:dim(qs)[1])
{
lines(log(1/timescales),qs[counter,])
}
  if (!identical(bandprows,"all"))
{
bandp<-bandp[bandprows,]
}
for (counter in 1:dim(bandp)[1])
{
b1<-unname(bandp[counter,1])
if (b1<min(timescales)){b1<-min(timescales)}
b2<-unname(bandp[counter,2])
if (b2>max(timescales)){b2<-max(timescales)}
p<-unname(bandp[counter,3])
htl<-rg[2]-(counter-1/4-.1)*prc*drg
wwd<-.07*prc*drg
lines(log(1/c(b1,b2)),c(htl,htl))
lines(log(1/c(b1,b1)),c(htl-wwd,htl+wwd))
lines(log(1/c(b2,b2)),c(htl-wwd,htl+wwd))
htt<-rg[2]-(counter-1.2/2-.1)*prc*drg
text(mean(log(1/c(b1,b2))),htt,paste0("p=",round(p,4)),cex=0.66)
}
if (!is.na(filename))
{
grDevices::dev.off()
}
}
#' @rdname plotmag
#' @export
plotmag.default<-function(object,...)
{
stop("Error in plotmag: method not defined for this class")
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/plotmag.R |
#' Map clusters from a \code{clust} object
#'
#' Produces a map of the locations of sampling for a \code{clust} object, with colors indicating
#' module (cluster) identity.
#' The sizes of nodes (locations) are scaled according to the strength of membership in its module.
#'
#' @param inclust A \code{clust} object, as created with \code{wsyn::clust}
#' @param spltlvl The split level in the clustering to use, i.e., the index into \code{inclust$clusters}.
#' Defaults to the final split.
#' @param nodesize A length-2 vector giving the minimum and maximum node size for plotting. Defaults to c(1,3).
#' @param filename A filename, possibly including path info, but without a file extension. If present,
#' exports the plot as a .pdf using the specified filename. Default \code{NA} uses the default plotting
#' device.
#'
#' @return \code{plotmap} produces a map.
#'
#' @author Jonathan Walter, \email{jaw3es@@virginia.edu}
#'
#' @references Walter, J. A., et al. (2017) The geography of spatial synchrony. Ecology Letters.
#' doi: 10.1111/ele.12782
#'
#' @seealso \code{\link{clust}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' Tmax<-500
#' tim<-1:Tmax
#' ts1<-sin(2*pi*tim/5)
#' ts1s<-sin(2*pi*tim/5+pi/2)
#' ts2<-sin(2*pi*tim/12)
#' ts2s<-sin(2*pi*tim/12+pi/2)
#' gp1A<-1:2
#' gp1B<-3:4
#' gp2A<-5:6
#' gp2B<-7:8
#' d<-matrix(NA,Tmax,8)
#' d[,c(gp1A,gp1B)]<-ts1
#' d[,c(gp2A,gp2B)]<-ts1s
#' d[,c(gp1A,gp2A)]<-d[,c(gp1A,gp2A)]+matrix(ts2,Tmax,4)
#' d[,c(gp1B,gp2B)]<-d[,c(gp1B,gp2B)]+matrix(ts2s,Tmax,4)
#' d<-d+matrix(rnorm(Tmax*8,0,2),Tmax,8)
#' d<-t(d)
#' d<-cleandat(d,1:Tmax,1)$cdat
#' coords<-data.frame(X=c(rep(1,4),rep(2,4)),Y=rep(c(1:2,4:5),times=2))
#' cl5<-clust(dat=d,times=1:Tmax,coords=coords,method="ReXWT",tsrange=c(4,6))
#' plotmap(cl5)
#' cl12<-clust(dat=d,times=1:Tmax,coords=coords,method="ReXWT",tsrange=c(11,13))
#' plotmap(cl12)
#'
#' @export
#' @importFrom graphics legend plot
#' @importFrom grDevices colorRampPalette pdf dev.off
plotmap<-function(inclust, spltlvl=length(inclust$clusters), nodesize=c(1,3), filename=NA)
{
#some checking of validity of inputs
if(!inherits(inclust,"clust"))
{
stop("Error in plotmap: inclust must be a clust object")
}
if(max(inclust$clusters[[spltlvl]])>9){
stop("Error in plotmap: more than 9 modules, plotmap cannot proceed")
}
#convert inclust$coords to common format
  if(all(c("X","Y") %in% names(inclust$coords)))
  {
    coords<-data.frame(X=inclust$coords$X,Y=inclust$coords$Y)
    x_label<-"X"
    y_label<-"Y"
  }else if(all(c("lat","lon") %in% names(inclust$coords)))
  {
    coords<-data.frame(X=inclust$coords$lon,Y=inclust$coords$lat)
    x_label<-"longitude"
    y_label<-"latitude"
  }else if(all(c("latitude","longitude") %in% names(inclust$coords)))
  {
    coords<-data.frame(X=inclust$coords$longitude,Y=inclust$coords$latitude)
    x_label<-"longitude"
    y_label<-"latitude"
  }else
  {
    stop("Error in plotmap: inclust$coords must have columns X and Y, lat and lon, or latitude and longitude")
  }
#make color ramp -- this is Set1 from RColorBrewer
  pal<-c("#E41A1C","#377EB8","#4DAF4A","#984EA3","#FF7F00","#FFFF33","#A65628","#F781BF","#999999")
if(!is.na(filename)){
pdf(paste0(filename,".pdf"))
}
#expand right side margin for the legend
par.mar<-par("mar")
mar.new<-par.mar
mar.new[4]<-6.1
par(mar=mar.new,xpd=T)
membwgt<-inclust$modres[[spltlvl]]$nodeQ-min(inclust$modres[[spltlvl]]$nodeQ)
membwgt<-membwgt/max(membwgt)
nodecex<-membwgt*(nodesize[2]-nodesize[1]) + nodesize[1]
plot(coords[,1], coords[,2], pch=16, cex=nodecex, col=pal[unlist(inclust$clusters[spltlvl])],
xlab=x_label,ylab=y_label)
#add legends
legx<-par('usr')[2] + 0.01*abs(diff(par('usr')[1:2]))
legy1<-par('usr')[4]
leg1<-legend(legx,legy1,legend=paste0("module ",1:max(unlist(inclust$clusters[spltlvl]))), pch=16, col=
pal[1:max(unlist(inclust$clusters[spltlvl]))],title="Membership",bty="n")
legy2<-legy1 - leg1$rect$h
labs<-round(c(min(inclust$modres[[spltlvl]]$nodeQ,na.rm=T),
mean(inclust$modres[[spltlvl]]$nodeQ,na.rm=T),
max(inclust$modres[[spltlvl]]$nodeQ,na.rm=T)),digits=3)
sizes<-c(min(nodecex),mean(nodecex),max(nodecex))
leg2<-legend(legx,legy2,legend=labs,pt.cex=sizes,pch=1,title="Node weight",bty="n")
par(mar=par.mar) #reset 'mar' graphics parameter
if(!is.na(filename)){dev.off()}
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/plotmap.R |
#' For plotting the phases of values in \code{tts} and \code{coh} objects
#'
#' For plotting the phases of values in \code{tts} objects (and derived classes)
#' against time and timescale, and \code{coh} objects against timescale
#'
#' @param object A \code{coh} object.
#' @param bandprows The rows of \code{object$bandp} for which to display p-value results in the plot
#' @param filename Filename (without extension), for saving as pdf. Default value NA saves no file
#' and uses the default graphics device.
#' @param ... Passed from the generic to specific methods. The \code{plotphase.tts} method passes it to
#' \code{fields::image.plot}.
#'
#' @author Thomas Anderson, \email{anderstl@@gmail.com}, Jon Walter, \email{jaw3es@@virginia.edu};
#' Lawrence Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Sheppard, L.W., et al. (2016) Changes in large-scale climate alter spatial synchrony of aphid
#' pests. Nature Climate Change. DOI: 10.1038/nclimate2881
#'
#' @seealso \code{\link{tts}}, \code{\link{wt}}, \code{\link{wmf}}, \code{\link{wpmf}}, \code{\link{coh}},
#' \code{\link{plotmag}}, \code{\link{plotrank}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' #For a tts object
#' times<-1:100
#' timescales<-1:100
#' cplx<-complex(modulus=1,argument=seq(from=-pi,to=pi,length.out=100))
#' values1<-matrix(cplx,length(times),length(timescales))
#' tts1<-tts(times,timescales,values1)
#' plotphase(tts1)
#'
#' #For a coh object
#' times<-(-3:100)
#' ts1<-sin(2*pi*times/10)
#' ts2<-5*sin(2*pi*times/3)
#' artsig_x<-matrix(NA,11,length(times)) #the driver
#' for (counter in 1:11)
#' {
#' artsig_x[counter,]=ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
#' }
#' times<-0:100
#' artsig_y<-matrix(NA,11,length(times)) #the driven
#' for (counter1 in 1:11)
#' {
#' for (counter2 in 1:101)
#' {
#' artsig_y[counter1,counter2]<-mean(artsig_x[counter1,counter2:(counter2+2)])
#' }
#' }
#' artsig_y<-artsig_y+matrix(rnorm(length(times)*11,mean=0,sd=3),11,length(times))
#' artsig_x<-artsig_x[,4:104]
#' artsig_x<-cleandat(artsig_x,times,1)$cdat
#' artsig_y<-cleandat(artsig_y,times,1)$cdat
#' res<-coh(dat1=artsig_x,dat2=artsig_y,times=times,norm="powall",sigmethod="fast",nrand=50,
#' f0=0.5,scale.max.input=28)
#' res<-bandtest(res,c(2,4))
#' res<-bandtest(res,c(4,30))
#' res<-bandtest(res,c(8,12))
#' plotphase(res)
#'
#' @export
#' @importFrom grDevices pdf dev.off
#' @importFrom graphics plot lines text axis
plotphase<-function(object,...)
{
UseMethod("plotphase",object)
}
#' @rdname plotphase
#' @export
plotphase.tts<-function(object,filename=NA,...)
{
zval<-Arg(get_values(object))
times<-get_times(object)
timescales<-get_timescales(object)
ylocs<-pretty(timescales,n=8)
xlocs<-pretty(times,n=8)
colorfill<-grDevices::colorRampPalette(c("black","blue","white","red","black"))
if (!is.na(filename))
{
grDevices::pdf(paste0(filename,".pdf"))
}
fields::image.plot(x=times,y=log2(timescales),z=zval,xlab="Time",zlim=c(-pi,pi),
ylab="Timescale",axes=F,col=colorfill(100),...)
graphics::axis(1,at = xlocs,labels=xlocs)
graphics::axis(2,at = log2(ylocs),labels = ylocs)
if (!is.na(filename))
{
grDevices::dev.off()
}
}
#' @rdname plotphase
#' @export
#plotphase.wt just the same as tts, we define it explicitly instead of inheriting for the sake of the help files
plotphase.wt<-function(object,filename=NA,...)
{
  return(plotphase.tts(object,filename=filename,...))
}
#' @rdname plotphase
#' @export
#plotphase.wmf just the same as tts, we define it explicitly instead of inheriting for the sake of the help files
plotphase.wmf<-function(object,filename=NA,...)
{
  return(plotphase.tts(object,filename=filename,...))
}
#' @rdname plotphase
#' @export
#plotphase.wpmf just the same as tts, we define it explicitly instead of inheriting for the sake of the help files
plotphase.wpmf<-function(object,filename=NA,...)
{
  return(plotphase.tts(object,filename=filename,...))
}
#' @rdname plotphase
#' @export
plotphase.coh<-function(object,bandprows="all",filename=NA,...)
{
#extract the needed slots
timescales<-get_timescales(object)
coher<-get_coher(object)
bandp<-get_bandp(object)
#error catch
if (!identical(bandprows,"all") && !any(is.na(bandp)))
{
if (!is.numeric(bandprows))
{
stop("Error in plotphase.coh: non-numeric value for bandprows")
}
if (!all(bandprows %in% 1:dim(bandp)[1]))
{
stop("Error in plotphase.coh: bandprows must contain row numbers for bandp")
}
}
if (!is.na(filename))
{
grDevices::pdf(paste0(filename,".pdf"))
}
#if bandp is absent, just plot the lines, no p-values
if (any(is.na(bandp)))
{
plot(log(1/timescales),Arg(coher),type="p",xaxt="n",
ylim=c(-pi,pi),xlab="Timescales",ylab="Phase")
xlocs<-c(min(timescales),pretty(timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
if (!is.na(filename))
{
grDevices::dev.off()
}
return(NULL)
}
#from here on is if bandp is present
#get a new range to leave space for the p-values
rg<-c(-pi,pi)
prc<-0.15
drg<-diff(rg)
rg[2]<-rg[2]+dim(bandp)[1]*prc*drg
#make the main plot
x<-log(1/timescales)
plot(x,Arg(coher),type="p",xaxt="n",yaxt="n",
ylim=rg,xlab="Timescales",ylab="Phase",pch=20,cex=.6)
lines(range(x),c(pi,pi),type='l')
xlocs<-c(min(timescales),pretty(timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
graphics::axis(side=2,at=c(-pi,-pi/2,0,pi/2,pi),labels=expression(-pi,-pi/2,0,pi/2,pi))
#p-values
  if (!identical(bandprows,"all"))
{
bandp<-bandp[bandprows,]
}
for (counter in 1:dim(bandp)[1])
{
b1<-unname(bandp[counter,1])
if (b1<min(timescales)){b1<-min(timescales)}
b2<-unname(bandp[counter,2])
if (b2>max(timescales)){b2<-max(timescales)}
mnphs<-unname(bandp[counter,4])
htl<-rg[2]-(counter-1/4-.1)*prc*drg
wwd<-.07*prc*drg
lines(log(1/c(b1,b2)),c(htl,htl))
lines(log(1/c(b1,b1)),c(htl-wwd,htl+wwd))
lines(log(1/c(b2,b2)),c(htl-wwd,htl+wwd))
htt<-rg[2]-(counter-1.2/2-.1)*prc*drg
valh<-round(mnphs,2)
text(mean(log(1/c(b1,b2))),htt,
bquote(bar(theta) == .(valh)),cex=0.66)
}
if (!is.na(filename))
{
grDevices::dev.off()
}
return(NULL)
}
#' @rdname plotphase
#' @export
plotphase.default<-function(object,...)
{
stop("Error in plotphase: method not defined for this class")
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/plotphase.R |
#' Plots \code{ranks} slot for \code{coh} and \code{wlmtest} objects
#'
#' Plots the \code{ranks} slot for \code{coh} and \code{wlmtest} objects to help identify statistical
#' significance of coherence
#'
#' @param object A \code{coh} or \code{wlmtest} object. Must have a non-\code{NA} \code{signif} slot.
#' @param sigthresh Significance threshold(s). Numeric vector with values between 0 and 1. Typically
#' 0.95, 0.99, 0.999, etc. The threshold(s) are plotted on the rank plot as dashed horizontal line(s).
#' @param bandprows The rows of \code{object$bandp} for which to display p-value results in the plot
#' @param filename Filename (without extension), for saving as pdf. Default value NA saves no file and
#' uses the default graphics device.
#' @param ... Passed from the generic to specific methods. Not currently used.
#'
#' @details The plot shows the modulus of \code{object$ranks$coher} versus \code{log(1/object$timescales)}.
#' Horizontal axis ticks are labeled as timescales, but are spaced on the axis as
#' log(1/timescale), i.e., log frequencies. p-values from \code{object$bandp} are displayed
#' above the rank plot.
#'
#' @author Thomas Anderson, \email{anderstl@@gmail.com}, Jon Walter, \email{jaw3es@@virginia.edu}; Lawrence
#' Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Sheppard, L.W., et al. (2016) Changes in large-scale climate alter spatial synchrony of aphid
#' pests. Nature Climate Change. DOI: 10.1038/nclimate2881
#'
#' Sheppard, LW et al. (2019) Synchrony is more than its top-down and climatic parts: interacting
#' Moran effects on phytoplankton in British seas. Plos Computational Biology 15, e1006744. doi: 10.1371/journal.pcbi.1006744
#'
#' @seealso \code{\link{coh}}, \code{\link{wlmtest}}, \code{\link{bandtest}}, \code{\link{plotphase}},
#' \code{\link{plotmag}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' #For a coh object
#' times<-(-3:100)
#' ts1<-sin(2*pi*times/10)
#' ts2<-5*sin(2*pi*times/3)
#' artsig_x<-matrix(NA,11,length(times)) #the driver
#' for (counter in 1:11)
#' {
#' artsig_x[counter,]=ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
#' }
#' times<-0:100
#' artsig_y<-matrix(NA,11,length(times)) #the driven
#' for (counter1 in 1:11)
#' {
#' for (counter2 in 1:101)
#' {
#' artsig_y[counter1,counter2]<-mean(artsig_x[counter1,counter2:(counter2+2)])
#' }
#' }
#' artsig_y<-artsig_y+matrix(rnorm(length(times)*11,mean=0,sd=3),11,length(times))
#' artsig_x<-artsig_x[,4:104]
#' artsig_x<-cleandat(artsig_x,times,1)$cdat
#' artsig_y<-cleandat(artsig_y,times,1)$cdat
#' res<-coh(dat1=artsig_x,dat2=artsig_y,times=times,norm="powall",sigmethod="fast",
#' nrand=100,f0=0.5,scale.max.input=28)
#' #use larger nrand for a real application
#' res<-bandtest(res,c(2,4))
#' res<-bandtest(res,c(8,12))
#' plotrank(res)
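#'
#' #multiple significance thresholds can be displayed at once:
#' plotrank(res,sigthresh=c(0.95,0.99))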
#'
#' #For a wlmtest object, see vignette
#'
#' @export
#' @importFrom graphics axis plot lines text
#' @importFrom grDevices pdf dev.off
plotrank<-function(object,...)
{
UseMethod("plotrank",object)
}
#' @rdname plotrank
#' @export
plotrank.coh<-function(object,sigthresh=0.95,bandprows="all",filename=NA,...)
{
#error check
if (any(is.na(object$signif)))
{
stop("Error in plotrank.coh: plotrank.coh needs a signif slot")
}
#extract the needed slots
ranks<-get_ranks(object)
if (any(is.na(ranks)))
{
object<-addranks(object)
ranks<-get_ranks(object)
}
bandp<-get_bandp(object)
timescales<-get_timescales(object)
#more error check
if (any(sigthresh>=1 | sigthresh<=0))
{
stop("Error in plotrank.coh: inappropriate value for sigthresh")
}
if (!identical(bandprows,"all") && !any(is.na(bandp)))
{
if (!is.numeric(bandprows))
{
stop("Error in plotrank.coh: non-numeric value for bandprows")
}
if (!all(bandprows %in% 1:dim(bandp)[1]))
{
stop("Error in plotrank.coh: bandprows must contain row numbers for bandp")
}
}
if (!is.na(filename))
{
grDevices::pdf(paste0(filename,".pdf"))
}
if (any(is.na(bandp)))
{ #if bandp is absent, just plot the lines, no p-values
x<-log(1/timescales)
plot(x,ranks$coher,type="l",lty="solid",xaxt="n",col="red",
xlab="Timescales",ylab="Fract surr gt",ylim=c(0.5,1))
for (counter in 1:length(sigthresh))
{
lines(range(x),c(sigthresh[counter],sigthresh[counter]),lty='dashed')
}
xlocs<-c(min(timescales),pretty(timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
}else
{ #if bandp is present, plot p-values, too
#get the new vertical axis range to fit the p-vals
rg<-c(0.5,1)
prc<-0.15
drg<-diff(rg)
rg[2]<-rg[2]+dim(bandp)[1]*prc*drg
#plot
x<-log(1/timescales)
plot(x,ranks$coher,type="l",lty="solid",xaxt="n",col="red",
xlab="Timescales",ylab="Fract surr gt",ylim=rg)
lines(range(x),c(1,1),type='l',lty="solid")
for (counter in 1:length(sigthresh))
{
lines(range(x),c(sigthresh[counter],sigthresh[counter]),lty='dashed')
}
xlocs<-c(min(timescales),pretty(timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
#add the p-vals
if (!identical(bandprows,"all"))
{
bandp<-bandp[bandprows,]
}
for (counter in 1:dim(bandp)[1])
{
b1<-unname(bandp[counter,1])
if (b1<min(timescales)){b1<-min(timescales)}
b2<-unname(bandp[counter,2])
if (b2>max(timescales)){b2<-max(timescales)}
p<-unname(bandp[counter,3])
htl<-rg[2]-(counter-1/4-.1)*prc*drg
wwd<-.07*prc*drg
lines(log(1/c(b1,b2)),c(htl,htl))
lines(log(1/c(b1,b1)),c(htl-wwd,htl+wwd))
lines(log(1/c(b2,b2)),c(htl-wwd,htl+wwd))
htt<-rg[2]-(counter-1.2/2-.1)*prc*drg
text(mean(log(1/c(b1,b2))),htt,paste0("p=",round(p,4)),cex=0.66)
}
}
if (!is.na(filename))
{
grDevices::dev.off()
}
return(NULL)
}
#' @rdname plotrank
#' @export
plotrank.wlmtest<-function(object,sigthresh=0.95,bandprows="all",filename=NA,...)
{
#extract the needed slots
ranks<-get_ranks(object)
if (any(is.na(ranks)))
{
object<-addranks(object)
ranks<-get_ranks(object)
}
bandp<-get_bandp(object)
timescales<-get_timescales(get_wlmobj(object))
#more error check
if (any(sigthresh>=1 | sigthresh<=0))
{
stop("Error in plotrank.wlmtest: inappropriate value for sigthresh")
}
if (!identical(bandprows,"all") && !any(is.na(bandp)))
{
if (!is.numeric(bandprows))
{
stop("Error in plotrank.wlmtest: non-numeric value for bandprows")
}
if (!all(bandprows %in% 1:dim(bandp)[1]))
{
stop("Error in plotrank.wlmtest: bandprows must contain row numbers for bandp")
}
}
if (!is.na(filename))
{
grDevices::pdf(paste0(filename,".pdf"))
}
if (any(is.na(bandp)))
{ #if bandp is absent, just plot the lines, no p-values
x<-log(1/timescales)
plot(x,ranks$coher,type="l",lty="solid",xaxt="n",col="red",
xlab="Timescales",ylab="Fract surr gt",ylim=c(0.5,1))
for (counter in 1:length(sigthresh))
{
lines(range(x),c(sigthresh[counter],sigthresh[counter]),lty='dashed')
}
xlocs<-c(min(timescales),pretty(timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
}else
{ #if bandp is present, plot p-values, too
#get the new vertical axis range to fit the p-vals
rg<-c(0.5,1)
prc<-0.15
drg<-diff(rg)
rg[2]<-rg[2]+dim(bandp)[1]*prc*drg
#plot
x<-log(1/timescales)
plot(x,ranks$coher,type="l",lty="solid",xaxt="n",col="red",
xlab="Timescales",ylab="Fract surr gt",ylim=rg)
lines(range(x),c(1,1),type='l',lty="solid")
for (counter in 1:length(sigthresh))
{
lines(range(x),c(sigthresh[counter],sigthresh[counter]),lty='dashed')
}
xlocs<-c(min(timescales),pretty(timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
#add the p-vals
if (!identical(bandprows,"all"))
{
bandp<-bandp[bandprows,]
}
for (counter in 1:dim(bandp)[1])
{
b1<-unname(bandp[counter,1])
if (b1<min(timescales)){b1<-min(timescales)}
b2<-unname(bandp[counter,2])
if (b2>max(timescales)){b2<-max(timescales)}
p<-unname(bandp[counter,3])
htl<-rg[2]-(counter-1/4-.1)*prc*drg
wwd<-.07*prc*drg
lines(log(1/c(b1,b2)),c(htl,htl))
lines(log(1/c(b1,b1)),c(htl-wwd,htl+wwd))
lines(log(1/c(b2,b2)),c(htl-wwd,htl+wwd))
htt<-rg[2]-(counter-1.2/2-.1)*prc*drg
text(mean(log(1/c(b1,b2))),htt,paste0("p=",round(p,4)),cex=0.66)
}
}
if (!is.na(filename))
{
grDevices::dev.off()
}
return(NULL)
}
#' @rdname plotrank
#' @export
plotrank.default<-function(object,...)
{
stop("Error in plotrank: method not defined for this class")
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/plotrank.R |
#' Power of a \code{tts} object
#'
#' Returns the power of a \code{tts} object, i.e., the mean over
#' time of the squared magnitude (which is a function of timescale)
#'
#' @param object A \code{tts} object
#'
#' @return \code{power} returns a data frame with columns timescales and power
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{tts}}, \code{\link{wt}}, \code{\link{wmf}}, \code{\link{wpmf}},
#' \code{browseVignettes("wsyn")}
#'
#' @examples
#' times<-1:10
#' timescales<-1:10
#' values<-matrix(rep(complex(modulus=1,argument=2*pi*c(0:9)/10),times=10),10,10)
#' ttsobj<-tts(times,timescales,values)
#' res<-power(ttsobj)
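#'
#' #since every value above has modulus 1, res$power is 1 at all timescales:
#' plot(res$timescales,res$power,type="l")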
#'
#' @export
power<-function(object)
{
UseMethod("power",object)
}
#' @export
power.default<-function(object)
{
stop("Error in power: method not defined for this class")
}
#' @rdname power
#' @export
power.tts<-function(object)
{
#extract the relevant components
times<-get_times(object)
timescales<-get_timescales(object)
values<-get_values(object)
#get the power and set up the result data frame
pow<-apply(FUN=mean,X=(Mod(values))^2,MARGIN=2,na.rm=T)
res<-data.frame(timescales=timescales,power=pow)
return(res)
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/power.R |
#' Predicted synchrony of a wavelet linear model
#'
#' Predicted synchrony of a \code{wlm} object. This is described in the
#' first paragraph of Appendix S15 of Sheppard et al (2019).
#'
#' @param wlmobj A \code{wlm} object
#'
#' @return \code{predsync} returns a \code{tts} object. Plotting the magnitude
#' (see \code{plotmag}) displays a picture of predicted synchrony versus time and
#' timescale that is comparable with the wavelet mean field (see \code{wmf}) of
#' the response variable of the model. Calling the \code{power} function on that
#' \code{tts} object should give the same results as one of the columns of output
#' of \code{syncexpl}. Only \code{norm="powall"} implemented so far.
#'
#' @author Thomas Anderson, \email{anderstl@@gmail.com}, Jon Walter, \email{jaw3es@@virginia.edu}; Lawrence
#' Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Sheppard, LW et al. (2019) Synchrony is more than its top-down and climatic parts: interacting
#' Moran effects on phytoplankton in British seas. Plos Computational Biology 15, e1006744. doi: 10.1371/journal.pcbi.1006744
#'
#' @seealso \code{\link{wlm}}, \code{\link{tts}}, \code{\link{plotmag}}, \code{\link{wmf}}, \code{\link{power}},
#' \code{\link{syncexpl}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' times<-(-3:100)
#' ts1<-sin(2*pi*times/10)
#' ts2<-5*sin(2*pi*times/3)
#' artsig_x<-matrix(NA,11,length(times)) #the driver
#' for (counter in 1:11)
#' {
#' artsig_x[counter,]<-ts1+ts2+rnorm(length(times),mean=0,sd=.5)
#' }
#' times<-0:100
#' artsig_y<-matrix(NA,11,length(times)) #the driven
#' for (counter1 in 1:11)
#' {
#' for (counter2 in 1:101)
#' {
#' artsig_y[counter1,counter2]<-mean(artsig_x[counter1,counter2:(counter2+2)])
#' }
#' }
#' artsig_y<-artsig_y+matrix(rnorm(length(times)*11,mean=0,sd=1),11,length(times))
#' artsig_x<-artsig_x[,4:104]
#' artsig_i<-matrix(rnorm(11*length(times)),11,length(times)) #the irrelevant
#' artsig_x<-cleandat(artsig_x,times,1)$cdat
#' artsig_y<-cleandat(artsig_y,times,1)$cdat
#' artsig_i<-cleandat(artsig_i,times,1)$cdat
#' dat<-list(driven=artsig_y,driver=artsig_x,irrelevant=artsig_i)
#' resp<-1
#' pred<-2:3
#' norm<-"powall"
#' wlmobj<-wlm(dat,times,resp,pred,norm)
#'
#' res<-predsync(wlmobj)
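#'
#' #plotting the magnitude shows predicted synchrony against time and timescale,
#' #for visual comparison with the wavelet mean field of the response:
#' plotmag(res)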
#'
#' @export
predsync<-function(wlmobj)
{
UseMethod("predsync",wlmobj)
}
#' @export
predsync.default<-function(wlmobj)
{
stop("Error in predsync: method not defined for this class")
}
#' @rdname predsync
#' @export
predsync.wlm<-function(wlmobj)
{
#get the necessary slots
modval<-get_modval(wlmobj)
coher<-get_coher(wlmobj)
times<-get_times(wlmobj)
timescales<-get_timescales(wlmobj)
norm<-get_norm(wlmobj)
#only powall implemented
if (norm!="powall")
{
stop("Error in predsync: this value of norm not implemented yet")
}
#get the model-predicted synchrony
modval<-normforcoh(modval,norm)
res<-apply(FUN=mean,X=modval,MARGIN=c(2,3)) #model mean field
for (tscounter in 1:length(timescales))
{ #multiply by model-response coherence
res[,tscounter]<-res[,tscounter]*coher[tscounter]
}
#prepare result and return
res<-tts(times,timescales,res)
return(res)
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/predsync.R |
#' Print method for \code{summary_wsyn} class
#'
#' Print method for \code{summary_wsyn} class
#'
#' @param x A \code{summary_wsyn} object
#' @param ... Not currently used. Included for argument consistency
#' with existing generics.
#'
#' @return \code{print.summary_wsyn} is called for its effect of
#' printing to the screen.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{tts_methods}}, \code{\link{wt_methods}}, \code{\link{wmf_methods}}, \code{\link{wpmf_methods}},
#' \code{\link{coh_methods}}, \code{\link{wlm_methods}}, \code{\link{wlmtest_methods}}, \code{\link{clust_methods}},
#' \code{browseVignettes("wsyn")}
#'
#' @examples
#' times<-1:10
#' timescales<-1/c(1:10)
#' values<-matrix(1,length(times),length(timescales))
#' h<-tts(times,timescales,values)
#' print(summary(h))
#'
#' @export
print.summary_wsyn<-function(x,...)
{
for (counter in 1:length(x))
{
cat(names(x)[counter],": ",x[[counter]],"\n",sep="")
}
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/print.summary_wsyn.R |
#' Shifts a vector according to the argument mints
#'
#' @param ts A vector of numeric values representing a time series
#' @param mints The time series is shifted to have this minimum value. Default NA means use the smallest difference
#' between consecutive, distinct sorted values of the time series. NaN means perform no shift.
#'
#' @return \code{setmints} returns the shifted vector.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @note This is an internal function, and no error checking is done.
setmints<-function(ts,mints)
{
if (!is.nan(mints))
{
if (is.na(mints))
{
diffs<-diff(sort(ts))
mints<-min(diffs[diffs!=0])
}
ts<-ts-min(ts)+mints
}
return(ts)
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/setmints.R |
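
#A few illustrative calls, kept as comments since this helper is internal:
#setmints(c(3,5,9),0)    #returns c(0,2,6): shifted so the minimum is 0
#setmints(c(3,5,9),NA)   #smallest gap between distinct sorted values is 2, giving c(2,4,8)
#setmints(c(3,5,9),NaN)  #returns c(3,5,9), no shift performed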
#' Creates surrogate time series, either Fourier surrogates or amplitude adjusted
#' Fourier surrogates
#'
#' For significance testing wavelet coherence and other purposes
#'
#' @param dat A locations x time matrix of observations (for multiple-time series input), or a single vector
#' @param nsurrogs The number of surrogates to produce
#' @param surrtype Either "fft" (for Fourier surrogates) or "aaft" (for amplitude adjusted Fourier surrogates).
#' Fourier surrogates are appropriate for time series with normal marginals; otherwise consider aaft surrogates.
#' @param syncpres Logical. TRUE for "synchrony preserving" surrogates (same phase randomizations used for all time
#' series). FALSE leads to independent phase randomizations for all time series.
#'
#' @return \code{surrog} returns a list of nsurrogs surrogate datasets
#'
#' @author Jonathan Walter, \email{jaw3es@@virginia.edu}; Lawrence Sheppard, \email{lwsheppard@@ku.edu};
#' Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @details Fourier surrogates are somewhat faster than \code{aaft} surrogates, and may be much faster when
#' some of the time series in the data have ties. Prenormalization (e.g., using \code{cleandat}) can
#' make it possible to use \code{fft} surrogates.
#'
#' @references
#' Sheppard, LW, et al. (2016) Changes in large-scale climate alter spatial synchrony of aphid pests. Nature Climate
#' Change. DOI: 10.1038/nclimate2881
#'
#' Schreiber, T and Schmitz, A (2000) Surrogate time series. Physica D 142, 346-382.
#'
#' Prichard, D and Theiler, J (1994) Generating surrogate data for time series with several simultaneously measured
#' variables. Physical Review Letters 73, 951-954.
#'
#' @seealso \code{\link{wpmf}}, \code{\link{coh}}, \code{\link{wlmtest}}, \code{\link{synmat}},
#' \code{browseVignettes("wsyn")}
#'
#' @examples
#' times<-1:100
#' dat<-sin(2*pi*times/10)
#' nsurrogs<-10
#' surrtype<-"fft"
#' syncpres<-TRUE
#' res<-surrog(dat,nsurrogs,surrtype,syncpres)
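#'
#' #amplitude adjusted Fourier surrogates are an alternative when marginals
#' #are not normal:
#' resaaft<-surrog(dat,nsurrogs,"aaft",syncpres)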
#'
#' @export
#' @importFrom stats qnorm rnorm fft
surrog<-function(dat,nsurrogs,surrtype,syncpres)
{
#error check
wasvect<-FALSE
if (is.matrix(dat) && dim(dat)[1]>1)
{
errcheck_stdat(1:dim(dat)[2],dat,"surrog")
}else
{
if (!is.matrix(dat)){wasvect<-TRUE}
errcheck_tsdat(1:length(dat),dat,"surrog")
dat<-matrix(dat, nrow=1, ncol=length(dat))
}
if (!(surrtype %in% c("fft","aaft")))
{
stop("Error in surrog: bad value for surrtype")
}
#fft surrogates
if (surrtype=="fft")
{
#do the surrogates
res<-fftsurrog(dat,nsurrogs,syncpres)
#if dat was a vector, make output same format
if (wasvect)
{
for (n in 1:length(res))
{
res[[n]]<-as.vector(res[[n]])
}
}
return(res)
}
#aaft surrogates
if (surrtype=="aaft")
{
#get appropriate quantiles of a standard normal
normquant<-stats::qnorm((1:dim(dat)[2])/(dim(dat)[2]+1))
    #find out if there are ties
areties<-FALSE
for (counter in 1:dim(dat)[1])
{
if (length(unique(dat[counter,]))<dim(dat)[2])
{
areties<-TRUE
break
}
}
#cases with ties are handled differently, and less efficiently by necessity
if (areties)
{
#map time series onto quantiles of a normal, done separately for each time
#series in each surrogate, breaking ties randomly
datorig<-dat
dat<-list()
for (counter in 1:nsurrogs)
{
datranks<-t(apply(FUN=rank,X=datorig,MARGIN=1,ties.method="random"))
dat[[counter]]<-matrix(normquant[datranks],nrow(datranks),ncol(datranks))
}
#get ffts of all time series for all remappings
fftdat<-list()
fftmod<-list()
fftarg<-list()
for (counter in 1:nsurrogs)
{
fftdat[[counter]]<-matrix(complex(real=NA, imaginary=NA), nrow=nrow(datorig), ncol=ncol(datorig))
for (row in 1:nrow(datorig))
{
fftdat[[counter]][row,]<-stats::fft(dat[[counter]][row,])
}
fftmod[[counter]]<-Mod(fftdat[[counter]])
fftarg[[counter]]<-Arg(fftdat[[counter]])
}
#randomize phases and inverse transform
mpdres<-list()
for(counter in 1:nsurrogs)
{
# get and apply random phases for the current surrogate
if (syncpres)
{
          #synchrony preserving surrogates only need one set of phase perturbations, used for all time series
h<-Arg(stats::fft(stats::rnorm(ncol(datorig))))
randomizedphases<-
(matrix(rep(h, times=nrow(datorig)), nrow(datorig), ncol(datorig), byrow=TRUE)+fftarg[[counter]]) %% (2*pi)
}else
{
#need separate independent phase perturbations for each time series
h<-matrix(stats::rnorm(ncol(datorig)*nrow(datorig)),nrow(datorig),ncol(datorig))
randomizedphases<-(fftarg[[counter]]+t(apply(X=h,MARGIN=1,FUN=function(x){Arg(stats::fft(x))}))) %% (2*pi)
}
mpdres[[counter]]<-matrix(complex(modulus=fftmod[[counter]],
argument=randomizedphases),nrow(datorig), ncol(datorig))
# inverse transform
for(row in 1:nrow(mpdres[[counter]]))
{
mpdres[[counter]][row,]<-fft(mpdres[[counter]][row,], inverse=T)/(ncol(mpdres[[counter]]))
}
mpdres[[counter]]<-Re(mpdres[[counter]])
}
#get orders for remapped data, same as orders of ranks of remapped data
datorders<-list()
for (counter in 1:nsurrogs)
{
datorders[[counter]]<-t(apply(FUN=order,X=dat[[counter]],MARGIN=1))
}
#remap results back
res<-list()
for (counter1 in 1:nsurrogs)
{
res[[counter1]]<-NA*datorig
thissurrog<-mpdres[[counter1]]
thissurrogrk<-t(apply(FUN=rank,X=thissurrog,MARGIN=1))
for (counter2 in 1:nrow(datorig))
{
res[[counter1]][counter2,]<-
datorig[counter2,datorders[[counter1]][counter2,thissurrogrk[counter2,]]]
}
}
#if dat was a vector, make output same format
if (wasvect)
{
for (n in 1:length(res))
{
res[[n]]<-as.vector(res[[n]])
}
}
return(res)
}else
{
#map each time series (separately) onto the quantiles of a normal dist
datorig<-dat
datranks<-t(apply(FUN=rank,X=dat,MARGIN=1))
dat<-matrix(normquant[datranks],nrow(datranks),ncol(datranks))
#apply fft surrogates
mpdres<-fftsurrog(dat,nsurrogs,syncpres)
#remap results back
res<-list()
datorders<-t(apply(FUN=order,X=dat,MARGIN=1))
for (counter1 in 1:length(mpdres))
{
res[[counter1]]<-NA*dat
thissurrog<-mpdres[[counter1]]
thissurrogrk<-t(apply(FUN=rank,X=thissurrog,MARGIN=1))
for (counter2 in 1:dim(dat)[1])
{
res[[counter1]][counter2,]<-datorig[counter2,datorders[counter2,thissurrogrk[counter2,]]]
}
}
#if dat was a vector, make output same format
if (wasvect)
{
for (n in 1:length(res))
{
res[[n]]<-as.vector(res[[n]])
}
}
return(res)
}
}
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/surrog.R |
#' Amount of synchrony explained, and related quantities
#'
#' Gives amount of synchrony explained by a wavelet linear model, as a function of
#' timescale, and related quantities (see details)
#'
#' @param object A \code{wlm} object
#'
#' @return \code{syncexpl} returns a data frame with columns for \code{timescales},
#' \code{sync} (the time-averaged square magnitude of the wavelet mean field of the
#' response transforms), \code{syncexpl} (synchrony explained by the model
#' predictors), columns named for each predictor (synchrony explained by that
#' predictor), \code{interactions} (synchrony explained by all interaction effects),
#' columns named for each pair of predictors (synchrony explained by individual
#' pairwise interactions). There are also columns for \code{crossterms} and
#' \code{resids} (residuals). The cross terms must be small for a given timescale band for
#' the other results to be meaningful. All columns are functions of timescales.
#'
#' @details This function only works for \code{norm="powall"} at present. See
#' Sheppard et al (2019) for details of the meaning and computation of the
#' columns.
#'
#' @author Thomas Anderson, \email{anderstl@@gmail.com}, Jon Walter, \email{jaw3es@@virginia.edu}; Lawrence
#' Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Sheppard, LW et al. (2019) Synchrony is more than its top-down and climatic parts: interacting
#' Moran effects on phytoplankton in British seas. Plos Computational Biology 15, e1006744. doi: 10.1371/journal.pcbi.1006744
#'
#' @seealso \code{\link{wlm}}, \code{\link{predsync}}, \code{\link{wlmtest}},
#' \code{browseVignettes("wsyn")}
#'
#' @examples
#' times<-(-3:100)
#' ts1<-sin(2*pi*times/10)
#' ts2<-5*sin(2*pi*times/3)
#' artsig_x<-matrix(NA,11,length(times)) #the driver
#' for (counter in 1:11)
#' {
#' artsig_x[counter,]=ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
#' }
#' times<-0:100
#' artsig_y<-matrix(NA,11,length(times)) #the driven
#' for (counter1 in 1:11)
#' {
#' for (counter2 in 1:101)
#' {
#' artsig_y[counter1,counter2]<-mean(artsig_x[counter1,counter2:(counter2+2)])
#' }
#' }
#' artsig_y<-artsig_y+matrix(rnorm(length(times)*11,mean=0,sd=3),11,length(times))
#' artsig_x<-artsig_x[,4:104]
#' artsig_i<-matrix(rnorm(11*length(times)),11,length(times)) #the irrelevant
#' artsig_x<-cleandat(artsig_x,times,1)$cdat
#' artsig_y<-cleandat(artsig_y,times,1)$cdat
#' artsig_i<-cleandat(artsig_i,times,1)$cdat
#'
#' dat<-list(driven=artsig_y,driver=artsig_x,irrelevant=artsig_i)
#' resp<-1
#' pred<-2:3
#' norm<-"powall"
#' wlmobj<-wlm(dat,times,resp,pred,norm)
#'
#' res<-syncexpl(wlmobj)
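#'
#' #for interpretation, crossterms should be small relative to sync over the
#' #timescale band of interest; a quick check for the 2 to 4 band:
#' se<-res[res$timescales>=2 & res$timescales<=4,]
#' mean(se$crossterms)/mean(se$sync)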
#'
#' @export
syncexpl<-function(object)
{
UseMethod("syncexpl",object)
}
#' @export
syncexpl.default<-function(object)
{
stop("Error in syncexpl: method not defined for this class")
}
#' @rdname syncexpl
#' @export
syncexpl.wlm<-function(object)
{
#get the necessary slots
modval<-get_modval(object)
coher<-get_coher(object)
timescales<-get_timescales(object)
norm<-get_norm(object)
wts<-object$wts
dat<-object$dat
#only powall implemented so far
if (norm!="powall")
{
stop("Error in syncexpl: this value of norm not implemented yet")
}
#receptacle for results
res<-data.frame(timescales=timescales,
sync=NA*numeric(length(timescales)),
syncexpl=NA*numeric(length(timescales)),
crossterms=NA*numeric(length(timescales)),
resids=NA*numeric(length(timescales)))
if (is.null(names(dat)))
{
pnames<-paste0("pred",1:(length(dat)-1))
}else
{
pnames<-names(dat)
pnames<-pnames[-1]
}
for (counter in 1:length(pnames))
{
res[,pnames[counter]]<-NA*numeric(1)
}
if (length(pnames)>1)
{
res[,"interactions"]<-NA*numeric(1)
for (c1 in 1:(length(pnames)-1))
{
for (c2 in (c1+1):length(pnames))
{
res[,paste0(pnames[c1],"_",pnames[c2])]<-NA*numeric(1)
}
}
}
#compute the actual synchrony of the response
rmf<-apply(FUN=mean,X=wts[[1]],MARGIN=c(2,3)) #response variable mean field
res$sync<-apply(FUN=mean,X=(Mod(rmf))^2,MARGIN=2,na.rm=TRUE) #power
  #compute synchrony explained, see Appendix S15 of Sheppard et al (2019)
mmf<-apply(FUN=mean,X=normforcoh(modval,norm),MARGIN=c(2,3)) #model mean field
powmmf<-apply(FUN=mean,X=(Mod(mmf))^2,MARGIN=2,na.rm=TRUE) #power
res$syncexpl<-(Mod(coher))^2*powmmf
#residuals
d<-wts[[1]]-modval
dmf<-apply(FUN=mean,X=d,MARGIN=c(2,3)) #residuals mean field
res$resids<-apply(FUN=mean,X=(Mod(dmf))^2,MARGIN=2,na.rm=TRUE) #power
#cross terms
res$crossterms<-res$sync-res$syncexpl-res$resids
#prepare to get predictors and interaction terms
h<-list()
for (counter in 2:length(wts))
{
#start with the wmf of each predictor
h[[counter-1]]<-apply(FUN=mean,MARGIN=c(2,3),X=wts[[counter]])
#multiply by coefficients
h[[counter-1]]<-h[[counter-1]]*
matrix(rep(object$coefs[,counter-1],each=length(object$times)),
nrow=length(object$times))
}
#synchrony explained by each predictor
for (pc in 1:length(pnames))
{
res[,pnames[pc]]<-apply(FUN=mean,MARGIN=2,X=(Mod(h[[pc]]))^2,na.rm=T)
}
if (length(pnames)>1)
{
#now do each set of interactions
totints<-numeric(dim(res)[1])
for (c1 in 1:(length(pnames)-1))
{
for (c2 in (c1+1):length(pnames))
{
h2<-2*Re(apply(FUN=mean,MARGIN=2,
X=h[[c1]]*Conj(h[[c2]]),na.rm=T))
res[,paste0(pnames[c1],"_",pnames[c2])]<-h2
totints<-totints+h2
}
}
#total interactions
res[,"interactions"]<-totints
}
return(res)
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/syncexpl.R |
#' Synchrony matrices
#'
#' Calculate synchrony matrices using a variety of methods
#'
#' @param dat A locations (rows) x time (columns) matrix of measurements
#' @param times The times at which measurements were made, spacing 1
#' @param method Method for synchrony calculation. See details.
#' @param tsrange A vector containing the min and max of the focal timescale range. Defaults
#' to all timescales that are valid given choices for scale.min, scale.max.input, f0, sigma.
#' Only used for wavelet-based methods.
#' @param nsurrogs Number of surrogates for significance test. Defaults to 1000. Only used
#' for surrogate-based methods.
#' @param scale.min The smallest scale of fluctuation that will be examined. At least 2. Used
#' only for wavelet-based methods.
#' @param scale.max.input The largest scale of fluctuation guaranteed to be examined. Only used
#' for wavelet-based methods.
#' @param sigma The ratio of each time scale examined relative to the next timescale. Should be
#' greater than 1. Only used for wavelet-based methods.
#' @param f0 The ratio of the period of fluctuation to the width of the envelope. Only used for
#' wavelet-based methods.
#' @param weighted If \code{TRUE}, create a weighted network. If \code{FALSE}, create a binary
#' network using statistical significance. Binary networks are only allowed for networks based
#' on significance.
#' @param sigthresh Significance threshold needed, if \code{weighted} is false, for a network
#' link to be realized. Typically 0.95, 0.99, or 0.999, etc. Only used if \code{weighted} is
#' \code{FALSE}.
#'
#' @return \code{synmat} returns a synchrony matrix, of type depending on the \code{method}
#' argument. See details. Diagonal entries are left as \code{NA}.
#'
#' @details The following values are valid for \code{method}:
#' \code{"pearson"}, \code{"pearson.sig.std"}, \code{"pearson.sig.fft"},
#' \code{"pearson.sig.aaft"},
#' \code{"spearman"}, \code{"spearman.sig.std"}, \code{"spearman.sig.fft"},
#' \code{"spearman.sig.aaft"},
#' \code{"kendall"}, \code{"kendall.sig.std"}, \code{"kendall.sig.fft"},
#' \code{"kendall.sig.aaft"},
#' \code{"ReXWT"}, \code{"ReXWT.sig.fft"}, \code{"ReXWT.sig.aaft"}, \code{"ReXWT.sig.fast"},
#' \code{"coh"}, \code{"coh.sig.fft"}, \code{"coh.sig.aaft"}, \code{"coh.sig.fast"},
#' \code{"phasecoh"}, \code{"phasecoh.sig.fft"}, and \code{"phasecoh.sig.aaft"}.
#' The first portions of these identifiers correspond to the Pearson, Spearman, and Kendall
#' correlations, the real part of the cross-wavelet transform, the wavelet coherence, and the
#' wavelet phase coherence. The second portion of these identifiers, when present, indicates
#' that significance of the measure specified in the first portion of the identifier is to
#' be used for establishing the synchrony matrix. Otherwise the value itself is used. The
#' third part of the \code{method} identifier indicates what type of significance is used.
#'
#' Significance testing is performed using standard approaches (\code{method} flag containing
#' \code{std}; available only for the correlation coefficients, and note that standard tests
#' are inappropriate for autocorrelated data), or using surrogates generated with the
#' Fourier method (\code{method} flag containing \code{"fft"}) or the amplitude adjusted Fourier
#' method (\code{"aaft"}). For
#' \code{"coh"} and \code{"ReXWT"}, the fast testing algorithm of Sheppard et al. (2017) is also
#' implemented (\code{"fast"}). That method uses implicit Fourier surrogates. The choice of
#' wavelet coherence (method flag containing \code{"coh"}) or the real part of
#' the cross-wavelet
#' transform (method flag containing \code{"ReXWT"}) depends mainly
#' on treatment of out-of-phase
#' relationships. The \code{"ReXWT"} is more akin to a correlation coefficient in that
#' strong in-phase relationships approach 1 and strong antiphase relationships approach -1.
#' Wavelet coherence allows any phase relationship and ranges from 0 to 1. Power normalization
#' is applied for \code{"coh"} and for \code{"ReXWT"}. All significance tests are one-tailed.
#' Synchrony matrices for significance-based methods when \code{weighted} is \code{TRUE}
#' contain 1 minus the p-values.
#'
#' @author Jonathan Walter, \email{jaw3es@@virginia.edu}; Daniel Reuman, \email{reuman@@ku.edu};
#' Lei Zhao, \email{lei.zhao@@cau.edu.cn}
#'
#' @references Walter, J. A., et al. (2017) The geography of spatial synchrony. Ecology
#' Letters. doi: 10.1111/ele.12782
#'
#' @seealso \code{\link{clust}}, \code{\link{coh}}, \code{\link{surrog}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' sig<-matrix(.9,5,5)
#' diag(sig)<-1
#' if (requireNamespace("mvtnorm",quietly=TRUE))
#' {
#' dat1<-t(mvtnorm::rmvnorm(30,mean=rep(0,5),sigma=sig))
#' dat2<-t(mvtnorm::rmvnorm(30,mean=rep(0,5),sigma=sig))
#' }else
#' {
#' dat1<-t(matrix(rep(rnorm(30),times=5),30,5))
#' dat2<-t(matrix(rep(rnorm(30),times=5),30,5))
#' }
#' dat<-rbind(dat1,dat2)
#' times<-1:30
#' dat<-cleandat(dat,times,clev=2)$cdat
#' method<-"pearson.sig.fft"
#' res<-synmat(dat,times,method,nsurrogs=100,weighted=FALSE,
#' sigthresh=0.95)
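#'
#' #a weighted, wavelet-based variant; nsurrogs is kept small here only for
#' #speed, and larger values should be used in real applications:
#' res2<-synmat(dat,times,method="coh.sig.fast",nsurrogs=100,weighted=TRUE)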
#'
#' @export
#' @importFrom stats cor cor.test runif
synmat<-function(dat,times,method,tsrange=c(0,Inf),nsurrogs=1000,
scale.min=2,scale.max.input=NULL,sigma=1.05,f0=1,
weighted=TRUE,sigthresh=0.95)
{
#error checking
errcheck_stdat(times,dat,"synmat")
if (!(method %in% c("pearson","pearson.sig.std","pearson.sig.fft","pearson.sig.aaft",
"spearman","spearman.sig.std","spearman.sig.fft","spearman.sig.aaft",
"kendall","kendall.sig.std","kendall.sig.fft","kendall.sig.aaft",
"ReXWT","ReXWT.sig.fft","ReXWT.sig.aaft","ReXWT.sig.fast",
"coh","coh.sig.fft","coh.sig.aaft","coh.sig.fast",
"phasecoh","phasecoh.sig.fft","phasecoh.sig.aaft")))
{
stop("Error in synmat: bad value of method")
}
if ((!weighted) && (!grepl("sig", method)))
  { #if a non-significance method is used and weighted is false, throw an error
stop("Error in synmat: unweighted networks available only if method involves a significance test")
}
errcheck_wavparam(scale.min,scale.max.input,sigma,f0,times,"synmat")
if (sigthresh<=0 || sigthresh>=1)
{
stop("Error in synmat: inappropriate value for sigthresh")
}
#basic setup
nlocs<-nrow(dat)
#options corresponding to one of the correlations without considering significance
if (method %in% c("pearson","spearman","kendall"))
{
mat<-cor(t(dat), method=method)
diag(mat)<-NA
return(mat)
}
#options corresponding to one of the correlations, using standard significance
if (method %in% c("pearson.sig.std","kendall.sig.std","spearman.sig.std"))
{
cormeth<-strsplit(method,".",fixed=TRUE)[[1]][1]
mat<-matrix(NA,nlocs,nlocs) #compute the matrix
for (i in 2:nlocs)
{
for (j in 1:(i-1))
{
mat[i,j]<-cor.test(dat[i,],dat[j,],method=cormeth,
alternative="greater")$p.value
mat[j,i]<-mat[i,j]
}
}
if (weighted)
{
mat<-1-mat
}else
{
mat<-makeunweighted(mat,1-sigthresh)
}
return(mat)
}
#options corresponding to one of the correlations, using surrogate-based significance
if (method %in% c("pearson.sig.fft","pearson.sig.aaft",
"kendall.sig.fft","kendall.sig.aaft",
"spearman.sig.fft","spearman.sig.aaft"))
{
#get strings specifying correlation and surrogate methods
h<-strsplit(method,".",fixed=TRUE)[[1]]
cormeth<-h[1]
surrtype<-h[3]
#get correlation matrices for the real data and for surrogates
sdat<-surrog(dat,nsurrogs,surrtype,FALSE)
cormat<-cor(t(dat), method=cormeth)
scormat<-lapply(X=sdat,FUN=function(x){cor(t(x),method=cormeth)})
#get the resulting matrix of p-values
mat<-matrix(0,nrow(cormat),ncol(cormat))
for (counter in 1:nsurrogs)
{
mat<-mat+(cormat<=scormat[[counter]])
}
diag(mat)<-NA
mat<-(mat+1)/(nsurrogs+1)
#convert to the synchrony matrix
if (weighted)
{
mat<-1-mat
}else
{
mat<-makeunweighted(mat,1-sigthresh)
}
return(mat)
}
#wavelet-based, non-"fast" methods
if (method %in% c("ReXWT","ReXWT.sig.fft","ReXWT.sig.aaft",
"coh","coh.sig.fft","coh.sig.aaft",
"phasecoh","phasecoh.sig.fft","phasecoh.sig.aaft"))
{
#options depending on method
h<-strsplit(method,".",fixed=TRUE)[[1]]
if (h[1] %in% c("coh","ReXWT"))
{ #normalization methods to use on wavelet transforms
normmeth<-"powind"
}else
{
normmeth<-"phase"
}
if (h[1] %in% c("ReXWT"))
{ #whether to take the real part or the modulus
treatmeth<-"Re"
}else
{
treatmeth<-"Mod"
}
if (length(h)==1)
{
surrtype<-"none"
}else
{
surrtype<-h[3]
}
#get pairwise coherences/ReXWTs/phase coherences
wavarray<-wavmatwork(dat,times,scale.min,scale.max.input,sigma,f0,normmeth,treatmeth)
timescales<-wavarray$timescales
wavarray<-wavarray$wavarray
#the case of no significance testing
if (surrtype=="none")
{
mat<-apply(FUN=mean,
X=wavarray[,,timescales >= tsrange[1] & timescales <= tsrange[2],drop=FALSE],
MARGIN=c(1,2))
return(mat)
} #from here we are in the case of significance testing
#get surrogates, and pairwise coherences/ReXWTs/phase coherences for them
sdat<-surrog(dat,nsurrogs,surrtype,FALSE)
swavlist<-lapply(FUN=function(x){wavmatwork(x,times,scale.min,scale.max.input,sigma,f0,normmeth,treatmeth)$wavarray},
X=sdat)
swavarray<-array(NA,c(dim(wavarray),nsurrogs))
for (counter in 1:nsurrogs)
{
swavarray[,,,counter]<-swavlist[[counter]]
} #surrogates for wavarray
    #rank the empirical values within the surrogate distributions, and the surrogates among themselves
rwavarray<-array(wavarray,c(dim(wavarray),nsurrogs))
rks<-apply(FUN=sum,X=(rwavarray>swavarray),MARGIN=1:3)/nsurrogs
srks<-(aperm(apply(X=swavarray,FUN=rank,MARGIN=1:3),c(2,3,4,1))-1)/(nsurrogs-1)
#now get mean ranks for each pair of locations
mnrks<-apply(FUN=mean,
X=rks[,,timescales >= tsrange[1] & timescales <= tsrange[2],drop=FALSE],
MARGIN=1:2)
mnsrks<-apply(FUN=mean,
X=srks[,,timescales >= tsrange[1] & timescales <= tsrange[2],,drop=FALSE],
MARGIN=c(1,2,4))
#now get p-values for each pair of locations
mat<-matrix(0,nrow(mnrks),ncol(mnrks))
for (counter in 1:nsurrogs)
{
mat<-mat+(mnrks<=mnsrks[,,counter])
}
diag(mat)<-NA
mat<-(mat+1)/(nsurrogs+1)
#convert to the synchrony matrix
if (weighted)
{
mat<-1-mat
}else
{
mat<-makeunweighted(mat,1-sigthresh)
}
return(mat)
}
#fast methods
if (method %in% c("ReXWT.sig.fast","coh.sig.fast"))
{
if (method=="ReXWT.sig.fast")
{
f<-function(x){Re(x)}
}
if (method=="coh.sig.fast")
{
f<-function(x){Mod(x)}
}
mat<-matrix(NA,nlocs,nlocs)
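    #the random numbers driving the implicit Fourier surrogates are drawn once,
    #outside the pair loop, so every location pair is tested against the same
    #surrogate randomizations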
randnums<-runif(nsurrogs*floor((dim(dat)[2]-1)/2))
if (dim(dat)[2] %% 2 == 0)
{
randbits<-sample.int(2,2*nsurrogs,replace=TRUE)-1
}else
{
randbits<-sample.int(2,nsurrogs,replace=TRUE)-1
}
for (i in 2:nlocs)
{
for (j in 1:(i-1))
{
h<-fastcohtest(dat[i,],dat[j,],
scale.min,scale.max.input,sigma,f0,
nsurrogs,randnums,randbits,"powind")
x<-f(h$coher[h$timescales >= tsrange[1] & h$timescales <= tsrange[2]])
        sx<-f(h$scoher[,h$timescales >= tsrange[1] & h$timescales <= tsrange[2],drop=FALSE])
        #rank the empirical values within the surrogate distributions, and the surrogates among themselves
rx<-matrix(x,nsurrogs,length(x),byrow = TRUE)
rks<-apply(FUN=sum,X=(rx>sx),MARGIN=2)/nsurrogs
srks<-(apply(X=sx,FUN=rank,MARGIN=2)-1)/(nsurrogs-1)
#now get mean ranks
mnrks<-mean(rks)
mnsrks<-apply(FUN=mean,MARGIN=1,X=srks)
#now prepare to get the p value
mat[i,j]<-sum(mnsrks>=mnrks)
mat[j,i]<-mat[i,j]
}
}
mat<-(mat+1)/(nsurrogs+1) #this gets the actual p-values
if (weighted)
{
mat<-1-mat
}else
{
mat<-makeunweighted(mat,1-sigthresh)
}
return(mat)
}
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/synmat.R |
#' Creator function for the \code{tts} class
#'
#' The \code{tts} (time/timescale) class is for matrices for which the rows correspond
#' to times and the columns correspond to timescales. This is a general class from
#' which other classes inherit (e.g., \code{wt}, \code{wmf}, \code{wpmf}). \code{tts}
#' inherits from the \code{list} class.
#'
#' @param times A numeric vector of increasing real values, spacing 1
#' @param timescales A numeric vector with positive entries
#' @param values A complex or numeric matrix of dimensions \code{length(times)} by
#' \code{length(timescales)}
#'
#' @return \code{tts} returns an object of class \code{tts}. Slots are:
#' \item{times}{a numeric vector of evenly spaced times}
#' \item{timescales}{a numeric vector of positive timescales}
#' \item{values}{a complex or numeric matrix of dimensions \code{length(times)} by \code{length(timescales)}}
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{tts_methods}}, \code{\link{wt}}, \code{\link{wmf}}, \code{\link{wpmf}},
#' \code{browseVignettes("wsyn")}
#'
#' @examples
#' times<-1:10
#' timescales<-1/c(1:10)
#' values<-matrix(1,length(times),length(timescales))
#' h<-tts(times,timescales,values)
#'
#' @export
tts<-function(times,timescales,values)
{
errcheck_tts(times,timescales,values,"tts")
res<-list(times=times,timescales=timescales,values=values)
class(res)<-c("tts","list")
return(res)
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/tts.R |
#' Basic methods for the \code{tts} class
#'
#' Set, get, summary, and print methods for the \code{tts} class.
#'
#' @param object,x,obj An object of class \code{tts}
#' @param newval A new value, for the \code{set_*} methods
#' @param ... Not currently used. Included for argument consistency
#' with existing generics.
#'
#' @return \code{summary.tts} produces a summary of a \code{tts} object.
#' A \code{print.tts} method is also available. For \code{tts} objects,
#' \code{set_*} and \code{get_*} methods are available for all slots,
#' i.e., \code{*} equal to \code{times}, \code{timescales}, and
#' \code{values}. The \code{set_*} methods just throw an error. Although
#' class \code{tts} is flexible enough that setting of individual slots
#' could have been allowed, \code{wt} and other classes are based on it
#' and individual slots of those classes should not be changed, so for
#' consistency setting is likewise disallowed for the \code{tts} class.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{tts}}
#'
#' @examples
#' times<-1:10
#' timescales<-1/c(1:10)
#' values<-matrix(1,length(times),length(timescales))
#' h<-tts(times,timescales,values)
#' get_times(h)
#' summary(h)
#' print(h)
#'
#' @name tts_methods
NULL
#' Set and get methods for classes in the \code{wsyn} package
#'
#' Set and get methods for classes in the \code{wsyn} package. There
#' are methods for each slot of each class, named \code{set_*} and
#' \code{get_*} for \code{*} the slot name. Below are listed function
#' specs for the generics and the default methods.
#'
#' @param obj An object of one of the classes defined in the package
#' @param newval A new value of the slot in question, for the \code{set_*} methods
#'
#' @return \code{set_*} methods throw an error - setting of individual
#' slots is not allowed, as it breaks consistency with the other slots.
#' \code{get_*} just returns the value in question.
#'
#' @details There are methods for the \code{tts}, \code{wt}, \code{wmf},
#' \code{wpmf}, \code{coh}, \code{wlm}, \code{wlmtest}, and \code{clust}
#' classes. See documentation for the generator functions for these classes
#' (which in all cases have the same name as the class) for lists of slots
#' for each class.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @examples
#' times<-1:10
#' timescales<-1/c(1:10)
#' values<-matrix(1,length(times),length(timescales))
#' h<-tts(times,timescales,values)
#' get_times(h)
#'
#' @name setget_methods
NULL
#' @rdname tts_methods
#' @export
summary.tts<-function(object,...)
{
x<-object
res<-list(class="tts",
times_start=x$times[1],
times_end=x$times[length(x$times)],
times_increment=x$times[2]-x$times[1],
timescale_start=x$timescales[1],
timescale_end=x$timescales[length(x$timescales)],
timescale_length=length(x$timescales))
#a summary_wsyn object inherits from the list class, but has its own print method, above
class(res)<-c("summary_wsyn","list")
return(res)
}
#' @rdname tts_methods
#' @export
print.tts<-function(x,...)
{
cat("tts object:\n")
cat("times, a length",length(x$times),"numeric vector:\n")
if (length(x$times)<12)
{
cat(paste(x$times),"\n")
}else
{
cat(paste(x$times[1:5]),"...",paste(x$times[(length(x$times)-4):(length(x$times))]),"\n")
}
cat("timescales, a length",length(x$timescales),"numeric vector:\n")
if (length(x$timescales)<12)
{
cat(paste(x$timescales),"\n")
}else
{
cat(paste(x$timescales[1:5]),"...",paste(x$timescales[(length(x$timescales)-4):(length(x$timescales))]),"\n")
}
if (length(x$timescales)<=5 && length(x$times)<=5)
{
cat("values, a",dim(x$values)[1],"by",dim(x$values)[2],"matrix, to four digits:\n")
print(round(x$values,4))
}else
{
cat("values, a",dim(x$values)[1],"by",dim(x$values)[2],"matrix, upper left to four digits is:\n")
print(round(x$values[1:5,1:5],4))
}
}
#' @rdname setget_methods
#' @export
set_times<-function(obj,newval)
{
UseMethod("set_times",obj)
}
#' @rdname setget_methods
#' @export
set_times.default<-function(obj,newval)
{
stop("Error in set_times: set_times not defined for this class")
}
#' @rdname tts_methods
#' @export
set_times.tts<-function(obj,newval)
{
  #per the documentation, individual slots of tts objects should not be altered
  stop("Error in set_times: times should not be altered for a tts object")
}
#' @rdname setget_methods
#' @export
set_timescales<-function(obj,newval)
{
UseMethod("set_timescales",obj)
}
#' @rdname setget_methods
#' @export
set_timescales.default<-function(obj,newval)
{
stop("Error in set_timescales: set_timescales not defined for this class")
}
#' @rdname tts_methods
#' @export
set_timescales.tts<-function(obj,newval)
{
  #per the documentation, individual slots of tts objects should not be altered
  stop("Error in set_timescales: timescales should not be altered for a tts object")
}
#' @rdname setget_methods
#' @export
set_values<-function(obj,newval)
{
UseMethod("set_values",obj)
}
#' @rdname setget_methods
#' @export
set_values.default<-function(obj,newval)
{
stop("Error in set_values: set_values not defined for this class")
}
#' @rdname tts_methods
#' @export
set_values.tts<-function(obj,newval)
{
  #per the documentation, individual slots of tts objects should not be altered
  stop("Error in set_values: values should not be altered for a tts object")
}
#' @rdname setget_methods
#' @export
get_times<-function(obj)
{
UseMethod("get_times",obj)
}
#' @rdname setget_methods
#' @export
get_times.default<-function(obj)
{
stop("Error in get_times: get_times not defined for this class")
}
#' @rdname tts_methods
#' @export
get_times.tts<-function(obj)
{
return(obj$times)
}
#' @rdname setget_methods
#' @export
get_timescales<-function(obj)
{
UseMethod("get_timescales",obj)
}
#' @rdname setget_methods
#' @export
get_timescales.default<-function(obj)
{
stop("Error in get_timescales: get_timescales not defined for this class")
}
#' @rdname tts_methods
#' @export
get_timescales.tts<-function(obj)
{
return(obj$timescales)
}
#' @rdname setget_methods
#' @export
get_values<-function(obj)
{
UseMethod("get_values",obj)
}
#' @rdname setget_methods
#' @export
get_values.default<-function(obj)
{
stop("Error in get_values: get_values not defined for this class")
}
#' @rdname tts_methods
#' @export
get_values.tts<-function(obj)
{
return(obj$values)
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/tts_methods.R |
#' Creates an array of wavelet transforms from input timeseries
#'
#' @param dat A locations (rows) x time (columns) matrix
#' @param times A vector of timestep values (e.g. years), spacing 1
#' @param scale.min The smallest scale of fluctuation that will be examined. At least 2.
#' @param scale.max.input The largest scale of fluctuation that will be examined. Note that if this is set too high
#' relative to the length of the timeseries it will be truncated.
#' @param sigma The ratio of each time scale examined relative to the next timescale. Greater than 1.
#' @param f0 The ratio of the period of fluctuation to the width of the envelope
#'
#' @return \code{warray} returns a list containing:
#' \item{wavarray}{locations x time x timescales array of wavelet transforms}
#' \item{times}{the time steps specified (e.g., years)}
#' \item{timescales}{the timescales (1/frequency) computed for the wavelet transforms}
#'
#' @note Important for interpreting the phase: the phases grow through time, i.e., they
#' turn anti-clockwise. This is an internal function; no error checking is done.
#'
#' @author Lauren Hallett, \email{hallett@@uoregon.edu}; Lawrence Sheppard, \email{lwsheppard@@ku.edu};
#' Daniel Reuman, \email{reuman@@ku.edu}
warray <- function(dat, times, scale.min=2, scale.max.input=NULL, sigma=1.05, f0 = 1)
{
# get timescales and do first transform
res1<-wt(dat[1,],times,scale.min,scale.max.input,sigma,f0)
timescales<-get_timescales(res1)
wavarray<-array(NA, dim = c(nrow(dat), ncol(dat), length(timescales)))
wavarray[1,,]<-get_values(res1)
# populate the array with wavelet transforms
if (nrow(dat)>=2)
{
for (i in 2:nrow(dat))
{
wavarray[i,,]<-get_values(wt(dat[i,], times, scale.min, scale.max.input, sigma, f0))
}
}
return(list(wavarray=wavarray,times=times,timescales=timescales))
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/warray.R |
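#A minimal usage sketch, kept as comments (the data here are hypothetical;
#inputs should be pre-cleaned, e.g., with cleandat, since warray does no
#error checking):
#dat<-cleandat(matrix(rnorm(5*100),5,100),times=1:100,clev=1)$cdat
#res<-warray(dat,times=1:100)
#dim(res$wavarray) #locations x time x timescales, here 5 x 100 x length(res$timescales)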
#' Facilitates the computations in synmat for coherence and ReXWT methods
#'
#' Worker/utility function serving the analysis carried out in synmat for methods based
#' on coherence or real part of the cross-wavelet transform.
#'
#' @param dat A locations (rows) x time (columns) matrix of measurements
#' @param times The times at which measurements were made, spacing 1
#' @param scale.min The smallest scale of fluctuation that will be examined. At least 2. Used
#' only for wavelet-based methods.
#' @param scale.max.input The largest scale of fluctuation guaranteed to be examined. Only used
#' for wavelet-based methods.
#' @param sigma The ratio of each time scale examined relative to the next timescale. Should be
#' greater than 1. Only used for wavelet-based methods.
#' @param f0 The ratio of the period of fluctuation to the width of the envelope. Only used for
#' wavelet-based methods.
#' @param norm The normalization of wavelet transforms to be used. One of "none", "phase",
#' "powind".
#' @param treatment Either "Mod" or "Re"
#'
#' @return \code{wavmatwork} returns a list consisting of:
#' \item{timescales}{The timescales of analysis}
#' \item{wavarray}{An array, locations by locations by timescales, containing either the
#' coherences (for \code{treatment="Mod"}) or the real parts of the cross-wavelet transforms
#' (for \code{treatment="Re"}) between locations.}
#'
#' @note Internal function, no error checking done.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
wavmatwork<-function(dat,times,scale.min,scale.max.input,sigma,f0,norm,treatment)
{
#basic setup
nlocs<-dim(dat)[1]
#prepare wavelet transforms and get timescales
wts<-warray(dat,times,scale.min,scale.max.input,sigma,f0)
timescales<-wts$timescales
wts<-wts$wavarray
wts<-normforcoh(wts,norm)
#get the array output
wavarray<-array(complex(real=NA,imaginary=NA),c(nlocs,nlocs,length(timescales)))
for (i in 2:nlocs)
{
for (j in 1:(i-1))
{
wavarray[i,j,]<-colMeans(wts[i,,]*Conj(wts[j,,]), na.rm=TRUE)
wavarray[j,i,]<-wavarray[i,j,]
}
}
#modulus or real part
if (treatment=="Mod")
{
wavarray<-Mod(wavarray)
}
if (treatment=="Re")
{
wavarray<-Re(wavarray)
}
return(list(timescales=timescales,wavarray=wavarray))
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/wavmatwork.R |
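#A minimal usage sketch, kept as comments (the data here are hypothetical).
#With treatment="Mod" the result holds coherences between pairs of locations:
#dat<-cleandat(matrix(rnorm(5*100),5,100),times=1:100,clev=1)$cdat
#res<-wavmatwork(dat,times=1:100,scale.min=2,scale.max.input=NULL,sigma=1.05,
#                f0=1,norm="powind",treatment="Mod")
#dim(res$wavarray) #locations x locations x timescales, here 5 x 5 x length(res$timescales)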
#' Wavelet linear models
#'
#' Fits wavelet linear models. Also the generator function of the \code{wlm} class, which
#' inherits from the \code{list} class.
#'
#' @param dat A list of matrices representing the data (or in the case of one location, a list of
#' vectors). All the same dimensions (respectively, lengths)
#' @param times The times at which measurements were made, spacing 1
#' @param resp Index in dat for the response variable of the model
#' @param pred Vector of indices in dat for the predictor variables of the model; must differ from \code{resp}
#' @param norm The normalization of wavelet transforms to use. One of "none", "powall", "powind". See details.
#' @param scale.min The smallest scale of fluctuation that will be examined. At least 2.
#' @param scale.max.input The largest scale of fluctuation that will be examined. Note that if this is set too high relative to the length of the timeseries it will be truncated.
#' @param sigma The ratio of each time scale examined relative to the next timescale. Greater than 1.
#' @param f0 The ratio of the period of fluctuation to the width of the envelope
#'
#' @return \code{wlm} returns an object of class \code{wlm}. Slots are:
#' \item{dat}{The input data list, but reordered and subsetted so the response is first and only used predictors are included}
#' \item{times}{The times associated with the data}
#' \item{norm}{The input}
#' \item{wtopt}{The inputted wavelet transform options scale.min, scale.max.input, sigma, f0 in a list}
#' \item{wts}{List of transforms, normalized as specified in \code{norm}. Same length as the output \code{dat}, each entry a locations x time x timescales array of transforms.}
#' \item{timescales}{The timescales associated with the wavelet transforms of the data}
#' \item{coefs}{A list (data frame, actually) of complex vectors, each of length the same
#' as \code{timescales}. These are the model coefficients (which depend on timescale), and
#' correspond to the \code{wts}.}
#' \item{modval}{The model values.}
#' \item{coher}{Appropriately normalized version of coherence of the model and response transforms. See details.}
#'
#' @details Normalization is as specified in the documentation for \code{coh}, HOWEVER, only
#' the "\code{powall}" option is currently implemented, other choices throw an error. Details
#' are specified in appendices S7 and S9 of Sheppard et al, 2018. The output \code{modval}
#' is v in appendix S7, and \code{coefs} are the betas in equation 12 in that appendix.
#'
#' @author Thomas Anderson, \email{anderstl@@gmail.com}, Jon Walter, \email{jaw3es@@virginia.edu}; Lawrence
#' Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Sheppard, LW et al. (2019) Synchrony is more than its top-down and climatic parts: interacting
#' Moran effects on phytoplankton in British seas. Plos Computational Biology 15, e1006744. doi: 10.1371/journal.pcbi.1006744
#'
#' @seealso \code{\link{wlm_methods}}, \code{\link{wlmtest}}, \code{\link{syncexpl}}, \code{\link{predsync}},
#' \code{browseVignettes("wsyn")}
#'
#' @examples
#' times<-1:30
#' dat<-list(v1=matrix(rnorm(300),10,30),v2=matrix(rnorm(300),10,30),v3=matrix(rnorm(300),10,30),
#' v4=matrix(rnorm(300),10,30),v5=matrix(rnorm(300),10,30))
#' dat<-lapply(FUN=function(x){cleandat(x,times,1)$cdat},X=dat)
#' resp<-2
#' pred<-c(1,3,4)
#' norm<-"powall"
#' res<-wlm(dat,times,resp,pred,norm)
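#' #fitted quantities can then be extracted with the accessors documented in
#' #wlm_methods, e.g.:
#' #get_coefs(res) #timescale-specific model coefficients
#' #get_coher(res) #coherence of the model and response transforms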
#'
#' @export
wlm<-function(dat,times,resp,pred,norm,scale.min=2,scale.max.input=NULL,sigma=1.05,f0=1)
{
#**error checking
errcheck_times(times,"wlm")
errcheck_wavparam(scale.min,scale.max.input,sigma,f0,times,"wlm")
if (!(norm %in% c("powall","powind","none")))
{
stop("Error in wlm: inappropriate value of norm")
}
if (norm %in% c("powind","none"))
{
stop("Error in wlm: this value of norm not implemented yet")
}
if (!inherits(dat,"list"))
{
stop("Error in wlm: dat must be a list")
}
if (!all(c(resp,pred) %in% 1:length(dat)))
{
stop("Error in wlm: resp and pred must be indices of dat")
}
if (length(resp)!=1)
{
stop("Error in wlm: you can only have one response")
}
if (length(pred)<1)
{
stop("Error in wlm: you must have at least one predictor")
}
if (resp %in% pred)
{
stop("Error in wlm: resp cannot be in pred")
}
#**rearrange dat according to resp and pred, getting the dat output
dat<-dat[c(resp,pred)]
#**more error checking
wasvect<-FALSE
if (is.matrix(dat[[1]]) && dim(dat[[1]])[1]>1)
{
d<-dim(dat[[1]])
for (counter in 1:length(dat))
{
if (!isTRUE(all.equal(dim(dat[[counter]]),d)))
{
stop("Error in wlm: all data matrices must be the same dimensions")
}
errcheck_stdat(times,dat[[counter]],"wlm")
}
}else
{
if (!is.matrix(dat[[1]])){wasvect<-TRUE}
for (counter in 1:length(dat))
{
errcheck_tsdat(times,as.vector(dat[[counter]]),"wlm")
dat[[counter]]<-matrix(dat[[counter]],1,length(times))
}
}
#**get wavelet transforms and normalize, getting the wts output
wts<-lapply(FUN=warray,X=dat,times=times,scale.min=scale.min,
scale.max.input=scale.max.input,sigma=sigma,f0=f0)
timescales<-wts[[1]]$timescales
wts<-lapply(X=wts,FUN=function(x){normforcoh(x$wavarray,norm)})
#**do the fitting by call to the internal wlmfit, getting coefs, modval, coher
h<-wlmfit(wts,norm)
coefs<-h$coefs
names(coefs)<-names(dat)[2:length(dat)]
#**prepare result
if (wasvect){dat<-lapply(FUN=as.vector,X=dat)}
wtopt<-list(scale.min=scale.min,scale.max.input=scale.max.input,
sigma=sigma,f0=f0)
result<-list(dat=dat,times=times,norm=norm,wtopt=wtopt,
wts=wts,timescales=timescales,
coefs=coefs,modval=h$modval,coher=h$coher)
class(result)<-c("wlm","list")
return(result)
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/wlm.R |
#' Basic methods for the \code{wlm} class
#'
#' Set, get, summary, and print methods for the \code{wlm} class.
#'
#' @param object,x,obj An object of class \code{wlm}
#' @param newval A new value, for the \code{set_*} methods
#' @param ... Not currently used. Included for argument consistency
#' with existing generics.
#'
#' @return \code{summary.wlm} produces a summary of a \code{wlm} object.
#' A \code{print.wlm} method is also available. For \code{wlm} objects,
#' \code{set_*} and \code{get_*} methods are available for all slots (see
#' the documentation for \code{wlm} for a list). The \code{set_*} methods
#' just throw an error, to prevent breaking the consistency between the
#' slots of a \code{wlm} object.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{wlm}}
#'
#' @examples
#' times<-1:30
#' dat<-list(v1=matrix(rnorm(300),10,30),v2=matrix(rnorm(300),10,30),v3=matrix(rnorm(300),10,30),
#' v4=matrix(rnorm(300),10,30),v5=matrix(rnorm(300),10,30))
#' dat<-lapply(FUN=function(x){cleandat(x,times,1)$cdat},X=dat)
#' resp<-2
#' pred<-c(1,3,4)
#' norm<-"powall"
#' h<-wlm(dat,times,resp,pred,norm)
#' get_times(h)
#' summary(h)
#' print(h)
#'
#' @name wlm_methods
NULL
#> NULL
#' @rdname wlm_methods
#' @export
summary.wlm<-function(object,...)
{
x<-object
h<-x$wtopt$scale.max.input
if (is.null(h)){h<-"NULL"}
regform<-paste0(names(x$dat)[1],"~")
for (counter in 2:length(x$dat))
{
regform<-paste0(regform,names(x$dat)[counter])
if (counter<length(x$dat))
{
regform<-paste0(regform,"+")
}
}
res<-list(class="wlm",
times_start=x$times[1],
times_end=x$times[length(x$times)],
times_increment=x$times[2]-x$times[1],
sampling_locs=dim(x$dat[[1]])[1],
timescale_start=x$timescales[1],
timescale_end=x$timescales[length(x$timescales)],
timescale_length=length(x$timescales),
wavelet_regression=regform,
normalization=x$norm,
scale.min=x$wtopt$scale.min,
scale.max.input=h,
sigma=x$wtopt$sigma,
f0=x$wtopt$f0)
#a summary_wsyn object inherits from the list class, but has its own print method, above
class(res)<-c("summary_wsyn","list")
return(res)
}
#' @rdname wlm_methods
#' @export
print.wlm<-function(x,...)
{
cat("wlm object:\n")
cat("times, a length",length(x$times),"numeric vector:\n")
if (length(x$times)<12)
{
cat(paste(x$times),"\n")
}else
{
cat(paste(x$times[1:5]),"...",paste(x$times[(length(x$times)-4):(length(x$times))]),"\n")
}
cat("Number of sampling locations:",dim(x$dat[[1]])[1],"\n")
cat("timescales, a length",length(x$timescales),"numeric vector:\n")
if (length(x$timescales)<12)
{
cat(paste(x$timescales),"\n")
}else
{
cat(paste(x$timescales[1:5]),"...",paste(x$timescales[(length(x$timescales)-4):(length(x$timescales))]),"\n")
}
regform<-paste0(names(x$dat)[1],"~")
for (counter in 2:length(x$dat))
{
regform<-paste0(regform,names(x$dat)[counter])
if (counter<length(x$dat))
{
regform<-paste0(regform,"+")
}
}
cat("The wavelet regression:",regform,"\n")
cat("norm, the normalization used:",x$norm,"\n")
w<-x$wtopt
if (is.null(w$scale.max.input)){w$scale.max.input<-"NULL"}
cat("wtopt: scale.min=",w$scale.min,"; scale.max.input=",w$scale.max.input,"; sigma=",w$sigma,"; f0=",w$f0,"\n",sep="")
}
#' @rdname wlm_methods
#' @export
set_times.wlm<-function(obj,newval)
{
stop("Error in set_times: times should not be altered for a wlm object")
}
#' @rdname wlm_methods
#' @export
set_timescales.wlm<-function(obj,newval)
{
stop("Error in set_timescales: timescales should not be altered for a wlm object")
}
#' @rdname setget_methods
#' @export
set_coefs<-function(obj,newval)
{
UseMethod("set_coefs",obj)
}
#' @rdname setget_methods
#' @export
set_coefs.default<-function(obj,newval)
{
stop("Error in set_coefs: set_coefs not defined for this class")
}
#' @rdname wlm_methods
#' @export
set_coefs.wlm<-function(obj,newval)
{
stop("Error in set_coefs: coefs should not be altered for a wlm object")
}
#' @rdname setget_methods
#' @export
set_modval<-function(obj,newval)
{
UseMethod("set_modval",obj)
}
#' @rdname setget_methods
#' @export
set_modval.default<-function(obj,newval)
{
stop("Error in set_modval: set_modval not defined for this class")
}
#' @rdname wlm_methods
#' @export
set_modval.wlm<-function(obj,newval)
{
stop("Error in set_modval: modval should not be altered for a wlm object")
}
#' @rdname wlm_methods
#' @export
set_coher.wlm<-function(obj,newval)
{
stop("Error in set_coher: coher should not be altered for a wlm object")
}
#' @rdname wlm_methods
#' @export
set_dat.wlm<-function(obj,newval)
{
stop("Error in set_dat: dat should not be altered for a wlm object")
}
#' @rdname wlm_methods
#' @export
set_wtopt.wlm<-function(obj,newval)
{
stop("Error in set_wtopt: wtopt should not be altered for a wlm object")
}
#' @rdname wlm_methods
#' @export
set_norm.wlm<-function(obj,newval)
{
stop("Error in set_norm: norm should not be altered for a wlm object")
}
#' @rdname setget_methods
#' @export
set_wts<-function(obj,newval)
{
UseMethod("set_wts",obj)
}
#' @rdname setget_methods
#' @export
set_wts.default<-function(obj,newval)
{
stop("Error in set_wts: set_wts not defined for this class")
}
#' @rdname wlm_methods
#' @export
set_wts.wlm<-function(obj,newval)
{
stop("Error in set_wts: wts should not be altered for a wlm object")
}
#' @rdname wlm_methods
#' @export
get_times.wlm<-function(obj)
{
return(obj$times)
}
#' @rdname wlm_methods
#' @export
get_timescales.wlm<-function(obj)
{
return(obj$timescales)
}
#' @rdname setget_methods
#' @export
get_coefs<-function(obj)
{
UseMethod("get_coefs",obj)
}
#' @rdname setget_methods
#' @export
get_coefs.default<-function(obj)
{
stop("Error in get_coefs: get_coefs not defined for this class")
}
#' @rdname wlm_methods
#' @export
get_coefs.wlm<-function(obj)
{
return(obj$coefs)
}
#' @rdname setget_methods
#' @export
get_modval<-function(obj)
{
UseMethod("get_modval",obj)
}
#' @rdname setget_methods
#' @export
get_modval.default<-function(obj)
{
stop("Error in get_modval: get_modval not defined for this class")
}
#' @rdname wlm_methods
#' @export
get_modval.wlm<-function(obj)
{
return(obj$modval)
}
#' @rdname wlm_methods
#' @export
get_coher.wlm<-function(obj)
{
return(obj$coher)
}
#' @rdname wlm_methods
#' @export
get_dat.wlm<-function(obj)
{
return(obj$dat)
}
#' @rdname wlm_methods
#' @export
get_wtopt.wlm<-function(obj)
{
return(obj$wtopt)
}
#' @rdname wlm_methods
#' @export
get_norm.wlm<-function(obj)
{
return(obj$norm)
}
#' @rdname setget_methods
#' @export
get_wts<-function(obj)
{
UseMethod("get_wts",obj)
}
#' @rdname setget_methods
#' @export
get_wts.default<-function(obj)
{
stop("Error in get_wts: get_wts not defined for this class")
}
#' @rdname wlm_methods
#' @export
get_wts.wlm<-function(obj)
{
return(obj$wts)
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/wlm_methods.R |
#' Fits a wavelet linear model
#'
#' Stripped down internal function for doing the fitting
#'
#' @param wts List of normalized transforms, normalized as specified in \code{norm}. Each entry a locations x time x timescales array of transforms. The first is the response variable, others are the predictors.
#' @param norm The normalization that was used. One of "none", "powall", "powind". See details.
#'
#' @return \code{wlmfit} returns a list with these elements:
#' \item{coefs}{Model coefficients}
#' \item{modval}{The right hand side of the model}
#' \item{coher}{Appropriately normalized coherence of the model and response variable}
#'
#' @details Only \code{norm="powall"} works at present; other options throw an error.
#'
#' @note Internal function, no error checking done.
#'
#' @author Thomas Anderson, \email{anderstl@@gmail.com}, Jon Walter, \email{jaw3es@@virginia.edu}; Lawrence
#' Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Sheppard, LW et al. (2019) Synchrony is more than its top-down and climatic parts: interacting
#' Moran effects on phytoplankton in British seas. Plos Computational Biology 15, e1006744. doi: 10.1371/journal.pcbi.1006744
wlmfit<-function(wts,norm)
{
  #This error check is temporary; it will be removed when the other
  #normalizations are added
if (norm!="powall")
{
stop("Error in wlmfit: only the powall option for norm is implemented so far")
}
#setup - wts[[i]] is locs by times by timescales
N<-dim(wts[[1]])[1] #number of locations
Ti<-dim(wts[[1]])[2] #length of time series
V<-length(wts)-1 #number of predictor variables
lents<-dim(wts[[1]])[3] #number of timescales
#do the fitting
coefs<-as.data.frame(matrix(NA*numeric(1),lents,V))
X<-matrix(complex(real=NA,imaginary=NA),N*Ti,V)
for (tscounter in 1:lents)
{
#make the design matrix
for (vcounter in 1:V)
{
X[,vcounter]<-as.vector(wts[[vcounter+1]][,,tscounter])
}
#make the response variable
y<-as.vector(wts[[1]][,,tscounter])
#get rid of non-finite entries and do the qr.solve call
inds<-which(is.finite(y))
tsres<-qr.solve(X[inds,],y[inds])
#store the result in the desired format
coefs[tscounter,]<-tsres
}
#get modvals and coher
modval<-0*wts[[1]] #holder for the model, right hand side of regression equation
for (vcounter in 1:V)
{
modval<-modval+wts[[vcounter+1]]*array(rep(coefs[,vcounter],each=N*Ti),dim=dim(wts[[1]]))
}
coher<-apply(X=wts[[1]]*Conj(normforcoh(modval,norm)),FUN=mean,MARGIN=3,na.rm=TRUE)
return(list(coefs=coefs,modval=modval,coher=coher))
}
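#At each timescale the model is fit by complex least squares; a minimal
#illustration of that step, kept as comments (X and y are hypothetical
#stand-ins for the vectorized transforms at one timescale):
#X<-matrix(complex(real=rnorm(20),imaginary=rnorm(20)),10,2)
#beta<-complex(real=c(1,-2),imaginary=c(0.5,0))
#y<-X%*%beta
#qr.solve(X,y) #recovers beta, as in the qr.solve call above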
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/wlmfit.R |
#' Statistical comparison of wavelet linear models
#'
#' Compares a wavelet linear model with a nested model. Also the generator function for
#' the \code{wlmtest} class.
#'
#' @param wlmobj A \code{wlm} object
#' @param drop Either names or indices of variables in \code{wlmobj$dat} that are being
#' dropped to form the simpler, nested model. The first variable in \code{wlmobj$dat},
#' which is the response, is not allowed here.
#' @param sigmethod Method for significance testing. One of "\code{fft}", "\code{aaft}", "\code{fast}". See details.
#' @param nrand The number of randomizations to do for significance
#'
#' @return \code{wlmtest} returns an object of class \code{wlmtest}. Slots are:
#' \item{wlmobj}{The input}
#' \item{drop}{The input}
#' \item{signif}{A list with information from the significance testing. Elements are
#' \code{sigmethod} (the input), \code{coher} and \code{scoher}. See details.}
#' \item{ranks}{A list with ranking information for \code{signif}. \code{NA} until
#' \code{plotrank} or \code{bandtest} is called.}
#' \item{bandp}{A data frame containing results of computing significances across
#' timescale bands. Empty on an initial call to \code{wlmtest}, filled in by the function
#' \code{bandtest}. See details.}
#'
#' @details The slot \code{signif} provides the core information on significance.
#' If \code{sigmethod} is not "\code{fast}", then \code{signif$coher} is the same as
#' \code{wlmobj$coher}, and \code{signif$scoher} is a matrix of dimensions \code{nrand} by
#' \code{length(signif$coher)} with rows equal to coherences between refitted models and the
#' response-variable transforms, for datasets where the variables specified in \code{drop} have
#' been replaced by surrogates. Normalization as specified in \code{norm} is used. The type
#' of surrogate used (Fourier surrogates or amplitude adjusted Fourier surrogates, see
#' \code{surrog}) is determined by \code{sigmethod} ("\code{fft}" or "\code{aaft}").
#' Synchrony-preserving surrogates are used. A variety of statements of significance (or lack
#' thereof) can be made by comparing \code{signif$coher} with \code{signif$scoher} (see the
#' \code{plotmag}, \code{plotrank}, and \code{bandtest} methods
#' for the \code{wlmtest} class). If \code{sigmethod} is
#' "\code{fast}", a fast algorithm of Lawrence Sheppard is used which is a generalization
#' to wavelet linear models of the fast algorithm for coherence described in Sheppard et al (2017).
#' In that case
#' \code{signif$coher} can be compared to \code{signif$scoher} to make significance
#' statements about the coherence in exactly the same way, but \code{signif$coher} will no
#' longer precisely equal \code{wlmobj$coher}, and \code{wlmobj$coher} should not be compared
#' directly to \code{signif$scoher}. Statements about significance of the coherence
#' should be made using \code{signif$coher} and \code{signif$scoher}, whereas \code{wlmobj$coher}
#' should be used whenever the actual value of the coherence is needed.
#'
#' The slots \code{ranks} and \code{bandp} are empty on an initial call to \code{wlmtest}.
#' They are intended to hold
#' aggregate significance results over any timescale band of choice. These are filled in
#' when needed by other methods; see \code{plotrank} and \code{bandtest}.
#'
#' @author Thomas Anderson, \email{anderstl@@gmail.com}, Jon Walter, \email{jaw3es@@virginia.edu}; Lawrence
#' Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Sheppard, L.W., et al. (2016) Changes in large-scale climate alter spatial synchrony of aphid
#' pests. Nature Climate Change. DOI: 10.1038/nclimate2881
#'
#' Sheppard, L.W., et al. (2017) Rapid surrogate testing of wavelet coherences. European Physical
#' Journal, Nonlinear and Biomedical Physics, 5, 1. DOI: 10.1051/epjnbp/2017000
#'
#' Sheppard, LW et al. (2019) Synchrony is more than its top-down and climatic parts: interacting
#' Moran effects on phytoplankton in British seas. Plos Computational Biology 15, e1006744. doi: 10.1371/journal.pcbi.1006744
#'
#' @seealso \code{\link{wlm}}, \code{\link{plotrank}}, \code{\link{bandtest}}, \code{\link{coh}},
#' \code{\link{wlmtest_methods}}, \code{browseVignettes("wsyn")}
#'
#' @examples
#' times<-1:30
#' dat<-list(v1=matrix(rnorm(300),10,30),v2=matrix(rnorm(300),10,30),v3=matrix(rnorm(300),10,30),
#' v4=matrix(rnorm(300),10,30),v5=matrix(rnorm(300),10,30))
#' dat<-lapply(FUN=function(x){cleandat(x,times,1)$cdat},X=dat)
#' resp<-1
#' pred<-2:3
#' norm<-"powall"
#' wlmobj<-wlm(dat,times,resp,pred,norm)
#' drop<-3
#' sigmethod<-"fft"
#' res<-wlmtest(wlmobj,drop,sigmethod,nrand=10)
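#' #significance can then be assessed by comparing the empirical and surrogate
#' #coherences, e.g. (kept as comments to limit runtime):
#' #Mod(res$signif$coher)
#' #apply(Mod(res$signif$scoher),2,quantile,probs=0.95)
#' #or via the plotmag, plotrank and bandtest methods (see those help files)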
#'
#' @export
wlmtest<-function(wlmobj,drop,sigmethod,nrand=1000)
{
#**error checking
if (wlmobj$norm!="powall")
{ #we can assume norm is powall, powind or none, since it is in a wlm object
#this error check will be removed when the other options are implemented
stop("Error in wlmtest: this value of norm not implemented yet")
}
if (!(all(drop %in% 2:length(wlmobj$dat)) ||
all(drop %in% names(wlmobj$dat)[2:length(wlmobj$dat)])))
{
stop("Error in wlmtest: drop must contain names or indices of predictors used in fitting wlmobj")
}
if (length(unique(drop))!=length(drop))
{
stop("Error in wlmtest: drop must not have repeat entries")
}
if (!(sigmethod %in% c("fft","aaft","fast")))
{
stop("Error in wlmtest: bad value for sigmethod")
}
#**convert drop from names to indices if necessary
origdrop<-drop
if (all(drop %in% names(wlmobj$dat)[2:length(wlmobj$dat)]))
{
drop<-which(names(wlmobj$dat)[2:length(wlmobj$dat)] %in% drop)+1
}
#**fast algorithm
if (sigmethod=="fast")
{
stop("Error in wlmtest: fast algorithm not implemented yet")
#***DAN: fill in
    #***Don't forget to include sigmethod in signif
#prepare result
result<-list(wlmobj=wlmobj,drop=origdrop,signif=signif,ranks=NA,bandp=NA)
class(result)<-c("wlmtest","list")
return(result)
}
#**slow algorithm
#*get joint surrogates for dropped variables
cddat<-wlmobj$dat[[drop[1]]]
if (length(drop)>1)
{
for (counter in 2:length(drop))
{ #put the data together to get joint surrogates
cddat<-rbind(cddat,wlmobj$dat[[drop[counter]]])
}
}
scddat<-surrog(cddat,nrand,sigmethod,TRUE)
#*do transforms
wscddat<-lapply(FUN=function(x)
{
warray(x,times=wlmobj$times,
scale.min=wlmobj$wtopt$scale.min,
scale.max.input=wlmobj$wtopt$scale.max.input,
sigma=wlmobj$wtopt$sigma,f0=wlmobj$wtopt$f0)$wavarray
},X=scddat)
#*refit for each surrogate, keeping coherences
coher<-wlmobj$coher
scoher<-matrix(NA*complex(real=length(coher)*nrand,
imaginary=length(coher)*nrand),
nrand,length(coher))
wts<-wlmobj$wts
for (scounter in 1:nrand)
{
#replace the appropriate entries of wts by surrogate transforms
for (dcounter in 1:length(drop))
{
st<-(dcounter-1)*dim(wlmobj$dat[[1]])[1]+1
en<-dcounter*dim(wlmobj$dat[[1]])[1]
wts[[drop[dcounter]]]<-normforcoh(wscddat[[scounter]][st:en,,],wlmobj$norm)
}
#refit and just keep the coher result
scoher[scounter,]<-wlmfit(wts,wlmobj$norm)$coher
}
signif<-list(sigmethod=sigmethod,coher=coher,scoher=scoher)
#prepare result
result<-list(wlmobj=wlmobj,drop=origdrop,signif=signif,ranks=NA,bandp=NA)
class(result)<-c("wlmtest","list")
return(result)
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/wlmtest.R |
#' Basic methods for the \code{wlmtest} class
#'
#' Set, get, summary, and print methods for the \code{wlmtest} class.
#'
#' @param object,x,obj An object of class \code{wlmtest}
#' @param newval A new value, for the \code{set_*} methods
#' @param ... Not currently used. Included for argument consistency
#' with existing generics.
#'
#' @return \code{summary.wlmtest} produces a summary of a \code{wlmtest} object.
#' A \code{print.wlmtest} method is also available. For \code{wlmtest} objects,
#' \code{set_*} and \code{get_*} methods are available for all slots (see
#' the documentation for \code{wlmtest} for a list). The \code{set_*} methods
#' just throw an error, to prevent breaking the consistency between the
#' slots of a \code{wlmtest} object.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{wlmtest}}
#'
#' @examples
#' times<-1:30
#' dat<-list(v1=matrix(rnorm(300),10,30),v2=matrix(rnorm(300),10,30),v3=matrix(rnorm(300),10,30),
#' v4=matrix(rnorm(300),10,30),v5=matrix(rnorm(300),10,30))
#' dat<-lapply(FUN=function(x){cleandat(x,times,1)$cdat},X=dat)
#' resp<-1
#' pred<-2:3
#' norm<-"powall"
#' wlmobj<-wlm(dat,times,resp,pred,norm)
#' drop<-3
#' sigmethod<-"fft"
#' h<-wlmtest(wlmobj,drop,sigmethod,nrand=10)
#' get_times(get_wlmobj(h))
#' summary(h)
#' print(h)
#'
#' @name wlmtest_methods
NULL
#> NULL
#' @rdname wlmtest_methods
#' @export
summary.wlmtest<-function(object,...)
{
x<-object
h<-x$wlmobj$wtopt$scale.max.input
if (is.null(h)){h<-"NULL"}
regform<-paste0(names(x$wlmobj$dat)[1],"~")
for (counter in 2:length(x$wlmobj$dat))
{
regform<-paste0(regform,names(x$wlmobj$dat)[counter])
if (counter<length(x$wlmobj$dat))
{
regform<-paste0(regform,"+")
}
}
#whether the ranks slot is full
if (inherits(x$ranks,"list"))
{
h2<-"filled"
}else
{
h2<-"empty"
}
if (is.numeric(x$drop))
{
h3<-names(x$wlmobj$dat)[x$drop]
}else
{
h3<-x$drop
}
res<-list(class="wlmtest",
times_start=x$wlmobj$times[1],
times_end=x$wlmobj$times[length(x$wlmobj$times)],
times_increment=x$wlmobj$times[2]-x$wlmobj$times[1],
sampling_locs=dim(x$wlmobj$dat[[1]])[1],
timescale_start=x$wlmobj$timescales[1],
timescale_end=x$wlmobj$timescales[length(x$wlmobj$timescales)],
timescale_length=length(x$wlmobj$timescales),
orig_wavelet_regression=regform,
predictors_dropped=h3,
normalization=x$wlmobj$norm,
sigmethod=x$signif$sigmethod,
nsurrogs=dim(x$signif$scoher)[1],
scale.min=x$wlmobj$wtopt$scale.min,
scale.max.input=h,
sigma=x$wlmobj$wtopt$sigma,
f0=x$wlmobj$wtopt$f0,
ranks_slot_is=h2)
#a summary_wsyn object inherits from the list class, but has its own print method, above
class(res)<-c("summary_wsyn","list")
return(res)
}
#' @rdname wlmtest_methods
#' @export
print.wlmtest<-function(x,...)
{
cat("wlmtest object:\n")
cat("wlmobj$times, a length",length(x$wlmobj$times),"numeric vector:\n")
if (length(x$wlmobj$times)<12)
{
cat(paste(x$wlmobj$times),"\n")
}else
{
cat(paste(x$wlmobj$times[1:5]),"...",paste(x$wlmobj$times[(length(x$wlmobj$times)-4):(length(x$wlmobj$times))]),"\n")
}
cat("Number of sampling locations:",dim(x$wlmobj$dat[[1]])[1],"\n")
cat("wlmobj$timescales, a length",length(x$wlmobj$timescales),"numeric vector:\n")
if (length(x$wlmobj$timescales)<12)
{
cat(paste(x$wlmobj$timescales),"\n")
}else
{
cat(paste(x$wlmobj$timescales[1:5]),"...",paste(x$wlmobj$timescales[(length(x$wlmobj$timescales)-4):(length(x$wlmobj$timescales))]),"\n")
}
regform<-paste0(names(x$wlmobj$dat)[1],"~")
for (counter in 2:length(x$wlmobj$dat))
{
regform<-paste0(regform,names(x$wlmobj$dat)[counter])
if (counter<length(x$wlmobj$dat))
{
regform<-paste0(regform,"+")
}
}
cat("The original wavelet regression:",regform,"\n")
if (is.numeric(x$drop))
{
cat("The indices in wlmobj$dat of predictors dropped:",paste(x$drop),"\n")
}else
{
cat("The names of predictors dropped:",paste(x$drop),"\n")
}
cat("wlmobj$norm, the normalization used:",x$wlmobj$norm,"\n")
cat("sigmethod, the type of significance testing used:",x$signif$sigmethod,"\n")
cat("Number of surrogates:",dim(x$signif$scoher)[1],"\n")
w<-x$wlmobj$wtopt
if (is.null(w$scale.max.input)){w$scale.max.input<-"NULL"}
cat("wtopt: scale.min=",w$scale.min,"; scale.max.input=",w$scale.max.input,"; sigma=",w$sigma,"; f0=",w$f0,"\n",sep="")
if (inherits(x$ranks,"list"))
{
cat("The ranks slot is: filled\n")
}else
{
cat("The ranks slot is: empty\n")
}
if (inherits(x$bandp,"data.frame"))
{
cat("Timescale bands tested in bandp slot:\n")
h<-print(x$bandp[,c(1,2)])
}else
{
cat("Timescale bands tested in bandp slot: none")
}
}
#' @rdname setget_methods
#' @export
set_wlmobj<-function(obj,newval)
{
UseMethod("set_wlmobj",obj)
}
#' @rdname setget_methods
#' @export
set_wlmobj.default<-function(obj,newval)
{
stop("Error in set_wlmobj: set_wlmobj not defined for this class")
}
#' @rdname wlmtest_methods
#' @export
set_wlmobj.wlmtest<-function(obj,newval)
{
stop("Error in set_wlmobj: wlmobj should not be altered for a wlmtest object")
}
#' @rdname setget_methods
#' @export
set_drop<-function(obj,newval)
{
UseMethod("set_drop",obj)
}
#' @rdname setget_methods
#' @export
set_drop.default<-function(obj,newval)
{
stop("Error in set_drop: set_drop not defined for this class")
}
#' @rdname wlmtest_methods
#' @export
set_drop.wlmtest<-function(obj,newval)
{
stop("Error in set_drop:drop should not be altered for a wlmtest object")
}
#' @rdname wlmtest_methods
#' @export
set_signif.wlmtest<-function(obj,newval)
{
stop("Error in set_signif: signif should not be altered for a wlmtest object")
}
#' @rdname wlmtest_methods
#' @export
set_ranks.wlmtest<-function(obj,newval)
{
stop("Error in set_ranks: ranks should not be altered for a wlmtest object")
}
#' @rdname wlmtest_methods
#' @export
set_bandp.wlmtest<-function(obj,newval)
{
stop("Error in set_bandp: bandp should not be altered for a wlmtest object")
}
#' @rdname setget_methods
#' @export
get_wlmobj<-function(obj)
{
UseMethod("get_wlmobj",obj)
}
#' @rdname setget_methods
#' @export
get_wlmobj.default<-function(obj)
{
stop("Error in get_wlmobj: get_wlmobj not defined for this class")
}
#' @rdname wlmtest_methods
#' @export
get_wlmobj.wlmtest<-function(obj)
{
return(obj$wlmobj)
}
#' @rdname setget_methods
#' @export
get_drop<-function(obj)
{
UseMethod("get_drop",obj)
}
#' @rdname setget_methods
#' @export
get_drop.default<-function(obj)
{
stop("Error in get_drop: get_drop not defined for this class")
}
#' @rdname wlmtest_methods
#' @export
get_drop.wlmtest<-function(obj)
{
return(obj$drop)
}
#' @rdname wlmtest_methods
#' @export
get_signif.wlmtest<-function(obj)
{
return(obj$signif)
}
#' @rdname wlmtest_methods
#' @export
get_ranks.wlmtest<-function(obj)
{
return(obj$ranks)
}
#' @rdname wlmtest_methods
#' @export
get_bandp.wlmtest<-function(obj)
{
return(obj$bandp)
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/wlmtest_methods.R |
#' Computes the wavelet mean field from a matrix of spatiotemporal data. Also the
#' creator function for the \code{wmf} class.
#'
#' Computes the wavelet mean field from a matrix of spatiotemporal data. Also the
#' creator function for the \code{wmf} class. The \code{wmf} class inherits from the
#' \code{tts} class, which inherits from the \code{list} class.
#'
#' @param dat A locations (rows) x time (columns) matrix
#' @param times A vector of time step values (e.g., years), spacing 1
#' @param scale.min The smallest scale of fluctuation that will be examined. At least 2.
#' @param scale.max.input The largest scale of fluctuation that will be examined. Note that if this is set too high relative to the length of the timeseries it will be truncated.
#' @param sigma The ratio of each time scale examined relative to the next timescale. Greater than 1.
#' @param f0 The ratio of the period of fluctuation to the width of the envelope
#'
#' @return \code{wmf} returns an object of class \code{wmf}. Slots are:
#' \item{values}{A matrix of complex numbers containing the wavelet mean field, of dimensions \code{length(times)}
#' by the number of timescales. Entries not considered reliable (longer timescales, near the edges of the time span)
#' are set to NA.}
#' \item{times}{The time steps specified (e.g., years)}
#' \item{timescales}{The timescales (1/frequency) computed for the wavelet transforms}
#' \item{dat}{The data matrix (locations by time) from which the wmf was computed}
#' \item{wtopt}{The inputted wavelet transform options scale.min, scale.max.input, sigma, f0 in a list}
#'
#' @author Jonathan Walter, \email{jaw3es@@virginia.edu}; Lawrence Sheppard, \email{lwsheppard@@ku.edu};
#' Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references Sheppard, L.W., et al. (2016) Changes in large-scale climate alter spatial synchrony of aphid pests.
#' Nature Climate Change. DOI: 10.1038/nclimate2881
#'
#' @seealso \code{\link{wmf_methods}}, \code{\link{tts}}, \code{\link{wpmf}}, \code{\link{plotmag}},
#' \code{browseVignettes("wsyn")}
#'
#' @examples
#' times<-1:30 #generate time steps
#' #generate fake count data for 20 locations
#' dat<-matrix(rpois(20*length(times),20),nrow=20,ncol=length(times))
#' dat<-cleandat(dat=dat,times=times,clev=2)$cdat #detrend and demean
#' wmf<-wmf(dat,times)
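#' #the wavelet mean field can then be plotted, e.g.:
#' #plotmag(wmf) #see the plotmag help file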
#'
#' @export
wmf<-function(dat, times, scale.min=2, scale.max.input=NULL, sigma=1.05, f0 = 1){
#check suitability of data
errcheck_stdat(times,dat,"wmf")
errcheck_wavparam(scale.min,scale.max.input,sigma,f0,times,"wmf")
#for return
wtopt<-list(scale.min=scale.min,scale.max.input=scale.max.input,
sigma=sigma,f0=f0)
#do all the transforms
wavarray<-warray(dat, times, scale.min, scale.max.input, sigma, f0)
timescales<-wavarray$timescales
wavarray<-wavarray$wavarray
  #Get the squared modulus, then average over time and location and take the
  #square root to get the denominator for normalization. Then normalize each
  #timescale by the value of the normalization denominator for that timescale.
wavarray<-normforcoh(wavarray,"powall")
#get the wmf by averaging across space
  wmf<-apply(wavarray, c(2,3), mean, na.rm=TRUE)
#prepare the result
errcheck_tts(times,timescales,wmf,"wmf")
result<-list(values=wmf,times=times,timescales=timescales,dat=dat,wtopt=wtopt)
class(result)<-c("wmf","tts","list")
return(result)
} | /scratch/gouwar.j/cran-all/cranData/wsyn/R/wmf.R |
#' Basic methods for the \code{wmf} class
#'
#' Set, get, summary, and print methods for the \code{wmf} class.
#'
#' @param object,x,obj An object of class \code{wmf}
#' @param newval A new value, for the \code{set_*} methods
#' @param ... Not currently used. Included for argument consistency
#' with existing generics.
#'
#' @return \code{summary.wmf} produces a summary of a \code{wmf} object.
#' A \code{print.wmf} method is also available. For \code{wmf} objects,
#' \code{set_*} and \code{get_*} methods are available for all slots,
#' i.e., \code{*} equal to \code{times}, \code{timescales}, \code{wtopt},
#' \code{values}, and \code{dat}. The \code{set_*} methods just throw an
#' error, to prevent breaking the consistency between the slots of a
#' \code{wmf} object.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{wmf}}
#'
#' @examples
#' times<-1:30 #generate time steps
#' #generate fake count data for 20 locations
#' dat<-matrix(rpois(20*length(times),20),nrow=20,ncol=length(times))
#' dat<-cleandat(dat=dat,times=times,clev=2)$cdat #detrend and demean
#' h<-wmf(dat,times)
#' get_times(h)
#' summary(h)
#' print(h)
#'
#' @name wmf_methods
NULL
#> NULL
#' @rdname wmf_methods
#' @export
summary.wmf<-function(object,...)
{
x<-object
h<-x$wtopt$scale.max.input
if (is.null(h)){h<-"NULL"}
res<-list(class="wmf",
times_start=x$times[1],
times_end=x$times[length(x$times)],
times_increment=x$times[2]-x$times[1],
sampling_locs=dim(x$dat)[1],
timescale_start=x$timescales[1],
timescale_end=x$timescales[length(x$timescales)],
timescale_length=length(x$timescales),
scale.min=x$wtopt$scale.min,
scale.max.input=h,
sigma=x$wtopt$sigma,
f0=x$wtopt$f0)
#a summary_wsyn object inherits from the list class, but has its own print method, above
class(res)<-c("summary_wsyn","list")
return(res)
}
#' @rdname wmf_methods
#' @export
print.wmf<-function(x,...)
{
cat("wmf object:\n")
cat("times, a length",length(x$times),"numeric vector:\n")
if (length(x$times)<12)
{
cat(paste(x$times),"\n")
}else
{
cat(paste(x$times[1:5]),"...",paste(x$times[(length(x$times)-4):(length(x$times))]),"\n")
}
cat("Number of sampling locations:",dim(x$dat)[1],"\n")
cat("timescales, a length",length(x$timescales),"numeric vector:\n")
if (length(x$timescales)<12)
{
cat(paste(x$timescales),"\n")
}else
{
cat(paste(x$timescales[1:5]),"...",paste(x$timescales[(length(x$timescales)-4):(length(x$timescales))]),"\n")
}
if (length(x$timescales)<=5 && length(x$times)<=5)
{
cat("values, a",dim(x$values)[1],"by",dim(x$values)[2]," matrix, to four digits:\n")
print(round(x$values,4))
}else
{
cat("values, a",dim(x$values)[1],"by",dim(x$values)[2],"matrix, upper left to four digits is:\n")
print(round(x$values[1:5,1:5],4))
}
w<-x$wtopt
if (is.null(w$scale.max.input)){w$scale.max.input<-"NULL"}
cat("wtopt: scale.min=",w$scale.min,"; scale.max.input=",w$scale.max.input,"; sigma=",w$sigma,"; f0=",w$f0,sep="")
}
#' @rdname wmf_methods
#' @export
set_times.wmf<-function(obj,newval)
{
stop("Error in set_times: times should not be altered for a wmf object")
}
#' @rdname wmf_methods
#' @export
set_timescales.wmf<-function(obj,newval)
{
stop("Error in set_timescales: timescales should not be alterned for a wmf object")
}
#' @rdname wmf_methods
#' @export
set_values.wmf<-function(obj,newval)
{
stop("Error in set_values: values should not be altered for a wmf object")
}
#' @rdname wmf_methods
#' @export
set_dat.wmf<-function(obj,newval)
{
stop("Error in set_dat: dat should not be altered for a wmf object")
}
#' @rdname wmf_methods
#' @export
set_wtopt.wmf<-function(obj,newval)
{
stop("Error in set_wtopt: wtopt should not be altered for a wmf object")
}
#' @rdname wmf_methods
#' @export
get_times.wmf<-function(obj)
{
return(obj$times)
}
#' @rdname wmf_methods
#' @export
get_timescales.wmf<-function(obj)
{
return(obj$timescales)
}
#' @rdname wmf_methods
#' @export
get_values.wmf<-function(obj)
{
return(obj$values)
}
#' @rdname wmf_methods
#' @export
get_dat.wmf<-function(obj)
{
return(obj$dat)
}
#' @rdname wmf_methods
#' @export
get_wtopt.wmf<-function(obj)
{
return(obj$wtopt)
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/wmf_methods.R |
#' Wavelet phasor mean field
#'
#' Computes the wavelet phasor mean field from a matrix of spatiotemporal data. Also the
#' creator function for the \code{wpmf} class. The \code{wpmf} class inherits from the
#' \code{tts} class, which inherits from the \code{list} class.
#'
#' @param dat A locations (rows) x time (columns) matrix
#' @param times A vector of time step values, spacing 1
#' @param scale.min The smallest scale of fluctuation that will be examined. At least 2.
#' @param scale.max.input The largest scale of fluctuation guaranteed to be examined
#' @param sigma The ratio of each time scale examined relative to the next timescale. Should be greater than 1.
#' @param f0 The ratio of the period of fluctuation to the width of the envelope
#' @param sigmethod Method for significance testing the wpmf, one of \code{quick}, \code{fft}, \code{aaft} (see details)
#' @param nrand The number of randomizations to be used for significance testing
#'
#' @return \code{wpmf} returns an object of class \code{wpmf}. Slots are:
#' \item{values}{A matrix of complex numbers containing the wavelet phasor mean field, of dimensions \code{length(times)} by the number of timescales. Entries not considered reliable (longer timescales, near the edges of the time span) are set to NA.}
#' \item{times}{The times associated with the data and the \code{wpmf}}
#' \item{timescales}{The timescales associated with the \code{wpmf}}
#' \item{signif}{A list with information from the significance testing. Format depends on \code{sigmethod} (see details).}
#' \item{dat}{The data matrix (locations by time) from which the \code{wpmf} was computed}
#' \item{wtopt}{The inputted wavelet transform options scale.min, scale.max.input, sigma, f0 in a list}
#'
#' @details For \code{sigmethod} equal to \code{quick}, the empirical wpmf is compared to a distribution of
#' magnitudes of sums of random phasors, using the same number of phasors as there are time series. The \code{signif}
#' output is a list with first element "\code{quick}" and second element a vector of \code{nrand} magnitudes of sums
#' of random phasors. For \code{sigmethod} equal to \code{fft}, the empirical wpmf is compared to wpmfs of
#' Fourier surrogate datasets. The \code{signif} output is a list with first element "\code{fft}", second element
#' equal to \code{nrand}, and third element the fraction of surrogate-based wpmf magnitudes that the empirical wpmf
#' magnitude is greater than (times by timescales matrix). For \code{sigmethod} equal to \code{aaft}, \code{aaft}
#' surrogates are used instead. Output has similar format to the \code{fft} case. Values other than \code{quick},
#' \code{fft}, and \code{aaft} for \code{sigmethod} result in no significance testing.
#'
#' @author Thomas Anderson, \email{anderstl@@gmail.com}, Jon Walter, \email{jaw3es@@virginia.edu}; Lawrence
#' Sheppard, \email{lwsheppard@@ku.edu}; Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references Sheppard, L.W., et al. (2016) Changes in large-scale climate alter spatial synchrony of aphid
#' pests. Nature Climate Change. DOI: 10.1038/nclimate2881
#'
#' @seealso \code{\link{wpmf_methods}}, \code{\link{wmf}}, \code{\link{tts}}, \code{\link{plotmag}},
#' \code{browseVignettes("wsyn")}
#'
#' @examples
#' times<-1:30 #generate time steps
#' #generate fake count data for 20 locations
#' dat<-matrix(rpois(20*length(times),20),nrow=20,ncol=length(times))
#' dat<-cleandat(dat=dat,times=times,clev=2)$cdat #detrend and demean
#' res<-wpmf(dat,times)
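#' #with significance testing, e.g. (kept as a comment to limit runtime):
#' #res<-wpmf(dat,times,sigmethod="quick",nrand=100)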
#'
#' @export
#' @importFrom stats runif
wpmf<-function(dat,times,scale.min=2, scale.max.input=NULL, sigma=1.05, f0=1, sigmethod="none", nrand=1000)
{
errcheck_stdat(times,dat,"wpmf")
errcheck_wavparam(scale.min,scale.max.input,sigma,f0,times,"wpmf")
if (sigmethod %in% c("quick","fft","aaft"))
{
#error check nrand
if (!is.numeric(nrand) || !is.finite(nrand) || length(nrand)!=1 || nrand<=0)
{
stop("Error in wpmf: inappropriate value for nrand")
}
}
#for return
wtopt<-list(scale.min=scale.min,scale.max.input=scale.max.input,
sigma=sigma,f0=f0)
#do all the transforms
wavarray<-warray(dat, times, scale.min, scale.max.input, sigma, f0)
timescales<-wavarray$timescales
wavarray<-wavarray$wavarray
#make phasors, then take wpmf by averaging across space
wavarray<-normforcoh(wavarray,"phase")
  wpmfres<-apply(wavarray, c(2,3), mean, na.rm=TRUE)
errcheck_tts(times,timescales,wpmfres,"wpmf")
#do significance testing
signif<-NA
if (sigmethod=="quick")
{
#just make nrand mags of sums of random phasors and return
rndphas<-matrix(complex(modulus=1,argument=2*pi*stats::runif((dim(dat)[1])*nrand)),dim(dat)[1],nrand)
signif<-list(sigmethod="quick",magsumrndphas=Mod(apply(FUN=mean,MARGIN=2,X=rndphas)))
}
if (sigmethod=="fft")
{
#get fft surrogates of the data that do not preserve synchrony (this is the null)
surr<-surrog(dat,nrand,"fft",syncpres=FALSE)
    #get the wpmf magnitude for each surrogate
surrwpmfm<-array(NA,c(dim(wpmfres),nrand))
for (counter in 1:nrand)
{
h<-wpmf(surr[[counter]],times,scale.min,scale.max.input,sigma,f0,sigmethod="none",nrand=1000)
surrwpmfm[,,counter]<-
Mod(get_values(h))
}
    #get the fractions of surrogate wpmf magnitudes that the empirical wpmf magnitude is greater than
gt<-matrix(NA,nrow(wpmfres),ncol(wpmfres))
mwpmfres<-Mod(wpmfres)
for (counter1 in 1:dim(wpmfres)[1])
{
for (counter2 in 1:dim(wpmfres)[2])
{
gt[counter1,counter2]<-sum(surrwpmfm[counter1,counter2,]<=mwpmfres[counter1,counter2])/nrand
}
}
signif<-list(sigmethod="fft",nsurrog=nrand,gt=gt)
}
if (sigmethod=="aaft")
{
    #get aaft surrogates of the data that do not preserve synchrony (this is the null)
surr<-surrog(dat,nrand,"aaft",syncpres=FALSE)
#get the wpmf magnitude for each surrogate
surrwpmfm<-array(NA,c(dim(wpmfres),nrand))
for (counter in 1:nrand)
{
surrwpmfm[,,counter]<-
Mod(get_values(wpmf(surr[[counter]],times,scale.min,scale.max.input,sigma,f0,sigmethod="none",nrand=1000)))
}
    #get the fractions of surrogate wpmf magnitudes that the empirical wpmf magnitude is greater than
gt<-matrix(NA,nrow(wpmfres),ncol(wpmfres))
mwpmfres<-Mod(wpmfres)
for (counter1 in 1:dim(wpmfres)[1])
{
for (counter2 in 1:dim(wpmfres)[2])
{
gt[counter1,counter2]<-sum(surrwpmfm[counter1,counter2,]<=mwpmfres[counter1,counter2])/nrand
}
}
signif<-list(sigmethod="aaft",nsurrog=nrand,gt=gt)
}
#prepare result
result<-list(values=wpmfres,times=times,timescales=timescales,signif=signif,dat=dat,wtopt=wtopt)
class(result)<-c("wpmf","tts","list")
return(result)
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/wpmf.R |
#' Basic methods for the \code{wpmf} class
#'
#' Set, get, summary, and print methods for the \code{wpmf} class.
#'
#' @param object,x,obj An object of class \code{wpmf}
#' @param newval A new value, for the \code{set_*} methods
#' @param ... Not currently used. Included for argument consistency
#' with existing generics.
#'
#' @return \code{summary.wpmf} produces a summary of a \code{wpmf} object.
#' A \code{print.wpmf} method is also available. For \code{wpmf} objects,
#' \code{set_*} and \code{get_*} methods are available for all slots,
#' i.e., \code{*} equal to \code{times}, \code{timescales}, \code{wtopt},
#' \code{values}, \code{dat}, and \code{signif}. The \code{set_*} methods just throw an
#' error, to prevent breaking the consistency between the slots of a
#' \code{wpmf} object.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{wpmf}}
#'
#' @examples
#' times<-1:30 #generate time steps
#' #generate fake count data for 20 locations
#' dat<-matrix(rpois(20*length(times),20),nrow=20,ncol=length(times))
#' dat<-cleandat(dat=dat,times=times,clev=2)$cdat #detrend and demean
#' h<-wpmf(dat,times)
#' get_times(h)
#' summary(h)
#' print(h)
#'
#' @name wpmf_methods
NULL
#> NULL
#' @rdname wpmf_methods
#' @export
summary.wpmf<-function(object,...)
{
x<-object
h<-x$wtopt$scale.max.input
if (is.null(h)){h<-"NULL"}
res<-list(class="wpmf",
times_start=x$times[1],
times_end=x$times[length(x$times)],
times_increment=x$times[2]-x$times[1],
sampling_locs=dim(x$dat)[1],
timescale_start=x$timescales[1],
timescale_end=x$timescales[length(x$timescales)],
timescale_length=length(x$timescales),
scale.min=x$wtopt$scale.min,
scale.max.input=h,
sigma=x$wtopt$sigma,
f0=x$wtopt$f0,
signif_testing=x$signif[[1]])
#a summary_wsyn object inherits from the list class, but has its own print method, above
class(res)<-c("summary_wsyn","list")
return(res)
}
#' @rdname wpmf_methods
#' @export
print.wpmf<-function(x,...)
{
cat("wpmf object:\n")
cat("times, a length",length(x$times),"numeric vector:\n")
if (length(x$times)<12)
{
cat(paste(x$times),"\n")
}else
{
cat(paste(x$times[1:5]),"...",paste(x$times[(length(x$times)-4):(length(x$times))]),"\n")
}
cat("Number of sampling locations:",dim(x$dat)[1],"\n")
cat("timescales, a length",length(x$timescales),"numeric vector:\n")
if (length(x$timescales)<12)
{
cat(paste(x$timescales),"\n")
}else
{
cat(paste(x$timescales[1:5]),"...",paste(x$timescales[(length(x$timescales)-4):(length(x$timescales))]),"\n")
}
if (length(x$timescales)<=5 && length(x$times)<=5)
{
cat("values, a",dim(x$values)[1],"by",dim(x$values)[2]," matrix, to four digits:\n")
print(round(x$values,4))
}else
{
cat("values, a",dim(x$values)[1],"by",dim(x$values)[2],"matrix, upper left to four digits is:\n")
print(round(x$values[1:5,1:5],4))
}
w<-x$wtopt
if (is.null(w$scale.max.input)){w$scale.max.input<-"NULL"}
cat("wtopt: scale.min=",w$scale.min,"; scale.max.input=",w$scale.max.input,"; sigma=",w$sigma,"; f0=",w$f0,"\n",sep="")
if (inherits(x$signif,"list"))
{
cat("significance testing:",x$signif[[1]])
}else
{
cat("significance testing: NA")
}
}
#' @rdname wpmf_methods
#' @export
set_times.wpmf<-function(obj,newval)
{
stop("Error in set_times: times scould not be altered for a wpmf object")
}
#' @rdname wpmf_methods
#' @export
set_timescales.wpmf<-function(obj,newval)
{
stop("Error in set_timescales: timescales should not be altered for a wpmf object")
}
#' @rdname wpmf_methods
#' @export
set_values.wpmf<-function(obj,newval)
{
stop("Error in set_values: values should not be altered for a wpmf object")
}
#' @rdname wpmf_methods
#' @export
set_dat.wpmf<-function(obj,newval)
{
stop("Error in set_dat: dat should not be altered for a wpmf object")
}
#' @rdname wpmf_methods
#' @export
set_wtopt.wpmf<-function(obj,newval)
{
stop("Error in set_wtopt: wtopt should not be altered for a wpmf object")
}
#' @rdname setget_methods
#' @export
set_signif<-function(obj,newval)
{
UseMethod("set_signif",obj)
}
#' @rdname setget_methods
#' @export
set_signif.default<-function(obj,newval)
{
stop("Error in set_signif: set_signif not defined for this class")
}
#' @rdname wpmf_methods
#' @export
set_signif.wpmf<-function(obj,newval)
{
stop("Error in set_signif: signif should not be altered for a wpmf object")
}
#' @rdname wpmf_methods
#' @export
get_times.wpmf<-function(obj)
{
return(obj$times)
}
#' @rdname wpmf_methods
#' @export
get_timescales.wpmf<-function(obj)
{
return(obj$timescales)
}
#' @rdname wpmf_methods
#' @export
get_values.wpmf<-function(obj)
{
return(obj$values)
}
#' @rdname wpmf_methods
#' @export
get_dat.wpmf<-function(obj)
{
return(obj$dat)
}
#' @rdname wpmf_methods
#' @export
get_wtopt.wpmf<-function(obj)
{
return(obj$wtopt)
}
#' @rdname setget_methods
#' @export
get_signif<-function(obj)
{
UseMethod("get_signif",obj)
}
#' @rdname setget_methods
#' @export
get_signif.default<-function(obj)
{
stop("Error in get_signif: get_signif not defined for this class")
}
#' @rdname wpmf_methods
#' @export
get_signif.wpmf<-function(obj)
{
return(obj$signif)
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/wpmf_methods.R |
#' Computes the wavelet transform of a timeseries. Also the creator function for the
#' \code{wt} class.
#'
#' Computes the wavelet transform of a timeseries. Also the creator function for the
#' \code{wt} class. The \code{wt} class inherits from the \code{tts} class, which
#' inherits from the \code{list} class.
#'
#' @param t.series A timeseries of real values
#' @param times A vector of time step values (e.g., years), spacing 1
#' @param scale.min The smallest scale of fluctuation that will be examined. At least 2.
#' @param scale.max.input The largest scale of fluctuation that is guaranteed to be examined
#' @param sigma The ratio of each time scale examined relative to the next timescale. Should be greater than 1.
#' @param f0 The ratio of the period of fluctuation to the width of the envelope. Defaults to 1.
#'
#' @return \code{wt} returns an object of class \code{wt}. Slots are:
#' \item{values}{A matrix of complex numbers, of dimensions \code{length(t.series)} by the number of timescales. Entries not considered reliable (longer timescales, near the edges of the time span) are set to NA.}
#' \item{times}{The time steps specified (e.g. years)}
#' \item{wtopt}{The inputted wavelet transform options scale.min, scale.max.input, sigma, f0 in a list}
#' \item{timescales}{The timescales (1/frequency) computed for the wavelet transform}
#' \item{dat}{The data vector from which the transform was computed}
#'
#' @note Important for interpreting the phase: the phases grow through time, i.e., they turn anti-clockwise.
#'
#' @author Lawrence Sheppard \email{lwsheppard@@ku.edu}, Jonathan Walter
#' \email{jaw3es@@virginia.edu}, Daniel Reuman \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{wt_methods}}, \code{\link{tts}}, \code{\link{plotmag}}, \code{\link{plotphase}},
#' \code{browseVignettes("wsyn")}
#'
#' @examples
#' time1<-1:100
#' time2<-101:200
#' ts1p1<-sin(2*pi*time1/15)
#' ts1p2<-0*time1
#' ts2p1<-0*time2
#' ts2p2<-sin(2*pi*time2/8)
#' ts1<-ts1p1+ts1p2
#' ts2<-ts2p1+ts2p2
#' ts<-c(ts1,ts2)
#' ra<-rnorm(200,mean=0,sd=0.5)
#' t.series<-ts+ra
#' t.series<-t.series-mean(t.series)
#' times<-c(time1,time2)
#' res<-wt(t.series, times)
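#' #magnitudes and phases of the transform can be extracted, e.g.:
#' #Mod(get_values(res)) #transform magnitudes
#' #Arg(get_values(res)) #phases, which grow through time (see the note)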
#'
#' @export
#' @importFrom stats fft
wt <- function(t.series, times, scale.min=2, scale.max.input=NULL, sigma=1.05, f0=1)
{
#error checking
errcheck_tsdat(times,t.series,"wt")
errcheck_wavparam(scale.min,scale.max.input,sigma,f0,times,"wt")
if(is.null(scale.max.input)){
scale.max<-length(t.series)
}
else{
scale.max<-scale.max.input
}
if (is.matrix(t.series))
{
t.series<-as.vector(t.series)
}
#for return
wtopt<-list(scale.min=scale.min,scale.max.input=scale.max.input,
sigma=sigma,f0=f0)
#determine how many frequencies are in the range and make receptacle for results
scale.min <- f0*scale.min
scale.max <- f0*scale.max
m.max <- floor(log(scale.max/scale.min)/log(sigma))+1 #number of timescales
s2 <- scale.min*sigma^seq(from=0, by=1, to=m.max) #widths of wavelet envelopes
margin2 <- ceiling(sqrt(-(2*s2*s2)*log(0.5)))
translength <- length(t.series)
m.last <- max(which(margin2<0.5*translength))
result <- matrix(NA, nrow=translength, ncol=m.max+1)
#wavsize determines the size of the calculated wavelet
wavsize <- ceiling(sqrt(-(2*s2[m.last]*s2[m.last])*log(0.001)));
#preparations for finding components
Y <- stats::fft(c(t.series,rep(0,2*wavsize)))
lenY<-length(Y)
freqs<-seq(from=0, by=1, to=lenY-1)/lenY;
freqs2<-c(seq(from=0, by=1, to=floor(lenY/2)), seq(from=-(ceiling(lenY/2)-1),
by=1, to=-1))/lenY;
#find transform components using wavelets of each frequency
for (stage in 1 : m.last)
{
s.scale<-s2[stage];
#begin calculating wavelet
#margin determines how close large wavelets can come to the edges of the timeseries
margin<-margin2[stage];
#perform convolution
XX <- (2*pi*s.scale)^(0.5)*(exp(-s.scale^2*(2*pi*(freqs-((f0/s.scale))))^2/2) -
(exp(-s.scale^2*(2*pi*(freqs2))^2/2))*
(exp(-0.5*(2*pi*f0)^2)))*exp(-1i*2*pi*wavsize*freqs);
con <- stats::fft((XX*Y),inverse=TRUE)
con <- con/length(con)
#fit result into transform
result[(margin+1):(translength-margin),stage] <-
con[(wavsize + margin + 1):(translength + wavsize - margin)];
}
if(is.null(scale.max.input)){
result<-result[,1:m.last]
timescales<-s2[1:m.last]/f0
errcheck_tts(times,timescales,result,"wt")
result<-list(values=result, times=times, wtopt=wtopt, timescales=timescales, dat=t.series)
class(result)<-c("wt","tts","list")
return(result)
}
else{
timescales<-s2/f0
errcheck_tts(times,timescales,result,"wt")
result<-list(values=result, times = times, wtopt=wtopt, timescales=timescales, dat=t.series)
class(result)<-c("wt","tts","list")
return(result)
}
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/wt.R |
#' Basic methods for the \code{wt} class
#'
#' Set, get, summary, and print methods for the \code{wt} class.
#'
#' @param object,x,obj An object of class \code{wt}
#' @param newval A new value, for the \code{set_*} methods
#' @param ... Not currently used. Included for argument consistency
#' with existing generics.
#'
#' @return \code{summary.wt} produces a summary of a \code{wt} object.
#' A \code{print.wt} method is also available. For \code{wt} objects,
#' \code{set_*} and \code{get_*} methods are available for all slots,
#' i.e., \code{*} equal to \code{times}, \code{timescales}, \code{wtopt},
#' \code{values}, and \code{dat}. The \code{set_*} methods just throw an
#' error, to prevent breaking the consistency between the slots of a
#' \code{wt} object.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{wt}}
#'
#' @examples
#' time1<-1:100
#' time2<-101:200
#' ts1p1<-sin(2*pi*time1/15)
#' ts1p2<-0*time1
#' ts2p1<-0*time2
#' ts2p2<-sin(2*pi*time2/8)
#' ts1<-ts1p1+ts1p2
#' ts2<-ts2p1+ts2p2
#' ts<-c(ts1,ts2)
#' ra<-rnorm(200,mean=0,sd=0.5)
#' t.series<-ts+ra
#' t.series<-t.series-mean(t.series)
#' times<-c(time1,time2)
#' h<-wt(t.series, times)
#' get_times(h)
#' summary(h)
#' print(h)
#'
#' @name wt_methods
NULL
#' @rdname wt_methods
#' @export
summary.wt<-function(object,...)
{
x<-object
h<-x$wtopt$scale.max.input
if (is.null(h)){h<-"NULL"}
res<-list(class="wt",
times_start=x$times[1],
times_end=x$times[length(x$times)],
times_increment=x$times[2]-x$times[1],
timescale_start=x$timescales[1],
timescale_end=x$timescales[length(x$timescales)],
timescale_length=length(x$timescales),
scale.min=x$wtopt$scale.min,
scale.max.input=h,
sigma=x$wtopt$sigma,
f0=x$wtopt$f0)
#a summary_wsyn object inherits from the list class, but has its own print method, above
class(res)<-c("summary_wsyn","list")
return(res)
}
#' @rdname wt_methods
#' @export
print.wt<-function(x,...)
{
cat("wt object:\n")
cat("times, a length",length(x$times),"numeric vector:\n")
if (length(x$times)<12)
{
cat(paste(x$times),"\n")
}else
{
cat(paste(x$times[1:5]),"...",paste(x$times[(length(x$times)-4):(length(x$times))]),"\n")
}
cat("timescales, a length",length(x$timescales),"numeric vector:\n")
if (length(x$timescales)<12)
{
cat(paste(x$timescales),"\n")
}else
{
cat(paste(x$timescales[1:5]),"...",paste(x$timescales[(length(x$timescales)-4):(length(x$timescales))]),"\n")
}
if (length(x$timescales)<=5 && length(x$times)<=5)
{
cat("values, a",dim(x$values)[1],"by",dim(x$values)[2]," matrix, to four digits:\n")
print(round(x$values,4))
}else
{
cat("values, a",dim(x$values)[1],"by",dim(x$values)[2],"matrix, upper left to four digits is:\n")
print(round(x$values[1:5,1:5],4))
}
w<-x$wtopt
if (is.null(w$scale.max.input)){w$scale.max.input<-"NULL"}
cat("wtopt: scale.min=",w$scale.min,"; scale.max.input=",w$scale.max.input,"; sigma=",w$sigma,"; f0=",w$f0,sep="")
}
#' @rdname wt_methods
#' @export
set_times.wt<-function(obj,newval)
{
stop("Error in set_times: times should not be altered for a wt object")
}
#' @rdname wt_methods
#' @export
set_timescales.wt<-function(obj,newval)
{
stop("Error in set_timescales: timescales should not be alterned for a wt object")
}
#' @rdname wt_methods
#' @export
set_values.wt<-function(obj,newval)
{
stop("Error in set_values: values should not be altered for a wt object")
}
#' @rdname setget_methods
#' @export
set_dat<-function(obj,newval)
{
UseMethod("set_dat",obj)
}
#' @rdname setget_methods
#' @export
set_dat.default<-function(obj,newval)
{
stop("Error in set_dat: set_dat not defined for this class")
}
#' @rdname wt_methods
#' @export
set_dat.wt<-function(obj,newval)
{
stop("Error in set_dat: dat should not be altered for a wt object")
}
#' @rdname setget_methods
#' @export
set_wtopt<-function(obj,newval)
{
UseMethod("set_wtopt",obj)
}
#' @rdname setget_methods
#' @export
set_wtopt.default<-function(obj,newval)
{
stop("Error in set_wtopt: set_wtopt not defined for this class")
}
#' @rdname wt_methods
#' @export
set_wtopt.wt<-function(obj,newval)
{
stop("Error in set_wtopt: wtopt should not be altered for a wt object")
}
#' @rdname wt_methods
#' @export
get_times.wt<-function(obj)
{
return(obj$times)
}
#' @rdname wt_methods
#' @export
get_timescales.wt<-function(obj)
{
return(obj$timescales)
}
#' @rdname wt_methods
#' @export
get_values.wt<-function(obj)
{
return(obj$values)
}
#' @rdname setget_methods
#' @export
get_dat<-function(obj)
{
UseMethod("get_dat",obj)
}
#' @rdname setget_methods
#' @export
get_dat.default<-function(obj)
{
stop("Error in get_dat: get_dat not defined for this class")
}
#' @rdname wt_methods
#' @export
get_dat.wt<-function(obj)
{
return(obj$dat)
}
#' @rdname setget_methods
#' @export
get_wtopt<-function(obj)
{
UseMethod("get_wtopt",obj)
}
#' @rdname setget_methods
#' @export
get_wtopt.default<-function(obj)
{
stop("Error in get_wtopt: get_wtopt not defined for this class")
}
#' @rdname wt_methods
#' @export
get_wtopt.wt<-function(obj)
{
return(obj$wtopt)
}
| /scratch/gouwar.j/cran-all/cranData/wsyn/R/wt_methods.R |
## ----seed_setter_1, echo=F----------------------------------------------------
set.seed(101)
## ----wt_example_1_ts1---------------------------------------------------------
time1<-1:100
time2<-101:200
times<-c(time1,time2)
ts1p1<-sin(2*pi*time1/15)
ts1p2<-0*time2
ts1<-c(ts1p1,ts1p2)
ts<-ts1
## ----wt_example_1_ts2---------------------------------------------------------
ts2p1<-0*time1
ts2p2<-sin(2*pi*time2/8)
ts2<-c(ts2p1,ts2p2)
ts<-ts+ts2
## ----wt_example_1_ts3---------------------------------------------------------
ts3<-rnorm(200,mean=0,sd=0.5)
ts<-ts+ts3
## ----wt_example_1_wt----------------------------------------------------------
library(wsyn)
ts<-cleandat(ts,times,clev=1)
wtres<-wt(ts$cdat,times)
class(wtres)
names(wtres)
## ----wt_example_1_plot, results=FALSE-----------------------------------------
plotmag(wtres)
## ----wt_example_1_plotphase, results=FALSE------------------------------------
plotphase(wtres)
## ----wt_example_1_power, results=FALSE----------------------------------------
h<-power(wtres)
plot(log(1/h$timescales),h$power,type='l',lty="solid",xaxt="n",
xlab="Timescales",ylab="Power")
xlocs<-c(min(h$timescales),pretty(h$timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
## ----wt_example_1_psmeth------------------------------------------------------
print(wtres)
summary(wtres)
## ----seed_setter_2, echo=F----------------------------------------------------
set.seed(201)
## ----wt_example_2, results=FALSE----------------------------------------------
timeinc<-1 #one sample per year
startfreq<-0.2 #cycles per year
endfreq<-0.1 #cycles per year
times<-1:200
f<-seq(from=startfreq,length.out=length(times),to=endfreq) #frequency for each sample
phaseinc<-2*pi*cumsum(f*timeinc)
t.series<-sin(phaseinc)
t.series<-cleandat(t.series,times,1)$cdat
res<-wt(t.series, times)
plotmag(res)
plotphase(res)
## ----seed_setter_3, echo=F----------------------------------------------------
set.seed(101)
## ----wpmf_example_1_dat_1-----------------------------------------------------
times1<-0:50
times2<-51:100
times<-c(times1,times2)
ts1<-c(sin(2*pi*times1/10),sin(2*pi*times2/5))+1.1
## ----wpmf_example_1_dat_2-----------------------------------------------------
dat<-matrix(NA,11,length(times))
for (counter in 1:dim(dat)[1])
{
ts2<-3*sin(2*pi*times/3+2*pi*runif(1))+3.1
ts3<-rnorm(length(times),0,1.5)
dat[counter,]<-ts1+ts2+ts3
}
dat<-cleandat(dat,times,1)$cdat
## ----wpmf_example_1_plotts----------------------------------------------------
plot(times,dat[1,]/10+1,type='l',xlab="Time",ylab="Time series index",ylim=c(0,12))
for (counter in 2:dim(dat)[1])
{
lines(times,dat[counter,]/10+counter)
}
## ----wpmf_example_1_cors------------------------------------------------------
cmat<-cor(t(dat))
diag(cmat)<-NA
cmat<-as.vector(cmat)
cmat<-cmat[!is.na(cmat)]
hist(cmat,30,xlab="Pearson correlation",ylab="Count")
## ----wpmf_example_1, results=FALSE--------------------------------------------
res<-wpmf(dat,times,sigmethod="quick")
plotmag(res)
## ----wmf_example_1, results=FALSE---------------------------------------------
res<-wmf(dat,times)
plotmag(res)
## ----seed_setter_4, echo=F----------------------------------------------------
set.seed(101)
## ----coh_example_dat_driver---------------------------------------------------
times<-(-3:100)
ts1<-sin(2*pi*times/10)
ts2<-5*sin(2*pi*times/3)
x<-matrix(NA,11,length(times)) #the driver (environmental) variable
for (counter in 1:11)
{
x[counter,]=ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
}
## ----coh_example_dat_env------------------------------------------------------
times<-0:100
y<-matrix(NA,11,length(times)) #the driven (biological) variable
for (counter1 in 1:11)
{
for (counter2 in 1:101)
{
y[counter1,counter2]<-mean(x[counter1,counter2:(counter2+2)])
}
}
y<-y+matrix(rnorm(length(times)*11,mean=0,sd=3),11,length(times))
x<-x[,4:104]
x<-cleandat(x,times,1)$cdat
y<-cleandat(y,times,1)$cdat
## ----cors_range_widely--------------------------------------------------------
allcors<-c()
for (counter in 1:dim(x)[1])
{
allcors[counter]<-cor(x[counter,],y[counter,])
}
allcors
## ----coh_example_call---------------------------------------------------------
res<-coh(dat1=x,dat2=y,times=times,norm="powall",
sigmethod="fftsurrog1",nrand=100,
f0=0.5,scale.max.input=28)
## ----coh_plot, results=FALSE--------------------------------------------------
plotmag(res)
## ----coh_example_call_fast, results=FALSE-------------------------------------
res<-coh(dat1=x,dat2=y,times=times,norm="powall",
sigmethod="fast",nrand=10000,
f0=0.5,scale.max.input=28)
plotmag(res)
## ----bandtest-----------------------------------------------------------------
res<-bandtest(res,c(8,12))
## ----bandtest_display_bandp---------------------------------------------------
get_bandp(res)
## ----bandtest2----------------------------------------------------------------
res<-bandtest(res,c(2,4))
get_bandp(res)
## ----display_p_1, results=FALSE-----------------------------------------------
plotmag(res)
## ----plotrank_1, results=FALSE------------------------------------------------
plotrank(res)
## ----plotphase_1, results=FALSE-----------------------------------------------
plotphase(res)
## ----seed_setter_5, echo=F----------------------------------------------------
set.seed(3221)
## ----driver_1-----------------------------------------------------------------
lts<-12
sts<-3
mats<-3
times<-seq(from=-mats,to=100)
ts1<-sin(2*pi*times/lts)
ts2<-sin(2*pi*times/sts)
numlocs<-10
d1<-matrix(NA,numlocs,length(times)) #the first driver
for (counter in 1:numlocs)
{
d1[counter,]<-ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
}
## ----driver_2-----------------------------------------------------------------
ts1<-sin(2*pi*times/lts)
ts2<-sin(2*pi*times/sts)
d2<-matrix(NA,numlocs,length(times)) #the second driver
for (counter in 1:numlocs)
{
d2[counter,]<-ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
}
## ----dirrel-------------------------------------------------------------------
dirrel<-matrix(NA,numlocs,length(times)) #the irrelevant env var
for (counter in 1:numlocs)
{
dirrel[counter,]<-rnorm(length(times),mean=0,sd=1.5)
}
## ----popdat-------------------------------------------------------------------
pops<-matrix(NA,numlocs,length(times)) #the populations
for (counter in (mats+1):length(times))
{
aff1<-apply(FUN=mean,X=d1[,(counter-mats):(counter-1)],MARGIN=1)
aff2<-d2[,counter-1]
pops[,counter]<-aff1+aff2+rnorm(numlocs,mean=0,sd=3)
}
pops<-pops[,times>=0]
d1<-d1[,times>=0]
d2<-d2[,times>=0]
dirrel<-dirrel[,times>=0]
times<-times[times>=0]
## ----wmfs_wlmexample, results=FALSE-------------------------------------------
dat<-list(pops=pops,d1=d1,d2=d2,dirrel=dirrel)
dat<-lapply(FUN=function(x){cleandat(x,times,1)$cdat},X=dat)
wmfpop<-wmf(dat$pops,times,scale.max.input=28)
plotmag(wmfpop)
wmfd1<-wmf(dat$d1,times,scale.max.input=28)
plotmag(wmfd1)
wmfd2<-wmf(dat$d2,times,scale.max.input=28)
plotmag(wmfd2)
## ----fit_mod_allpred----------------------------------------------------------
wlm_all<-wlm(dat,times,resp=1,pred=2:4,norm="powall",scale.max.input=28)
## ----sometests----------------------------------------------------------------
wlm_all_dropi<-wlmtest(wlm_all,drop="dirrel",sigmethod="fft",nrand=100)
wlm_all_drop1<-wlmtest(wlm_all,drop="d1",sigmethod="fft",nrand=100)
wlm_all_drop2<-wlmtest(wlm_all,drop="d2",sigmethod="fft",nrand=100)
## ----dropdirrel_1, results=FALSE----------------------------------------------
blong<-c(11,13)
bshort<-c(2,4)
wlm_all_dropi<-bandtest(wlm_all_dropi,band=blong)
wlm_all_dropi<-bandtest(wlm_all_dropi,band=bshort)
plotmag(wlm_all_dropi)
plotrank(wlm_all_dropi)
## ----dropd1_1, results=FALSE--------------------------------------------------
wlm_all_drop1<-bandtest(wlm_all_drop1,band=blong)
wlm_all_drop1<-bandtest(wlm_all_drop1,band=bshort)
plotmag(wlm_all_drop1)
plotrank(wlm_all_drop1)
## ----dropd2_1, results=FALSE--------------------------------------------------
wlm_all_drop2<-bandtest(wlm_all_drop2,band=blong)
wlm_all_drop2<-bandtest(wlm_all_drop2,band=bshort)
plotmag(wlm_all_drop2)
plotrank(wlm_all_drop2)
## ----shortts_syncexpl---------------------------------------------------------
se<-syncexpl(wlm_all)
se_short<-se[se$timescales>=bshort[1] & se$timescales<=bshort[2],]
round(100*colMeans(se_short[,c(3:12)])/mean(se_short$sync),4)
## ----longts_syncexpl----------------------------------------------------------
se_long<-se[se$timescales>=blong[1] & se$timescales<=blong[2],]
round(100*colMeans(se_long[,c(3:12)])/mean(se_long$sync),4)
## ----examp_predsync_1---------------------------------------------------------
pres<-predsync(wlm_all)
plotmag(pres)
plotmag(wmfpop)
## ----examp_predsync_2---------------------------------------------------------
wlm_d1<-wlm(dat,times,resp=1,pred=2,norm="powall",scale.max.input=28)
pres<-predsync(wlm_d1)
plotmag(pres)
## ----artificial_clustering_data-----------------------------------------------
N<-5
Tmax<-100
rho<-0.5
sig<-matrix(rho,N,N)
diag(sig)<-1
d<-t(cbind(mvtnorm::rmvnorm(Tmax,mean=rep(0,N),sigma=sig),
mvtnorm::rmvnorm(Tmax,mean=rep(0,N),sigma=sig)))
d<-cleandat(d,1:Tmax,1)$cdat
## ----pearson_synmat-----------------------------------------------------------
sm<-synmat(d,1:Tmax,method="pearson")
fields::image.plot(1:10,1:10,sm,col=heat.colors(20))
## ----freq_spec_synmat---------------------------------------------------------
N<-20
Tmax<-500
tim<-1:Tmax
ts1<-sin(2*pi*tim/5)
ts1s<-sin(2*pi*tim/5+pi/2)
ts2<-sin(2*pi*tim/12)
ts2s<-sin(2*pi*tim/12+pi/2)
gp1A<-1:5
gp1B<-6:10
gp2A<-11:15
gp2B<-16:20
d<-matrix(NA,Tmax,N)
d[,c(gp1A,gp1B)]<-ts1
d[,c(gp2A,gp2B)]<-ts1s
d[,c(gp1A,gp2A)]<-d[,c(gp1A,gp2A)]+matrix(ts2,Tmax,N/2)
d[,c(gp1B,gp2B)]<-d[,c(gp1B,gp2B)]+matrix(ts2s,Tmax,N/2)
d<-d+matrix(rnorm(Tmax*N,0,2),Tmax,N)
d<-t(d)
d<-cleandat(d,1:Tmax,1)$cdat
## ----detect_tsspecific_sync---------------------------------------------------
sm5<-synmat(dat=d,times=1:Tmax,method="ReXWT",tsrange=c(4,6))
fields::image.plot(1:N,1:N,sm5,col=heat.colors(20))
sm12<-synmat(dat=d,times=1:Tmax,method="ReXWT",tsrange=c(11,13))
fields::image.plot(1:N,1:N,sm12,col=heat.colors(20))
## ----abuse_cor----------------------------------------------------------------
sm<-synmat(dat=d,times=1:Tmax,method="pearson")
fields::image.plot(1:N,1:N,sm,col=heat.colors(20))
## ----clust_demo---------------------------------------------------------------
#make some artificial coordinates for the geographic locations of where data were measured
coords<-data.frame(X=c(rep(1,10),rep(2,10)),Y=rep(c(1:5,7:11),times=2))
#create clusters based on the 5-year timescale range and map them using coords
cl5<-clust(dat=d,times=1:Tmax,coords=coords,method="ReXWT",tsrange=c(4,6))
get_clusters(cl5) #the first element of the list is always all 1s - prior to any splits
#call the mapper here
plotmap(cl5)
#plot mean time series for each module
plot(get_times(cl5)[1:100],get_mns(cl5)[[2]][1,1:100],type='l',col='red',
ylim=range(get_mns(cl5)),xlab="Time step",ylab="Mean pop.")
lines(get_times(cl5)[1:100],get_mns(cl5)[[2]][2,1:100],type='l',col='green')
legend(x="topright",legend=c("mod1","mod2"),lty=c(1,1),col=c("red","green"))
#create wavelet mean fields for each module and plot
cl5<-addwmfs(cl5)
plotmag(get_wmfs(cl5)[[2]][[1]])
plotmag(get_wmfs(cl5)[[2]][[2]])
#create clusters based on the 12-year timescale range and map them using coords
cl12<-clust(dat=d,times=1:Tmax,coords=coords,method="ReXWT",tsrange=c(11,13))
get_clusters(cl12)
#call the mapper here
plotmap(cl12)
#plot mean time series for each module
plot(get_times(cl12)[1:100],get_mns(cl12)[[2]][1,1:100],type='l',col='red',
ylim=range(get_mns(cl12)),xlab="Time step",ylab="Mean pop.")
lines(get_times(cl12)[1:100],get_mns(cl12)[[2]][2,1:100],type='l',col='green')
legend(x="topright",legend=c("mod1","mod2"),lty=c(1,1),col=c("red","green"))
#create wavelet mean fields for each module and plot
cl12<-addwmfs(cl12)
plotmag(get_wmfs(cl12)[[2]][[1]])
plotmag(get_wmfs(cl12)[[2]][[2]])
| /scratch/gouwar.j/cran-all/cranData/wsyn/inst/doc/wsynvignette.R |
---
title: "Wavelet approaches to synchrony (wsyn) package vignette"
author: "Lawrence Sheppard, Jonathan Walter, Thomas Anderson, Lei Zhao, Daniel Reuman"
date: ""
geometry: "left=1cm,right=1cm,top=2.5cm,bottom=2.8cm"
output:
pdf_document:
number_sections: yes
keep_tex: yes
fig_caption: yes
link-citations: True
urlcolor: blue
bibliography: wsynvignette_refs.bib
vignette: >
%\VignetteIndexEntry{"wsyn vignette"}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
The `wsyn` package provides wavelet-based tools for investigating population synchrony.
Population synchrony is the tendency for population densities measured in different locations
to be correlated in their fluctuations through time [@Liebhold_04].
The basic dataset that `wsyn` helps analyze is one or more time
series of the same variable, measured in different locations at the same times; or
two or more variables so measured at the same times and locations. Tools are implemented
for describing synchrony and for investigating its causes and consequences. Wavelet
approaches to synchrony include @Grenfell_01; @Viboud_06; @Keitt_08; @Sheppard_16;
@Sheppard_17; @Sheppard_18; @Walter_17; @Anderson_18. The focus here is on techniques
used by @Sheppard_16; @Sheppard_17; @Sheppard_18; @Walter_17; @Anderson_18.
The techniques can also be used for data of the same format representing quantities
not related to populations, or, in some cases, multiple measurements from the same
location. Some of the functions of this package may also be useful for
studying the case of "community synchrony," i.e., co-located populations of
different species that fluctuate through time in positively correlated,
negatively correlated, or time-lagged ways.
<!--Insert additional references from our work as they come out-->
<!--Insert here a summary of the sections-->
# Preparing the data
\noindent A typical dataset for analysis using `wsyn` is an $N \times T$ matrix of numeric
values where rows correspond to sampling
locations (so the number of sampling locations is $N$)
and columns correspond to evenly spaced times during which sampling was conducted (so the
number of times sampling was conducted is $T$).
## Missing data
\noindent Standard implementations of wavelet transforms require time series consisting
of measurements taken at evenly spaced times, with no missing data. Most functions provided
in `wsyn` make these same assumptions and throw an error if data are missing.
The user is left to decide on and implement a reasonable way of filling missing
data. Measures of synchrony can be influenced by data
filling techniques that are based on spatial interpolation. We therefore recommend
that spatially informed filling procedures not be used. We have previously
used the simple approach of
replacing missing values in a time series by the median of the non-missing values in the
time series [@Sheppard_16]. This approach, and other related simple
procedures [@Sheppard_16], seem
unlikely to artefactually produce significant synchrony, or
coherence relationships with other
variables, but rely on the percentage of missing data being fairly low and may obscure
detection of synchrony or significant coherence relationships if too many data are
missing. For applications which differ meaningfully from the prior work for which
the tools of this package were developed
(e.g., @Sheppard_16; @Sheppard_17; @Sheppard_18; @Walter_17; @Anderson_18),
different ways of dealing with missing data may be more appropriate.
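For concreteness, a minimal sketch (ours, not part of the analyses below) of the
median-filling approach just described, for a single short time series:
```{r missing_data_sketch}
xgap<-c(1.2,NA,0.7,2.1,NA,1.0)
xgap[is.na(xgap)]<-median(xgap,na.rm=TRUE) #fill missing values with the median
xgap
```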
## De-meaning, detrending, standardizing variance, and normalizing
\noindent A function `cleandat` is provided that performs a variety of combinations
of data cleaning typically necessary for analyses implemented in `wsyn`, including
de-meaning, linear detrending, standardization of time series variance, and Box-Cox
transformations to normalize marginal distributions of time series. Most functions in
`wsyn` assume at least that means have been removed from time series, and throw an
error if this is not the case. Approaches based on Fourier surrogates (section \ref{sec:surrog})
require time series with approximately normal marginals.
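A minimal usage sketch (ours; see the help file for `cleandat` for the precise
meaning of each cleaning level, `clev`):
```{r cleandat_sketch}
library(wsyn)
tt<-1:100
xraw<-0.05*tt+sin(2*pi*tt/10)+rnorm(100,mean=0,sd=0.5) #a trended, noisy series
c1<-cleandat(xraw,tt,clev=1)$cdat #de-meaning only
c3<-cleandat(xraw,tt,clev=3)$cdat #adds detrending and variance standardization
c(mean(c1),mean(c3)) #both essentially zero
```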
# The wavelet transform \label{sec:wt}
\noindent The function `wt` implements the complex Morlet wavelet transform, on which most
other `wsyn` functions are based. An S3 class is defined for `wt`, and it inherits from the
generic class `tts`. See the help files for the generator functions `wt` and `tts`
for slot names and other information about these classes. Both classes have `set`
and `get` methods for interacting with the slots, see help files for `tts_methods`,
`wt_methods` and `setget_methods`. The `set` methods just throw an error, since
generally one should not be changing the individual slots of objects of one of
classes in `wsyn` as it breaks the consistency between slots.
Background on the wavelet transform is available from many sources, including
@Addison_02, and we do not recapitulate it. We instead describe the wavelet transform
operationally, and demonstrate the implementation
of the wavelet transform in `wsyn` using examples from Fig. S2 of @Sheppard_18.
Given a time series $x(t)$, $t=1,\ldots,T$, the wavelet transform $W_\sigma(t)$
of $x(t)$ is a complex-valued function of time, $t=1,\ldots,T$, and timescale,
$\sigma$. The magnitude $|W_\sigma(t)|$ is an estimate of the strength of the
oscillations in $x(t)$ at time $t$ occurring at timescale $\sigma$. The complex
phase of $W_\sigma(t)$ gives the phase of these oscillations.
```{r seed_setter_1, echo=F}
set.seed(101)
```
To demonstrate the wavelet transform, start by generating some data. Start with a sine wave of
amplitude $1$ and period $15$ that operates for $t=1,\ldots,100$ but then disappears.
```{r wt_example_1_ts1}
time1<-1:100
time2<-101:200
times<-c(time1,time2)
ts1p1<-sin(2*pi*time1/15)
ts1p2<-0*time2
ts1<-c(ts1p1,ts1p2)
ts<-ts1
```
Then add a sine wave of amplitude $1$ and period $8$ that operates for $t=101,\ldots,200$
but before that is absent.
```{r wt_example_1_ts2}
ts2p1<-0*time1
ts2p2<-sin(2*pi*time2/8)
ts2<-c(ts2p1,ts2p2)
ts<-ts+ts2
```
Then add normally distributed white noise of mean $0$ and standard deviation $0.5$.
```{r wt_example_1_ts3}
ts3<-rnorm(200,mean=0,sd=0.5)
ts<-ts+ts3
```
Now apply the wavelet transform, obtaining an object of class `wt`. Default parameter
values for `scale.min`, `scale.max.input`, `sigma` and `f0` are usually good enough for
initial data exploration.
```{r wt_example_1_wt}
library(wsyn)
ts<-cleandat(ts,times,clev=1)
wtres<-wt(ts$cdat,times)
class(wtres)
names(wtres)
```
Methods `get_times`, `get_timescales`, `get_values`, `get_wtopt`, and `get_dat` extract the slots.
Set methods also exist, but these just throw an error since setting individual slots of
a `wt` object will break the relationship between the slots.
There is a `plotmag` method for the `tts` class that plots the magnitude of the transform
against time and timescale.
```{r wt_example_1_plot, results=FALSE}
plotmag(wtres)
```
We can see the oscillations at timescale $15$ for the first hundred time steps, and the
oscillations at timescale $8$ for the last 100 time steps, as expected.
Because the wavelet transform
is based on convolution of a wavelet function with the time series, times and timescales
for which the overlap of the wavelet with the time series is insufficient are unreliable
and are omitted. This affects times closer to the edges of the time series, and is the
reason for the "rocketship nose cone" shape of wavelet plots. More values are omitted
for longer timescales because long-timescale wavelets overhang the end of the time series
further in the convolution operation. All plots based on wavelet transforms have the same
property.
There is also a `plotphase` method for the `tts` class that plots the phase of the transform
against time and timescale.
```{r wt_example_1_plotphase, results=FALSE}
plotphase(wtres)
```
One can compute the power.
```{r wt_example_1_power, results=FALSE}
h<-power(wtres)
plot(log(1/h$timescales),h$power,type='l',lty="solid",xaxt="n",
xlab="Timescales",ylab="Power")
xlocs<-c(min(h$timescales),pretty(h$timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
```
There are also `print` and `summary` methods for the `wt` class and `tts` class. See the help files for
`tts_methods` and `wt_methods`.
```{r wt_example_1_psmeth}
print(wtres)
summary(wtres)
```
```{r seed_setter_2, echo=F}
set.seed(201)
```
Now we give a second example, also from Fig. S2 of @Sheppard_18. The frequency
of oscillation of the data generated below
changes gradually from $0.2$ cycles per year (timescale 5 years)
to $0.1$ cycles per year (timescale 10 years).
```{r wt_example_2, results=FALSE}
timeinc<-1 #one sample per year
startfreq<-0.2 #cycles per year
endfreq<-0.1 #cycles per year
times<-1:200
f<-seq(from=startfreq,length.out=length(times),to=endfreq) #frequency for each sample
phaseinc<-2*pi*cumsum(f*timeinc)
t.series<-sin(phaseinc)
t.series<-cleandat(t.series,times,1)$cdat
res<-wt(t.series, times)
plotmag(res)
plotphase(res)
```
The `times` argument to `wt` (and to several other functions, see below)
is tightly constrained. It must be a numeric vector of unit-spaced times
(`diff(times)` is a vector of 1s) with
no missing entries. It must be the same length as the data and correspond to the
timing of measurement of the data. For the most common use cases, the unit
spacing of times will be natural in some time unit, i.e., sampling is typically
conducted with frequency once per time unit for some natural time unit (e.g.,
once per year, month, day, week, fortnight). In those cases, `timescales`
in output and on plots will have units cycles per time unit for the time unit
of sampling. Applications with sampling time step
not equal to 1 in some natural unit of time can view the `times` vector
as a vector of time steps, rather than times, \emph{per se}, and `timescales`
will be in units of cycles per time step.
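A quick way to verify that a `times` vector meets these requirements (a sketch
of ours; the variable name is arbitrary):
```{r times_check_sketch}
tcheck<-1:200
all(diff(tcheck)==1) && !any(is.na(tcheck)) #TRUE for a valid times vector
```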
The arguments `scale.min`, `scale.max.input`, `sigma`, and `f0`,
which are arguments to `wt` and several other functions,
are for constructing the timescales used for
wavelet analysis. The argument `scale.min` is the shortest timescale,
and must be $2$ or greater. Starting from `scale.min`, each timescale is
`sigma` times the previous one, up to the first timescale that equals or
surpasses `scale.max.input`. The scalloping of
wavelet transforms places additional, independently implemented
constraints on the largest timescale examined,
so choosing a larger `scale.max.input` will only result in longer timescales
up to the limits imposed by scalloping. The argument `f0` is the
ratio of the period of fluctuation to the width of the envelope.
Higher values of `f0` imply higher frequency resolution
(i.e., a wavelet component includes information from a narrower range
of Fourier components) but lower temporal resolution
(the component includes information from a wider range of times).
Resolution should be chosen appropriate to the characteristics of the
data [@Addison_02].
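The timescales that will be examined can be computed directly from these
arguments (a sketch of ours mirroring the internal computation in `wt` for
`f0=1`, before the additional scalloping constraint is applied):
```{r timescale_construction_sketch}
scale.min<-2; scale.max.input<-28; sigma<-1.05 #example argument values
m.max<-floor(log(scale.max.input/scale.min)/log(sigma))+1 #number of timescales
timescales<-scale.min*sigma^(0:m.max) #each timescale is sigma times the previous
range(timescales)
```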
# Time- and timescale-specific measures of synchrony
\noindent The function `wpmf`
implements the wavelet phasor
mean field and the function `wmf` implements the wavelet mean
field. These are techniques for depicting the time and timescale dependence of synchrony,
and are introduced in this section. S3 classes are
defined for `wmf` and `wpmf`, both of which
inherit from the generic class `tts`. See the help files for the generator functions for
these classes (`wmf`, `wpmf` and `tts`, respectively) for slot names and other
information about the classes. There are again `set` and `get` methods for these classes,
and `print` and `summary` methods.
See help files for `wmf_methods` and `wpmf_methods`.
## The wavelet phasor mean field
\noindent The wavelet phasor mean field (the `wpmf` function in `wsyn`) depicts the time and
timescale dependence of phase synchrony of a collection of time series. If $x_n(t)$,
$n=1,\ldots,N$, $t=1,\ldots,T$ are time series of the same variable measured in $N$ locations
at the same times, and if $W_{n,\sigma}(t)$ is the wavelet transform of $x_n(t)$ and
$w_{n,\sigma}(t)=\frac{W_{n,\sigma}(t)}{|W_{n,\sigma}(t)|}$ has only the information about
the complex phases of the transform (unit-magnitude complex numbers such as these are called
\emph{phasors}), then the wavelet phasor mean field is
\begin{equation}
\frac{1}{N} \sum_{n=1}^N w_{n,\sigma}(t).
\end{equation}
For combinations of $t$ and $\sigma$ for which oscillations at time $t$ and timescale
$\sigma$ in the time series $x_n(t)$ have the same phase (they are phase synchronized),
the phasors $w_{n,\sigma}(t)$ will all point in similar directions in the complex plane, and
their sum will be a large-magnitude complex number.
For combinations of $t$ and $\sigma$ for which oscillations at time $t$ and timescale
$\sigma$ in the time series $x_n(t)$ have unrelated phases (they are not phase synchronized),
the phasors $w_{n,\sigma}(t)$ will all point in random, unrelated
directions in the complex plane, and
their sum will be a small-magnitude complex number. Therefore plotting the magnitude of the
wavelet phasor mean field against time and timescale quantifies the time and timescale
dependence of phase synchrony in the $x_n(t)$. The wavelet phasor mean field, being a mean
of phasors, always has magnitude between $0$ and $1$.
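The behavior of a mean of phasors, which underlies the technique, can be seen
directly (a minimal sketch of ours):
```{r phasor_mean_sketch}
N<-11
aligned<-exp(1i*rep(0.3,N)) #phasors with identical phases
unrelated<-exp(1i*runif(N,min=0,max=2*pi)) #phasors with random, unrelated phases
Mod(mean(aligned)) #equals 1
Mod(mean(unrelated)) #typically much smaller
```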
```{r seed_setter_3, echo=F}
set.seed(101)
```
We provide an example based on supplementary figure 1 of @Sheppard_16. A related technique,
the wavelet mean field (section \ref{sect:wmf}), was used there, but the wavelet phasor mean
field also applies and is demonstrated here. We construct data consisting of time series
measured for 100 time steps in each of 11 locations. The time series have three components.
The first component is a sine wave of amplitude $1$ and period $10$ years for the first
half of the time series, and is a sine wave of amplitude $1$ and period $5$ for the
second half of the time series. This same signal is present in all $11$ time series and
creates the synchrony among them.
```{r wpmf_example_1_dat_1}
times1<-0:50
times2<-51:100
times<-c(times1,times2)
ts1<-c(sin(2*pi*times1/10),sin(2*pi*times2/5))+1.1
```
The second component is a sine wave of amplitude $3$ and period $3$ years that is
randomly and independently phase shifted in each of the $11$ time series. The third
component is white noise, independently generated for each time series.
```{r wpmf_example_1_dat_2}
dat<-matrix(NA,11,length(times))
for (counter in 1:dim(dat)[1])
{
ts2<-3*sin(2*pi*times/3+2*pi*runif(1))+3.1
ts3<-rnorm(length(times),0,1.5)
dat[counter,]<-ts1+ts2+ts3
}
dat<-cleandat(dat,times,1)$cdat
```
The second and third components do not generate synchrony,
and obscure the synchrony of the first component.
As a result, synchrony cannot be readily detected by visually examining the time series:
```{r wpmf_example_1_plotts}
plot(times,dat[1,]/10+1,type='l',xlab="Time",ylab="Time series index",ylim=c(0,12))
for (counter in 2:dim(dat)[1])
{
lines(times,dat[counter,]/10+counter)
}
```
Nor can synchrony be readily detected by examining the $55$ pairwise correlation coefficients
between the time series, which are widely distributed and include many values above and below
$0$:
```{r wpmf_example_1_cors}
cmat<-cor(t(dat))
diag(cmat)<-NA
cmat<-as.vector(cmat)
cmat<-cmat[!is.na(cmat)]
hist(cmat,30,xlab="Pearson correlation",ylab="Count")
```
But the wavelet phasor mean field sensitively reveals the synchrony and its time
and timescale structure:
```{r wpmf_example_1, results=FALSE}
res<-wpmf(dat,times,sigmethod="quick")
plotmag(res)
```
The `wpmf` function implements assessment of the statistical significance of
phase synchrony in three ways, one of which is demonstrated by the contour lines on
the above plot (which give a $95\%$ confidence level by default - the level can be changed
with the `sigthresh` argument to the `plotmag` method for the `wpmf` class). The method
of significance testing the wavelet phasor mean field plot is controlled with the
`sigmethod` argument to `wpmf`, which can be `quick` (the default), `fft` or `aaft`. The
`quick` method compares the mean field magnitude value for each time and timescale
separately to a distribution of magnitudes of means of $N$ random, independent
phasors.
Each time/timescale
pair is compared independently to the distribution, and the multiple testing problem is
not accounted for, so some time/timescale pairs will come out as showing "significant"
phase synchrony by chance, i.e., false-positive detections of phase synchrony can occur.
For instance, the small islands of significant synchrony at timescale
approximately $5$ and times about $15$ and $40$ on the above plot are false positives.
Significance is based on stochastic generation of magnitudes of means of random phasors,
so significance contours will differ
slightly on repeat runs. Increasing the number of randomizations (argument `nrand` to `wpmf`)
reduces this variation.
The `quick` method can be inaccurate for very short
timescales. The two alternative methods, `fft` and `aaft`, mitigate this problem but
are substantially slower. The `fft` and `aaft` methods are based on surrogate
datasets (section \ref{sec:surrog})
so are discussed in section \ref{sec:wpmfsignif}.
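For intuition about the `quick` method, its null distribution can be sketched
directly (our illustration, here for $N=11$ sampling locations):
```{r quick_null_sketch}
nullmags<-replicate(10000,Mod(mean(exp(1i*runif(11,min=0,max=2*pi)))))
quantile(nullmags,0.95) #approximate threshold corresponding to 95% contours
```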
The `power` and `plotphase` methods also work on `wpmf` objects.
Examples of the wavelet phasor mean field technique applied to real datasets are in,
for instance, @Sheppard_18 (their Fig. S1) and @Anderson_18 (their Fig. 4).
## The wavelet mean field \label{sect:wmf}
\noindent The wavelet mean field (the `wmf` function in `wsyn`) depicts the time and
timescale dependence of synchrony of a collection of time series $x_n(t)$ for
$n=1,\ldots,N$ and $t=1,\ldots,T$, taking into account both phase synchrony and
associations through time of magnitudes of oscillations in different time series at
a given timescale. See @Sheppard_16 for a precise mathematical definition.
The plot is similar in format to a wavelet phasor mean field plot, but
without significance contours:
```{r wmf_example_1, results=FALSE}
res<-wmf(dat,times)
plotmag(res)
```
The wavelet mean field is a more useful technique than the wavelet phasor mean field
insofar as it accounts for associations of magnitudes of oscillation, in addition
to phase synchrony, but it is less useful insofar as significance contours are not
available. The wavelet mean field also has some mathematical advantages, described in
@Sheppard_16 and @Sheppard_18. The "wavelet Moran theorem" and other theorems
described in those references use the wavelet
mean field, not the wavelet phasor mean field,
and can be used to help attribute synchrony to particular causes.
Thus the two techniques are often best used together:
significance of phase synchrony can be identified with the wavelet phasor mean field,
and then once significance is identified, synchrony can be described and studied using
the wavelet mean field.
The `power` and `plotphase` methods also work on `wmf` objects.
Examples of the wavelet mean field applied to real data
are in @Sheppard_16 and @Sheppard_18.
# Coherence
\noindent Coherence is explained by @Sheppard_16 and @Sheppard_17, among others.
We summarize here some of the explanations given in those references. Let
$x_{1,n}(t)$ and $x_{2,n}(t)$ for $n=1,\ldots,N$ and $t=1,\ldots,T$ be two variables
measured at the same $N$ locations and $T$ times. Let $W_{i,n,\sigma}(t)$ ($i=1,2$) be the
corresponding wavelet transforms. The coherence of $x_{1,n}(t)$ and $x_{2,n}(t)$ is the
magnitude of a quantity we denote $\Pi_\sigma^{(12)}$, which is in turn the mean
of $w_{1,n,\sigma}(t)\overline{w_{2,n,\sigma}(t)}$ over all
time-location pairs for which this product of normalized
wavelet transforms is still defined after wavelet scalloping
is performed. The overline is complex conjugation. This product is a function of timescale.
The $w_{1,n,\sigma}(t)$ are wavelet transforms normalized in one of
a few different ways (see below). Because $w_{1,n,\sigma}(t)\overline{w_{2,n,\sigma}(t)}$
is a complex number with phase equal to the phase difference between the two wavelet components,
the mean, $\Pi_\sigma^{(12)}$, of this quantity over times and locations has large
magnitude if
the phase difference between the transforms is consistent over time and across sampling
locations. Coherences essentially measure the strength of association between the variables
in a timescale-specific way that is also not confounded by lagged or phase-shifted
associations. Coherences and related quantities are analyzed in `wsyn` using the function
`coh` and its corresponding S3 class, `coh`, and the S3 methods that go with the class.
Methods comprise `set` and `get` and `print` and `summary` methods (see the help file for
`coh_methods` for these basic methods), as well as `bandtest`, and `plotmag`, `plotrank`, and
`plotphase`.
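The quantity being averaged has a simple interpretation (a minimal sketch of
ours): for phase-normalized wavelet components, $w_1 \overline{w_2}$ is a
phasor whose phase is the phase difference between the components.
```{r coh_phasor_sketch}
w1<-exp(1i*1.2) #a phase-normalized wavelet component with phase 1.2
w2<-exp(1i*0.7) #another, with phase 0.7
Arg(w1*Conj(w2)) #the phase difference, 0.5
Mod(w1*Conj(w2)) #magnitude 1, so only phase consistency contributes
```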
One possible normalization is phase normalization,
$w_{i,n,\sigma}(t)=\frac{W_{i,n,\sigma}(t)}{|W_{i,n,\sigma}(t)|}$. Set the `norm`
argument of `coh` to "`phase`" to use this normalization. The coherence with
this normalization is often called the \emph{phase coherence}, or the \emph{spatial
phase coherence} if $N>1$ [@Sheppard_17]. Phase coherence measures
the extent to which the two variables have consistent phase differences over time and
across locations, as a function of timescale. The normalization described in the "Wavelet
mean field" section of the Methods of @Sheppard_16 gives the version of the
coherence that was there called the \emph{wavelet coherence}, or the
\emph{spatial wavelet coherence} if $N>1$. Set the `norm`
argument of `coh` to "`powall`" to use this normalization.
If `norm` is "`powind`", then $w_{i,n,\sigma}(t)$ is obtained
by dividing $W_{i,n,\sigma}(t)$ by the square root of the average of
$W_{i,n,\sigma}(t) \overline{W_{i,n,\sigma}(t)}$ over the times for which it is
defined; this is done separately for each $i$ and $n$. The final option for `norm`
available to users of `coh` is "`none`", i.e., raw wavelet transforms
are used. For any value of `norm` except "`phase`", the normalized
wavelet components $w_{i,n,\sigma}(t)$ can also have varying magnitudes, and in
that case the coherence reflects not only consistencies in phase between the two
variables over time and across locations, but is also further increased if
there are correlations in the amplitudes of the fluctuations.
We demonstrate coherence and its implementation in `wsyn` via some simulated data. This
demonstration reporoduces an example given in supplementary figure 5 of @Sheppard_16. Data
consist of an environmental-driver variable, `x`, and a driven biological variable,
`y`, between which we compute coherence. Both were measured at $11$ locations. The
environmental variable `x` was constructed as the sum of: 1) a single common signal
of amplitude $1$ and period $10$ years, present at all $11$ locations; 2) a single common
signal of amplitude $5$ and period $3$ years, also present at all $11$ locations; 3) white
noise of mean $0$ and standard deviation 1.5, independently generated for all $11$ locations.
```{r seed_setter_4, echo=F}
set.seed(101)
```
```{r coh_example_dat_driver}
times<-(-3:100)
ts1<-sin(2*pi*times/10)
ts2<-5*sin(2*pi*times/3)
x<-matrix(NA,11,length(times)) #the driver (environmental) variable
for (counter in 1:11)
{
x[counter,]=ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
}
```
Population time series $y_n(t)$ were the moving average, over three time steps, of the
$x_n(t)$, plus white noise: $y_n(t)=\left( \sum_{k=0}^2 x_n(t-k) \right)/3 + \epsilon_n(t)$.
Here the $\epsilon_n(t)$ are independent, normally distributed random variables of mean
$0$ and standard deviation $3$.
```{r coh_example_dat_env}
times<-0:100
y<-matrix(NA,11,length(times)) #the driven (biological) variable
for (counter1 in 1:11)
{
for (counter2 in 1:101)
{
y[counter1,counter2]<-mean(x[counter1,counter2:(counter2+2)])
}
}
y<-y+matrix(rnorm(length(times)*11,mean=0,sd=3),11,length(times))
x<-x[,4:104]
x<-cleandat(x,times,1)$cdat
y<-cleandat(y,times,1)$cdat
```
The function `cleandat` with `clev=1` (mean removal only) was used. Mean removal is
sufficient cleaning for these artificially generated data.
The relationship between the environmental and biological variables cannot
readily be detected using ordinary correlation methods - correlations
through time between the $x_n(t)$ and $y_n(t)$ range widely on both sides of 0:
```{r cors_range_widely}
allcors<-c()
for (counter in 1:dim(x)[1])
{
allcors[counter]<-cor(x[counter,],y[counter,])
}
allcors
```
However, the function `coh` can be used to compute the coherence between `x` and `y`:
```{r coh_example_call}
res<-coh(dat1=x,dat2=y,times=times,norm="powall",
sigmethod="fftsurrog1",nrand=100,
f0=0.5,scale.max.input=28)
```
The normalization to be used is specified via `norm`, as described
above. The `powall` option corresponds to the (spatial) wavelet coherence of
@Sheppard_16. There are several alternative methods for testing the
significance of coherence, and the method used
is controlled by the `sigmethod` argument. All significance
methods are based on "surrogate datasets".
These are datasets that have been randomized in an appropriate way. In addition
to computing coherence of data, coherence is also computed in the same way for
`nrand` surrogate datasets that represent the null hypothesis of no relationship
between `dat1` and `dat2` while retaining other statistical features of the data.
See section \ref{sec:surrog} for details of surrogates and significance testing,
and allowed values of `sigmethod`. Larger values of `nrand` produce more accurate
significance results that are less variable on repeat runs, but also require more
computational time. Using `nrand` at least 1000 or 10000 for final runs is
recommended. This can take some time for values of `sigmethod` other than `fast`
(see section \ref{sec:surrog}), so `100` was used above for demonstration
purposes only. The arguments `f0` and `scale.max.input` control details of the
wavelet transform (see section \ref{sec:wt}) and are set here to agree with values
used by @Sheppard_16.
Coherence can be plotted using `plotmag`:
```{r coh_plot, results=FALSE}
plotmag(res)
```
The red line here is the coherence. The black lines are $95$th and $99$th (these are
the default values for `sigthresh`) quantiles of coherences of surrogate datasets. Coherence is
significant (the red line is above the black lines) for timescales close to $10$ years, but not
significant for timescales close to
$3$ years, as expected since three values of $x$ are averaged to produce one value of $y$,
so the $3$-year periodicity in $x$ is averaged away and does not pass to $y$.
Thus coherence reveals a (timescale-specific) relationship between $x$ and $y$ that
correlation methods did not reveal.
Typically preferable [@Sheppard_17] is the `fast` option to `sigmethod`, because
far more surrogates can be used in the same computational time:
```{r coh_example_call_fast, results=FALSE}
res<-coh(dat1=x,dat2=y,times=times,norm="powall",
sigmethod="fast",nrand=10000,
f0=0.5,scale.max.input=28)
plotmag(res)
```
For the `fast` algorithm (section \ref{sec:surrog}) the modulus of
`res$signif$coher` (plotted above as the dashed red line) can be compared
to quantiles of the modulus of
`res$signif$scoher` (the black lines are $95$th and $99$th quantiles) in the
usual way to make statements about significance of coherence,
but the modulus of `res$signif$coher` is only approximately equal to the standard coherence,
which is the modulus of `res$coher` (and which is plotted above as the solid red line). Thus
one should use the dashed red line above, and `res$signif$coher`, when making conclusions
about the significance of
coherence, and the solid red line, and `res$coher`, when using the actual value of the
coherence. Typically the two red lines are quite similar. For values
of `sigmethod` other than `fast`, they are equal.
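For these data, the similarity of the two quantities can be checked directly
(our sketch):
```{r fast_coher_compare}
range(Mod(res$coher)-Mod(res$signif$coher)) #typically small differences
```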
The significance indicated on the above plots is done on a timescale-by-timescale
basis, and type-I errors (false positives) are not taken into account. Neither do the
individual timescales correspond to independent tests. A method of
aggregating signficance across a timescale band was described by @Sheppard_16, and
is implemented in `bandtest`:
```{r bandtest}
res<-bandtest(res,c(8,12))
```
A call to `bandtest` computes a
$p$-value for the aggregate significance of coherence across the specified band
(8 to 12 year timescales, in this case), and also computes the average phase of
$\Pi_\sigma^{(12)}$ across the band. This information is added as a new row to
the `bandp` slot of the `coh` object (which was previously `NA` in this case).
```{r bandtest_display_bandp}
get_bandp(res)
```
Doing another timescale band adds another row to `bandp`:
```{r bandtest2}
res<-bandtest(res,c(2,4))
get_bandp(res)
```
The aggregate $p$-values are now displayed on the plot:
```{r display_p_1, results=FALSE}
plotmag(res)
```
These results show the variables $x$ and $y$ are highly significantly coherent
across the timescale band $8$ to $12$ years, but are not significantly coherent across
the band $2$ to $4$ years, as expected from the way the data were generated and
by comparing the red and black lines.
Band-aggregated $p$-values are produced essentially by averaging the rank in surrogates of the
empirical coherence across timescales. The same procedure is then applied to
each surrogate, ranking it with respect to the other surrogates and taking the mean across
timescales. Comparing the empirical mean rank to the distribution of surrogate mean
ranks gives a $p$-value [@Sheppard_16; @Sheppard_17; @Sheppard_18].
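A toy sketch (ours) of this aggregation, with made-up numbers standing in for
coherences at three timescales in a band, and $100$ surrogates:
```{r bandp_rank_sketch}
empcoh<-c(0.62,0.70,0.66) #toy empirical coherences at three timescales
surrcoh<-matrix(runif(300),nrow=100,ncol=3) #toy surrogate coherences
fractgt<-colMeans(surrcoh<matrix(empcoh,nrow=100,ncol=3,byrow=TRUE)) #per-timescale ranks
mean(fractgt) #the band statistic; the same is computed for each surrogate
```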
One can also display a plot of the ranks of `Mod(res$signif$coher)` in the
distribution of `Mod(res$signif$scoher)` values at each timescale:
```{r plotrank_1, results=FALSE}
plotrank(res)
```
The vertical axis label `Fract surr gt` stands for the fraction of surrogate coherences
that the coherence of the data is greater than at the given timescale, so values
are between $0$ and $1$ and large values indicate significance.
Whenever values exceed the argument `sigthresh` (which takes the default value $0.95$
for the above call to `plotrank`), the coherence is nominally significant. The value(s)
of `sigthresh` are displayed as dashed horizontal line(s) on the plot. This is nominal
significance because of the multiple-testing problem. As can be seen, $p$-values stored
in `bandp` are also displayed on the plot, and these values
aggregate across timescales appropriately and alleviate the multiple-testing problem.
Average phases of $\Pi_\sigma^{(12)}$ across the timescale bands of interest were also
computed by `bandtest` and stored in the `bandp` slot, in units of radians. These
are average phases for the `coher` slot of a `coh` object.
Phases of $\Pi_\sigma^{(12)}$
can be plotted against timescale and average phases in `bandp` displayed using the
`plotphase` function:
```{r plotphase_1, results=FALSE}
plotphase(res)
```
Average phases for timescale bands across which coherence is not significant
(e.g., the $2$ to $4$ year band in the above plot) are
random and meaningless. Average phases for bands across which coherence is
significant (e.g., the $8$ to $12$ band in the plot) can give valuable
information about the nature of the relationship
between the variables [@Sheppard_18].
# Surrogates \label{sec:surrog}
\noindent Some of the text of this section was adapted, with minor modifications only,
from our earlier work [@Sheppard_16; @Sheppard_18; @Anderson_18].
The level of coherence consistent with the null
hypothesis that there is no relationship between two variables depends on the spatial
and temporal autocorrelation of the data. For instance, two variables that fluctuate
regularly at the same frequency and are both highly spatially synchronous will have a
phase difference that is highly consistent over time and space, and therefore will have high
spatial wavelet coherence, even if they are not related. Two irregular oscillators with
low spatial synchrony are less likely to show consistent phase differences over time and
space if they are unrelated. We test coherences
for significance using resampling schemes based on surrogate datasets that randomize
away phase relationship between variables while retaining, to the extent possible, the
spatial and temporal autocorrelation properties and the marginal distributions of the
time series. We use the widely applied Fourier surrogate and amplitude adjusted Fourier
surrogate methods [@Prichard_94; @Schreiber_00], implemented
in the `surrog` function in `wsyn` and summarized below. Surrogates are also used for
applications other than measures of coherence (see, e.g., section \ref{sec:wpmfsignif}).
## Fourier surrogates
\noindent Details are presented elsewhere [@Prichard_94; @Schreiber_00]. We summarize here.
A Fourier surrogate of a time series $x(t)$ is obtained by the following steps:
\begin{itemize}
\item Compute the fast Fourier transform of $x(t)$, here called $X(\tau)$ for the timescale $\tau$
\item Randomize the phases of the transform by multiplying $X(\tau)$ by a random, uniformly distributed unit-magnitude complex number; do this independently for each $\tau$
\item Inverse transform, giving the surrogate time series
\end{itemize}
This procedure can be done using `surrog` with `surrtype="fft"`.
Because only the phases of the Fourier transform are randomized, not the magnitudes,
autocorrelation properties of the surrogate time series are the same as those of
$x(t)$.
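A from-scratch sketch (ours) of these steps for a single time series of even
length follows; the conjugate-symmetric phase multipliers used below are one
standard way of keeping the surrogate real-valued, a detail handled internally
by `surrog`:
```{r fft_surrog_sketch}
n<-64
xx<-rnorm(n)
XX<-stats::fft(xx) #step 1: Fourier transform
phi<-runif(n/2-1,min=0,max=2*pi) #random phases for the positive frequencies
ph<-c(0,phi,0,-rev(phi)) #conjugate-symmetric, so the surrogate is real
xsurr<-Re(stats::fft(XX*exp(1i*ph),inverse=TRUE)/n) #steps 2-3: randomize, invert
c(sd(xx),sd(xsurr)) #the power spectrum, hence the variance, is preserved
```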
Fourier surrogates of $N$ time series $x_n(t)$ measured at locations
$n=1,\ldots,N$ and times $t=1,\ldots,T$ are obtained by the following steps:
\begin{itemize}
\item Compute the fast Fourier transform of $x_n(t)$ for each $n$, here called $X_n(\tau)$
\item Randomize the phases of the transforms by multiplying $X_n(\tau)$ by a random, uniformly distributed unit-magnitude complex number. Do this independently for each $\tau$, but different
random multipliers can optionally be used for each $n$ if desired, or the same phase multiplier
can be used for all $n$, for a given $\tau$ (these are called "synchrony preserving surrogates" - see below)
\item Inverse transform, giving the surrogate time series
\end{itemize}
This procedure can be done using `surrog` with `surrtype=fft`
and with `syncpres=TRUE` (for synchrony-preserving surrogates) or with
`syncpres=FALSE` (for independent surrogates).
Autocorrelation properties of individual time series are preserved, as for the $N=1$ case
covered above. If synchrony-preserving surrogates are used, all cross-correlation
properties between time series are also preserved, because cross spectra are unchanged by
the joint phase randomization. Therefore synchrony is preserved.
Fourier surrogates tend to have normal marginal distributions [@Schreiber_00].
Therefore, to ensure fair
comparisons between statistical descriptors (such as coherences) of real and surrogate
datasets, Fourier surrogates should only be applied to time series that themselves
have approximately normal marginals. The Box-Cox transformations implemented in `cleandat` can
help normalize data prior to analysis. If data are difficult to normalize, or as
an alternative, the amplitude-adjusted Fourier transform surrogates method of the
next section can be used instead.
## Amplitude-adjusted Fourier surrogates
\noindent Amplitude-adjusted Fourier surrogates are described elsewhere [@Schreiber_00].
Either synchrony preserving (`syncpres=TRUE`) of independent (`syncpres=FALSE`)
amplitude-adjusted Fourier (AAFT) surrogates can be obtained from `surrog` using
`surrtype="aaft"`. AAFT surrogates can be applied to non-normal data, and return
time series with exactly the same marginal distributions as the original time series.
AAFT surrogates have approximately the same power spectral (and cross-spectral, in the
case of `syncpres=TRUE`) properties as the original data.
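A usage sketch (ours) showing that each AAFT surrogate time series is a
rearrangement of the values of the corresponding original series:
```{r aaft_usage_sketch}
aafts<-surrog(x,nsurrogs=3,surrtype="aaft",syncpres=FALSE)
all(sort(aafts[[1]][1,])==sort(x[1,])) #TRUE: same values, reordered
```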
## Fast coherence
\noindent The fast coherence algorithm implemented in `coh` (option `sigmethod="fast"`)
implements Fourier surrogates only, and only applies for `norm` equal to `none`, `powall`,
or `powind`. It is described in detail elsewhere [@Sheppard_17].
## Alternatives to the "quick" method of assessing significance of wavelet phasor mean field values \label{sec:wpmfsignif}
\noindent When `sigmethod` is `fft` in a call to `wpmf`, the
empirical wavelet phasor mean field is compared to wavelet phasor mean fields of
Fourier surrogate datasets.
The `signif` slot of the output is a list with first element `"fft"`, second element equal to
`nrand`, and third element the fraction of surrogate-based wavelet phasor mean field
magnitudes that the empirical wavelet phasor mean field magnitude is greater than
(a times by timescales matrix). For `sigmethod` equal to `aaft`, AAFT
surrogates are used instead. Non-synchrony-preserving surrogates are used.
# Wavelet linear models and their uses for understanding synchrony
\noindent Linear models on wavelet transforms were introduced by @Sheppard_18,
where they were used for understanding the causes of synchrony. We demonstrate
the implementation in `wsyn` of the tools developed by @Sheppard_18, without giving
a complete description of the concepts or mathematics behind those tools.
Such a description is in @Sheppard_18.
## Model construction tools
```{r seed_setter_5, echo=F}
set.seed(3221)
```
First create a driver variable composed of an oscillation of period $12$ years and
an oscillation of period $3$ years, and normally
distributed white noise of mean $0$ and standard deviation $1.5$.
```{r driver_1}
lts<-12
sts<-3
mats<-3
times<-seq(from=-mats,to=100)
ts1<-sin(2*pi*times/lts)
ts2<-sin(2*pi*times/sts)
numlocs<-10
d1<-matrix(NA,numlocs,length(times)) #the first driver
for (counter in 1:numlocs)
{
d1[counter,]<-ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
}
```
Next create a second driver, again composed of an oscillation of period $12$ years and
an oscillation of period $3$ years, and normally
distributed white noise of mean $0$ and standard deviation $1.5$.
```{r driver_2}
ts1<-sin(2*pi*times/lts)
ts2<-sin(2*pi*times/sts)
d2<-matrix(NA,numlocs,length(times)) #the second driver
for (counter in 1:numlocs)
{
d2[counter,]<-ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
}
```
Next create an irrelevant environmental variable. With real data, of course,
one will not necessarily know in advance whether an environmental
variable is irrelevant to a population system. But, for the purpose of
demonstrating the methods, we are playing the dual role of data creator and
analyst.
```{r dirrel}
dirrel<-matrix(NA,numlocs,length(times)) #the irrelevant env var
for (counter in 1:numlocs)
{
dirrel[counter,]<-rnorm(length(times),mean=0,sd=1.5)
}
```
The population in each location is a combination of the two drivers,
plus local variability. Driver 1 is averaged over 3 time steps
in its influence on the populations, so
only the period-12 variability in driver 1 influences the populations.
```{r popdat}
pops<-matrix(NA,numlocs,length(times)) #the populations
for (counter in (mats+1):length(times))
{
aff1<-apply(FUN=mean,X=d1[,(counter-mats):(counter-1)],MARGIN=1)
aff2<-d2[,counter-1]
pops[,counter]<-aff1+aff2+rnorm(numlocs,mean=0,sd=3)
}
pops<-pops[,times>=0]
d1<-d1[,times>=0]
d2<-d2[,times>=0]
dirrel<-dirrel[,times>=0]
times<-times[times>=0]
```
If only the data were available and we were unaware of how they were generated,
we may want to infer the causes of synchrony and its timescale-specific patterns
in the populations. The wavelet mean fields of `pops`, `d1` and `d2` show some
synchrony at timescales of about $3$ and $12$ for all three variables.
```{r wmfs_wlmexample, results=FALSE}
dat<-list(pops=pops,d1=d1,d2=d2,dirrel=dirrel)
dat<-lapply(FUN=function(x){cleandat(x,times,1)$cdat},X=dat)
wmfpop<-wmf(dat$pops,times,scale.max.input=28)
plotmag(wmfpop)
wmfd1<-wmf(dat$d1,times,scale.max.input=28)
plotmag(wmfd1)
wmfd2<-wmf(dat$d2,times,scale.max.input=28)
plotmag(wmfd2)
```
Thus we cannot know for sure
from the wavelet mean fields whether population synchrony at
each timescale is due to synchrony in `d1`, `d2`, or both drivers at that timescale.
However, we can fit wavelet linear models.
Start by fitting a model with all three predictors. Only the `"powall"`
option for `norm` is implemented so far.
```{r fit_mod_allpred}
wlm_all<-wlm(dat,times,resp=1,pred=2:4,norm="powall",scale.max.input=28)
```
We will carry out analyses for this model at long timescales
($11$ to $13$ years) and short timescales
($2$ to $4$ years) simultaneously. First test whether we can drop each variable.
```{r sometests}
wlm_all_dropi<-wlmtest(wlm_all,drop="dirrel",sigmethod="fft",nrand=100)
wlm_all_drop1<-wlmtest(wlm_all,drop="d1",sigmethod="fft",nrand=100)
wlm_all_drop2<-wlmtest(wlm_all,drop="d2",sigmethod="fft",nrand=100)
```
Examine results for dropping `dirrel`, long and short timescales. We find
that `dirrel` does not need to be retained in either long- or short-timescale
models, as expected given how data were constructed:
```{r dropdirrel_1, results=FALSE}
blong<-c(11,13)
bshort<-c(2,4)
wlm_all_dropi<-bandtest(wlm_all_dropi,band=blong)
wlm_all_dropi<-bandtest(wlm_all_dropi,band=bshort)
plotmag(wlm_all_dropi)
plotrank(wlm_all_dropi)
```
Examine results for dropping `d1`, long and short timescales. We find that
`d1` should be retained in a long-timescale model but need not be retained
in a short-timescale model, again as expected:
```{r dropd1_1, results=FALSE}
wlm_all_drop1<-bandtest(wlm_all_drop1,band=blong)
wlm_all_drop1<-bandtest(wlm_all_drop1,band=bshort)
plotmag(wlm_all_drop1)
plotrank(wlm_all_drop1)
```
Examine results for dropping `d2`, long and short timescales. We find that
`d2` should be retained in both a short-timescale model and
in a long-timescale model, again as expected:
```{r dropd2_1, results=FALSE}
wlm_all_drop2<-bandtest(wlm_all_drop2,band=blong)
wlm_all_drop2<-bandtest(wlm_all_drop2,band=bshort)
plotmag(wlm_all_drop2)
plotrank(wlm_all_drop2)
```
Note that only $100$ randomizations were used in this example. This is for speed -
in a real analysis, at least $1000$ randomizations should typically be performed,
and preferably at least $10000$.
<!--***DAN: once the fast algorithm is available, use it, and change this text-->
## Amounts of synchrony explained
\noindent Now we have constructed models for short timescales ($2-4$ years)
and long timescales ($11-13$ years) for the example, finding, as expected,
that `d1` is a driver at long timescales only and `d2` is a driver at short
and long timescales.
How much of the synchrony in the response variable is explained by these drivers
for each timescale band?
For short timescales, almost all the synchrony that can be explained is explained
by Moran effects of `d2`:
```{r shortts_syncexpl}
se<-syncexpl(wlm_all)
se_short<-se[se$timescales>=bshort[1] & se$timescales<=bshort[2],]
round(100*colMeans(se_short[,c(3:12)])/mean(se_short$sync),4)
```
These are percentages of synchrony explained by various factors: `syncexpl` is total
synchrony explained by the predictors for which we have data; `crossterms` must be small
enough for the rest of the results to be interpretable; `d1`, `d2` and `dirrel`
are percentages of synchrony explained by those predictors;
`interactions` is the percentage of
synchrony explained by interactions between predictors (see @Sheppard_18); and the
remaining terms are percentages of synchrony explained by individual interactions.
For long timescales, Moran effects of both drivers are present, as are interactions
between these Moran effects:
```{r longts_syncexpl}
se_long<-se[se$timescales>=blong[1] & se$timescales<=blong[2],]
round(100*colMeans(se_long[,c(3:12)])/mean(se_long$sync),4)
```
Note that cross terms are fairly small in both these analyses compared to synchrony
explained. Results can only be interpreted when this is the case.
See @Sheppard_18 for detailed information on cross terms and
interacting Moran effects.
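A quick numerical check of this condition is easy to perform. The following is a
minimal sketch, assuming the output of `syncexpl` retains columns named
`crossterms` and `sync`, as referenced above:
```{r crossterms_check}
#percent cross terms relative to mean synchrony, for each band
100*mean(se_short$crossterms)/mean(se_short$sync) #short timescales
100*mean(se_long$crossterms)/mean(se_long$sync) #long timescales
```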
The pattern of synchrony that would pertain if the only drivers of synchrony were
those included in a model can also be produced, and compared to the actual pattern
of synchrony (as represented by the wavelet mean field) to help evaluate the model.
```{r examp_predsync_1}
pres<-predsync(wlm_all)
plotmag(pres)
plotmag(wmfpop)
```
The similarity is pretty good. Now make the comparison using the model with sole
predictor `d1`.
```{r examp_predsync_2}
wlm_d1<-wlm(dat,times,resp=1,pred=2,norm="powall",scale.max.input=28)
pres<-predsync(wlm_d1)
plotmag(pres)
```
The similarity with the wavelet mean field of the populations is pretty good at
long timescales (where the model with sole predictor `d1` was found to be a
good model), but not at short timescales.
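The visual comparisons above can be loosely supplemented with a numeric one. The
following is a rough sketch, not part of the standard workflow; it assumes
`predsync` returns a `tts`-class object (so `get_values` applies) and that the
same wavelet parameters were used for the models and the wavelet mean field, so
the time-by-timescale grids match:
```{r predsync_numeric_compare}
#correlate magnitudes of predicted and observed mean fields
#across the time-timescale plane
magall<-Mod(get_values(predsync(wlm_all))) #prediction, full model
magd1<-Mod(get_values(predsync(wlm_d1))) #prediction, d1-only model
magobs<-Mod(get_values(wmfpop)) #observed wavelet mean field
cor(as.vector(magall),as.vector(magobs),use="pairwise.complete.obs")
cor(as.vector(magd1),as.vector(magobs),use="pairwise.complete.obs")
```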
<!--# Analysis of plankton populations in UK seas
\noindent All examples so far have used artificial data. It makes sense at this
stage to demonstrate the tools described so far on real data. We carry out a
much simplified version of some of the analyses of @Sheppard_18.
To be completed later.-->
<!--DAN: embed the B-C transformed cal fin, PCI, and temp data in the package and
document them, then do some analyses here of those data using all our tools so far.-->
# Clustering
Tools are provided in `wsyn` for separating sampling locations into
network "clusters," "modules," or "communities"
(three alternative names for the same concept)
consisting of sites that are especially synchronous with each other.
@Walter_17 applied this kind of approach to gypsy moth
data. Given an $N \times T$ matrix of
values corresponding to measurements made in $N$ locations over $T$ times, the approach starts
by generating an $N \times N$ synchrony matrix with $i,j$th entry describing the strength
of synchrony between the time series from locations $i$ and $j$ (in one of
several ways - see below). This matrix is then passed
to an existing clustering algorithm to partition the set of locations.
## The synchrony matrix
There are numerous ways to generate a synchrony matrix, and `synmat` provides several
alternatives. For an initial demonstration, create some data in two synchronous clusters.
```{r artificial_clustering_data}
N<-5
Tmax<-100
rho<-0.5
sig<-matrix(rho,N,N)
diag(sig)<-1
d<-t(cbind(mvtnorm::rmvnorm(Tmax,mean=rep(0,N),sigma=sig),
mvtnorm::rmvnorm(Tmax,mean=rep(0,N),sigma=sig)))
d<-cleandat(d,1:Tmax,1)$cdat
```
Then make a synchrony matrix using Pearson correlation.
```{r pearson_synmat}
sm<-synmat(d,1:Tmax,method="pearson")
fields::image.plot(1:10,1:10,sm,col=heat.colors(20))
```
The function `synmat` provides many other options, beyond correlation, for different kinds of synchrony matrices. We demonstrate a frequency-specific approach. First create some artificial data.
```{r freq_spec_synmat}
N<-20
Tmax<-500
tim<-1:Tmax
ts1<-sin(2*pi*tim/5)
ts1s<-sin(2*pi*tim/5+pi/2)
ts2<-sin(2*pi*tim/12)
ts2s<-sin(2*pi*tim/12+pi/2)
gp1A<-1:5
gp1B<-6:10
gp2A<-11:15
gp2B<-16:20
d<-matrix(NA,Tmax,N)
d[,c(gp1A,gp1B)]<-ts1
d[,c(gp2A,gp2B)]<-ts1s
d[,c(gp1A,gp2A)]<-d[,c(gp1A,gp2A)]+matrix(ts2,Tmax,N/2)
d[,c(gp1B,gp2B)]<-d[,c(gp1B,gp2B)]+matrix(ts2s,Tmax,N/2)
d<-d+matrix(rnorm(Tmax*N,0,2),Tmax,N)
d<-t(d)
d<-cleandat(d,1:Tmax,1)$cdat
```
These data have period-5 oscillations which are synchronous within location groups 1 and 2, but are
asynchronous between these groups. Superimposed on the period-5 oscillations are period-12
oscillations which are synchronous within location groups A and B, but are asynchronous between
these groups. Groups 1 and 2 are locations $1-10$ and $11-20$, respectively. Group A is locations
$1-5$ and $11-15$. Group B is locations $6-10$ and $16-20$. So the spatial structure of period-5
oscillations differs from that of period-12 oscillations. Strong local noise is superimposed on top
of the periodic oscillations.
We compute synchrony matrices using portions of the cross-wavelet transform centered on periods
5 and 12 (in separate synchrony matrices) to detect the different structures on the different
timescales.
```{r detect_tsspecific_sync}
sm5<-synmat(dat=d,times=1:Tmax,method="ReXWT",tsrange=c(4,6))
fields::image.plot(1:N,1:N,sm5,col=heat.colors(20))
sm12<-synmat(dat=d,times=1:Tmax,method="ReXWT",tsrange=c(11,13))
fields::image.plot(1:N,1:N,sm12,col=heat.colors(20))
```
This timescale-specific approach reveals the structure of the data better than a
correlation approach does:
```{r abuse_cor}
sm<-synmat(dat=d,times=1:Tmax,method="pearson")
fields::image.plot(1:N,1:N,sm,col=heat.colors(20))
```
Several additional synchrony measures with which `synmat` can construct
synchrony matrices are described in the documentation of the function.
Important note: synchrony matrices can have negative values for some of the methods provided by `synmat`. This is appropriate, since correlation and other measures of synchrony can be negative, but it complicates cluster detection (see next section).
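For instance, the `ReXWT`-based matrices computed above typically contain some negative
entries (`na.rm=TRUE` handles the `NA` diagonal):
```{r synmat_negative_entries}
range(sm5,na.rm=TRUE)
range(sm12,na.rm=TRUE)
```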
## Clustering
The function `clust` computes network modules/clusters and helps keep
information about them organized. That function is also the generator
function for the `clust` class. The class has `print` and `summary` and
`set` and `get` methods (see the help file for `clust_methods`).
The clustering algorithm used is a
slight adaptation of that of @Newman_2006 - see the next section for
details. We illustrate the use of `clust` using the artificial data
from the second example of the previous section.
```{r clust_demo}
#make some artificial coordinates for the geographic locations of where data were measured
coords<-data.frame(X=c(rep(1,10),rep(2,10)),Y=rep(c(1:5,7:11),times=2))
#create clusters based on the 5-year timescale range and map them using coords
cl5<-clust(dat=d,times=1:Tmax,coords=coords,method="ReXWT",tsrange=c(4,6))
get_clusters(cl5) #the first element of the list is always all 1s - prior to any splits
#call the mapper here
plotmap(cl5)
#plot mean time series for each module
plot(get_times(cl5)[1:100],get_mns(cl5)[[2]][1,1:100],type='l',col='red',
ylim=range(get_mns(cl5)),xlab="Time step",ylab="Mean pop.")
lines(get_times(cl5)[1:100],get_mns(cl5)[[2]][2,1:100],type='l',col='green')
legend(x="topright",legend=c("mod1","mod2"),lty=c(1,1),col=c("red","green"))
#create wavelet mean fields for each module and plot
cl5<-addwmfs(cl5)
plotmag(get_wmfs(cl5)[[2]][[1]])
plotmag(get_wmfs(cl5)[[2]][[2]])
#create clusters based on the 12-year timescale range and map them using coords
cl12<-clust(dat=d,times=1:Tmax,coords=coords,method="ReXWT",tsrange=c(11,13))
get_clusters(cl12)
#call the mapper here
plotmap(cl12)
#plot mean time series for each module
plot(get_times(cl12)[1:100],get_mns(cl12)[[2]][1,1:100],type='l',col='red',
ylim=range(get_mns(cl12)),xlab="Time step",ylab="Mean pop.")
lines(get_times(cl12)[1:100],get_mns(cl12)[[2]][2,1:100],type='l',col='green')
legend(x="topright",legend=c("mod1","mod2"),lty=c(1,1),col=c("red","green"))
#create wavelet mean fields for each module and plot
cl12<-addwmfs(cl12)
plotmag(get_wmfs(cl12)[[2]][[1]])
plotmag(get_wmfs(cl12)[[2]][[2]])
```
Color intensity on maps of clusters indicates the strength of contribution
of a node to its module - details in the next section. The function `addwpmfs`
is similar to the function `addwmfs` demonstrated above, but adds wavelet phasor
mean field information to a `clust` object.
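For example, under the assumption that `addwpmfs` and an accessor `get_wpmfs` parallel
the `addwmfs` and `get_wmfs` functions demonstrated above, wavelet phasor mean fields
for the two 5-year-timescale modules could be added and plotted as follows (a sketch,
not run here):
```{r addwpmfs_sketch, eval=FALSE}
cl5<-addwpmfs(cl5)
plotmag(get_wpmfs(cl5)[[2]][[1]]) #wavelet phasor mean field for module 1
plotmag(get_wpmfs(cl5)[[2]][[2]]) #wavelet phasor mean field for module 2
```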
## The clustering algorithm
The clustering algorithm used by `clust` is implemented in `cluseigen`, and is
a generalization of the algorithm of @Newman_2006. The algorithm makes use of
the concept of modularity. Modularity was defined by @Newman_2006 for
any unweighted, undirected network paired with a partitioning of the nodes
into modules. The modularity is then a single numeric score which is higher
for better partitionings, i.e., for partitionings that more effectively
group nodes that are more heavily connected and separate nodes that are
less connected. The ideal goal would be to find the partitioning that
maximizes the modularity score, but this is computationally infeasible
for realistically large networks [@Newman_2006]. Instead, the algorithm
of @Newman_2006, which is computationally very efficient, provides a
good partitioning that is not guaranteed to be optimal but is typically
close to optimal [@Newman_2006]. The modularity itself can be computed
rapidly, given a partitioning, with the function `modularity` in `wsyn`.
Modularity is defined in @Newman_2006 for unweighted networks, and the
definition generalizes straightforwardly to weighted networks for which
all weights are non-negative. But synchrony matrices can represent
weighted networks for which some weights are allowed to be negative.
A generalization of modularity for this more general type of network was
defined by @Gomez_2009, and the `modularity` function computes this
generalization. Values are the same as the definition of @Newman_2006
for non-negatively weighted networks. The algorithm implemented by
`cluseigen` is a slight generalization of the original @Newman_2006
algorithm that applies to weighted networks for which some edges are
allowed to have negative weight.
We here describe the generalized algorithm, the validity of which was
realized by Lei Zhao. Let $w_{ij}$ be the adjacency matrix for a network,
so the $ij$th entry of this matrix is the weight of the edge between
nodes $i$ and $j$, $0$ if there is no edge. Let $C_i$ be the
community/module to which node $i$ is assigned and let $C_j$ be the
same for node $j$. The original definition of modularity, for
non-negative $w_{ij}$, is
$$Q=\frac{1}{2w}\sum_{ij} \left(w_{ij}-\frac{w_i w_j}{2w}\right)\delta(C_i,C_j),$$
where $w_i=\sum_j w_{ij}$, $w=\frac{1}{2}\sum_i w_i$, and $\delta$ is
the Kronecker delta function, equal to $1$ when $C_i=C_j$ and $0$ otherwise.
For the case of partitioning into two clusters, @Newman_2006 notes
that $\delta(C_i,C_j)=\frac{1}{2}(s_i s_j +1)$, where we define $s_i$
to be $1$ if node $i$ is in group $1$ and $-1$ if it is in group $2$.
Then
$$Q=\frac{1}{4w}\sum_{ij} \left(w_{ij}-\frac{w_i w_j}{2w}\right)(s_i s_j +1),$$
and because $\sum_{ij} w_{ij} = \sum_{ij} \frac{w_i w_j}{2w} = 2w$, the constant
term vanishes, giving
$$Q=\frac{1}{4w}\sum_{ij} \left(w_{ij}-\frac{w_i w_j}{2w}\right)s_i s_j.$$
Defining a matrix $\mathbf{B}$ such that $B_{ij}=w_{ij}-\frac{w_i w_j}{2w}$,
we have
$$Q=\frac{1}{4w}\mathbf{s}^\tau \mathbf{B} \mathbf{s},$$
where $\tau$ is transpose and the bold quantities are the vectors/matrices
composed of the indexed quantities denoted with the same symbol.
@Newman_2006 presents an argument that a good way to come close to
optimizing $Q$ over possible partitions into two modules is to choose
$s_i$ to be the same sign as the $i$th entry of the leading eigenvector
of $\mathbf{B}$, if the leading eigenvalue is positive (otherwise the
algorithm halts with no splits, returning the trivial "decomposition"
into one module). This gives the first split, according to the Newman
algorithm, of the network into modules. Subsequent splits are handled in
a similar but not identical way described by @Newman_2006. (In particular
it is incorrect to simply delete edges connecting the two modules from
the first split and apply the algorithm to the resulting graphs.)
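The eigenvector step for an initial split can be illustrated in a few lines of base R.
The following is a schematic sketch for intuition only, not the `cluseigen`
implementation, using a hypothetical small network of two cliques joined by a weak
edge (non-negative weights, so the original @Newman_2006 setting applies):
```{r newman_split_sketch, eval=FALSE}
#hypothetical adjacency matrix: two 3-node cliques joined by one weak edge
w<-matrix(0,6,6)
w[1:3,1:3]<-1
w[4:6,4:6]<-1
diag(w)<-0
w[3,4]<-0.1
w[4,3]<-0.1
wi<-rowSums(w) #the w_i
wtot<-sum(wi)/2 #w
B<-w-outer(wi,wi)/(2*wtot) #B_ij = w_ij - w_i w_j/(2w)
eig<-eigen(B,symmetric=TRUE)
if (eig$values[1]>0) {
  modules<-ifelse(eig$vectors[,1]>=0,1,2) #split by sign of leading eigenvector
} else {
  modules<-rep(1,nrow(w)) #no split if leading eigenvalue is not positive
}
modules #the two cliques should be assigned to different modules
```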
The generalized modularity of @Gomez_2009 is defined as follows.
Let $w_{ij}^+ = \max(0,w_{ij})$ and $w_{ij}^- = \max(0,-w_{ij})$ so
that $w_{ij}=w_{ij}^+ - w_{ij}^-$. Let $w_i^+ = \sum_j w_{ij}^+$,
$w_i^- = \sum_j w_{ij}^-$, $w^+ = \frac{1}{2} \sum_i w_i^+$, and
$w^- = \frac{1}{2} \sum_i w_i^-$. Then @Gomez_2009 justifies the
definitions
$$Q^+ = \frac{1}{2w^+} \sum_{ij}\left(w_{ij}^+-\frac{w_i^+ w_j^+}{2w^+}\right)\delta(C_i,C_j),$$
$$Q^- = \frac{1}{2w^-} \sum_{ij}\left(w_{ij}^- - \frac{w_i^- w_j^-}{2w^-}\right)\delta(C_i,C_j),$$
and
$$Q=\frac{2w^+}{2w^+ + 2w^-}Q^+ - \frac{2w^-}{2w^+ + 2w^-}Q^-.$$
This is a generalization of the original definition of $Q$, in the sense
that it reduces to that definition in the case of non-negatively
weighted networks. @Gomez_2009 provides a probabilistic interpretation
that generalizes the probabilistic interpretation of @Newman_2006.
It is straightforward to show
$$Q=\frac{1}{2w^+ + 2w^-} \sum_{ij} \left(w_{ij}-\left(\frac{w_i^+ w_j^+}{2w^+} - \frac{w_i^- w_j^-}{2w^-}\right)\right)\delta(C_i,C_j).$$
Again considering an initial 2-module split and defining $s_i$ and
$s_j$ as previously, we have
$$Q=\frac{1}{4w^+ + 4w^-} \sum_{ij} \left(w_{ij}-\left(\frac{w_i^+ w_j^+}{2w^+} - \frac{w_i^- w_j^-}{2w^-}\right)\right)s_i s_j,$$
which, defining the matrix $\mathbf{E}$ by $E_{ij}=w_{ij}-\left(\frac{w_i^+ w_j^+}{2w^+} - \frac{w_i^- w_j^-}{2w^-}\right)$, can be written in matrix form as
$$Q=\frac{1}{4w^+ + 4w^-} \mathbf{s}^\tau \mathbf{E} \mathbf{s}.$$
Because the generalized modularity of @Gomez_2009 can be written in the
same matrix format as the modularity expression of @Newman_2006, the
same eigenvector-based algorithm for finding a close-to-optimal value
of the modularity can be used.
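In code, the only change from the sketch above is the construction of the matrix.
The following sketch builds $\mathbf{E}$ for a possibly negatively weighted `w`
(as defined in the previous sketch; it assumes the network has at least some
positive weights, and guards against division by zero when there are no negative
weights):
```{r gomez_E_sketch, eval=FALSE}
wp<-pmax(w,0) #the w_ij^+
wm<-pmax(-w,0) #the w_ij^-
wip<-rowSums(wp) #the w_i^+
wim<-rowSums(wm) #the w_i^-
wtp<-sum(wip)/2 #w^+
wtm<-sum(wim)/2 #w^-
negpart<-if (wtm>0) outer(wim,wim)/(2*wtm) else 0
E<-w-(outer(wip,wip)/(2*wtp)-negpart) #reduces to B when no weights are negative
eig<-eigen(E,symmetric=TRUE) #then split by sign of leading eigenvector, as before
```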
The quantity
$$\frac{1}{2w^+ + 2w^-} \sum_{j} \left(w_{ij}-\left(\frac{w_i^+ w_j^+}{2w^+} - \frac{w_i^- w_j^-}{2w^-}\right)\right)\delta(C_i,C_j)$$
is the contribution of node $i$ to the modularity. It is the extent to
which node $i$ is more connected to other nodes in its module than
expected by chance [@Newman_2006;@Gomez_2009], and can be interpreted as
a strength of membership of a node in its module. The `plotmap`
function (demonstrated above) has an option for coloring nodes according
to this quantity.
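Continuing the sketches above, per-node contributions can be computed directly from
$\mathbf{E}$ and a vector of module assignments (here using `modules`, `wtp` and
`wtm` from the previous sketches):
```{r node_contrib_sketch, eval=FALSE}
contrib<-sapply(seq_len(nrow(E)),function(i){
  sum(E[i,modules==modules[i]])/(2*wtp+2*wtm) #node i's contribution to Q
})
contrib #strength of membership of each node in its module
```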
<!--# Analysis of gypsy moth defoliation time series
To be completed later.-->
# Acknowledgements
This material is based upon work supported by the National Science Foundation
under grant numbers 17114195 and 1442595, and by the James S McDonnell Foundation.
Any opinions, findings, and conclusions
or recommendations expressed in this material are those of the authors and do
not necessarily reflect the views of the National Science Foundation or the McDonnell Foundation.
We thank all
users of the package who have reported or will later report ways in
which the package could be improved.
# References
---
title: "Wavelet approaches to synchrony (wsyn) package vignette"
author: "Lawrence Sheppard, Jonathan Walter, Thomas Anderson, Lei Zhao, Daniel Reuman"
date: ""
geometry: "left=1cm,right=1cm,top=2.5cm,bottom=2.8cm"
output:
pdf_document:
number_sections: yes
keep_tex: yes
fig_caption: yes
link-citations: True
urlcolor: blue
bibliography: wsynvignette_refs.bib
vignette: >
%\VignetteIndexEntry{"wsyn vignette"}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
The `wsyn` package provides wavelet-based tools for investigating population synchrony.
Population synchrony is the tendency for population densities measured in different locations
to be correlated in their fluctuations through time [@Liebhold_04].
The basic dataset that `wsyn` helps analyze is one or more time
series of the same variable, measured in different locations at the same times; or
two or more variables so measured at the same times and locations. Tools are implemented
for describing synchrony and for investigating its causes and consequences. Wavelet
approaches to synchrony include @Grenfell_01; @Viboud_06; @Keitt_08; @Sheppard_16;
@Sheppard_17; @Sheppard_18; @Walter_17; @Anderson_18. The focus here is on techniques
used by @Sheppard_16; @Sheppard_17; @Sheppard_18; @Walter_17; @Anderson_18.
The techniques can also be used for data of the same format representing quantities
not related to populations, or, in some cases, multiple measurements from the same
location. Some of the functions of this package may also be useful for
studying the case of "community synchrony," i.e., co-located populations of
different species that fluctuate through time in a positively or negatively or
time-lag-correlated way.
<!--Insert additional references from our work as they come out-->
<!--Insert here a summary of the sections-->
# Preparing the data
\noindent A typical dataset for analysis using `wsyn` is an $N \times T$ matrix of numeric
values where rows correspond to sampling
locations (so the number of sampling locations is $N$)
and columns correspond to evenly spaced times during which sampling was conducted (so the
number of times sampling was conducted is $T$).
## Missing data
\noindent Standard implementations of wavelet trasforms require time series consisting
of measurements taken at evenly spaced times, with no missing data. Most functions provided
in `wsyn` make these same assumptions and throw an error if data are missing.
The user is left to decide on and implement a reasonable way of filling missing
data. Measures of synchrony can be influenced by data
filling techniques that are based on spatial interpolation. We therefore recommend
that spatially informed filling procedures not be used. We have previously
used the simple approach of
replacing missing values in a time series by the median of the non-missing values in the
time series [@Sheppard_16]. This approach, and other related simple
procedures [@Sheppard_16], seem
unlikely to artefactually produce significant synchrony, or
coherence relationships with other
variables, but rely on the percentage of missing data being fairly low and may obscure
detection of synchrony or significant coherence relationships if too many data are
missing. For applications which differ meaningfully from the prior work for which
the tools of this package were developed
(e.g., @Sheppard_16; @Sheppard_17; @Sheppard_18; @Walter_17; @Anderson_18),
different ways of dealing with missing data may be more appropriate.
## De-meaning, detrending, standardizing variance, and normalizing
\noindent A function `cleandat` is provided that performs a variety of combinations
of data cleaning typically necessary for analyses implemented in `wsyn`, including
de-meaning, linear detrending, standardization of time series variance, and Box-Cox
transformations to normalize marginal distributions of time series. Most functions in
`wsyn` assume at least that means have been removed from time series, and throw an
error if this is not the case. Approaches based on Fourier surrogate (section \ref{sec:surrog})
require time series with approximately normal marginals.
# The wavelet transform \label{sec:wt}
\noindent The function `wt` implements the complex Morlet wavelet transform, on which most
other `wsyn` functions are based. An S3 class is defined for `wt`, and it inherits from the
generic class `tts`. See the help files for the generator functions `wt` and `tts`
for slot names and other information about these classes. Both classes have `set`
and `get` methods for interacting with the slots, see help files for `tts_methods`,
`wt_methods` and `setget_methods`. The `set` methods just throw an error, since
generally one should not be changing the individual slots of objects of one of
classes in `wsyn` as it breaks the consistency between slots.
Background on the wavelet transform is available from many sources, including
@Addison_02, and we do not recapitulate it. We instead describe the wavelet transform
operationally, and demonstrate the implementation
of the wavelet transform in `wsyn` using examples from Fig. S2 of @Sheppard_18.
Given a time series $x(t)$, $t=1,\ldots,T$, the wavelet transform $W_\sigma(t)$
of $x(t)$ is a complex-valued function of time, $t=1,\ldots,T$, and timescale,
$\sigma$. The magntiude $|W_\sigma(t)|$ is an estimate of the strength of the
oscillations in $x(t)$ at time $t$ occurring at timescale $\sigma$. The complex
phase of $W_\sigma(t)$ gives the phase of these oscilaltions.
```{r seed_setter_1, echo=F}
set.seed(101)
```
To demonstrate the wavelet transform, start by generating some data. Start with a sine wave of
amplitude $1$ and period $15$ that operates for $t=1,\ldots,100$ but then disappears.
```{r wt_example_1_ts1}
time1<-1:100
time2<-101:200
times<-c(time1,time2)
ts1p1<-sin(2*pi*time1/15)
ts1p2<-0*time2
ts1<-c(ts1p1,ts1p2)
ts<-ts1
```
Then add a sine wave of amplitude $1$ and period $8$ that operates for $t=101,\ldots,200$
but before that is absent.
```{r wt_example_1_ts2}
ts2p1<-0*time1
ts2p2<-sin(2*pi*time2/8)
ts2<-c(ts2p1,ts2p2)
ts<-ts+ts2
```
Then add normally distributed white noise of mean $0$ and standard deviation $0.5$.
```{r wt_example_1_ts3}
ts3<-rnorm(200,mean=0,sd=0.5)
ts<-ts+ts3
```
Now apply the wavelet transform, obtaining an object of class `wt`. Default parameter
values for `scale.min`, `scale.max.input`, `sigma` and `f0` are usually good enough for
initial data exploration.
```{r wt_example_1_wt}
library(wsyn)
ts<-cleandat(ts,times,clev=1)
wtres<-wt(ts$cdat,times)
class(wtres)
names(wtres)
```
Methods `get_times`, `get_timescales`, `get_values`, `get_wtopt`, and `get_dat` extract the slots.
Set methods also exist, but these just throw an error since setting individual slots of
a `wt` object will break the relationship between the slots.
There is a `plotmag` method for the `tts` class that plots the magnitude of the transform
against time and timescale.
```{r wt_example_1_plot, results=FALSE}
plotmag(wtres)
```
We can see the oscillations at timescale $15$ for the first hundred time steps, and the
oscilaltions at timescale $8$ for the last 100 time steps, as expected.
Because the wavelet transform
is based on convolution of a wavelet function with the time series, times and timescales
for which the overlap of the wavelet with the time series is insufficient are unreliable
and are omitted. This affects times closer to the edges of the time series, and is the
reason for the "rocketship nose cone" shape of wavelet plots. More values are omitted
for longer timescales because long-timescale wavelets overhang the end of the time series
further in the convolution operation. All plots based on wavelet transforms have the same
property.
There is also a `plotphase` method for the `tts` class that plots the phase of the transform
against time and timescale.
```{r wt_example_1_plotphase, results=FALSE}
plotphase(wtres)
```
One can compute the power.
```{r wt_example_1_power, results=FALSE}
h<-power(wtres)
plot(log(1/h$timescales),h$power,type='l',lty="solid",xaxt="n",
xlab="Timescales",ylab="Power")
xlocs<-c(min(h$timescales),pretty(h$timescales,n=8))
graphics::axis(side=1,at=log(1/xlocs),labels=xlocs)
```
There are also `print` and `summary` methods for the `wt` class and `tts` class. See the help files for
`tts_methods` and `wt_methods`.
```{r wt_example_1_psmeth}
print(wtres)
summary(wtres)
```
```{r seed_setter_2, echo=F}
set.seed(201)
```
Now we give a second example, also from Fig. S2 of @Sheppard_18. The frequency
of oscillation of the data geenerated below
changes gradually from $0.2$ cycles per year (timescale 5 years)
to $0.1$ cycles per year (timescale 10 years).
```{r wt_example_2, results=FALSE}
timeinc<-1 #one sample per year
startfreq<-0.2 #cycles per year
endfreq<-0.1 #cycles per year
times<-1:200
f<-seq(from=startfreq,length.out=length(times),to=endfreq) #frequency for each sample
phaseinc<-2*pi*cumsum(f*timeinc)
t.series<-sin(phaseinc)
t.series<-cleandat(t.series,times,1)$cdat
res<-wt(t.series, times)
plotmag(res)
plotphase(res)
```
The `times` argument to `wt` (and to several other functions, see below)
is tightly constrained. It must be a numeric vector of unit-spaced times
(`diff(times)` is a vector of 1s) with
no missing entries. It must be the same length as the data and correspond to the
timing of measurement of the data. For the most common use cases, the unit
spacing of times will be natural in some time unit, i.e., sampling is typically
conducted with frequency once per time unit for some natural time unit (e.g.,
once per year, month, day, week, fortnight). In those cases, `timescales`
in output and on plots will have units cycles per time unit for the time unit
of sampling. Applications with sampling time step
not equal to 1 in some natural unit of time can view the `times` vector
as a vector of time steps, rather than times, \emph{per se}, and `timescales`
will be in units of cycles per time step.
The arguments `scale.min`, `scale.max.input`, `sigma`, and `f0`,
which are arguments to `wt` and several other functions,
are for constructing the timescales used for
wavelet analysis. The argument `scale.min` is the shortest timescale,
and must be $2$ or greater. Starting from `scale.min`, each timescale is
`sigma` times the previous one, up to the first timescale that equals or
surpasses `scale.max.input`. The scalloping of
wavelet transforms places additional, independently implemented
constraints on the largest timescale examined
so choosing larger `scale.max.input` will only result in longer timescales
up to the limits imposed by scalloping. The argument `f0` is the
ratio of the period of fluctuation to the width of the envelope.
Higher values of `f0` imply higher frequency resolution
(i.e., a wavelet component includes information from a narrower range
of Fourier components) but lower temporal resolution
(the component includes information from a wider range of times).
Resolution should be chosen appropriate to the characteristics of the
data [@Addison_02].
# Time- and timescale-specific measures of synchrony
\noindent The function `wpmf`
implements the wavelet phasor
mean field and the function `wmf` implements the wavelet mean
field. These are techniques for depicting the time and timescale dependence of synchrony,
and are introduced in this section. S3 classes are
defined for `wmf` and `wpmf`, both of which
inherit from the generic class `tts`. See the help files for the generator functions for
these classes (`wmf`, `wpmf` and `tts`, respectively) for slot names and other
information about the classes. There are again `set` and `get` methods for these classes,
and `print` and `summary` methods.
See help files for `wmf_methods` and `wpmf_methods`.
## The wavelet phasor mean field
\noindent The wavelet phasor mean field (the `wpmf` function in `wsyn`) depicts the time and
timescale dependence of phase synchrony of a collection of time series. If $x_n(t)$,
$n=1,\ldots,N$, $t=1,\ldots,T$ are time series of the same variable measured in $N$ locations
at the same times, and if $W_{n,\sigma}(t)$ is the wavelet transform of $x_n(t)$ and
$w_{n,\sigma}(t)=\frac{W_{n,\sigma}(t)}{|W_{n,\sigma}(t)|}$ has only the information about
the complex phases of the transform (unit-magnitude complex numbers such as these are called
\emph{phasors}), then the wavelet phasor mean field is
\begin{equation}
\frac{1}{N} \sum_{n=1}^N w_{n,\sigma}(t).
\end{equation}
For combinations of $t$ and $\sigma$ for which oscillations at time $t$ and timescale
$\sigma$ in the time series $x_n(t)$ have the same phase (they are phase synchronized),
the phasors $w_{n,\sigma}(t)$ will all point in similar directions in the complex plane, and
their sum will be a large-magnitude complex number.
For combinations of $t$ and $\sigma$ for which oscillations at time $t$ and timescale
$\sigma$ in the time series $x_n(t)$ have unrelated phases (they are not phase synchronized),
the phasors $w_{n,\sigma}(t)$ will all point in random, unrelated
directions in the complex plane, and
their sum will be a small-magnitude complex number. Therefore plotting the magnitude of the
wavelet phasor mean field against time and timescale quantifies the time and timescale
dependence of phase synchrony in the $x_n(t)$. The wavelet phasor mean field, being a mean
of phasors, always has magnitude between $0$ and $1$.
```{r seed_setter_3, echo=F}
set.seed(101)
```
We provide an example based on supplementary figure 1 of @Sheppard_16. A related technique,
the wavelet mean field (section \ref{sect:wmf}), was used there, but the wavelet phasor mean
field also applies and is demonstrated here. We construct data consisting of time series
measured for 100 time steps in each of 11 locations. The time series have three components.
The first component is a sine wave of amplitude $1$ and period $10$ years for the first
half of the time series, and is a sine wave of amplitude $1$ and period $5$ for the
second half of the time series. This same signal is present in all $11$ time series and
creates the synchrony among them.
```{r wpmf_example_1_dat_1}
times1<-0:50
times2<-51:100
times<-c(times1,times2)
ts1<-c(sin(2*pi*times1/10),sin(2*pi*times2/5))+1.1
```
The second component is a sine wave of amplitude $1$ and period $3$ years that is
randomly and independently phase shifted in each of the $11$ time series. The third
component is white noise, independently generated for each time series.
```{r wpmf_example_1_dat_2}
dat<-matrix(NA,11,length(times))
for (counter in 1:dim(dat)[1])
{
ts2<-3*sin(2*pi*times/3+2*pi*runif(1))+3.1
ts3<-rnorm(length(times),0,1.5)
dat[counter,]<-ts1+ts2+ts3
}
dat<-cleandat(dat,times,1)$cdat
```
The second and third components do not generate synchrony,
and obscure the synchrony of the first component.
As a result, synchrony cannot be readily detected by visually examining the time series:
```{r wpmf_example_1_plotts}
plot(times,dat[1,]/10+1,type='l',xlab="Time",ylab="Time series index",ylim=c(0,12))
for (counter in 2:dim(dat)[1])
{
lines(times,dat[counter,]/10+counter)
}
```
Nor can synchrony be readily detected by examining the $55$ pairwise correlation coefficients
between the time series, which are widely distributed and include many values above and below
$0$:
```{r wpmf_example_1_cors}
cmat<-cor(t(dat))
diag(cmat)<-NA
cmat<-as.vector(cmat)
cmat<-cmat[!is.na(cmat)]
hist(cmat,30,xlab="Pearson correlation",ylab="Count")
```
But the wavelet phasor mean field sensitively reveals the synchrony and its time
and timescale structure:
```{r wpmf_example_1, results=FALSE}
res<-wpmf(dat,times,sigmethod="quick")
plotmag(res)
```
The `wpmf` function implements assessment of the statistical significance of
phase synchrony in three ways, one of which is demonstrated by the contour lines on
the above plot (which give a $95\%$ confidence level by default - the level can be changed
with the `sigthresh` argument to the `plotmag` method for the `wpmf` class). The method
of significance testing the wavelet phasor mean field plot is controlled with the
`sigmethod` argument to `wpmf`, which can be `quick` (the default), `fft` or `aaft`. The
`quick` method compares the mean field magnitude value for each time and timescale
separately to a distribution of magnitudes of sums of $N$ random, independent
phasors.
Each time/timescale
pair is compared independently to the distribution, and the multiple testing problem is
not accounted for, so some time/timescales pairs will come out as showing "significant"
phase synchrony by chance, i.e., false-positive detections of phase synchrony can occur.
For instance, the small islands of significant synchrony at timescale
approximately $5$ and times about $15$ and $40$ on the above plot are false positives.
Signficance is based on stochastic generation of magnitudes of sums of random phasors,
so significance contours will differ
slightly on repeat runs. Increasing the number of randomizations (argument `nrand` to `wpmf`)
reduces this variation.
The `quick` method can be inaccurate for very short
timescales. The two alternative methods, `fft` and `aaft`, mitigate this problem but
are substantially slower. The `fft` and `aaft` methods are based on surrogate
datasets (section \ref{sec:surrog})
so are discussed in section \ref{sec:wpmfsignif}.
The `power` and `plotphase` methods also work on `wpmf` objects.
Examples of the wavelet phasor mean field technique applied to real datasets are in,
for instance, @Sheppard_18 (their Fig. S1) and @Anderson_18 (their Fig. 4).
## The wavelet mean field \label{sect:wmf}
\noindent The wavelet mean field (the `wmf` function in `wsyn`) depicts the time and
timescale dependence of synchrony of a collection of time series $x_n(t)$ for
$n=1,\ldots,N$ and $t=1,\ldots,T$, taking into account both phase synchrony and
associations through time of magnitudes of oscillations in different time series at
a given timescale. See @Sheppard_16 for a precise mathematical definition.
The plot is similar in format to a wavelet phasor mean field plot, but
without significance contours:
```{r wmf_example_1, results=FALSE}
res<-wpmf(dat,times)
plotmag(res)
```
The wavelet mean field is a more useful technique than the wavelet phasor mean field
insofar as it accounts for associations of magnitudes of oscillation, in addition
to phase synchrony, but it is less useful insofar as significance contours are not
available. The wavelet mean field also has some mathematical advantages, described in
@Sheppard_16 and @Sheppard_18. The "wavelet Moran theorem" and other theorems
described in those references use the wavelet
mean field, not the wavelet phasor mean field,
and can be used to help attribute synchrony to particular causes.
Thus the two techniques are often best used together:
significance of phase synchrony can be identified with the wavelet phasor mean field,
and then once significance is identified, synchrony can be described and studied using
the wavelet mean field.
The `power` and `plotphase` methods also work on `wmf` objects.
Examples of the wavelet mean field applied to real data
are in @Sheppard_16 and @Sheppard_18.
# Coherence
\noindent Coherence is explained by @Sheppard_16 and @Sheppard_17, among others.
We summarize here some of the explanations given in those references. Let
$x_{1,n}(t)$ and $x_{2,n}(t)$ for $n=1,\ldots,N$ and $t=1,\ldots,T$ be two variables
measured at the same $N$ locations and $T$ times. Let $W_{i,n,\sigma}(t)$ ($i=1,2$) be the
corresponding wavelet transforms. The coherence of $x_{1,n}(t)$ and $x_{2,n}(t)$ is the
magnitude of a quantity we denote $\Pi_\sigma^{(12)}$, which is in turn the mean
of $w_{1,n,\sigma}(t)\overline{w_{2,n,\sigma}(t)}$ over all
time-location pairs for which this product of normalized
wavelet transforms is still defined after wavelet scalloping
is performed. The overline is complex conjugation. This product is a function of timescale.
The $w_{1,n,\sigma}(t)$ are wavelet transforms normalized in one of
a few different ways (see below). Because $w_{1,n,\sigma}(t)\overline{w_{2,n,\sigma}(t)}$
is a complex number with phase equal to the phase difference between the two wavelet components,
the mean, $\Pi_\sigma^{(12)}$, of this quantity over times and locations has large
magnitude if
the phase difference between the transforms is consistent over time and across sampling
locations. Coherences essentially measure the strength of association between the variables
in a timescale-specific way that is also not confounded by lagged or phase-shifted
associations. Coherences and related quantities are analyzed in `wsyn` using the function
`coh` and its corresponding S3 class, `coh`, and the S3 methods that go with the class.
Methods comprise `set` and `get` and `print` and `summary` methods (see the help file for
`coh_methods` for these basic methods), as well as `bandtest`, and `plotmag`, `plotrank`, and
`plotphase`.
One possible normalization is phase normalization,
$w_{i,n,\sigma}(t)=\frac{W_{i,n,\sigma}(t)}{|W_{i,n,\sigma}(t)|}$. Set the `norm`
argument of `coh` to "`phase`" to use this normalization. The coherence with
this normalization is often called the \emph{phase coherence}, or the \emph{spatial
phase coherence} if $N>1$ [@Sheppard_17]. Phase coherence measures
the extent to which the two variables have consistent phase differences over time and
across locations, as a function of timescale. The normalization descibed in the "Wavelet
mean field" section of the Methods of @Sheppard_16 gives the version of the
coherence that was there called the \emph{wavelet coherence}, or the
\emph{spatial wavelet coherence} if $N>1$. Set the `norm`
argument of `coh` to "`powall`" to use this normalization.
If `norm` is "`powind`", then $w_{i,n,\sigma}(t)$ is obtained
by dividing $W_{i,n,\sigma}(t)$ by the square root of the average of
$W_{i,n,\sigma}(t) \overline{W_{i,n,\sigma}(t)}$ over the times for which it is
defined; this is done separately for each $i$ and $n$. The final option for `norm`
available to users of `coh` is "`none`", i.e., raw wavelet transforms
are used. For any value of `norm` except "`phase`", the normalized
wavelet components $w_{i,n,\sigma}(t)$ can also have varying magnitudes, and in
that case the coherence reflects not only consistencies in phase between the two
variables over time and across locations, but is also further increased if
there are correlations in the amplitudes of the fluctuations.
We demonstrate coherence and its implementation in `wsyn` via some simulated data. This
demonstration reporoduces an example given in supplementary figure 5 of @Sheppard_16. Data
consist of an environmental-driver variable, `x`, and a driven biological variable,
`y`, between which we compute coherence. Both were measured at $11$ locations. The
environmental variable `x` was constructed as the sum of: 1) a single common signal
of amplitude $1$ and period $10$ years, present at all $11$ locations; 2) a single common
signal of amplitude $5$ and period $3$ years, also present at all $11$ locations; 3) white
noise of mean $0$ and standard deviation 1.5, independently generated for all $11$ locations.
```{r seed_setter_4, echo=F}
set.seed(101)
```
```{r coh_example_dat_driver}
times<-(-3:100)
ts1<-sin(2*pi*times/10)
ts2<-5*sin(2*pi*times/3)
x<-matrix(NA,11,length(times)) #the driver (environmental) variable
for (counter in 1:11)
{
x[counter,]=ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
}
```
Population time series $y_n(t)$ were the moving average, over three time steps, of the
$x_n(t)$, plus white noise: $y_n(t)=\left( \sum_{k=0}^2 x_n(t-k) \right)/3 + \epsilon_n(t)$.
Here the $\epsilon_n(t)$ are independent, normally distributed random variables of mean
$0$ and standard deviation $3$.
```{r coh_example_dat_env}
times<-0:100
y<-matrix(NA,11,length(times)) #the driven (biological) variable
for (counter1 in 1:11)
{
for (counter2 in 1:101)
{
y[counter1,counter2]<-mean(x[counter1,counter2:(counter2+2)])
}
}
y<-y+matrix(rnorm(length(times)*11,mean=0,sd=3),11,length(times))
x<-x[,4:104]
x<-cleandat(x,times,1)$cdat
y<-cleandat(y,times,1)$cdat
```
The function `cleandat` with `clev=1` (mean removal only) was used. Mean removal is
sufficient cleaning for these artificially generated data.
The relationship between the environmental and biological variables cannot
readily be detected using ordinary correlation methods - correlations
through time between the $x_n(t)$ and $y_n(t)$ range widely on both sides of 0:
```{r cors_range_widely}
allcors<-c()
for (counter in 1:dim(x)[1])
{
allcors[counter]<-cor(x[counter,],y[counter,])
}
allcors
```
However, the function `coh` can be used to compute the coherence between `x` and `y`:
```{r coh_example_call}
res<-coh(dat1=x,dat2=y,times=times,norm="powall",
sigmethod="fftsurrog1",nrand=100,
f0=0.5,scale.max.input=28)
```
The normalization to be used is specified via `norm`, as described
above. The `powall` option corresponds to the (spatial) wavelet coherence of
@Sheppard_16. There are several alternative methods for testing the
significance of coherence, and the method used
is controlled by the `sigmethod` argument. All significance
methods are based on "surrogate datasets".
These are datasets that have been randomized in an appropriate way. In addition
to computing coherence of data, coherence is also computed in the same way for
`nrand` surrogate datasets that represent the null hypothesis of no relationship
between `dat1` and `dat2` while retaining other statistical features of the data.
See section \ref{sec:surrog} for details of surrogates and significance testing,
and allowed values of `sigmethod`. Larger values of `nrand` produce more accurate
significance results that are less variable on repeat runs, but also require more
computational time. Using `nrand` at least 1000 or 10000 for final runs is
recommended. This can take some time for values of `sigmethod` other than `fast`
(see section \ref{sec:surrog}), so `100` was used above for demonstration
purposes only. The arguments `f0` and `scale.max.input` control details of the
wavelet transform (see section \ref{sec:wt}) and are set here to agree with values
used by @Sheppard_16.
Coherence can be plotted using `plotmag`:
```{r coh_plot, results=FALSE}
plotmag(res)
```
The red line here is the coherence. The black lines are $95$th and $99$th (these are
the default values for `sigthresh`) quantiles of coherences of surrogate datasets. Coherence is
significant (the red line is above the black lines) for timescales close to $10$ years, but not
significant for timescales close to
$3$ years, as expected since three values of $x$ are averaged to produce one value of $y$,
so the $3$-year periodicity in $x$ is averaged away and does not pass to $y$.
Thus coherence reveals a (timescale-specific) relationship between $x$ and $y$ that
correlation methods did not reveal.
Typically preferable [@Sheppard_17] is the `fast` option to `sigmethod`, because
far more surrogates can be used in the same computational time:
```{r coh_example_call_fast, results=FALSE}
res<-coh(dat1=x,dat2=y,times=times,norm="powall",
sigmethod="fast",nrand=10000,
f0=0.5,scale.max.input=28)
plotmag(res)
```
For the `fast` algorithm (section \ref{sec:surrog}) the modulus of
`res$signif$coher` (plotted above as the dashed red line) can be compared
to quantiles of the modulus of
`res$signif$scoher` (the black lines are $95$th and $99$th quantiles) in the
usual way to make statements about significance of coherence,
but the modulus of `res$signif$coher` is only approximately equal to the standard coherence,
which is the modulus of `res$coher` (and which is plotted above as the solid red line). Thus
one should use the dashed red line above, and `res$signif$coher`, when making conclusions
about the significance of
coherence, and the solid red line, and `res$coher`, when using the actual value of the
coherence. Typically the two red lines are quite similar. For values
of `sigmethod` other than `fast`, they are equal.
The significance indicated on the above plots is done on a timescale-by-timescale
basis, and type-I errors (false positives) are not taken into account. Neither do the
individual timescales correspond to independent tests. A method of
aggregating signficance across a timescale band was described by @Sheppard_16, and
is implemented in `bandtest`:
```{r bandtest}
res<-bandtest(res,c(8,12))
```
A call to `bandtest` computes a
$p$-value for the aggregate significance of coherence across the specified band
(8 to 12 year timescales, in this case), and also computes the average phase of
$\Pi_\sigma^{(12)}$ across the band. This information is added as a new row to
the `bandp` slot of the `coh` object (which was previously `NA` in this case).
```{r bandtest_display_bandp}
get_bandp(res)
```
Doing another timescale band adds another row to `bandp`:
```{r bandtest2}
res<-bandtest(res,c(2,4))
get_bandp(res)
```
The aggregate $p$-values are now displayed on the plot:
```{r display_p_1, results=FALSE}
plotmag(res)
```
These results show the variables $x$ and $y$ are highly significantly coherent
across the timescale band $8$ to $12$ years, but are not signficantly coherent across
the band $2$ to $4$ years, as expected from the way the data were generated and
by comparing the red and black lines.
Band-aggregated $p$-values are produced essentially by averaging the rank in surrogates of the
empirical coherence across timescales. The same procedure is then applied to
each surrogate, ranking it with respect to the other surrogates and taking the mean across
timescales. Comparing the empirical mean rank to the dsitribution of surrogate mean
ranks gives a $p$-value [@Sheppard_16; @Sheppard_17; @Sheppard_18].
One can also display a plot of the ranks of `Mod(res$signif$coher)` in the
distribution of `Mod(res$signif$scoher)` values at each timescale:
```{r plotrank_1, results=FALSE}
plotrank(res)
```
The vertical axis label `Fract surr gt` stands for the fraction of surrogate coherences
that the coherence of the data is greater than at the given timescale, so values
are between $0$ and $1$ and large values indicate significance.
Whenever values exceed the argument `sigthresh` (which takes the default value $0.95$
for the above call to `plotrank`), the coherence is nominally significant. The value(s)
of `sigthresh` are displayed as dashed horizontal line(s) on the plot. This is nominal
significance because of the multiple-testing problem. As can be seen, $p$-values stored
in `bandp` are also displayed on the plot, and these values
aggregate across timescales appropriately and alleviate the multiple-testing problem.
Average phases of $\Pi_\sigma^{(12)}$ across the timescale bands of interest were also
computed by `bandtest` and stored in the `bandp` slot, in units of radians. These
are average phases for the `coher` slot of a `coh` object.
Phases of $\Pi_\sigma^{(12)}$
can be plotted against timescale and average phases in `bandp` displayed using the
`plotphase` function:
```{r plotphase_1, results=FALSE}
plotphase(res)
```
Average phases for timescale bands across which coherence is not significant
(e.g., the $2$ to $4$ year band in the above plot) are
random and meaningless. Average phases for bands across which coherence is
significant (e.g., the $8$ to $12$ band in the plot) can give valuable
information about the nature of the relationship
between the variables [@Sheppard_18].
# Surrogates \label{sec:surrog}
\noindent Some of the text of this section was adapted, with minor modifications only,
from our earlier work [@Sheppard_16; @Sheppard_18; @Anderson_18].
The level of coherence consistent with the null
hypothesis that there is no relationship between two variables depends on the spatial
and temporal autocorrelation of the data. For instance, two variables that fluctuate
regularly at the same frequency and are both highly spatially synchronous will have a
phase difference that is highly consistent over time and space, and therefore will have high
spatial wavelet coherence, even if they are not related. Two irregular oscillators with
low spatial synchrony are less likely to show consistent phase differences over time and
space if they are unrelated. We test coherences
for significance using resampling schemes based on surrogate datasets that randomize
away phase relationship between variables while retaining, to the extent possible, the
spatial and temporal autocorrelation properties and the marginal distributions of the
time series. We use the widely applied Fourier surrogate and amplitude adjusted Fourier
surrogate methods [@Prichard_94; @Schreiber_00], implemented
in the `surrog` function in `wsyn` and summarized below. Surrogates are also used for
applications other than measures of coherence (see, e.g., section \ref{sec:wpmfsignif}).
## Fourier surrogates
\noindent Details are presented elsewhere [@Prichard_94; @Schreiber_00]. We summarize here.
A Fourier surrogate of a time series $x(t)$ is obtained by the following steps:
\begin{itemize}
\item Compute the fast Fourier transform of $x(t)$, here called $X(\tau)$ for the timescale $\tau$
\item Randomize the phases of the transform by multiplying $X(\tau)$ by a random, uniformly distributed unit-magnitude complex number; do this independently for each $\tau$
\item Inverse transform, giving the surrogate time series
\end{itemize}
This procedure can be done using `surrog` with `surrtype="fft"`.
Because only the phases of the Fourier transform are randomized, not the magnitudes,
autocorrelation properties of the surrogate time series are the same as those of
$x(t)$.
Fourier surrogates of $N$ time series $x_n(t)$ measured at locations
$n=1,\ldots,N$ and times $t=1,\ldots,T$ are obtained by the following steps:
\begin{itemize}
\item Compute the fast Fourier transform of $x_n(t)$ for each $n$, here called $X_n(\tau)$
\item Randomize the phases of the transforms by multiplying $X_n(\tau)$ by a random, uniformly distributed unit-magnitude complex number. Do this independently for each $\tau$, but different
random multipliers can optionally be used for each $n$ if desired, or the same phase multiplier
can be used for all $n$, for a given $\tau$ (these are called "synchrony preserving surrogates" - see below)
\item Inverse transform, giving the surrogate time series
\end{itemize}
This procedure can be done using `surrog` with `surrtype=fft`
and with `syncpres=TRUE` (for synchrony-preserving surrogates) or with
`syncpres=FALSE` (for independent surrogates).
Autocorrelation properties of individual time series are preserved, as for the $N=1$ case
covered above. If synchrony-preserving surrogates are used, all cross-correlation
properties between time series are also preserved, because cross spectra are unchanged by
the joint phase randomization. Therefore synchrony is preserved.
Fourier surrogates tend to have normal marginal distributions [@Schreiber_00].
Therefore, to ensure fair
comparisons between statistical descriptors (such as coherences) of real and surrogate
datasets, Fourier surrogates should only be applied to time series that themselves
have approximately normal marginals. The Box-Cox transformations implemented in `cleandat` can
help normalize data prior to analysis. If data are difficult to normalize, or as
an alternative, the amplitude-adjusted Fourier transform surrogates method of the
next section can be used instead.
## Amplitude-adjusted Fourier surrogates
\noindent Amplitude-adjusted Fourier surrogates are described elsewhere [@Schreiber_00].
Either synchrony preserving (`syncpres=TRUE`) of independent (`syncpres=FALSE`)
amplitude-adjusted Fourier (AAFT) surrogates can be obtained from `surrog` using
`surrtype="aaft"`. AAFT surrogates can be applied to non-normal data, and return
time series with exactly the same marginal distributions as the original time series.
AAFT surrogates have approximately the same power spectral (and cross-spectral, in the
case of `syncpres=TRUE`) properties as the original data.
## Fast coherence
\noindent The fast coherence algorithm implemented in `coh` (option `sigmethod="fast"`)
implements Fourier surrogates only, and only applies for `norm` equal to `none`, `powall`,
or `powind`. It is described in detail elsewhere [@Sheppard_17].
## Alternatives to the "quick" method of assessing significance of wavelet phasor mean field values \label{sec:wpmfsignif}
\noindent When `sigmethod` is `fft` in a call to `wpmf`, the
empirical wavelet phasor mean field is compared to wavelet phasor mean fields of
Fourier surrogate datasets.
The `signif` slot of the output is a list with first element `"fft"`, second element equal to
`nrand`, and third element the fraction of surrogate-based wavelet phasor mean field
magnitudes that the empirical wavelet phasor mean field magnitude is greater than
(a times by timescales matrix). For `sigmethod` equal to `aaft`, AAFT
surrogates are used instead. Non-synchrony-preserving surrogates are used.
# Wavelet linear models and their uses for understanding synchrony
\noindent Linear models on wavelet transforms were introduced by @Sheppard_18,
where they were used for understanding the causes of synchrony. We demonstrate
the implementation in `wsyn` of the tools developed by @Sheppard_18, without giving
a complete description of the concepts or mathematics behind those tools.
Such a description is in @Sheppard_18.
## Model construction tools
```{r seed_setter_5, echo=F}
set.seed(3221) #221 #3221
```
First create a diver variable composed of an oscillation of period $12$ years and
an oscillation of period $3$ years, and normally
distributed white noise of mean $0$ and standard deviation $1.5$.
```{r driver_1}
lts<-12
sts<-3
mats<-3
times<-seq(from=-mats,to=100)
ts1<-sin(2*pi*times/lts)
ts2<-sin(2*pi*times/sts)
numlocs<-10
d1<-matrix(NA,numlocs,length(times)) #the first driver
for (counter in 1:numlocs)
{
d1[counter,]<-ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
}
```
Next create a second driver, again composed of an oscillation of period $12$ years and
an oscillation of period $3$ years, and normally
distributed white noise of mean $0$ and standard deviation $1.5$.
```{r driver_2}
ts1<-sin(2*pi*times/lts)
ts2<-sin(2*pi*times/sts)
d2<-matrix(NA,numlocs,length(times)) #the second driver
for (counter in 1:numlocs)
{
d2[counter,]<-ts1+ts2+rnorm(length(times),mean=0,sd=1.5)
}
```
Next create an irrelevant environmental variable. With real data, of course,
one will not necessarily know in advance whether an environmental
variable is irrelevant to a population system. But, for the purpose of
demonstrating the methods, we are playing the dual role of data creator and
analyst.
```{r dirrel}
dirrel<-matrix(NA,numlocs,length(times)) #the irrelevant env var
for (counter in 1:numlocs)
{
dirrel[counter,]<-rnorm(length(times),mean=0,sd=1.5)
}
```
The population in each location is a combination of the two drivers,
plus local variability. Driver 1 is averaged over 3 time steps
in its influence on the populations, so
only the period-12 variability in driver 1 influences the populations.
```{r popdat}
pops<-matrix(NA,numlocs,length(times)) #the populations
for (counter in (mats+1):length(times))
{
aff1<-apply(FUN=mean,X=d1[,(counter-mats):(counter-1)],MARGIN=1)
aff2<-d2[,counter-1]
pops[,counter]<-aff1+aff2+rnorm(numlocs,mean=0,sd=3)
}
pops<-pops[,times>=0]
d1<-d1[,times>=0]
d2<-d2[,times>=0]
dirrel<-dirrel[,times>=0]
times<-times[times>=0]
```
If only the data were available and we were unaware of how they were generated,
we might want to infer the causes of synchrony and its timescale-specific patterns
in the populations. The wavelet mean fields of `pops`, `d1` and `d2` show some
synchrony at timescales of about $3$ and $12$ for all three variables.
```{r wmfs_wlmexample, results=FALSE}
dat<-list(pops=pops,d1=d1,d2=d2,dirrel=dirrel)
dat<-lapply(FUN=function(x){cleandat(x,times,1)$cdat},X=dat)
wmfpop<-wmf(dat$pops,times,scale.max.input=28)
plotmag(wmfpop)
wmfd1<-wmf(dat$d1,times,scale.max.input=28)
plotmag(wmfd1)
wmfd2<-wmf(dat$d2,times,scale.max.input=28)
plotmag(wmfd2)
```
Thus we cannot know for sure
from the wavelet mean fields whether population synchrony at
each timescale is due to synchrony in `d1`, `d2`, or both drivers at that timescale.
However, we can fit wavelet linear models.
Start by fitting a model with all three predictors. Only the `"powall"`
option for `norm` is implemented so far.
```{r fit_mod_allpred}
wlm_all<-wlm(dat,times,resp=1,pred=2:4,norm="powall",scale.max.input=28)
```
We will carry out analyses for this model at long timescales
($11$ to $13$ years) and short timescales
($2$ to $4$ years) simultaneously. First test whether we can drop each variable.
```{r sometests}
wlm_all_dropi<-wlmtest(wlm_all,drop="dirrel",sigmethod="fft",nrand=100)
wlm_all_drop1<-wlmtest(wlm_all,drop="d1",sigmethod="fft",nrand=100)
wlm_all_drop2<-wlmtest(wlm_all,drop="d2",sigmethod="fft",nrand=100)
```
Examine results for dropping `dirrel`, long and short timescales. We find
that `dirrel` does not need to be retained in either long- or short-timescale
models, as expected given how data were constructed:
```{r dropdirrel_1, results=FALSE}
blong<-c(11,13)
bshort<-c(2,4)
wlm_all_dropi<-bandtest(wlm_all_dropi,band=blong)
wlm_all_dropi<-bandtest(wlm_all_dropi,band=bshort)
plotmag(wlm_all_dropi)
plotrank(wlm_all_dropi)
```
Examine results for dropping `d1`, long and short timescales. We find that
`d1` should be retained in a long-timescale model but need not be retained
in a short-timescale model, again as expected:
```{r dropd1_1, results=FALSE}
wlm_all_drop1<-bandtest(wlm_all_drop1,band=blong)
wlm_all_drop1<-bandtest(wlm_all_drop1,band=bshort)
plotmag(wlm_all_drop1)
plotrank(wlm_all_drop1)
```
Examine results for dropping `d2`, long and short timescales. We find that
`d2` should be retained in both a short-timescale model and
in a long-timescale model, again as expected:
```{r dropd2_1, results=FALSE}
wlm_all_drop2<-bandtest(wlm_all_drop2,band=blong)
wlm_all_drop2<-bandtest(wlm_all_drop2,band=bshort)
plotmag(wlm_all_drop2)
plotrank(wlm_all_drop2)
```
Note that only $100$ randomizations were used in this example. This is for speed -
in a real analysis, at least $1000$ randomizations should typically be performed,
and preferably at least $10000$.
<!--***DAN: once the fast algorithm is available, use it, and change this text-->
## Amounts of synchrony explained
\noindent Now we have constructed models for short timescales ($2-4$ years)
and long timescales ($11-13$ years) for the example, finding, as expected,
that `d1` is a driver at long timescales only and `d2` is a driver at short
and long timescales.
How much of the synchrony in the response variable is explained by these drivers
for each timescale band?
For short timescales, almost all the synchrony that can be explained is explained
by Moran effects of `d2`:
```{r shortts_syncexpl}
se<-syncexpl(wlm_all)
se_short<-se[se$timescales>=bshort[1] & se$timescales<=bshort[2],]
round(100*colMeans(se_short[,c(3:12)])/mean(se_short$sync),4)
```
These are percentages of synchrony explained by various factors: `syncexpl` is the total
synchrony explained by the predictors for which we have data; `crossterms` must be small
enough for the rest of the results to be interpretable; `d1`, `d2` and `dirrel`
are percentages of synchrony explained by those predictors;
`interactions` is the percentage of
synchrony explained by interactions between predictors (see @Sheppard_18); and the
remaining terms are percentages of synchrony explained by individual interactions.
For long timescales, Moran effects of both drivers are present, as are interactions
between these Moran effects:
```{r longts_syncexpl}
se_long<-se[se$timescales>=blong[1] & se$timescales<=blong[2],]
round(100*colMeans(se_long[,c(3:12)])/mean(se_long$sync),4)
```
Note that cross terms are fairly small in both these analyses compared to synchrony
explained. Results can only be interpreted when this is the case.
See @Sheppard_18 for detailed information on cross terms and
interacting Moran effects.
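A direct check, sketched here (not run) using the column names described above,
is to compare the average cross terms to the average synchrony explained in each band:
```{r crossterms_check, eval=FALSE}
#cross terms as a percentage of synchrony explained, short and long timescales
100*mean(se_short$crossterms)/mean(se_short$syncexpl)
100*mean(se_long$crossterms)/mean(se_long$syncexpl)
```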
The pattern of synchrony that would pertain if the only drivers of synchrony were
those included in a model can also be produced, and compared to the actual pattern
of synchrony (as represented by the wavelet mean field) to help evaluate the model.
```{r examp_predsync_1}
pres<-predsync(wlm_all)
plotmag(pres)
plotmag(wmfpop)
```
The similarity is pretty good. Now make the comparison using the model with sole
predictor `d1`.
```{r examp_predsync_2}
wlm_d1<-wlm(dat,times,resp=1,pred=2,norm="powall",scale.max.input=28)
pres<-predsync(wlm_d1)
plotmag(pres)
```
The similarity with the wavelet mean field of the populations is pretty good at
long timescales (where the model with sole predictor `d1` was found to be a
good model), but not at short timescales.
<!--# Analysis of plankton populations in UK seas
\noindent All examples so far have used artificial data. It makes sense at this
stage to demonstrate the tools described so far on real data. We carry out a
much simplified version of some of the analyses of @Sheppard_18.
To be completed later.-->
<!--DAN: embed the B-C transformed cal fin, PCI, and temp data in the package and
document them, then do some analyses here of those data using all our tools so far.-->
# Clustering
Tools are provided in `wsyn` for separating sampling locations into
network "clusters" or "modules"
or "communities" (these are three alternative names used)
consisting of sites that are especially synchronous with each other.
@Walter_17 applied this kind of approach to gypsy moth
data. Given an $N \times T$ matrix of
values corresponding to measurements made in $N$ locations over $T$ times, the approach starts
by generating an $N \times N$ synchrony matrix with $i,j$th entry describing the strength
of synchrony between the time series from locations $i$ and $j$ (in one of
several ways - see below). This matrix is then passed
to an existing clustering algorithm to partition the set of locations.
## The synchrony matrix
There are numerous ways to generate a synchrony matrix, and `synmat` provides several
alternatives. For an initial demonstration, create some data in two synchronous clusters.
```{r artificial_clustering_data}
N<-5
Tmax<-100
rho<-0.5
sig<-matrix(rho,N,N)
diag(sig)<-1
d<-t(cbind(mvtnorm::rmvnorm(Tmax,mean=rep(0,N),sigma=sig),
mvtnorm::rmvnorm(Tmax,mean=rep(0,N),sigma=sig)))
d<-cleandat(d,1:Tmax,1)$cdat
```
Then make a synchrony matrix using Pearson correlation.
```{r pearson_synmat}
sm<-synmat(d,1:Tmax,method="pearson")
fields::image.plot(1:10,1:10,sm,col=heat.colors(20))
```
The function `synmat` provides many other options, beyond correlation, for different kinds of synchrony matrices. We demonstrate a frequency-specific approach. First create some artificial data.
```{r freq_spec_synmat}
N<-20
Tmax<-500
tim<-1:Tmax
ts1<-sin(2*pi*tim/5)
ts1s<-sin(2*pi*tim/5+pi/2)
ts2<-sin(2*pi*tim/12)
ts2s<-sin(2*pi*tim/12+pi/2)
gp1A<-1:5
gp1B<-6:10
gp2A<-11:15
gp2B<-16:20
d<-matrix(NA,Tmax,N)
d[,c(gp1A,gp1B)]<-ts1
d[,c(gp2A,gp2B)]<-ts1s
d[,c(gp1A,gp2A)]<-d[,c(gp1A,gp2A)]+matrix(ts2,Tmax,N/2)
d[,c(gp1B,gp2B)]<-d[,c(gp1B,gp2B)]+matrix(ts2s,Tmax,N/2)
d<-d+matrix(rnorm(Tmax*N,0,2),Tmax,N)
d<-t(d)
d<-cleandat(d,1:Tmax,1)$cdat
```
These data have period-5 oscillations which are synchronous within location groups 1 and 2, but are
asynchronous between these groups. Superimposed on the period-5 oscillations are period-12
oscillations which are synchronous within location groups A and B, but are asynchronous between
these groups. Groups 1 and 2 are locations $1-10$ and $11-20$, respectively. Group A is locations
$1-5$ and $11-15$. Group B is locations $6-10$ and $16-20$. So the spatial structure of period-5
oscillations differs from that of period-12 oscillations. Strong local noise is superimposed on top
of the periodic oscillations.
We measure synchrony matrices using portions of the cross-wavelet transform centered on periods
5 and 12 (in separate synchrony matrices), to detect the different structures on different
timescales.
```{r detect_tsspecific_sync}
sm5<-synmat(dat=d,times=1:Tmax,method="ReXWT",tsrange=c(4,6))
fields::image.plot(1:N,1:N,sm5,col=heat.colors(20))
sm12<-synmat(dat=d,times=1:Tmax,method="ReXWT",tsrange=c(11,13))
fields::image.plot(1:N,1:N,sm12,col=heat.colors(20))
```
This timescale-specific approach reveals the structure of the data better than a
correlation approach.
```{r abuse_cor}
sm<-synmat(dat=d,times=1:Tmax,method="pearson")
fields::image.plot(1:N,1:N,sm,col=heat.colors(20))
```
Several additional synchrony measures with which `synmat` can construct
synchrony matrices are described in the documentation of the function.
Important note: synchrony matrices can have negative values for some of the methods provided by `synmat`. This is appropriate, since correlation and other measures of synchrony can be negative, but it complicates cluster detection (see next section).
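For instance, the `ReXWT`-based matrices computed above can contain negative
entries, as a quick check (not run) shows:
```{r negative_entries, eval=FALSE}
range(sm5,na.rm=TRUE)
range(sm12,na.rm=TRUE)
```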
## Clustering
The function `clust` computes network modules/clusters and helps keep
information about them organized. That function is also the generator
function for the `clust` class. The class has `print` and `summary` and
`set` and `get` methods (see the help file for `clust_methods`).
The clustering algorithm used is a
slight adaptation of that of @Newman_2006 - see the next section for
details. We illustrate the use of `clust` using the artificial data
from the second example of the previous section.
```{r clust_demo}
#make some artificial coordinates for the geographic locations of where data were measured
coords<-data.frame(X=c(rep(1,10),rep(2,10)),Y=rep(c(1:5,7:11),times=2))
#create clusters based on the 5-year timescale range and map them using coords
cl5<-clust(dat=d,times=1:Tmax,coords=coords,method="ReXWT",tsrange=c(4,6))
get_clusters(cl5) #the first element of the list is always all 1s - prior to any splits
#call the mapper here
plotmap(cl5)
#plot mean time series for each module
plot(get_times(cl5)[1:100],get_mns(cl5)[[2]][1,1:100],type='l',col='red',
ylim=range(get_mns(cl5)),xlab="Time step",ylab="Mean pop.")
lines(get_times(cl5)[1:100],get_mns(cl5)[[2]][2,1:100],type='l',col='green')
legend(x="topright",legend=c("mod1","mod2"),lty=c(1,1),col=c("red","green"))
#create wavelet mean fields for each module and plot
cl5<-addwmfs(cl5)
plotmag(get_wmfs(cl5)[[2]][[1]])
plotmag(get_wmfs(cl5)[[2]][[2]])
#create clusters based on the 12-year timescale range and map them using coords
cl12<-clust(dat=d,times=1:Tmax,coords=coords,method="ReXWT",tsrange=c(11,13))
cl12$clusters
#call the mapper here
plotmap(cl12)
#plot mean time series for each module
plot(get_times(cl12)[1:100],get_mns(cl12)[[2]][1,1:100],type='l',col='red',
ylim=range(get_mns(cl12)),xlab="Time step",ylab="Mean pop.")
lines(get_times(cl12)[1:100],get_mns(cl12)[[2]][2,1:100],type='l',col='green')
legend(x="topright",legend=c("mod1","mod2"),lty=c(1,1),col=c("red","green"))
#create wavelet mean fields for each module and plot
cl12<-addwmfs(cl12)
plotmag(get_wmfs(cl12)[[2]][[1]])
plotmag(get_wmfs(cl12)[[2]][[2]])
```
Color intensity on maps of clusters indicates the strength of contribution
of a node to its module - details in the next section. The function `addwpmfs`
is similar to the function `addwmfs` demonstrated above, but adds wavelet phasor
mean field information to a `clust` object.
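For example (a sketch, not run; we assume `get_wpmfs` is the corresponding
accessor for the added wavelet phasor mean fields):
```{r addwpmfs_sketch, eval=FALSE}
cl5<-addwpmfs(cl5)
plotmag(get_wpmfs(cl5)[[2]][[1]])
plotmag(get_wpmfs(cl5)[[2]][[2]])
```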
## The clustering algorithm
The clustering algorithm used by `clust` is implemented in `cluseigen`, and is
a generalization of the algorithm of @Newman_2006. The algorithm makes use of
the concept of modularity. Modularity was defined by @Newman_2006 for
any unweighted, undirected network paired with a partitioning of the nodes
into modules. The modularity is then a single numeric score which is higher
for better partitionings, i.e., for partitionings that more effectively
group nodes that are more heavily connected and separate nodes that are
less connected. The ideal goal would be to find the partitioning that
maximizes the modularity score, but this is computationally infeasible
for realistically large networks [@Newman_2006]. Instead, the algorithm
of @Newman_2006, which is computationally very efficient, provides a
good partitioning that is not guaranteed to be optimal but is typically
close to optimal [@Newman_2006]. The modularity itself can be computed
rapidly, given a partitioning, with the function `modularity` in `wsyn`.
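For example, the modularity of the partition found above for the 5-year
timescale range can be computed from the corresponding synchrony matrix. This
is a sketch (not run); we assume the diagonal should be zeroed before the
matrix is treated as an adjacency matrix:
```{r modularity_sketch, eval=FALSE}
adj<-sm5
diag(adj)<-0 #no self-edges
memb<-get_clusters(cl5)[[2]] #module memberships after the first split
modularity(adj,memb,decomp=FALSE)
```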
Modularity is defined in @Newman_2006 for unweighted networks, and the
definition generalizes straightforwardly to weighted networks for which
all weights are non-negative. But synchrony matrices can represent
weighted networks for which some weights are allowed to be negative.
A generalization of modularity for this more general type of network was
defined by @Gomez_2009, and the `modularity` function computes this
generalization. Values are the same as the definition of @Newman_2006
for non-negatively weighted networks. The algorithm implemented by
`cluseigen` is a slight generalization of the original @Newman_2006
algorithm that applies to weighted networks for which some edges are
allowed to have negative weight.
We here describe the generalized algorithm, the validity of which was
realized by Lei Zhao. Let $w_{ij}$ be the adjacency matrix for a network,
so the $ij$th entry of this matrix is the weight of the edge between
nodes $i$ and $j$, $0$ if there is no edge. Let $C_i$ be the
community/module to which node $i$ is assigned and let $C_j$ be the
same for node $j$. The original definition of modularity, for
non-negative $w_{ij}$, is
$$Q=\frac{1}{2w}\sum_{ij} \left(w_{ij}-\frac{w_i w_j}{2w}\right)\delta(C_i,C_j),$$
where $w_i=\sum_j w_{ij}$, $w=\frac{1}{2}\sum_i w_i$, and $\delta$ is
the Kronecker delta function, equal to $1$ when $C_i=C_j$ and $0$ otherwise.
For the case of partitioning into two clusters, @Newman_2006 notes
that $\delta(C_i,C_j)=\frac{1}{2}(s_i s_j +1)$, where we define $s_i$
to be $1$ if node $i$ is in group $1$ and $-1$ if it is in group $2$.
Then
$$Q=\frac{1}{4w}\sum_{ij} \left(w_{ij}-\frac{w_i w_j}{2w}\right)(s_i s_j +1),$$
and it is easy to show this is
$$Q=\frac{1}{4w}\sum_{ij} \left(w_{ij}-\frac{w_i w_j}{2w}\right)s_i s_j.$$
Defining a matrix $\mathbf{B}$ such that $B_{ij}=w_{ij}-\frac{w_i w_j}{2w}$,
we have
$$Q=\frac{1}{4w}\mathbf{s}^\tau \mathbf{B} \mathbf{s},$$
where $\tau$ is transpose and the bold quantities are the vectors/matrices
composed of the indexed quantities denoted with the same symbol.
@Newman_2006 presents an argument that a good way to come close to
optimizing $Q$ over possible partitions into two modules is to choose
$s_i$ to be the same sign as the $i$th entry of the leading eigenvector
of $\mathbf{B}$, if the leading eigenvalue is positive (otherwise the
algorithm halts with no splits, returning the trivial "decomposition"
into one module). This gives the first split, according to the Newman
algorithm, of the network into modules. Subsequent splits are handled in
a similar but not identical way described by @Newman_2006. (In particular
it is incorrect to simply delete edges connecting the two modules from
the first split and apply the algorithm to the resulting graphs.)
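To make the procedure concrete, here is a minimal sketch (not run) of the
first split for a non-negatively weighted network with adjacency matrix `w`;
`cluseigen` implements the full, generalized algorithm, including the
subsequent splits:
```{r newman_first_split, eval=FALSE}
first.split<-function(w)
{
  wi<-rowSums(w) #node strengths, the w_i
  wtot<-sum(wi)/2 #total edge weight, w
  B<-w-outer(wi,wi)/(2*wtot)
  eig<-eigen(B,symmetric=TRUE)
  if (eig$values[1]<=0) {return(rep(1,nrow(w)))} #no split
  ifelse(eig$vectors[,1]>=0,1,2) #sign of leading eigenvector assigns modules
}
```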
The generalized modularity of @Gomez_2009 is defined as follows.
Let $w_{ij}^+ = \max(0,w_{ij})$ and $w_{ij}^- = \max(0,-w_{ij})$ so
that $w_{ij}=w_{ij}^+ - w_{ij}^-$. Let $w_i^+ = \sum_j w_{ij}^+$,
$w_i^- = \sum_j w_{ij}^-$, $w^+ = \frac{1}{2} \sum_i w_i^+$, and
$w^- = \frac{1}{2} \sum_i w_i^-$. Then @Gomez_2009 justifies the
definitions
$$Q^+ = \frac{1}{2w^+} \sum_{ij}\left(w_{ij}^+-\frac{w_i^+ w_j^+}{2w^+}\right)\delta(C_i,C_j),$$
$$Q^- = \frac{1}{2w^-} \sum_{ij}\left(w_{ij}^- - \frac{w_i^- w_j^-}{2w^-}\right)\delta(C_i,C_j),$$
and
$$Q=\frac{2w^+}{2w^+ + 2w^-}Q^+ - \frac{2w^-}{2w^+ + 2w^-}Q^-.$$
This is a generalization of the old definition of $Q$, in the sense
that it reduces to that definition in the case of non-negatively
weighted networks. @Gomez_2009 provides a probabilistic interpretation
that generalizes the probabilistic interpretation of @Newman_2006.
It is straightforward to show
$$Q=\frac{1}{2w^+ + 2w^-} \sum_{ij} \left(w_{ij}-\left(\frac{w_i^+ w_j^+}{2w^+} - \frac{w_i^- w_j^-}{2w^-}\right)\right)\delta(C_i,C_j).$$
Again considering an initial 2-module split and defining $s_i$ and
$s_j$ as previously, we have
$$Q=\frac{1}{4w^+ + 4w^-} \sum_{ij} \left(w_{ij}-\left(\frac{w_i^+ w_j^+}{2w^+} - \frac{w_i^- w_j^-}{2w^-}\right)\right)s_i s_j,$$
which can be written in matrix form as
$$Q=\frac{1}{4w^+ + 4w^-} \mathbf{s}^\tau \mathbf{E} \mathbf{s},$$
where $E_{ij}=w_{ij}-\left(\frac{w_i^+ w_j^+}{2w^+} - \frac{w_i^- w_j^-}{2w^-}\right)$.
Because the generalized modularity of @Gomez_2009 can be written in the
same matrix format as the modularity expression of @Newman_2006, the
same eigenvector-based algorithm for finding a close-to-optimal value
of the modularity can be used.
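The generalized definition is also straightforward to compute directly. The
sketch below (not run) evaluates $Q$ for an adjacency matrix `w`, possibly
containing negative weights, and a membership vector `C`; the `modularity`
function in `wsyn` is the full implementation:
```{r gomez_modularity_sketch, eval=FALSE}
gen.modularity<-function(w,C)
{
  wp<-pmax(w,0) #the w_ij^+
  wm<-pmax(-w,0) #the w_ij^-
  wip<-rowSums(wp)
  wim<-rowSums(wm)
  Wp<-sum(wip)/2
  Wm<-sum(wim)/2
  nullpart<-outer(wip,wip)/(2*Wp)
  if (Wm>0) {nullpart<-nullpart-outer(wim,wim)/(2*Wm)} #term absent if no negative weights
  E<-w-nullpart
  sum(E*outer(C,C,"=="))/(2*Wp+2*Wm)
}
```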
The quantity
$$\frac{1}{2w^+ + 2w^-} \sum_{j} \left(w_{ij}-\left(\frac{w_i^+ w_j^+}{2w^+} - \frac{w_i^- w_j^-}{2w^-}\right)\right)\delta(C_i,C_j)$$
is the contribution of node $i$ to the modularity. It is the extent to
which node $i$ is more connected to other nodes in its module than
expected by chance [@Newman_2006;@Gomez_2009], and can be interpreted as
a strength of membership of a node in its module. The `plotmap`
function (demonstrated above) has an option for coloring nodes according
to this quantity.
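These node-wise contributions can be obtained via the decomposition option of
`modularity` (a sketch, not run; we assume `decomp=TRUE` returns the node-wise
decomposition, as described in that function's documentation):
```{r node_contrib_sketch, eval=FALSE}
adj<-sm5
diag(adj)<-0
modularity(adj,get_clusters(cl5)[[2]],decomp=TRUE)
```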
<!--# Analysis of gypsy moth defoliation time series
To be completed later.-->
# Acknowledgements
This material is based upon work supported by the National Science Foundation
under grant numbers 17114195 and 1442595, and by the James S McDonnell Foundation.
Any opinions, findings, and conclusions
or recommendations expressed in this material are those of the authors and do
not necessarily reflect the views of the National Science Foundation or the McDonnell Foundation.
We thank all
users of the package who have reported or will later report ways in
which the package could be improved.
# References
| /scratch/gouwar.j/cran-all/cranData/wsyn/vignettes/wsynvignette.Rmd |
cluster<-function(data){
  # two-mean clustering of a single methylation variable
  res<-kmeans(data,2)
  # relabel so that the cluster with the larger center is always cluster 2
  if(res$centers[1,1] > res$centers[2,1]){
    l<-which(res$cluster==1)
    res$cluster[res$cluster==2]<-1
    res$cluster[l]<-2
  }
  # recode to 0 (low methylation level) / 1 (high methylation level)
  meth.recode<-res$cluster-1
  return(meth.recode)
}
| /scratch/gouwar.j/cran-all/cranData/wtest/R/cluster.R |
# mean and variance of the test statistic among the bootstrap results
# whose degrees-of-freedom index (the last column of 'data') equals k
Mean.Variance.calculation.by.K<-function(k,data,w.order){
  df.column<-w.order+2
  x2.column<-df.column-1
  mean.mv<-mean(data[which(data[,df.column]==k),x2.column])
  var.mv<-var(data[which(data[,df.column]==k),x2.column])
  var.mv<-ifelse(is.na(var.mv),0,var.mv)
  return(array(c(mean.mv,var.mv),dim=c(1,2)))
}
W.null.calculate.for.hf<-function(w.order,n.sample,n.marker,data){
  # optionally subsample individuals and markers to speed up bootstrapping
  if(n.sample < nrow(data)){
    sample.select<-sample(nrow(data),n.sample)
    data<-data[sample.select,]
  }
  if(n.marker < ncol(data)){
    snp.select<-sample(ncol(data),n.marker)
    data<-data[,snp.select]
  }
  # generate a random null phenotype
  y<-sample(0:1,n.sample,replace=T)
  w.order<-unlist(w.order)
  # evaluate the statistic for every w.order-way marker set under the null
  set<-apply(t(combn(n.marker,w.order)),1,list)
  result<-lapply(set,x2.high,data,y,w.order)
  result.all<-do.call(rbind,result)
k.row<-3^w.order-1
mean.variance<-array(0,dim=c(k.row,2))
df.column<-w.order+2
k.min<-min(result.all[,df.column])
k.max<-max(result.all[,df.column])
for(i in k.min:k.max){
mean.variance[i,]=Mean.Variance.calculation.by.K(i,result.all,w.order)
}
if(0 %in% mean.variance[,2]){
mean.variance[which(mean.variance[,2]==0),1]=0
}
return(mean.variance)
}
#' Parameter Estimation for W-test Probability Distribution
#'
#' @description Estimate parameters (\emph{h} and \emph{f}) for \code{W-test}.
#' @param B a numeric number specifying the number of replicates. Default is 400.
#' @param data a data frame or matrix containing genotypes in the columns and subjects in the rows. Genotypes should be coded as (0, 1, 2) or (0, 1).
#' @param w.order a numeric number. \code{w.order} = 1 gives main effect calculation. \code{w.order} = 2 gives pairwise interaction calculation. \code{w.order} > 2 gives high order interaction calculation.
#' @param n.sample a numeric number specifying the number of samples to be used for estimating parameters. Default is the total number of samples in the data.
#' @param n.marker a numeric value, the number of biomarkers to include in bootstrapping. For \code{w.order} = 1, the default is min(P, 1000); for \code{w.order} = 2, the default is min(P, 50). P is the total number of markers in the data.
#' @return a set of \emph{h} and \emph{f} values indexed by \emph{k}, estimated automatically. For main effect, \emph{k} is the number of levels of a predictor variable. For interactions, \emph{k} is the number of categorical combinations of a variable pair.
#' @examples
#' data(diabetes.geno)
#'
#' # Please note that parameter B is recommended to be greater than 400.
#' # For high order interaction analysis (w.order > 2), it is recommended to use default n.sample.
#' hf1 <- hf(data = diabetes.geno, w.order = 1, B = 100)
#' hf2 <- hf(data = diabetes.geno, w.order = 2, B = 80)
#' @export
#' @author Rui Sun, Maggie Haitian Wang
#' @references Maggie Haitian Wang, Rui Sun, Junfeng Guo, Haoyi Weng, Jack Lee, Inchi Hu, Pak Sham and Benny C.Y. Zee (2016). A fast and powerful W-test for pairwise epistasis testing. Nucleic Acids Research. doi:10.1093/nar/gkw347.
#' @seealso \code{\link{wtest}}, \code{\link{w.diagnosis}}, \code{\link{w.qqplot}}
#' @importFrom utils combn
#' @importFrom stats var
hf<-function(data,w.order,B=400,n.sample=nrow(data),n.marker="default.nmarker"){
suppressWarnings(if(typeof(n.marker)=="character") n.marker<-ifelse(w.order==1,1000,50))
n.marker<-min(ncol(data),n.marker)
if(is.data.frame(data))
data<-as.matrix(data)
if(any(is.na(data)))
stop("NA occurs in data")
if(!all(data %in% c(0,1,2)))
stop("all the genotypes in 'data' must be 0, 1 or 2")
if(!is.numeric(B))
B<-as.numeric(B)
set<-apply(array(w.order,dim=c(B,1)),1,list)
result<-lapply(set,W.null.calculate.for.hf,n.sample,n.marker,data)
result<-apply(simplify2array(result),c(1,2),sum)
result<-result/B
  # method-of-moments estimates: if h*X2 follows a chi-squared distribution
  # with f degrees of freedom, then h = 2*mean(X2)/var(X2) and f = h*mean(X2)
  h<-result[,1]*2/result[,2]
  f<-result[,1]*h
hf.result<-cbind(h,f)
if(1 %in% is.na(hf.result)){
k<-which(is.na(hf.result[,1]))
hf.result[k,1]<-k/(k+1)
hf.result[k,2]<-k
}
k<-c(2:(nrow(hf.result)+1))
hf.result<-cbind(k,hf.result)
return(hf.result)
}
| /scratch/gouwar.j/cran-all/cranData/wtest/R/hf.R |
W.null.calculate.for.hf.set<-function(w.order, y, n.sample, n.pair, data, data.methylation, set.all){
  # permute the phenotype and evaluate the statistic on a random
  # subset of n.pair SNP-CpG pairs drawn from set.all
  y <- y[sample(1:length(y), length(y))]
  w.order <- unlist(w.order)
  set.random <- sample(1:nrow(set.all), n.pair)
  set <- set.all[set.random, ]
  set <- apply(set, 1, list)
  result <- lapply(set, x2.set, data, data.methylation, y)
  result.all <- do.call(rbind, result)
k.row <- ifelse(w.order==1, 2, 8)
mean.variance <- array(0, dim = c(k.row, 2))
df.column <- ifelse(w.order == 1, 3, 4)
k.min <- min(result.all[, df.column])
k.max <- max(result.all[, df.column])
for(i in k.min:k.max){
mean.variance[i,] = Mean.Variance.calculation.by.K(i, result.all, w.order)
}
if(0 %in% mean.variance[,2]){
mean.variance[which(mean.variance[,2] == 0), 1] = 0
}
return(mean.variance)
}
#' Parameter Estimation for W-test Probability Distribution in Gene-methylation Data
#'
#' @description Estimate parameters (\emph{h} and \emph{f}) for \code{W-test}.
#' @param B a numeric number specifying the number of bootstrapping times. Default is 400.
#' @param geno a data frame or matrix containing genotypes in the columns. Genotypes should be coded as (0, 1, 2) or (0, 1). SNP names should be stored as column names.
#' @param meth a data frame or matrix containing methylation data in the columns. Methylation data should be recoded as (0, 1, 2) or (0, 1). Names of CpG sites should be stored as column names.
#' @param y a numeric vector of 0 or 1.
#' @param geno.pos a data frame containing SNP names and positions in two columns.
#' @param meth.pos a data frame containing CpG names and positions in two columns.
#' @param window.size a numeric number specifying the size of genome distance. Interaction of the SNPs and CpG sites located within the size of genome distance will be evaluated exhaustively.
#' @param n.sample a numeric number specifying the number of samples to be included for estimating parameters. Default is the total number of samples.
#' @param n.pair a numeric value, the number of SNP-CpG pairs to use in bootstrapping. Default = min(P, 1000). P is the total number of pairs within the \code{window.size}.
#' @return a set of \emph{h} and \emph{f} values indexed by \emph{k}, estimated automatically. Variable \emph{k} is the number of categorical combinations of a variable pair.
#'
#' @examples
#' data(SNP.pos)
#' data(CpG.pos)
#' data(genotype)
#' data(methylation)
#' data(phenotype2)
#'
#' # Please note that parameter B is recommended to be greater than 400.
#' hf.pair <- hf.snps.meth(B = 80, geno = genotype, meth = methylation, y = phenotype2,
#' geno.pos = SNP.pos, meth.pos = CpG.pos, window.size = 1000)
#'
#' @export
#' @author Rui Sun, Maggie Haitian Wang
#' @references Maggie Haitian Wang, Rui Sun, Junfeng Guo, Haoyi Weng, Jack Lee, Inchi Hu, Pak Sham and Benny C.Y. Zee (2016). A fast and powerful W-test for pairwise epistasis testing. Nucleic Acids Research. doi:10.1093/nar/gkw347.
#' @importFrom utils combn
#' @importFrom stats var
hf.snps.meth<-function(B = 400, geno, meth, y, geno.pos, meth.pos, window.size, n.sample = nrow(geno), n.pair = 1000){
if(is.data.frame(geno))
geno <- as.matrix(geno)
if(!all(geno %in% c(0,1,2)))
stop("all the genotypes in 'data.genotype' must be 0, 1 or 2")
if(!is.numeric(B))
B <- as.numeric(B)
if(any(is.na(y)))
stop("NA occurs in y")
if(!all(y %in% c(0,1)))
stop("all the genotypes in 'y' must be 0 or 1")
set <- apply(array(2, dim=c(B,1)), 1, list)
snp.names <- colnames(geno)
cpg.names <- colnames(meth)
l1 <- match(snp.names, geno.pos[,1])
l2 <- match(cpg.names, meth.pos[,1])
if(any(is.na(l1)))
stop("missing SNP position exists")
if(any(is.na(l2)))
stop("missing CpG position exists")
geno.pos <- geno.pos[l1,]
meth.pos <- meth.pos[l2,]
  # enumerate all SNP-CpG pairs whose genomic positions are within window.size
  index.set <- data.frame()
  for(i in 1:nrow(geno.pos)){
    index <- which(abs(geno.pos[,2][i] - meth.pos[,2]) <= window.size)
    if(length(index)){
      index.i <- cbind(i, index)
      index.set <- rbind(index.set, index.i)
    }
  }
n.pair <- min(nrow(index.set), n.pair)
result <- lapply(set, W.null.calculate.for.hf.set, y, n.sample, n.pair, geno, meth, index.set)
result <- apply(simplify2array(result), c(1,2), sum)
result <- result/B
h <- result[,1] * 2/result[,2]
f <- result[,1] * h
hf.result <- cbind(h,f)
if(1 %in% is.na(hf.result)){
k <- which(is.na(hf.result[,1]))
hf.result[k,1] <- k/(k+1)
hf.result[k,2] <- k
}
k <- c(2:(nrow(hf.result) + 1))
hf.result <- cbind(k, hf.result)
return(hf.result)
}
| /scratch/gouwar.j/cran-all/cranData/wtest/R/hf.snps.meth.R |
#' Minor Allele Frequency
#'
#' @description Calculate minor allele frequency.
#' @param data a data frame or matrix containing genotypes in the columns. Genotypes should be coded as (0, 1, 2) or (0, 1).
#' @param which.snp a numeric value, indicating which SNP to calculate. When which.snp = NULL, MAF of all the markers is calculated. Default is NULL.
#' @return The MAF of the specified marker, or a vector of MAFs for all markers when \code{which.snp} is NULL.
#' @examples
#' data(diabetes.geno)
#' result <- maf(diabetes.geno, which.snp=10)
#' @export
maf <- function(data, which.snp = NULL){
if(!is.null(which.snp) & !is.numeric(which.snp))
stop("the 'which.snp' should be numeric!")
if(!is.matrix(data))
data <- as.matrix(data)
if(any(is.na(data)))
stop("NA occurs in data")
if(!all(data %in% c(0,1,2)))
stop("all the genotypes in data should be 0, 1 or 2")
if(!is.null(which.snp)){
result.maf <- mean(data[,which.snp])/2
}else{
result.maf <- colMeans(data)/2
}
return(result.maf)
}
| /scratch/gouwar.j/cran-all/cranData/wtest/R/maf.R |
#' Recode Methylation Data
#' @description {Code a CpG variable into two levels (high and low) by the two-mean clustering method.}
#' @param data a data frame or matrix contains methylation data in the columns.
#' @examples
#' data(methylation)
#' data.recoded <- methylation.recode(methylation)
#' @export
#' @importFrom stats kmeans
#'
methylation.recode<-function(data){
data.recode<-apply(data,2,cluster)
return(data.recode)
}
| /scratch/gouwar.j/cran-all/cranData/wtest/R/methylation.recode.R |
#' Odds Ratio
#'
#' @description Calculate the odds ratio for a single SNP or a pair of SNPs. The single-marker odds ratio is computed from the contingency table as the odds of disease at the minor allele vs the odds of disease at the major allele. The odds ratio of a pair of SNPs is the interaction odds ratio estimated by logistic regression.
#' @param data a data frame or matrix containing genotypes in the columns. Genotypes should be coded as (0, 1, 2) or (0, 1), according to minor allele count.
#' @param y binary values.
#' @param w.order a numeric number taking values 1 or 2. If w.order = 1, odds ratio of main effect is calculated. If w.order = 2, odds ratio of pairwise interaction is calculated.
#' @param which.marker a numeric vector, when w.order = 1, a single value indicating the column index of the variable to calculate; when w.order = 2, a vector indicating the column index of a SNP-pair to calculate.
#' @return The odds ratio of a SNP or a SNP-pair.
#' @export
#' @examples
#' data(diabetes.geno)
#' data(phenotype1)
#' y <- as.numeric(phenotype1)
#' OR.snp4.snp8 <- odds.ratio(diabetes.geno, y, w.order=2, which.marker = c(4,8))
#' OR.snp4 <- odds.ratio(diabetes.geno, y, w.order = 1, which.marker = 4)
#' @importFrom stats glm binomial
odds.ratio<-function(data,y,w.order,which.marker){
if(!is.data.frame(data))
data<-as.data.frame(data)
if(any(is.na(data)))
stop("NA occurs in data")
if(!all(as.matrix(data) %in% c(0,1,2)))
stop("all the genotypes in 'data' must be 0, 1 or 2")
if(any(is.na(y)))
stop("NA occurs in y")
if(!all(y %in% c(0,1)))
stop("all the genotypes in 'y' must be 0 or 1")
if(!is.null(which.marker) & length(which.marker)!=w.order)
stop("the length of 'which.marker' is not equal to 'w.order'")
if(length(y)!=nrow(data))
stop("'data' and 'y' must have the same length")
n.snp<-ncol(data)
if(w.order==1){
or.table<-table(as.matrix(data[,which.marker]),y)
if(0 %in% or.table)
or.table<-or.table+0.5
if(nrow(or.table)==3)
result.oddsratio<-(2*or.table[3,2]+or.table[2,2])*(2*or.table[1,1]+or.table[2,1])/(2*or.table[1,2]+or.table[2,2])/(2*or.table[3,1]+or.table[2,1])
else
result.oddsratio<-or.table[2,2]*(2*or.table[1,1]+or.table[2,1])/(2*or.table[1,2]+or.table[2,2])/or.table[2,1]
}
else if(w.order==2){
result.glm<-glm(y~data[,which.marker[1]]*data[,which.marker[2]],family=binomial)$coefficients[4]
result.oddsratio<-unname(exp(result.glm))
}
return(result.oddsratio)
}
| /scratch/gouwar.j/cran-all/cranData/wtest/R/odds.ratio.R |
#'@useDynLib wtest, .registration = TRUE
table.e1<-function(x,y){
  # contingency-table counts for a single marker x against the binary
  # phenotype y, computed by a registered C routine (see the C source)
  .Call("table_e1",x,y)
}
table.e2<-function(x1,x2,y){
  # contingency-table counts for the joint categories of a marker pair
  .Call("table_e2",x1,x2,y)
}
| /scratch/gouwar.j/cran-all/cranData/wtest/R/table.function.R |
w.null<-function(index,w.order,n.sample,n.marker,data,hf){
if(n.sample < nrow(data)){
sample.select<-sample(nrow(data),n.sample)
data<-data[sample.select,]
}
if(n.marker < ncol(data)){
snp.select<-sample(ncol(data),n.marker)
data<-data[,snp.select]
}
y<-sample(0:1,n.sample,replace=T)
w.order<-unlist(w.order)
set<-apply(t(combn(n.marker,w.order)),1,list)
result<-lapply(set,x2,data,y,w.order)
result.all<-do.call(rbind,result)
df.column<-ifelse(w.order==1,3,4)
w.column<-df.column-1
w.value<-result.all[,w.column]*hf[result.all[,df.column],1]
df.k<-hf[result.all[,df.column],2]
w.null<-cbind(w.value,df.k)
return(w.null)
}
#' W-test Probability Distribution Diagnostic Plot
#'
#' @description Diagnostic checking of W-test probability distribution estimation.
#' @param data a data frame or matrix containing genotypes in the columns. Genotypes should be coded as (0, 1, 2) or (0, 1).
#' @param w.order an integer value of 1 or 2. \code{w.order} = 1 gives main effect calculation; \code{w.order} = 2 gives pairwise calculation.
#' @param n.rep a numeric value, the number of bootstrapping times.
#' @param n.sample a numeric value, the number of samples to use in bootstrapping. Default is the total number of samples in the data.
#' @param n.marker a numeric value, the number of markers to use in bootstrapping. Default is the total number of markers.
#' @param hf1 \emph{h} and \emph{f} values to calculate main effect, organized as a matrix, with columns (\emph{k}, \emph{h}, \emph{f}), \emph{k} = 2 to 3. Needed when \code{w.order} = 1.
#' @param hf2 \emph{h} and \emph{f} values to calculate interaction associations, organized as a matrix, with columns (\emph{k}, \emph{h}, \emph{f}), \emph{k} = 2 to 9. Needed when \code{w.order} = 2.
#' @param ... graphical parameters.
#'
#' @details {This function computes \code{W-test} values of main or interaction effects under a null (randomly generated) phenotype, repeating the evaluation over several bootstrap samples to achieve fast and stable output. The W histogram and the theoretical Chi-squared distribution density with \emph{f} degrees of freedom are plotted, indexed by \emph{k}. Close overlay of the histogram and the probability density curve indicates that the estimated \emph{h} and \emph{f} give a good test statistic probability distribution.}
#'
#' @examples
#' data(diabetes.geno)
#' # Please note that parameter B is recommended to be greater than 400.
#' hf1 <- hf(data = diabetes.geno, w.order = 1, B = 100)
#' hf2 <- hf(data = diabetes.geno, w.order = 2, B = 50)
#' w.diagnosis(diabetes.geno, w.order = 1, n.rep = 100, hf1 = hf1, main=NULL, xlab=NULL, ylab=NULL)
#' w.diagnosis(diabetes.geno, w.order = 2, n.rep = 100, hf2 = hf2, main=NULL, xlab=NULL, ylab=NULL)
#' @export
#' @author Rui Sun, Maggie Haitian Wang
#' @references Maggie Haitian Wang, Rui Sun, Junfeng Guo, Haoyi Weng, Jack Lee, Inchi Hu, Pak Sham and Benny C.Y. Zee (2016). A fast and powerful W-test for pairwise epistasis testing. Nucleic Acids Research. doi:10.1093/nar/gkw347.
#' @seealso \code{\link{wtest}}, \code{\link{hf}}, \code{\link{w.qqplot}}
#' @importFrom graphics par hist lines text mtext plot legend
#' @importFrom stats rchisq density
#' @importFrom utils combn
w.diagnosis<-function(data, w.order=c(1,2), n.rep=10, n.sample=nrow(data), n.marker=ncol(data),
hf1="default.hf1", hf2="default.hf2", ...){
suppressWarnings(if(typeof(hf1) == "character"){
hf1 = array(c(0.5,0.667,1,2), dim = c(2,2))
}else{
hf1 = hf1[,2:3]
})
suppressWarnings(if(typeof(hf2) == "character"){
hf2 = array(c(0.5,0.667,0.75,0.8,0.833,0.857,0.875,0.889,1:8), dim = c(8,2))
}else{
hf2 = hf2[,2:3]
})
if(is.data.frame(data))
data <- as.matrix(data)
if(any(is.na(data)))
stop("NA occurs in data")
if(!all(data %in% c(0,1,2)))
stop("all the genotypes in 'data' must be 0, 1 or 2")
if(w.order == 1){
hf <- hf1
}else {
hf <- hf2
}
index <- apply(array(1:n.rep, dim=c(n.rep, 1)), 1, list)
w.value <- lapply(index, w.null, w.order, n.sample, n.marker, data, hf)
w.all <- do.call(rbind, w.value)
k.all <- match(unique(w.all[,2]), hf[,2]) + 1
n.p <- length(k.all)
op <- par(no.readonly = TRUE)
par(mfrow = c(ceiling(n.p/2), min(2,n.p)), oma = c(3, 2, 2, 1), mai = c(0.4,0.4,0.1,0.1), xpd = T)
for(k in sort(k.all,decreasing = T)){
o.v <- w.all[which(w.all[,2] == hf[k-1,2]), 1]
e.v <- rchisq(length(o.v), df = hf[k-1,2])
hist(o.v,freq = F, ylim = c(0, max(max(density(o.v, na.rm = T)[[2]]), max(density(e.v)[[2]]))), ...)
lines(density(e.v, na.rm = T), lty = 1, col = "red")
text(x = max(o.v)*4/5, y = max(density(o.v, na.rm = T)[[2]])*4/5, paste0("k = ", k, "; df = ", round(hf[k-1,2],2)))
}
mtext("W value", outer = T, side = 1, cex = 0.7)
mtext("Density", outer = T, side = 2, cex = 0.7)
par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE)
plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n")
legend("bottom", legend = c("Expected ","Observed"), ncol = 2, cex = 1,
inset = c(0, 0), bty = "n", pch = c(NA,0), col = c("red","black"), lwd = c(1.5,NA), lty = c(1,NA))
par(op)
}
| /scratch/gouwar.j/cran-all/cranData/wtest/R/w.diagnosis.R |
#' W P-values Diagnosis by Q-Q Plot
#' @description Draw a Q-Q plot for W-test
#' @param data a data frame or matrix containing genotypes in the columns. Genotypes should be coded as (0, 1, 2) or (0, 1).
#' @param y a numeric vector of 0 or 1.
#' @param w.order a numeric number taking values 1 or 2. \code{w.order} = 1 gives main effect Q-Q plot. \code{w.order} = 2 gives interaction Q-Q plot.
#' @param hf1 \emph{h} and \emph{f} values to calculate main effect, organized as a matrix, with columns (\emph{k}, \emph{h}, \emph{f}), \emph{k} = 2 to 3. Needed when \code{w.order} = 1.
#' @param hf2 \emph{h} and \emph{f} values to calculate interaction associations, organized as a matrix, with columns (\emph{k}, \emph{h}, \emph{f}), \emph{k} = 2 to 9. Needed when \code{w.order} = 2.
#' @param input.poolsize a numeric number; The maximum number of SNPs to calculate the Q-Q plot. Default is 200. The \code{input.poolsize} is suggested to set as 1000 for \code{w.order} = 1, and 200 for \code{w.order} = 2.
#' @param ... graphical parameters.
#' @return Q-Q plot
#' @details
#' With a given data and y, the p-value of W-test is calculated at given \emph{h} and \emph{f} values, which are plotted against the theoretical distribution.
#'
#' @examples
#' data(diabetes.geno)
#' data(phenotype1)
#' ## Step 1. HF Calculation
#' # Please note that parameter B is recommended to be greater than 400.
#' hf1<-hf(data = diabetes.geno, w.order = 1, B = 200)
#'
#' ## Step 2. Q-Q Plot
#' w.qqplot(data = diabetes.geno, y = phenotype1, w.order = 1, hf1 = hf1, cex =.5)
#' abline(0,1)
#' @export
#' @importFrom utils combn
#' @importFrom stats pchisq runif qqplot
#'
w.qqplot<-function(data, y, w.order=c(1,2), input.poolsize=200, hf1="default.hf1", hf2="default.hf2",
...){
suppressWarnings(if(typeof(hf1)=="character"){hf1=array(c(0.5,0.667,1,2),dim=c(2,2))}else{hf1=hf1[,2:3]})
suppressWarnings(if(typeof(hf2)=="character"){hf2=array(c(0.5,0.667,0.75,0.8,0.833,0.857,0.875,0.889,1:8),dim=c(8,2))}else{hf2=hf2[,2:3]})
if(is.data.frame(data))
data<-as.matrix(data)
if(any(is.na(data)))
stop("NA occurs in data")
if(!all(data %in% c(0,1,2)))
stop("all the genotypes in 'data' must be 0, 1 or 2")
if(any(is.na(y)))
stop("NA occurs in y")
if(!all(y %in% c(0,1)))
stop("all the genotypes in 'y' must be 0 or 1")
n.snp<-ncol(data)
if(w.order==1){
set<-lapply(1:n.snp,list)
}else if(w.order==2){
if(n.snp<=input.poolsize){
set<-apply(t(combn(n.snp,2)),1,list)
}else{
l.select<-sample(1:n.snp,input.poolsize,replace=F)
set<-apply(t(combn(l.select,2)),1,list)
}
}
result<-lapply(set,x2,data,y,w.order)
result.all<-do.call(rbind,result)
x2.column<-ifelse(w.order==1,2,3)
if(w.order==1){
hf<-hf1
}else {
hf<-hf2
}
df.column<-x2.column+1
pval.column<-x2.column+2
w.value<-result.all[,x2.column]*hf[result.all[,df.column],1]
p.value.observed<-pchisq(w.value,df=hf[result.all[,df.column],2],lower.tail=F)
o = -log10(sort(p.value.observed,decreasing=F))
e = -log10(1:length(o)/length(o))
qqplot(e,o, xlab = paste0("Expected -log(p-value)"), ylab = "Observed -log(p-value)", ...)
}
| /scratch/gouwar.j/cran-all/cranData/wtest/R/w.qqplot.R |
#' W-test
#'
#' @description {This function performs the \code{W-test} to calculate main effect or pairwise interactions in case-control studies
#' for categorical data sets. The test measures target variables' distributional difference between cases and controls via a combined
#' log of odds ratio. It follows a Chi-squared probability distribution with data-adaptive degrees of freedom. For pairwise interaction
#' calculation, the user has 3 options: (1) calculate a single pair's W-value, (2) calculate pairwise interaction for a list of variables,
#' which p-values are smaller than a threshold (\code{input.pval}); (3) calculate the pairwise interaction exhaustively for all variables.
#' For both main and interaction calculation, the output can be filtered by p-values, such that only sets with smaller p-value
#' than a threshold (\code{output.pval}) will be returned. An extension of the W-test for rare variant analysis is available in \code{zfa} package.}
#' @param data a data frame or matrix containing genotypes in the columns. Genotypes should be coded as (0, 1, 2) or (0, 1).
#' @param y a numeric vector of 0 or 1.
#' @param w.order an integer value of 1 or 2. \code{w.order} = 1 for main effect calculation; \code{w.order} = 2 for pairwise calculation.
#' @param hf1 \emph{h} and \emph{f} values to calculate main effect, organized as a matrix, with columns (\emph{k}, \emph{h}, \emph{f}), \emph{k} = 2 to 3. Needed when \code{w.order} = 1.
#' @param hf2 \emph{h} and \emph{f} values to calculate interaction associations, organized as a matrix, with columns (\emph{k}, \emph{h}, \emph{f}), \emph{k} = 2 to 9. Needed when \code{w.order} = 2.
#' @param which.marker a numeric vector, when \code{w.order} = 1, a single value indicating the column index of a SNP to calculate, when \code{w.order} = 2, a vector indicating the column index of a SNP-pair to calculate. Default \code{which.marker} = NULL means main or interaction effect will be calculated exhaustively.
#' @param output.pval a p-value threshold for filtering the output. If NULL, all the results will be listed; otherwise, the function will only output the results with p-values smaller than the \code{output.pval}.
#' @param sort a logical value indicating whether or not to sort the output by p-values in ascending order. Default = TRUE.
#' @param input.pval a p-value threshold to select markers for pairwise calculation, used only when \code{w.order} = 2. When specified, only markers with main effect p-value smaller than \code{input.pval} will be passed to interaction effect calculation. Default = 0.10. Set \code{input.pval} = NULL or 1 for exhaustive pairwise calculation.
#' @param input.poolsize an integer, with value less than the number of input variables. It is an optional filter to control the maximum number of variables to include in pairwise calculation, used only when \code{w.order} = 2. When specified, the function selects the top \code{input.poolsize} number of variables to calculate pairwise interactions. It can be used separately or jointly with \code{input.pval}, whichever gives the smaller input variable pool size. Default = 150. Set \code{input.poolsize} = NULL for exhaustive pairwise calculation. It can be useful for data exploration, when there are a large number of variables with extremely small main effect p-values.
#' @return An object \code{"wtest"} containing:
#'
#' \item{order}{the "w.order" specified.}
#'
#' \item{results}{When \code{w.order} = 1, the test results include: the ID of SNP, the W value, \emph{k}, and p-value. When \code{w.order} = 2 and \code{which.marker} = NULL, the test results include: (information of the pair, column 1-5) [SNP1 name, SNP2, name, W-value, k, p-value]; (Information of the first variable in the pair, column 6-8) [W-value, k, p-value]; (Information of the second variable in the pair, column 9-11) [W-value, k, p-value].}
#'
#' \item{hf1}{The \emph{h} and \emph{f} values used in main effect calculation.}
#'
#' \item{hf2}{The \emph{h} and \emph{f} values used in pairwise interaction calculation.}
#'
#' @details {W-test is a model-free statistical test to measure main effect or pairwise interactions in case-control studies with categorical variables.
#' Theoretically, the test statistic follows a Chi-squared distribution with \emph{f} degrees of freedom. The data-adaptive degree of freedom \emph{f},
#' and a scalar \emph{h} in the test statistics allow the W-test to correct for distributional bias due to sparse data and small sample size.
#' Let \emph{k} be the number of columns of the 2 by \emph{k} contingency table formed by a single variable or a variable pair.
#' When the sample size is large and there is no population stratification, the \emph{h} and \emph{f} will approximate well to the theoretical
#' value \emph{h} = (\emph{k}-1)/\emph{k}, and \emph{f} = \emph{k}-1. When sample size is small and there is population stratification, the \emph{h} and
#' \emph{f} will vary to correct for distributional bias caused by the data structure.}
#'
#' {When \code{w.order} = 2, \code{wtest()} will automatically calculate the main effect first and then apply a pre-filter before calculating interactions.
#' This filtering avoids overloading the memory before having a better understanding of the data. The user can specify a smaller \code{input.pval} such as 0.05 or 0.001
#' for less output, or \code{input.pval} = 1 or NULL for exhaustive pairwise calculation. Another optional filter is \code{input.poolsize}. It will take the top \code{input.poolsize}
#' number of variables to calculate pairwise effects exhaustively, selected by smallest p-value; when used together with \code{input.pval}, the smaller set will be passed to pairwise calculation.}
#'
#' @examples
#' data(diabetes.geno)
#' data(phenotype1)
#'
#' ## Step 1. HF Calculation
#' # Please note that parameter B is recommended to be greater than 400.
#' hf1 <- hf(data = diabetes.geno, w.order = 1, B = 100)
#' hf2 <- hf(data = diabetes.geno, w.order = 2, B = 50)
#'
#' ## Step 2. W-test Calculation
#' w1 <- wtest(diabetes.geno, phenotype1, w.order = 1, hf1 = hf1)
#' w2 <- wtest(diabetes.geno, phenotype1, w.order = 2, input.pval = 0.3,
#' input.poolsize = 50, output.pval = 0.01, hf1 = hf1, hf2 = hf2)
#' w.pair <- wtest(diabetes.geno, phenotype1, w.order = 2, which.marker = c(10,13), hf2 = hf2)
#' @export
#' @author Rui Sun, Maggie Haitian Wang
#' @references Maggie Haitian Wang, Rui Sun, Junfeng Guo, Haoyi Weng, Jack Lee, Inchi Hu, Pak Sham and Benny C.Y. Zee (2016). A fast and powerful W-test for pairwise epistasis testing. Nucleic Acids Research. doi:10.1093/nar/gkw347.
#' @references Maggie Haitian Wang, Haoyi Weng, Rui Sun, Jack Lee, William K.K. Wu, Ka Chun Chong, Benny C.Y. Zee. (2017). A Zoom-Focus algorithm (ZFA) to locate the optimal testing region for rare variant association tests. Bioinformatics, 33(15), 2330-2336.
#' @seealso \code{\link{hf}}, \code{\link{w.diagnosis}}, \code{\link{w.qqplot}}
#' @importFrom utils combn
#' @importFrom stats pchisq
wtest<-function(data, y, w.order = c(1,2), hf1="default.hf1", hf2="default.hf2",
which.marker = NULL, output.pval = NULL, sort = TRUE, input.pval = 0.10, input.poolsize = 150){
suppressWarnings(if(typeof(hf1) == "character"){
hf1 = array(c(0.5,0.667,1,2), dim=c(2,2))
}else{
hf1 = hf1[,2:3]
})
suppressWarnings(if(typeof(hf2) == "character"){
hf2 = array(c(0.5,0.667,0.75,0.8,0.833,0.857,0.875,0.889,1:8), dim=c(8,2))
}else{
hf2 = hf2[,2:3]
})
if(is.data.frame(data))
data <- as.matrix(data)
if(any(is.na(data)))
stop("NA occurs in data")
if(!all(data %in% c(0,1,2)))
stop("all the genotypes in 'data' must be 0, 1 or 2")
if(any(is.na(y)))
stop("NA occurs in y")
if(!all(y %in% c(0,1)))
stop("all the genotypes in 'y' must be 0 or 1")
if(!is.null(which.marker) & length(which.marker)!=w.order)
stop(gettextf("the length of 'which.marker' is %d, should equal to %d (the number of 'w.order' defined)",length(which.marker),w.order))
if(length(y)!=nrow(data))
stop("'data' and 'y' must have the same length")
cl <- match.call()
n.snp <- ncol(data)
if(!is.null(which.marker)){
set <- list(which.marker)
}else if(w.order==1){
set <- lapply(1:n.snp,list)
}else if(w.order==2){
if(is.null(input.pval) & is.null(input.poolsize)){
set <- apply(t(combn(n.snp,2)), 1, list)
}else{
input.pval <- ifelse(is.null(input.pval), 1, input.pval)
input.poolsize <- ifelse(is.null(input.poolsize), n.snp, input.poolsize)
set.order1 <- lapply(1:n.snp, list)
result.order1 <- lapply(set.order1, x2, data, y, 1)
result.order1.all <- do.call(rbind, result.order1)
w.value.order1 <- result.order1.all[,2]*hf1[result.order1.all[,3],1]
p.value.order1 <- pchisq(w.value.order1, df = hf1[result.order1.all[,3],2], lower.tail = F)
result.order1.all[,2] <- w.value.order1
result.order1.all[,3] <- result.order1.all[,3] + 1
result.order1.all <- cbind(result.order1.all, p.value.order1)
l.select <- which(result.order1.all[,4] < input.pval)
if(length(l.select) > input.poolsize){
result.order1.rank <- result.order1.all[order(result.order1.all[,4], decreasing=F),]
l.select <- result.order1.rank[1:input.poolsize,1]
}
set <- apply(t(combn(l.select,2)), 1, list)
}
}
result <- lapply(set, x2, data, y, w.order)
result.all <- do.call(rbind, result)
x2.column <- ifelse(w.order == 1, 2, 3)
if(w.order == 1){
hf <- hf1
}else {
hf <- hf2
}
df.column <- x2.column + 1
pval.column <- x2.column + 2
w.value <- result.all[,x2.column] * hf[result.all[,df.column],1]
p.value <- pchisq(w.value, df = hf[result.all[,df.column],2], lower.tail = F)
result.all[,x2.column] <- w.value
result.all <- cbind(result.all, p.value)
k <- result.all[,df.column] + 1
result.all[,df.column] <- k
marker.names <- colnames(data)
result.all <- as.data.frame(result.all)
if(w.order == 2){
colnames(result.all) <- c("marker1", "marker2", "w", "k", "p-value")
if(!(is.null(input.pval) & is.null(input.poolsize)) & is.null(which.marker)){
result.all <- cbind(result.all, result.order1.all[result.all[,1],2:4], result.order1.all[result.all[,2],2:4])
colnames(result.all) <- c("marker1", "marker2", "w", "k", "pair.p-value", "marker1.w", "marker1.k", "marker1.p-value", "marker2.w", "marker2.k", "marker2.p-value")
}
result.all[,1] <- marker.names[result.all[,1]]
result.all[,2] <- marker.names[result.all[,2]]
}else{
result.all[,1] <- marker.names[result.all[,1]]
colnames(result.all) <- c("marker", "w", "k", "p-value")
}
if(!is.null(output.pval)){
l.output.pval <- which(result.all[,pval.column] < output.pval)
result.all <- result.all[l.output.pval,]
}
if(sort){
l.order <- order(result.all[,pval.column], decreasing = F)
result.all <- result.all[l.order,]
}
k <- c(2:(nrow(hf1) + 1))
hf1 <- cbind(k, hf1)
k <- c(2:(nrow(hf2) + 1))
hf2 <- cbind(k, hf2)
return(list(call = cl, order = w.order, results = result.all, hf1 = hf1, hf2 = hf2))
}
| /scratch/gouwar.j/cran-all/cranData/wtest/R/wtest.R |
#' W-test for High Order Interaction Analysis
#'
#' @description {This function performs the \code{W-test} to calculate high-order interactions in case-control studies
#' for categorical data sets. The test measures target variables' distributional difference between cases and controls via a combined
#' log of odds ratio. It follows a Chi-squared probability distribution with data-adaptive degrees of freedom. For high-order interaction
#' calculation, the user has 3 options: (1) calculate W-test of a set of SNPs, (2) calculate high-order interaction for a list of variables,
#' which p-values are smaller than a threshold (\code{input.pval}); (3) calculate high-order interaction exhaustively for all variables.
#' Output can be filtered by p-values, such that only sets with smaller p-value than a threshold (\code{output.pval}) will be returned.}
#' @param data a data frame or matrix containing genotypes in the columns. Genotypes should be coded as (0, 1, 2) or (0, 1).
#' @param y a numeric vector of 0 or 1.
#' @param w.order an integer value, indicating the order of high-way interactions. For example, \code{w.order} = 3 for three-way interaction analysis.
#' @param hf1 \emph{h} and \emph{f} values to calculate main effect, organized as a matrix, with columns (\emph{k}, \emph{h}, \emph{f}), \emph{k} = 2 to 3.
#' @param hf.high.order \emph{h} and \emph{f} values to calculate high-order interactions, organized as a matrix, with columns (\emph{k}, \emph{h}, \emph{f}), where \emph{k} is the number of genotype combinations of a set of SNPs.
#' @param which.marker a numeric vector indicating the column index of a set of SNPs to calculate. Default \code{which.marker} = NULL gives an exhaustively high-order interaction calculation.
#' @param output.pval a p-value threshold for filtering the output. If NULL, all the results will be listed; otherwise, the function will only output the results with p-values smaller than the \code{output.pval}.
#' @param sort a logical value indicating whether or not to sort the output by p-values in ascending order. Default = TRUE.
#' @param input.pval a p-value threshold to select markers for high-order interaction calculation, used only when \code{w.order} > 2. When specified, only markers with main effect p-value smaller than \code{input.pval} will be passed to interaction effect calculation. Default = 0.10. Set \code{input.pval} = NULL or 1 for exhaustive calculation.
#' @param input.poolsize an integer, with value less than the number of input variables. It is an optional filter to control the maximum number of variables to include in high-order interaction calculation, used only when \code{w.order} > 2. When specified, the function selects top \code{input.poolsize} number of variables to calculate interactions. It can be used separately or jointly with \code{input.pval}, whichever gives smaller input pool size. Default = 10. Set \code{input.poolsize} = NULL for exhaustive calculation. It can be useful for data exploration, when there are a large number of variables with extremely small main effect p-values.
#' @return An object \code{"wtest"} containing:
#'
#' \item{order}{the "w.order" specified.}
#'
#' \item{results}{When order > 2 and which.marker = NULL, the test results include: (information of a set) [SNPs name, W-value, k, p-value]; (Information of the first variable in the set) [W-value, k, p-value]; (Information of the second variable in the set) [W-value, k, p-value] ...}
#'
#' \item{hf1}{The \emph{h} and \emph{f} values used in main effect calculation.}
#'
#' \item{hf2}{The \emph{h} and \emph{f} values used in high-order interaction calculation.}
#'
#' @details {W-test is a model-free statistical test originally proposed to measure main effect or pairwise interactions in case-control studies with categorical variables.
#' It can be extended to high-order interaction detection by the \emph{wtest.high()} function. Theoretically, the test statistic follows a Chi-squared distribution with \emph{f} degrees of freedom. The data-adaptive degree of freedom \emph{f},
#' and a scalar \emph{h} in the test statistics allow the W-test to correct for distributional bias due to sparse data and small sample size.
#' Let \emph{k} be the number of columns of the 2 by \emph{k} contingency table formed by a single variable or a variable pair.
#' When the sample size is large and there is no population stratification, the \emph{h} and \emph{f} will approximate well to the theoretical
#' value \emph{h} = (\emph{k}-1)/\emph{k}, and \emph{f} = \emph{k}-1. When sample size is small and there is population stratification, the \emph{h} and
#' \emph{f} will vary to correct for distributional bias caused by the data structure.}
#'
#' {When \code{w.order} > 2, \code{wtest.high()} will automatically calculate the main effect first and then apply a pre-filter before calculating interactions.
#' This filtering avoids overloading the memory before having a better understanding of the data. The user can specify a smaller \code{input.pval} such as 0.05 or 0.001
#' for less output, or \code{input.pval} = 1 or NULL for exhaustive high-order interaction calculation. Another optional filter is \code{input.poolsize}. It will select the top \code{input.poolsize}
#' number of variables, ranked by p-values, to calculate high-order interactions. When used together with \code{input.pval}, the algorithm selects the smaller set in the high-order calculation.}
#'
#' @examples
#' data(diabetes.geno)
#' data(phenotype1)
#'
#' ## Step 1. HF Calculation
#' # Please note that parameter B is recommended to be greater than 400 for w.order = 1 or 2.
#' # For high order interaction analysis (w.order > 2), it is recommended to use default n.sample.
#' hf1 <- hf(data = diabetes.geno, w.order = 1, B = 100)
#' hf.high <- hf(data = diabetes.geno, w.order = 3, B = 30, n.marker = 10)
#'
#' ## Step 2. W-test Calculation
#' w1 <- wtest.high(diabetes.geno, phenotype1, w.order = 1, hf1 = hf1)
#' w3 <- wtest.high(diabetes.geno, phenotype1, w.order = 3, input.pval = 0.3,
#' input.poolsize = 50, output.pval = 0.5, hf1 = hf1, hf.high.order = hf.high)
#' w.set <- wtest.high(diabetes.geno, phenotype1, w.order = 3, which.marker = c(10,13,20),
#' hf.high.order = hf.high)
#' @export
#' @author Rui Sun, Maggie Haitian Wang
#' @references Maggie Haitian Wang, Rui Sun, Junfeng Guo, Haoyi Weng, Jack Lee, Inchi Hu, Pak Sham and Benny C.Y. Zee (2016). A fast and powerful W-test for pairwise epistasis testing. Nucleic Acids Research. doi:10.1093/nar/gkw347.
#' @seealso \code{\link{hf}}, \code{\link{w.diagnosis}}, \code{\link{w.qqplot}}
#' @importFrom utils combn
#' @importFrom stats pchisq
wtest.high<-function(data,y,w.order=3,hf1="default.hf1",hf.high.order="default.high",
which.marker=NULL,output.pval=NULL,sort=TRUE,input.pval=0.10,input.poolsize=10){
  # use the default (h, f) values when none are supplied; otherwise keep the (h, f) columns
  suppressWarnings(if(typeof(hf1)=="character"){
    hf1=array(c(0.5,0.667,1,2),dim=c(2,2))}else{hf1=hf1[,2:3]})
  suppressWarnings(if(typeof(hf.high.order)=="character"){
    hf.high.order=array(c(c(1:(3^w.order-1))/c(2:3^w.order),1:(3^w.order-1)),dim=c(3^w.order-1,2))
  }else{hf.high.order=hf.high.order[,2:3]})
if(is.data.frame(data))
data<-as.matrix(data)
if(any(is.na(data)))
stop("NA occurs in data")
if(!all(data %in% c(0,1,2)))
stop("all the genotypes in 'data' must be 0, 1 or 2")
if(any(is.na(y)))
stop("NA occurs in y")
  if(!all(y %in% c(0,1)))
    stop("all the values in 'y' must be 0 or 1")
if(!is.null(which.marker) & length(which.marker)!=w.order)
stop(gettextf("the length of 'which.marker' is %d, should equal to %d (the number of 'w.order' defined)",length(which.marker),w.order))
  if(length(y)!=nrow(data))
    stop("the length of 'y' must equal the number of rows of 'data'")
cl <- match.call()
n.snp<-ncol(data)
if(!is.null(which.marker)){
set<-list(which.marker)
}else if(w.order==1){
set<-lapply(1:n.snp,list)
}else if(w.order > 1){
if(is.null(input.pval) & is.null(input.poolsize)){
set<-apply(t(combn(n.snp,w.order)),1,list)
}else{
input.pval<-ifelse(is.null(input.pval),1,input.pval)
input.poolsize<-ifelse(is.null(input.poolsize),n.snp,input.poolsize)
set.order1<-lapply(1:n.snp,list)
result.order1<-lapply(set.order1,x2,data,y,1)
result.order1.all<-do.call(rbind,result.order1)
w.value.order1<-result.order1.all[,2]*hf1[result.order1.all[,3],1]
p.value.order1<-pchisq(w.value.order1,df=hf1[result.order1.all[,3],2],lower.tail=F)
result.order1.all[,2]<-w.value.order1
result.order1.all[,3]<-result.order1.all[,3]+1
result.order1.all<-cbind(result.order1.all,p.value.order1)
l.select<-which(result.order1.all[,4]<input.pval)
if(length(l.select)>input.poolsize){
result.order1.rank<-result.order1.all[order(result.order1.all[,4],decreasing=F),]
l.select<-result.order1.rank[1:input.poolsize,1]
}
set<-apply(t(combn(l.select,w.order)),1,list)
}
}
result<-lapply(set,x2.high,data,y,w.order)
result.all<-do.call(rbind,result)
if(w.order==1){
hf<-hf1
}else {
hf<-hf.high.order
}
x2.column<-w.order+1
df.column<-w.order+2
pval.column<-w.order+3
w.value<-result.all[,x2.column]*hf[result.all[,df.column],1]
p.value<-pchisq(w.value,df=hf[result.all[,df.column],2],lower.tail=F)
adjusted.p.value <- p.value*nrow(result.all)
adjusted.p.value[adjusted.p.value>1] <- 1
result.all[,x2.column]<-w.value
result.all<-cbind(result.all,p.value,adjusted.p.value)
k<-result.all[,df.column]+1
result.all[,df.column]<-k
marker.names<-colnames(data)
result.all<-as.data.frame(result.all)
if(w.order>1){
snps.names<-paste0("marker",c(1:w.order))
colnames.result.all<-c(snps.names,"w","k","p-value","adjusted.p-value")
if(!(is.null(input.pval) & is.null(input.poolsize)) & is.null(which.marker)){
for(i in 1:w.order){
result.all<-cbind(result.all,result.order1.all[result.all[,i],2:4])
main.snp.name<-c(paste0("marker",i,".w"),paste0("marker",i,".k"),paste0("marker",i,".p-value"))
colnames.result.all<-c(colnames.result.all, main.snp.name)
}
}
colnames(result.all)<-colnames.result.all
for(j in 1:w.order){
result.all[,j]<-marker.names[result.all[,j]]
}
}else{
result.all[,1]<-marker.names[result.all[,1]]
colnames(result.all)<-c("marker","w","k","p-value","adjusted.p-value")
}
if(!is.null(output.pval)){
l.output.pval<-which(result.all[,pval.column]<output.pval)
result.all<-result.all[l.output.pval,]
}
if(sort){
l.order<-order(result.all[,pval.column],decreasing=F)
result.all<-result.all[l.order,]
}
k<-c(2:(nrow(hf1)+1))
hf1<-cbind(k,hf1)
k<-c(2:(nrow(hf.high.order)+1))
hf.high.order<-cbind(k,hf.high.order)
if(!is.null(which.marker)){
result.all <- result.all[,-ncol(result.all)]
}
  return(list(call = cl, order = w.order, results = result.all, hf1 = hf1, hf2 = hf.high.order))
}
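# A minimal sketch of how the reported p-values relate to W and k (the values
# of w and k below are hypothetical): the raw chi-squared-type statistic is
# multiplied by h and referred to a chi-squared distribution with f degrees of
# freedom, where (h, f) come from the row of the hf table matching k - 1.
hf1 <- cbind(h = c(0.5, 0.667), f = 1:2)    # default main-effect (h, f) values
w <- 5.3; k <- 3                            # a hypothetical W value and its k
pchisq(w, df = hf1[k - 1, "f"], lower.tail = FALSE)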
| /scratch/gouwar.j/cran-all/cranData/wtest/R/wtest.high.R |
#' W-test for Gene-methylation Interaction Analysis
#' @description {Calculates the cis-gene-methylation interaction of (SNP, CpG) pairs located within a user-defined window, and can be run in a genome-wide manner. The output can be filtered by p-values, such that only pairs with a
#' p-value smaller than the threshold (\code{output.pval}) will be returned.}
#' @param geno a data frame or matrix containing genotypes in the columns and subjects in the rows. Genotypes should be coded as (0, 1, 2) or (0, 1). SNP names should be stored as column names of the data.
#' @param meth a data frame or matrix containing methylation data in the columns. Methylation data should be recoded as (0, 1, 2) or (0, 1). Names of CpG sites should be stored as column names of the data.
#' @param y a numeric vector of 0 or 1.
#' @param geno.pos a data frame containing SNP names and positions in two columns.
#' @param meth.pos a data frame containing CpG names and positions in two columns.
#' @param window.size a numeric value specifying the maximum genomic distance. Interaction effects of SNP-CpG pairs located within this distance will be evaluated exhaustively.
#' @param hf \emph{h} and \emph{f} values used to calculate gene-methylation interaction associations, organized as a matrix with columns (\emph{k}, \emph{h}, \emph{f}), \emph{k} = 2 to 9.
#' @param output.pval a p-value threshold for filtering the output. If NULL, all results will be listed; otherwise, the function will only output the results with p-values smaller than \code{output.pval}.
#' @param sort a logical value indicating whether or not to sort the output by p-values in ascending order. Default = TRUE.
#' @param which.marker a vector indicating the column index of a SNP-CpG pair to calculate. Default \code{which.marker} = NULL means interaction pairs located within \code{window.size} will be calculated exhaustively.
#' @return An object \code{"wtest.snps.meth"} containing:
#'
#' \item{results}{The test results include: SNP name, CpG name, SNP position, CpG position, W value, \emph{k}, and p-value.}
#'
#' \item{hf}{The \emph{h} and \emph{f} values used for each \emph{k} in the pairwise calculation, where \emph{k} = 2 to 9.}
#'
#' @details {This function calculates the cis-gene-methylation interaction of (SNP, CpG) pairs located within a user-defined window, and can be run in a genome-wide manner. The output can be filtered by p-values, such that only pairs with a
#' p-value smaller than the threshold (\code{output.pval}) will be returned.}
#'
#' @examples
#' data(SNP.pos)
#' data(CpG.pos)
#' data(genotype)
#' data(methylation)
#' data(phenotype2)
#'
#' w <- 13000
#'
#' # Recode methylation data
#' methylation <- methylation.recode(methylation)
#'
#' ## Step 1. HF Calculation.
#' # Please note that parameter B is recommended to be greater than 400.
#' hf.pair <- hf.snps.meth(B = 80, geno = genotype, meth = methylation, y = phenotype2,
#' geno.pos = SNP.pos, meth.pos = CpG.pos, window.size = w)
#'
#' ## Step 2. Application
#' result <- wtest.snps.meth(geno = genotype, meth = methylation, y = phenotype2, geno.pos = SNP.pos,
#' meth.pos = CpG.pos, window.size = w, hf = hf.pair, output.pval = 0.1)
#'
#' @export
#' @author Rui Sun, Maggie Haitian Wang
#' @references Maggie Haitian Wang, Rui Sun, Junfeng Guo, Haoyi Weng, Jack Lee, Inchi Hu, Pak Sham and Benny C.Y. Zee (2016). A fast and powerful W-test for pairwise epistasis testing. Nucleic Acids Research. doi:10.1093/nar/gkw347.
#' @seealso \code{\link{wtest}}, \code{\link{hf.snps.meth}}
#' @importFrom utils combn
#' @importFrom stats pchisq
wtest.snps.meth <- function(geno, meth, y, geno.pos, meth.pos, window.size = 1e4, hf = "default.hf",
output.pval = NULL, sort = TRUE, which.marker = NULL){
suppressWarnings(if(typeof(hf) == "character"){hf = array(c(0.5,0.667,0.75,0.8,0.833,0.857,0.875,0.889,1:8), dim = c(8,2))}else{hf = hf[,2:3]})
if(is.data.frame(geno))
geno <- as.matrix(geno)
  if(any(is.na(geno)))
    stop("NA occurs in 'geno'")
  if(is.data.frame(meth))
    meth <- as.matrix(meth)
  if(any(is.na(meth)))
    stop("NA occurs in 'meth'")
  if(!all(geno %in% c(0,1,2)))
    stop("all the genotypes in 'geno' must be 0, 1 or 2")
  if(any(is.na(y)))
    stop("NA occurs in y")
  if(!all(y %in% c(0,1)))
    stop("all the values in 'y' must be 0 or 1")
  if(length(y)!=nrow(geno) || nrow(meth)!=nrow(geno))
    stop("'geno', 'meth' and 'y' must have the same number of rows")
cl <- match.call()
snp.names <- colnames(geno)
cpg.names <- colnames(meth)
l1 <- match(snp.names, geno.pos[,1])
l2 <- match(cpg.names, meth.pos[,1])
if(any(is.na(l1)))
stop("missing SNP position exists")
if(any(is.na(l2)))
stop("missing CpG position exists")
geno.pos <- geno.pos[l1,]
meth.pos <- meth.pos[l2,]
if(!is.null(which.marker)){
set <- list(which.marker)
}else{
    # enumerate all cis (SNP, CpG) pairs whose positions lie within window.size
    index.set<-data.frame()
    for(i in 1:nrow(geno.pos)){
      index <- which(abs(geno.pos[,2][i] - meth.pos[,2]) <= window.size)
      if(length(index)){
        index.i <- cbind(i, index)
        index.set <- rbind(index.set, index.i)
      }
    }
set <- apply(index.set, 1, list)
}
n.snp <- ncol(geno)
n.cpg <- ncol(meth)
result <- lapply(set, x2.set, geno, meth, y)
result.all <- do.call(rbind,result)
x2.column <- 3
df.column <- x2.column+1
pval.column <- x2.column+2
w.value <- result.all[,x2.column] * hf[result.all[,df.column],1]
p.value <- pchisq(w.value, df = hf[result.all[,df.column],2], lower.tail = F)
result.all[,x2.column] <- w.value
result.all <- cbind(result.all,p.value)
k <- result.all[,df.column]+1
result.all[,df.column] <- k
result.all <- as.data.frame(result.all)
colnames(result.all) <- c("SNP","CpG","w","k","p-value")
result.all[,1] <- snp.names[result.all[,1]]
result.all[,2] <- cpg.names[result.all[,2]]
if(!is.null(output.pval)){
l.output.pval <- which(result.all[,pval.column] < output.pval)
result.all <- result.all[l.output.pval,]
}
if(sort){
l.order <- order(result.all[,pval.column], decreasing=F)
result.all <- result.all[l.order,]
}
result.all$SNP.BP<-geno.pos[match(result.all[,1],geno.pos[,1]),2]
result.all$CpG.BP<-meth.pos[match(result.all[,2],meth.pos[,1]),2]
result.all<-result.all[,c(1,2,6,7,3,4,5)]
k <- c(2:(nrow(hf)+1))
hf <- cbind(k,hf)
colnames(hf)<-c("k","h","f")
return(list(call = cl, results = result.all, hf = hf))
}
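# A toy sketch of the cis-window pairing rule used above, with hypothetical
# positions: a (SNP, CpG) pair enters the interaction test only when
# |SNP position - CpG position| <= window.size.
snp.bp <- c(rs1 = 100, rs2 = 5000)
cpg.bp <- c(cg1 = 150, cg2 = 20000)
window.size <- 1000
outer(snp.bp, cpg.bp, function(a, b) abs(a - b) <= window.size)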
| /scratch/gouwar.j/cran-all/cranData/wtest/R/wtest.snps.meth.R |
x2<-function(set,data,y,w.order){
  set<- unlist(set)
  snps<-data[,set]
  # form the genotype-by-phenotype contingency table (k rows, 2 columns)
  if(w.order==1){
    O.table<-table.e1(snps,y)
    O.original<-array(O.table,dim=c(3,2))
  }else{
    O.table<-table.e2(snps[,1],snps[,2],y)
    O.original<-array(O.table,dim=c(9,2))
  }
  # drop empty rows; the degrees of freedom are (number of non-empty rows) - 1
  O<-O.original[rowSums(O.original)!=0,]
  df<-NROW(O)-1
  if(df == 0)
    stop("genotypes should have at least two levels!")
  # continuity correction when zero cells are present
  if(0 %in% O)
    O<-O+0.5
  o<-O
  # column-wise conditional distributions and the odds ratio of each row
  O<-t(t(O)/colSums(O))
  O.p<-O/(1-O)
  OR<-O.p[,2]/O.p[,1]
  # Woolf-type standard error of each log odds ratio
  o<-cbind(o,t(colSums(o)-t(o)))
  sd<-sqrt(rowSums(1/o))
  x2.value=sum((log(OR)/sd)^2)
  x2.result<-c(unlist(set),x2.value,df)
  return(x2.result)
}
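# A step-by-step sketch of the statistic computed by x2() above, on a toy
# 3 x 2 genotype-by-phenotype table (hypothetical counts; rows are genotype
# categories, columns are y = 0 and y = 1).
O <- matrix(c(30, 25, 10, 20, 28, 17), ncol = 2)
o <- cbind(O, t(colSums(O) - t(O)))   # cell counts and their column complements
P <- t(t(O) / colSums(O))             # conditional distributions given y
OR <- (P[, 2] / (1 - P[, 2])) / (P[, 1] / (1 - P[, 1]))
se <- sqrt(rowSums(1 / o))            # Woolf-type SE of each log odds ratio
sum((log(OR) / se)^2)                 # the chi-squared-type statistic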
| /scratch/gouwar.j/cran-all/cranData/wtest/R/x2.R |
x2.high<-function(set,data,y,w.order){
set<- unlist(set)
snps<-data[,set]
  if(w.order==1){
    O.table<-table.e1(snps,y)
    O.original<-array(O.table,dim=c(3,2))
  }else if(w.order==2){
    O.table<-table.e2(snps[,1],snps[,2],y)
    O.original<-array(O.table,dim=c(9,2))
  }else{
    if(nrow(as.data.frame(y))!=nrow(data)){
      y=t(y)
    }
    snps.table <- table(cbind(as.data.frame(snps),y))
    O.original <- array(snps.table[1:length(snps.table)], dim=(c(length(snps.table)/2,2)))
  }
  O<-O.original[rowSums(O.original)!=0,]
df<-NROW(O)-1
if(df == 0)
stop("genotypes should have at least two levels!")
if(0 %in% O)
O<-O+0.5
o<-O
O<-t(t(O)/colSums(O))
O.p<-O/(1-O)
OR<-O.p[,2]/O.p[,1]
o<-cbind(o,t(colSums(o)-t(o)))
sd<-sqrt(rowSums(1/o))
x2.value=sum((log(OR)/sd)^2)
x2.result<-c(unlist(set),x2.value,df)
return(x2.result)
}
| /scratch/gouwar.j/cran-all/cranData/wtest/R/x2.high.R |
x2.set<-function(set,data,data.methylation,y){
set<- unlist(set)
snps<-data[,set[1]]
cpg<-data.methylation[,set[2]]
O.table<-table.e2(snps,cpg,y)
  O.original<-array(O.table,dim=c(9,2))
  O<-O.original[rowSums(O.original)!=0,]
df<-NROW(O)-1
if(df == 0)
stop("genotypes should have at least two levels!")
if(0 %in% O)
O<-O+0.5
o<-O
O<-t(t(O)/colSums(O))
O.p<-O/(1-O)
OR<-O.p[,2]/O.p[,1]
o<-cbind(o,t(colSums(o)-t(o)))
sd<-sqrt(rowSums(1/o))
x2.value=sum((log(OR)/sd)^2)
x2.result<-c(unlist(set),x2.value,df)
return(x2.result)
}
| /scratch/gouwar.j/cran-all/cranData/wtest/R/x2.set.R |
#' @title Auto-Covariance and Correlation Functions
#' @description The ACF function computes the estimated
#' autocovariance or autocorrelation for both univariate and multivariate cases.
#' @author Yunxiang Zhang
#' @param x A \code{matrix} with dimensions \eqn{N \times S}{N x S} or N observations and S processes
#' @param lagmax An \code{integer} indicating the maximum lag.
#' @param cor A \code{bool} indicating whether the correlation
#' (\code{TRUE}) or covariance (\code{FALSE}) should be computed.
#' @param demean A \code{bool} indicating whether the data should be detrended
#' (\code{TRUE}) or not (\code{FALSE})
#' @return An \code{array} of dimensions \eqn{N \times S \times S}{N x S x S}.
#' @details
#' \code{lagmax} default is \eqn{10*log10(N/m)} where \eqn{N} is the number of
#' observations and \eqn{m} is the number of series being compared. If
#' \code{lagmax} supplied is greater than the number of observations, then one
#' less than the total will be taken.
#' @export
#' @examples
#' # Get Autocorrelation
#' m = ACF(datasets::AirPassengers)
#'
#' # Get Autocovariance and do not remove trend from signal
#' m = ACF(datasets::AirPassengers, cor = FALSE, demean = FALSE)
#' @importFrom stats is.ts
#' @importFrom stats acf
ACF = function(x, lagmax = 0, cor = TRUE, demean = TRUE){
  if (sum(class(x) %in% "gts") == 1){
    x = as.numeric(x)
  }
  # Change the data to matrix form
  if(is.ts(x) || is.atomic(x)){
    x2 = data.matrix(x)
  }
  # Get the ACF values of the data; lagmax = 0 falls back to the stats::acf
  # default of 10*log10(N/m)
  acfe = acf(x, lag.max = if (lagmax > 0) lagmax else NULL,
             type = if (cor) "correlation" else "covariance",
             demean = demean, plot = FALSE)
  acfe = acfe$acf
  # Get the data name
  varName = deparse(substitute(x))
  # Adjust the name for data
  dimnames(acfe) = list(seq_len(nrow(acfe))-1, "ACF", varName)
  if (is.null(attr(x, "data_name"))){
    acfe = structure(acfe, n = nrow(x2), class = c("auto_corr", "array"))
  }else{
    acfe = structure(acfe, n = nrow(x2), data_name = attr(x, "data_name"), class = c("auto_corr", "array"))
  }
  acfe
}
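# A minimal usage sketch (assuming the wv package is loaded): for the same
# maximum lag, the autocorrelations returned by ACF() should match stats::acf().
m <- ACF(datasets::AirPassengers, lagmax = 10)
a <- acf(datasets::AirPassengers, lag.max = 10, plot = FALSE)
all.equal(as.numeric(m), as.numeric(a$acf))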
#' @title Plot Auto-Covariance and Correlation Functions
#' @description Plots the estimated autocovariances or autocorrelations
#' computed by \code{\link{ACF}}, with an approximate confidence region.
#' @author Yunxiang Zhang
#' @param x An \code{"ACF"} object from \code{\link{ACF}}.
#' @param show.ci A \code{bool} indicating whether to show the confidence region.
#' @param alpha A \code{double} indicating the significance level used for the confidence region. Default is 0.05.
#' @param main A \code{string} overriding the default plot title.
#' @param ... Additional parameters.
#' @return A plot of the estimated autocorrelations with an approximate confidence region.
#' @rdname plot.auto_corr
#' @keywords internal
#' @export
#' @examples
#' # Calculate the Autocorrelation
#' m = ACF(datasets::AirPassengers)
#'
#' # Plot with 95% CI
#' plot(m)
#'
#' # Plot with 90% CI
#' plot(m, alpha = 0.10)
#'
#' # Plot without 95% CI
#' plot(m, show.ci = FALSE)
#' @importFrom grDevices rgb
plot.auto_corr = function(x, show.ci = TRUE, alpha = 0.05, main = NULL, ...){
  # TO ADD AS INPUTS
  xlab = "Lags"
  ylab = "ACF"
  col_ci = rgb(0, 0.6, 1, 0.2)
  # Quiet the warnings...
  Lag = xmin = xmax = ymin = ymax = NULL
# Wide to long array transform
x2 = as.data.frame.table(x, responseName = "ACF")
colnames(x2) = c("Lag", "Signal X", "Signal Y", "ACF")
# Remove character cast
x2$Lag = as.numeric(x2$Lag)
# Range
x_range = range(x2$Lag)
if (show.ci == TRUE){
n = attr(x,"n")
mult = qnorm(1-alpha/2)
y_range = range(c(x2$ACF, 1/sqrt(n)*mult*c(-1,1)))
}else{
y_range = range(0:1)
}
x_ticks = seq(x_range[1], x_range[2], by = 1)
y_ticks = seq(y_range[1], y_range[2], by = 0.05)
old_pars = par(mar = c(5.1, 5.1, 1, 2.1))
on.exit(par(old_pars))
# Title
  if (is.null(main)){
    if (is.null(attr(x, "data_name"))){
      main = paste0("ACF of ", as.character((x2$`Signal Y`)[1]))
    }else{
      main = paste0("ACF of ", attr(x, "data_name"))
    }
  }
# Main plot
plot(NA, xlim = c(1, max(x2$Lag)), ylim = y_range,
xlab = xlab, ylab = ylab, xaxt = 'n',
yaxt = 'n', bty = "n", ann = FALSE)
win_dim = par("usr")
par(new = TRUE)
plot(NA, xlim = c(0, max(x2$Lag)), ylim = c(win_dim[3], win_dim[4] + 0.09*(win_dim[4] - win_dim[3])),
xlab = xlab, ylab = ylab, xaxt = 'n', yaxt = 'n', bty = "n")
win_dim = par("usr")
# Add grid
grid(NULL, NULL, lty = 1, col = "grey95")
# Add title
x_vec = c(win_dim[1], win_dim[2], win_dim[2], win_dim[1])
y_vec = c(win_dim[4], win_dim[4],
win_dim[4] - 0.09*(win_dim[4] - win_dim[3]),
win_dim[4] - 0.09*(win_dim[4] - win_dim[3]))
polygon(x_vec, y_vec, col = "grey95", border = NA)
text(x = mean(c(win_dim[1], win_dim[2])),
y = (win_dim[4] - 0.09/2*(win_dim[4] - win_dim[3])),
main)
# Add axes and box
lines(x_vec[1:2], rep((win_dim[4] - 0.09*(win_dim[4] - win_dim[3])),2), col = 1)
box()
axis(1, padj = 0.3)
y_axis = axis(2, labels = FALSE, tick = FALSE)
y_axis = y_axis[y_axis < (win_dim[4] - 0.09*(win_dim[4] - win_dim[3]))]
axis(2, padj = -0.2, at = y_axis)
abline(h = 0, lty = 1, lwd = 2)
# Plot CI
if(show.ci){
clim0 = 1/sqrt(n)*mult
rect(xleft = -2, ybottom = -clim0, xright = 2*x_range[2],
ytop = clim0, col = col_ci, lwd = 0)
}
# Plot ACF
segments(x0 = x_ticks, y0 = rep(0, x_range[2]), x1 = x_ticks, y1 = x2$ACF, lty = 1, lwd = 1)
}
| /scratch/gouwar.j/cran-all/cranData/wv/R/ACF.R |
#' @title Calculate Theoretical Allan Variance for Stationary First-Order Autoregressive
#' (AR1) Process
#' @description
#' This function allows us to calculate the theoretical allan variance for stationary
#' first-order autoregressive (AR1) process.
#' @export
#' @usage av_ar1(n, phi, sigma2)
#' @param n An \code{integer} value for the size of the cluster.
#' @param phi A \code{double} value for the autocorrelation parameter \eqn{\phi}{phi}.
#' @param sigma2 A \code{double} value for the variance parameter \eqn{\sigma ^2}{sigma^2}.
#' @return A \code{double} indicating the theoretical allan variance for AR1 process.
#' @note This function is based on the calculation of the theoretical allan variance
#' for stationary AR1 process raised in "Allan Variance of Time Series Models for
#' Measurement Data" by Nien Fan Zhang.) This calculation
#' is fundamental and necessary for the study in "A Study of the Allan Variance for Constant-Mean
#' Non-Stationary Processes" by Xu et al. (IEEE Signal Processing Letters, 2017).
#' @author Yuming Zhang
#' @examples
#' av1 = av_ar1(n = 5, phi = 0.9, sigma2 = 1)
#' av2 = av_ar1(n = 8, phi = 0.5, sigma2 = 2)
av_ar1 = function(n, phi, sigma2){
numerator = n-3*phi-n*phi^2+4*phi^(n+1)-phi^(2*n+1)
denominator = n^2 * (1-phi)^2 * (1-phi^2)
result = numerator / denominator * sigma2
return(result)
}
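# A minimal simulation check of the closed form above (a sketch; it assumes
# sigma2 is the innovation variance of the AR(1), which is consistent with the
# n = 1 case of the formula, where av_ar1(1, phi, sigma2) = sigma2 / (1 + phi)).
set.seed(1)
phi <- 0.5; sigma2 <- 1; n <- 8
x <- arima.sim(list(ar = phi), n = 2^19, sd = sqrt(sigma2))
m <- colMeans(matrix(x[1:(n * (length(x) %/% n))], nrow = n))
c(empirical = mean(diff(m)^2) / 2, theoretical = av_ar1(n, phi, sigma2))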
#' @title Calculate Theoretical Allan Variance for Stationary White Noise Process
#' @description
#' This function allows us to calculate the theoretical allan variance for stationary
#' white noise process.
#' @export
#' @usage av_wn(sigma2, n)
#' @param sigma2 A \code{double} value for the variance parameter \eqn{\sigma ^2}{sigma^2}.
#' @param n An \code{integer} value for the size of the cluster.
#' @return A \code{double} indicating the theoretical allan variance for the white noise
#' process.
#' @note This function is based on the calculation of the theoretical allan variance
#' for stationary white noise processes given in "Allan Variance of Time Series Models for
#' Measurement Data" by Nien Fan Zhang. This calculation
#' is fundamental and necessary for the study in "A Study of the Allan Variance for Constant-Mean
#' Non-Stationary Processes" by Xu et al. (IEEE Signal Processing Letters, 2017).
#' @author Yuming Zhang
#' @examples
#' av1 = av_wn(sigma2 = 1, n = 5)
#' av2 = av_wn(sigma2 = 2, n = 8)
av_wn = function(sigma2, n){
result = sigma2/n
return(result)
}
| /scratch/gouwar.j/cran-all/cranData/wv/R/ANVS.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @title Discrete Wavelet Transform
#' @description Calculation of the coefficients for the discrete wavelet transformation.
#' @param x A \code{vector} with dimensions \eqn{N\times 1}{N x 1}.
#' @param filter_name A \code{string} indicating the filter.
#' @param nlevels An \code{integer}, \eqn{J}, indicating the level of the decomposition.
#' @return y A \code{field<vec>} that contains the wavelet coefficients for each decomposition level
#' @details
#' Performs a level J decomposition of the time series using the pyramid algorithm
#' @author James Balamuta and Justin Lee
#' @keywords internal
dwt_cpp <- function(x, filter_name, nlevels) {
.Call('_wv_dwt_cpp', PACKAGE = 'wv', x, filter_name, nlevels)
}
#' @title Maximum Overlap Discrete Wavelet Transform
#' @description
#' Calculation of the coefficients for the discrete wavelet transformation
#' @inheritParams dwt_cpp
#' @return y A \code{field<vec>} that contains the wavelet coefficients for each decomposition level
#' @keywords internal
#' @details
#' Performs a level J decomposition of the time series using the pyramid algorithm.
#' Use this implementation to supply custom parameters instead of modwt(x),
#' which serves as a wrapper function.
#' @author James Balamuta and Justin Lee
#' @keywords internal
modwt_cpp <- function(x, filter_name, nlevels) {
.Call('_wv_modwt_cpp', PACKAGE = 'wv', x, filter_name, nlevels)
}
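# An illustrative sketch, not the internal implementation: for the Haar filter,
# the level-1 MODWT detail coefficients at interior points are the scaled
# lagged differences (x_t - x_{t-1}) / 2 (up to the filter's sign and boundary
# conventions); the pyramid algorithm handles the boundary terms circularly.
x <- as.numeric(datasets::AirPassengers)
w1_interior <- diff(x) / 2
head(w1_interior)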
#' ARMA process to WV
#'
#' This function computes the Haar Wavelet Variance of an ARMA process
#' @param ar A \code{vec} containing the coefficients of the AR process
#' @param ma A \code{vec} containing the coefficients of the MA process
#' @param sigma2 A \code{double} containing the residual variance
#' @template misc/tau
#' @return A \code{vec} containing the wavelet variance of the ARMA process.
#' @details
#' The function is a generic implementation that requires a stationary theoretical autocorrelation function (ACF)
#' and the ability to transform an ARMA(\eqn{p},\eqn{q}) process into an MA(\eqn{\infty}{infinity}) (e.g. infinite MA process).
#' @template to_wv/haar_arma
#' @backref src/process_to_wv.cpp
#' @backref src/process_to_wv.h
#' @export
#' @seealso \code{\link{ARMAtoMA_cpp}}, \code{\link{ARMAacf_cpp}}, and \code{\link{arma11_to_wv}}
arma_to_wv <- function(ar, ma, sigma2, tau) {
.Call('_wv_arma_to_wv', PACKAGE = 'wv', ar, ma, sigma2, tau)
}
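# A small consistency sketch (assuming the wv package is loaded): the generic
# arma_to_wv() should agree with the specialized arma11_to_wv() on an
# ARMA(1,1) process.
tau <- 2^(1:6)
wv_gen <- arma_to_wv(ar = 0.8, ma = 0.1, sigma2 = 1, tau = tau)
wv_a11 <- arma11_to_wv(phi = 0.8, theta = 0.1, sigma2 = 1, tau = tau)
all.equal(as.numeric(wv_gen), as.numeric(wv_a11))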
#' @title Helper Function for ARMA to WV Approximation
#' @description Indicates where the minimum ARMAacf value is and returns that as an index.
#' @param ar A \code{vec} containing the coefficients of the AR process
#' @param ma A \code{vec} containing the coefficients of the MA process
#' @param last_tau An \code{int} the Jth scale of 2^(1:J)
#' @param alpha A \code{double} indicating the cutoff.
#' @return A \code{vec} containing the wavelet variance of the ARMA process.
#' @keywords internal
#' @seealso \code{\link{arma_to_wv_app}}
acf_sum <- function(ar, ma, last_tau, alpha = 0.99) {
.Call('_wv_acf_sum', PACKAGE = 'wv', ar, ma, last_tau, alpha)
}
#' ARMA process to WV Approximation
#'
#' This function computes the (haar) WV of an ARMA process
#' @param ar A \code{vec} containing the coefficients of the AR process
#' @param ma A \code{vec} containing the coefficients of the MA process
#' @param sigma2 A \code{double} containing the residual variance
#' @template misc/tau
#' @param alpha A \code{double} indicating the cutoff.
#' @return A \code{vec} containing the wavelet variance of the ARMA process.
#' @keywords internal
#' @details
#' This function provides an approximation to the \code{\link{arma_to_wv}} as computation times
#' were previously a concern. However, this is no longer the case and, thus, this has been left
#' in for the curious soul to discover...
#' @template to_wv/haar_arma
#' @template misc/haar_wv_formulae_link
#' @backref src/process_to_wv.cpp
#' @backref src/process_to_wv.h
#' @seealso \code{\link{ARMAtoMA_cpp}}, \code{\link{ARMAacf_cpp}}, \code{\link{acf_sum}} and \code{\link{arma_to_wv}}
arma_to_wv_app <- function(ar, ma, sigma2, tau, alpha = 0.9999) {
.Call('_wv_arma_to_wv_app', PACKAGE = 'wv', ar, ma, sigma2, tau, alpha)
}
#' ARMA(1,1) to WV
#'
#' This function computes the WV (haar) of an Autoregressive Order 1 - Moving Average Order 1 (ARMA(1,1)) process.
#' @param phi A \code{double} corresponding to the autoregressive term.
#' @param theta A \code{double} corresponding to the moving average term.
#' @param sigma2 A \code{double} the variance of the process.
#' @template misc/tau
#' @return A \code{vec} containing the wavelet variance of the ARMA(1,1) process.
#' @details
#' This function is significantly faster than its generalized counter part
#' \code{\link{arma_to_wv}}
#'
#' @template to_wv/haar_arma11
#' @backref src/process_to_wv.cpp
#' @backref src/process_to_wv.h
#' @seealso \code{\link{arma_to_wv}}
#' @export
arma11_to_wv <- function(phi, theta, sigma2, tau) {
.Call('_wv_arma11_to_wv', PACKAGE = 'wv', phi, theta, sigma2, tau)
}
#' AR(1) process to WV
#'
#' This function computes the Haar WV of an AR(1) process
#' @param phi A \code{double} that is the phi term of the AR(1) process
#' @param sigma2 A \code{double} corresponding to variance of AR(1) process
#' @template misc/tau
#' @return A \code{vec} containing the wavelet variance of the AR(1) process.
#' @details
#' This function is significantly faster than its generalized counter part
#' \code{\link{arma_to_wv}}.
#'
#' @template to_wv/haar_ar1
#' @backref src/process_to_wv.cpp
#' @backref src/process_to_wv.h
#' @seealso \code{\link{arma_to_wv}}, \code{\link{arma11_to_wv}}
#' @export
ar1_to_wv <- function(phi, sigma2, tau) {
.Call('_wv_ar1_to_wv', PACKAGE = 'wv', phi, sigma2, tau)
}
#' Moving Average Order 1 (MA(1)) to WV
#'
#' This function computes the WV (haar) of a Moving Average order 1 (MA1) process.
#' @param theta A \code{double} corresponding to the moving average term.
#' @param sigma2 A \code{double} the variance of the process.
#' @template misc/tau
#' @return A \code{vec} containing the wavelet variance of the MA(1) process.
#' @details
#' This function is significantly faster than its generalized counter part
#' \code{\link{arma_to_wv}}.
#'
#' @template to_wv/haar_ma1
#' @backref src/process_to_wv.cpp
#' @backref src/process_to_wv.h
#' @seealso \code{\link{arma_to_wv}}, \code{\link{arma11_to_wv}}
#' @export
ma1_to_wv <- function(theta, sigma2, tau) {
.Call('_wv_ma1_to_wv', PACKAGE = 'wv', theta, sigma2, tau)
}
#' Quantisation Noise (QN) to WV
#'
#' This function compute the Haar WV of a Quantisation Noise (QN) process
#' @param q2 A \code{double} corresponding to variance of drift
#' @template misc/tau
#' @return A \code{vec} containing the wavelet variance of the QN.
#' @template to_wv/haar_qn
#' @backref src/process_to_wv.cpp
#' @backref src/process_to_wv.h
#' @export
qn_to_wv <- function(q2, tau) {
.Call('_wv_qn_to_wv', PACKAGE = 'wv', q2, tau)
}
#' @title Gaussian White Noise to WV
#' @description This function compute the Haar WV of a Gaussian White Noise process
#' @param sigma2 A \code{double} corresponding to variance of WN
#' @template misc/tau
#' @return A \code{vec} containing the wavelet variance of the white noise.
#' @template to_wv/haar_wn
#' @export
wn_to_wv <- function(sigma2, tau) {
.Call('_wv_wn_to_wv', PACKAGE = 'wv', sigma2, tau)
}
#' @title Random Walk to WV
#' @description This function compute the WV (haar) of a Random Walk process
#' @param gamma2 A \code{double} corresponding to variance of RW
#' @template misc/tau
#' @return A \code{vec} containing the wavelet variance of the random walk.
#' @template to_wv/haar_rw
#' @export
rw_to_wv <- function(gamma2, tau) {
.Call('_wv_rw_to_wv', PACKAGE = 'wv', gamma2, tau)
}
#' @title Drift to WV
#' @description This function compute the WV (haar) of a Drift process
#' @param omega A \code{double} corresponding to the slope of the drift
#' @template misc/tau
#' @return A \code{vec} containing the wavelet variance of the drift.
#' @template to_wv/haar_dr
#' @export
dr_to_wv <- function(omega, tau) {
.Call('_wv_dr_to_wv', PACKAGE = 'wv', omega, tau)
}
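# A numerical sketch of the closed forms referenced by the templates above
# (assuming the wv package is loaded; the constants sigma2 / tau for white
# noise, gamma2 * (tau^2 + 2) / (12 * tau) for a random walk, and
# omega^2 * tau^2 / 16 for drift are taken from the standard Haar wavelet
# variance results).
tau <- 2^(1:5)
all.equal(as.numeric(wn_to_wv(sigma2 = 2, tau = tau)), 2 / tau)
all.equal(as.numeric(rw_to_wv(gamma2 = 1, tau = tau)), (tau^2 + 2) / (12 * tau))
all.equal(as.numeric(dr_to_wv(omega = 0.1, tau = tau)), 0.1^2 * tau^2 / 16)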
#' Model Process to WV
#'
#' This function computes the summation of all Processes to WV (haar) in a given model
#' @param theta A \code{vec} containing the list of estimated parameters.
#' @param desc A \code{vector<string>} containing a list of descriptors.
#' @param objdesc A \code{field<vec>} containing a list of object descriptors.
#' @template misc/tau
#' @return A \code{vec} containing the wavelet variance of the model.
#' @export
#' @keywords internal
theoretical_wv <- function(theta, desc, objdesc, tau) {
.Call('_wv_theoretical_wv', PACKAGE = 'wv', theta, desc, objdesc, tau)
}
#' Each Models Process Decomposed to WV
#'
#' This function computes each process to WV (haar) in a given model.
#' @param theta A \code{vec} containing the list of estimated parameters.
#' @param desc A \code{vector<string>} containing a list of descriptors.
#' @param objdesc A \code{field<vec>} containing a list of object descriptors.
#' @template misc/tau
#' @return A \code{mat} containing the wavelet variance of each process in the model
#' @export
#' @keywords internal
decomp_theoretical_wv <- function(theta, desc, objdesc, tau) {
.Call('_wv_decomp_theoretical_wv', PACKAGE = 'wv', theta, desc, objdesc, tau)
}
#' Decomposed WV to Single WV
#'
#' This function computes the combined processes to WV (haar) in a given model.
#' @param decomp A \code{mat} with scales as rows and processes as columns
#' @return A \code{vec} containing the wavelet variance of the process for the overall model
#' @export
#' @keywords internal
decomp_to_theo_wv <- function(decomp) {
.Call('_wv_decomp_to_theo_wv', PACKAGE = 'wv', decomp)
}
#' @title Generate a sequence of values
#' @description Creates a vector containing a sequence of values starting at the initial point and going to the terminal point.
#' @param a An \code{int}, that denotes the starting point.
#' @param b An \code{int}, that denotes the ending point.
#' @return A \code{vector} containing values moving from a to b. There are no restrictions on A's range.
#' @author James J Balamuta
#' @keywords internal
seq_cpp <- function(a, b) {
.Call('_wv_seq_cpp', PACKAGE = 'wv', a, b)
}
#' @title Generate a sequence of values based on supplied number
#' @description Creates a vector containing a sequence of values starting at 1 and going to the terminal point.
#' @param n An \code{int} that denotes the length of the vector.
#' @return A \code{vector} containing values moving from 1 to n.
#' @author James J Balamuta
#' @keywords internal
seq_len_cpp <- function(n) {
.Call('_wv_seq_len_cpp', PACKAGE = 'wv', n)
}
#' @title Find Quantiles
#' @description Attempts to find quantiles
#' @param x A \code{vec} of data
#' @param probs A \code{vec} of the quantiles to find.
#' @return A \code{vector} containing the quantiles
#' @author James J Balamuta
#' @keywords internal
quantile_cpp <- function(x, probs) {
.Call('_wv_quantile_cpp', PACKAGE = 'wv', x, probs)
}
#' @title Lagged Differences in Armadillo
#' @description Returns the ith difference of a time series of rth lag.
#' @param x A \code{vec} that is the time series
#' @param lag A \code{unsigned int} that indicates the lag
#' @param differences An \code{unsigned int} that indicates how many differences should be taken
#' @return A \code{vector} containing the differenced time series.
#' @author James J Balamuta
#' @keywords internal
diff_cpp <- function(x, lag, differences) {
.Call('_wv_diff_cpp', PACKAGE = 'wv', x, lag, differences)
}
#' @title Converting an ARMA Process to an Infinite MA Process
#' @description Takes an ARMA function and converts it to an infinite MA process.
#' @param ar A \code{column vector} of length p
#' @param ma A \code{column vector} of length q
#' @param lag_max A \code{int} of the largest MA(Inf) coefficient required.
#' @return A \code{column vector} containing coefficients
#' @details This function is a port of the base stats package's ARMAtoMA. There is no significant speed difference between the two.
#' @author James J Balamuta
#' @keywords internal
ARMAtoMA_cpp <- function(ar, ma, lag_max) {
.Call('_wv_ARMAtoMA_cpp', PACKAGE = 'wv', ar, ma, lag_max)
}
#' @title Time Series Convolution Filters
#' @description Applies a convolution filter to a univariate time series.
#' @param x A \code{column vector} of length T
#' @param filter A \code{column vector} of length f
#' @param sides An \code{int} that takes either 1:for using past values only or 2: filter coefficients are centered around lag 0.
#' @param circular A \code{bool} that indicates if the filter should be wrapped around the ends of the time series.
#' @return A \code{column vec} that contains the results of the filtering process.
#' @details This is a port of the cfilter function harnessed by the filter function in stats.
#' It is about 5-7 times faster than R's base function. The benchmark was done on iMac Late 2013 using vecLib as the BLAS.
#' @author James J Balamuta
#' @keywords internal
cfilter <- function(x, filter, sides, circular) {
.Call('_wv_cfilter', PACKAGE = 'wv', x, filter, sides, circular)
}
#' @title Time Series Recursive Filters
#' @description Applies a recursive filter to a univariate time series.
#' @usage rfilter(x, filter, init)
#' @param x A \code{column vector} of length T
#' @param filter A \code{column vector} of length f
#' @param init A \code{column vector} of length f that contains the initial values of the time series in reverse.
#' @return x A \code{column vector} containing the filtered time series.
#' @details Note: The length of 'init' must be equal to the length of 'filter'.
#' This is a port of the rfilter function harnessed by the filter function in stats.
#' It is about 6-7 times faster than R's base function. The benchmark was done on iMac Late 2013 using vecLib as the BLAS.
#' @author James J Balamuta
#' @keywords internal
rfilter <- function(x, filter, init) {
.Call('_wv_rfilter', PACKAGE = 'wv', x, filter, init)
}
#' @title Compute Theoretical ACF for an ARMA Process
#' @description Compute the theoretical autocorrelation function for an ARMA process.
#' @usage ARMAacf_cpp(ar,ma,lag_max)
#' @param ar A \code{vector} of length p containing AR coefficients
#' @param ma A \code{vector} of length q containing MA coefficients
#' @param lag_max A \code{unsigned integer} indicating the maximum lag necessary
#' @return A \code{vector} containing the theoretical autocorrelations up to lag \code{lag_max}.
#' @details This is an implementation of the ARMAacf function in R. It is approximately 40 times faster. The benchmark was done on iMac Late 2013 using vecLib as the BLAS.
#' @author James J Balamuta
#' @keywords internal
ARMAacf_cpp <- function(ar, ma, lag_max) {
.Call('_wv_ARMAacf_cpp', PACKAGE = 'wv', ar, ma, lag_max)
}
#' @title Discrete Fourier Transformation for Autocovariance Function
#' @description Calculates the autocovariance function (ACF) using the Discrete Fourier Transformation.
#' @param x A \code{cx_vec}.
#' @return A \code{vec} containing the ACF.
#' @details
#' This implementation is 2x as slow as R's.
#' Two issues: 1. memory resize and 2. unoptimized fft algorithm in arma.
#' Consider piping back into R and rewrapping the object. (Decrease of about 10 microseconds.)
#' @keywords internal
dft_acf <- function(x) {
.Call('_wv_dft_acf', PACKAGE = 'wv', x)
}
#' @title Mean of the First Difference of the Data
#' @description The mean of the first difference of the data
#' @param x A \code{vec} containing the data
#' @return A \code{double} that contains the mean of the first difference of the data.
#' @keywords internal
mean_diff <- function(x) {
.Call('_wv_mean_diff', PACKAGE = 'wv', x)
}
#' Replicate a Vector of Elements \eqn{n} times
#'
#' This function takes a vector and replicates all of the data \eqn{n} times
#' @param x A \code{vec} containing the data
#' @param n An \code{unsigned int} indicating the number of times the vector should be repeated.
#' @return A \code{vec} with repeated elements of the initial supplied vector.
#' @keywords internal
num_rep <- function(x, n) {
.Call('_wv_num_rep', PACKAGE = 'wv', x, n)
}
#' @rdname diff_inv
intgr_vec <- function(x, xi, lag) {
.Call('_wv_intgr_vec', PACKAGE = 'wv', x, xi, lag)
}
#' @param xi A \code{vec} with length \eqn{lag*d} that provides initial values for the integration.
#' @rdname diff_inv
diff_inv_values <- function(x, lag, d, xi) {
.Call('_wv_diff_inv_values', PACKAGE = 'wv', x, lag, d, xi)
}
#' Discrete Integral: Inverse Difference
#'
#' Takes the inverse difference (e.g. goes from diff() result back to previous vector)
#' @param x A \code{vec} containing the data
#' @param lag An \code{unsigned int} indicating the lag between observations.
#' @param d An \code{unsigned int} which gives the number of "differences" to invert.
#' @keywords internal
diff_inv <- function(x, lag, d) {
.Call('_wv_diff_inv', PACKAGE = 'wv', x, lag, d)
}
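# A conceptual sketch using base R: diff_inv() ports the behavior of
# stats::diffinv(), which inverts diff() given initial values. Supplying the
# first observation as the initial condition recovers the original series.
x <- cumsum(rnorm(10))
all.equal(diffinv(diff(x), xi = x[1]), x)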
#' @title Auto-Covariance and Correlation Functions
#' @description The acf function computes the estimated
#' autocovariance or autocorrelation for both univariate and multivariate cases.
#' @param x A \code{matrix} with dimensions \eqn{N \times S}{N x S} or N observations and S processes
#' @param lagmax An \code{integer} indicating the maximum lag.
#' @param cor A \code{bool} indicating whether the correlation
#' (\code{TRUE}) or covariance (\code{FALSE}) should be computed.
#' @param demean A \code{bool} indicating whether the data should be detrended
#' (\code{TRUE}) or not (\code{FALSE})
#' @keywords internal
.acf <- function(x, lagmax = 0L, cor = TRUE, demean = TRUE) {
.Call('_wv_acf', PACKAGE = 'wv', x, lagmax, cor, demean)
}
#' Create the ts.model obj.desc given split values
#'
#' Computes the total phi and total theta vector length.
#' @param ar A \code{vec} containing the non-seasonal phi parameters.
#' @param ma A \code{vec} containing the non-seasonal theta parameters.
#' @param sar A \code{vec} containing the seasonal phi parameters.
#' @param sma A \code{vec} containing the seasonal theta parameters.
#' @param s An \code{unsigned integer} containing the frequency of seasonality.
#' @param i An \code{unsigned integer} containing the number of non-seasonal differences.
#' @param si An \code{unsigned integer} containing the number of seasonal differences.
#' @return A \code{vec} with rows:
#' \describe{
#' \item{np}{Number of Non-Seasonal AR Terms}
#' \item{nq}{Number of Non-Seasonal MA Terms}
#' \item{nsp}{Number of Seasonal AR Terms}
#' \item{nsq}{Number of Seasonal MA Terms}
#' \item{nsigma}{Number of Variances (always 1)}
#' \item{s}{Season Value}
#' \item{i}{Number of non-seasonal differences}
#' \item{si}{Number of Seasonal Differences}
#' }
sarma_objdesc <- function(ar, ma, sar, sma, s, i, si) {
.Call('_wv_sarma_objdesc', PACKAGE = 'wv', ar, ma, sar, sma, s, i, si)
}
#' Calculates Length of Seasonal Padding
#'
#' Computes the total phi and total theta vector length.
#' @param np An \code{unsigned int} containing the number of non-seasonal phi parameters.
#' @param nq An \code{unsigned int} containing the number of non-seasonal theta parameters.
#' @param nsp An \code{unsigned int} containing the number of seasonal phi parameters.
#' @param nsq An \code{unsigned int} containing the number of seasonal theta parameters.
#' @param ns An \code{unsigned int} containing the number of seasons (e.g. 12 for monthly data).
#' @seealso \code{\link{sarma_components}}
#' @return A \code{vec} with rows:
#' \describe{
#' \item{p}{Number of phi parameters}
#' \item{q}{Number of theta parameters}
#' }
#' @keywords internal
#'
sarma_calculate_spadding <- function(np, nq, nsp, nsq, ns) {
.Call('_wv_sarma_calculate_spadding', PACKAGE = 'wv', np, nq, nsp, nsq, ns)
}
#' Determine parameter expansion based upon objdesc
#'
#' Calculates the necessary vec space needed to pad the vectors
#' for seasonal terms.
#' @param objdesc A \code{vec} with the appropriate sarima object description
#' @return A \code{vec} with the structure:
#' \describe{
#' \item{np}{Number of Non-Seasonal AR Terms}
#' \item{nq}{Number of Non-Seasonal MA Terms}
#' \item{nsp}{Number of Seasonal AR Terms}
#' \item{nsq}{Number of Seasonal MA Terms}
#' \item{ns}{Number of Seasons (e.g. 12 is year)}
#' \item{p}{Total number of phi terms}
#' \item{q}{Total number of theta terms}
#' }
#' @keywords internal
sarma_components <- function(objdesc) {
.Call('_wv_sarma_components', PACKAGE = 'wv', objdesc)
}
#' Efficiently merge the AR, MA, seasonal AR, and seasonal MA parameter vectors into a single parameter vector.
#' @keywords internal
sarma_params_construct <- function(ar, ma, sar, sma) {
.Call('_wv_sarma_params_construct', PACKAGE = 'wv', ar, ma, sar, sma)
}
#' (Internal) Expand the SARMA Parameters
#' @param params A \code{vec} containing the theta values of the parameters.
#' @inheritParams sarma_calculate_spadding
#' @param p An \code{unsigned int} that is the total size of the phi vector.
#' @param q An \code{unsigned int} that is the total size of the theta vector.
#' @return A \code{field<vec>} that contains the expansion.
#' @keywords internal
sarma_expand_unguided <- function(params, np, nq, nsp, nsq, ns, p, q) {
.Call('_wv_sarma_expand_unguided', PACKAGE = 'wv', params, np, nq, nsp, nsq, ns, p, q)
}
#' Expand Parameters for an SARMA object
#'
#' Creates an expanded PHI and THETA vector for use in other objects.
#' @param params A \code{vec} containing the theta values of the parameters.
#' @param objdesc A \code{vec} containing the model term information.
#' @return A \code{field<vec>} of size two as follows:
#' \itemize{
#' \item AR values
#' \item THETA values
#' }
#' @details
#' The \code{objdesc} is assumed to have the structure of:
#' \itemize{
#' \item AR(p)
#' \item MA(q)
#' \item SAR(P)
#' \item SMA(Q)
#' \item Seasons
#' }
#' @keywords internal
sarma_expand <- function(params, objdesc) {
.Call('_wv_sarma_expand', PACKAGE = 'wv', params, objdesc)
}
#' Compute the Spatial Wavelet Coefficients
#' @param X is a matrix with row, col orientation
#' @param J1,J2 are the levels of decomposition along the rows and columns, respectively.
#' @export
#' @return A \code{list} of \code{vectors} containing the wavelet coefficients.
#' @details
#' Decomposes the matrix along its rows up to level \code{J1} and along its
#' columns up to level \code{J2}, returning the resulting spatial wavelet coefficients.
sp_modwt_cpp <- function(X, J1, J2) {
.Call('_wv_sp_modwt_cpp', PACKAGE = 'wv', X, J1, J2)
}
#' Haar filter for a spatial case
#' @param jscale An \code{int} giving the number of scales.
#' @export
sp_hfilter <- function(jscale) {
.Call('_wv_sp_hfilter', PACKAGE = 'wv', jscale)
}
#' @title Generate eta3 confidence interval
#' @description Computes the eta3 CI
#' @param y A \code{vec} that computes the modwt dot product of each wavelet coefficient divided by their length.
#' @param dims A \code{String} indicating the confidence interval being calculated.
#' @param alpha_ov_2 A \code{double} that indicates the \eqn{\left(1-p\right)*\alpha}{(1-p)*alpha} confidence level
#' @return A \code{matrix} with the structure:
#' \itemize{
#' \item{Column 1}{Wavelet Variance}
#' \item{Column 2}{Chi-squared Lower Bounds}
#' \item{Column 3}{Chi-squared Upper Bounds}
#' }
#' @keywords internal
ci_eta3 <- function(y, dims, alpha_ov_2) {
.Call('_wv_ci_eta3', PACKAGE = 'wv', y, dims, alpha_ov_2)
}
#' @title Generate eta3 robust confidence interval
#' @description Computes the eta3 robust CI
#' @param wv_robust A \code{vec} that computes the modwt dot product of each wavelet coefficient divided by their length.
#' @param wv_ci_class A \code{mat} that contains the CI mean, CI Lower, and CI Upper
#' @param alpha_ov_2 A \code{double} that indicates the \eqn{\left(1-p\right)*\alpha}{(1-p)*alpha} confidence level
#' @param eff A \code{double} that indicates the efficiency.
#' @return A \code{matrix} with the structure:
#' \itemize{
#' \item{Column 1}{Robust Wavelet Variance}
#' \item{Column 2}{Chi-squared Lower Bounds}
#' \item{Column 3}{Chi-squared Upper Bounds}
#' }
#' @details
#' Within this function we are scaling the classical confidence interval to obtain its robust counterpart.
#' @keywords internal
ci_eta3_robust <- function(wv_robust, wv_ci_class, alpha_ov_2, eff) {
.Call('_wv_ci_eta3_robust', PACKAGE = 'wv', wv_robust, wv_ci_class, alpha_ov_2, eff)
}
#' @title Generate a Confidence interval for a Univariate Time Series
#' @description Computes an estimate of the multiscale variance and a chi-squared confidence interval
#' @param signal_modwt_bw A \code{field<vec>} that contains the modwt or dwt decomposition
#' @param wv A \code{vec} that contains the wave variance.
#' @param type A \code{String} indicating the confidence interval being calculated.
#' @param alpha_ov_2 A \code{double} that indicates the \eqn{\left(1-p\right)*\alpha}{(1-p)*alpha} confidence level.
#' @param robust A \code{boolean} to determine the type of wave estimation.
#' @param eff A \code{double} that indicates the efficiency.
#' @return A \code{matrix} with the structure:
#' \itemize{
#' \item{Column 1}{Wavelet Variance}
#' \item{Column 2}{Chi-squared Lower Bounds}
#' \item{Column 3}{Chi-squared Upper Bounds}
#' }
#' @keywords internal
#' @details
#' This function can be expanded to allow for other confidence interval calculations.
ci_wave_variance <- function(signal_modwt_bw, wv, type = "eta3", alpha_ov_2 = 0.025, robust = FALSE, eff = 0.6) {
.Call('_wv_ci_wave_variance', PACKAGE = 'wv', signal_modwt_bw, wv, type, alpha_ov_2, robust, eff)
}
#' @title Generate a Wave Variance for a Univariate Time Series
#' @description Computes an estimate of the wave variance
#' @param signal_modwt_bw A \code{field<vec>} that contains the modwt or dwt decomposition
#' @param robust A \code{boolean} to determine the type of wave estimation.
#' @param eff A \code{double} that indicates the efficiency.
#' @return A \code{vec} that contains the wave variance.
#' @keywords internal
wave_variance <- function(signal_modwt_bw, robust = FALSE, eff = 0.6) {
.Call('_wv_wave_variance', PACKAGE = 'wv', signal_modwt_bw, robust, eff)
}
#' @title Computes the (MODWT) wavelet variance
#' @description Calculates the (MODWT) wavelet variance
#' @param signal_modwt_bw A \code{field<vec>} that contains the modwt decomposition after it has been brick walled.
#' @param robust A \code{boolean} that triggers the use of the robust estimate.
#' @param eff A \code{double} that indicates the efficiency as it relates to an MLE.
#' @param alpha A \code{double} that indicates the \eqn{\left(1-p\right)*\alpha}{(1-p)*alpha} confidence level
#' @param ci_type A \code{String} indicating the confidence interval being calculated. Valid value: "eta3"
#' @return A \code{mat} with the structure:
#' \itemize{
#' \item{"variance"}{Wavelet Variance}
#' \item{"low"}{Lower CI}
#' \item{"high"}{Upper CI}
#' }
#' @keywords internal
#' @details
#' This function does the heavy lifting with the signal_modwt_bw
wvar_cpp <- function(signal_modwt_bw, robust, eff, alpha, ci_type) {
.Call('_wv_wvar_cpp', PACKAGE = 'wv', signal_modwt_bw, robust, eff, alpha, ci_type)
}
#' @title Computes the (MODWT) wavelet variance
#' @description Calculates the (MODWT) wavelet variance
#' @param signal A \code{vec} that contains the data.
#' @param robust A \code{boolean} that triggers the use of the robust estimate.
#' @param eff A \code{double} that indicates the efficiency as it relates to an MLE.
#' @param alpha A \code{double} that indicates the \eqn{\left(1-p\right)\times \alpha}{(1-p)*alpha} confidence level
#' @param ci_type A \code{string} indicating the confidence interval being calculated. Valid value: "eta3"
#' @param strWavelet A \code{string} indicating the type of wave filter to be applied. Must be "haar"
#' @param decomp A \code{string} indicating whether to use "modwt" or "dwt" decomp
#' @return A \code{mat} with the structure:
#' \itemize{
#' \item{"variance"}{Wavelet Variance}
#' \item{"low"}{Lower CI}
#' \item{"high"}{Upper CI}
#' }
#' @keywords internal
#' @details
#' This function powers the wvar object. It is also extendable...
modwt_wvar_cpp <- function(signal, nlevels, robust, eff, alpha, ci_type, strWavelet, decomp) {
.Call('_wv_modwt_wvar_cpp', PACKAGE = 'wv', signal, nlevels, robust, eff, alpha, ci_type, strWavelet, decomp)
}
#' @title Computes the MO/DWT wavelet variance for multiple processes
#' @description Calculates the MO/DWT wavelet variance
#' @param signal A \code{matrix} that contains the same number of observations per dataset
#' @param robust A \code{boolean} that triggers the use of the robust estimate.
#' @param eff A \code{double} that indicates the efficiency as it relates to an MLE.
#' @param alpha A \code{double} that indicates the \eqn{\left(1-p\right)\times \alpha}{(1-p)*alpha} confidence level
#' @param ci_type A \code{string} indicating the confidence interval being calculated. Valid value: "eta3"
#' @param strWavelet A \code{string} indicating the type of wave filter to be applied. Must be "haar"
#' @param decomp A \code{string} indicating whether to use "modwt" or "dwt" decomp
#' @return A \code{field<mat>} with the structure:
#' \itemize{
#' \item{"variance"}{Wavelet Variance}
#' \item{"low"}{Lower CI}
#' \item{"high"}{Upper CI}
#' }
#' @keywords internal
#' @details
#' This function processes the decomposition of multiple signals quickly
batch_modwt_wvar_cpp <- function(signal, nlevels, robust, eff, alpha, ci_type, strWavelet, decomp) {
.Call('_wv_batch_modwt_wvar_cpp', PACKAGE = 'wv', signal, nlevels, robust, eff, alpha, ci_type, strWavelet, decomp)
}
#' @title Computes the MODWT scales
#' @description Calculates the MODWT scales
#' @param nb_level A \code{integer} that contains the level of decomposition J.
#' @return A \code{vec} that contains 2^1, ... , 2^J
#' @keywords internal
#' @details
#' Used in wvar object.
scales_cpp <- function(nb_level) {
.Call('_wv_scales_cpp', PACKAGE = 'wv', nb_level)
}
compute_cov_cpp <- function(coef1, coef2, variance, lower, upper) {
.Call('_wv_compute_cov_cpp', PACKAGE = 'wv', coef1, coef2, variance, lower, upper)
}
| /scratch/gouwar.j/cran-all/cranData/wv/R/RcppExports.R |