## File: vcvComp/R/euclidean.dist.R
#' Euclidean distance between two covariance matrices
#'
#' @description Computes the Euclidean distance (Frobenius norm) between two variance-covariance matrices of the same dimensions
#'
#' @param S1 a variance-covariance matrix
#' @param S2 a variance-covariance matrix
#'
#' @return Euclidean distance between S1 and S2 following Dryden et al. (2009).
#'
#' @references Dryden IL, Koloydenko A, Zhou D (2009)
#' Non-Euclidean statistics for covariance matrices, with applications to diffusion tensor imaging.
#' \emph{The Annals of Applied Statistics 3}:1102-1123.
#' \url{https://projecteuclid.org/euclid.aoas/1254773280}
#'
#' @examples
#'
#' # Data matrix of 2D landmark coordinates
#' data("Tropheus.IK.coord")
#' coords <- which(names(Tropheus.IK.coord) == "X1"):which(names(Tropheus.IK.coord) == "Y19")
#' proc.coord <- as.matrix(Tropheus.IK.coord[coords])
#'
#' # Data reduction
#' phen.pca <- prcomp(proc.coord, rank. = 5, tol = sqrt(.Machine$double.eps))
#' pc.scores <- phen.pca$x
#'
#' # Covariance matrix of each population
#' S.phen.pop <- cov.group(pc.scores, groups = Tropheus.IK.coord$POP.ID)
#'
#' # Euclidean distance between the covariance matrices of 2 populations
#' # (IKA1 relative to IKS5)
#' dist.a1s5 <- euclidean.dist(S.phen.pop[, , "IKA1"], S.phen.pop[, , "IKS5"])
#'
#' @export
euclidean.dist <-
  function (S1, S2) {
    if (is.data.frame(S2))
      S2 <- as.matrix(S2)
    if (is.data.frame(S1))
      S1 <- as.matrix(S1)
    if (is.null(S1) || is.null(S2))
      stop("supply both 'S1' and 'S2'")
    if (!is.matrix(S1) || !is.matrix(S2))
      stop("'S1' and 'S2' must be matrices or data frames")
    if (!isTRUE(all.equal(dim(S1), dim(S2))) || nrow(S1) != ncol(S1))
      stop("'S1' and 'S2' must be square matrices of the same dimensions")
    S12 <- S1 - S2
    M <- t(S12) %*% S12
    trM <- sum(diag(M))  # tr((S1 - S2)' (S1 - S2)) = squared Frobenius norm
    distEucl <- sqrt(trM)
    return(distEucl)
  }
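# Illustrative check (a minimal sketch, not part of the package): the value
# returned by euclidean.dist() should agree with the Frobenius norm of the
# difference, which base R computes via norm(, type = "F").
set.seed(1)
A <- cov(matrix(rnorm(40), nrow = 10))  # toy 4 x 4 covariance matrix
B <- cov(matrix(rnorm(40), nrow = 10))
stopifnot(isTRUE(all.equal(euclidean.dist(A, B), norm(A - B, type = "F"))))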
## File: vcvComp/R/mat.sq.dist.R
#' Squared distance matrix
#'
#' @description Computes the squared distance matrix of a set of covariance matrices
#'
#' @param Sm a (p x p x m) array of covariance matrices,
#' where p is the number of variables and m the number of groups.
#' @param dist. the type of distance, either "Riemannian" (the default) or "Euclidean"
#' @param method an integer for the method of matrix inversion (see function 'minv')
#' @param pa an integer for the parameter of matrix inversion (see function 'minv')
#'
#' @return The matrix of squared Riemannian or Euclidean distances
#'
#' @seealso See \code{\link{minv}} for the method and the parameter used for the matrix inversion
#'
#' @examples
#'
#' # Data matrix of 2D landmark coordinates
#' data("Tropheus.IK.coord")
#' coords <- which(names(Tropheus.IK.coord) == "X1"):which(names(Tropheus.IK.coord) == "Y19")
#' proc.coord <- as.matrix(Tropheus.IK.coord[coords])
#'
#' # Data reduction
#' phen.pca <- prcomp(proc.coord, rank. = 5, tol = sqrt(.Machine$double.eps))
#' pc.scores <- phen.pca$x
#'
#' # Covariance matrix of each population
#' S.phen.pop <- cov.group(pc.scores, groups = Tropheus.IK.coord$POP.ID)
#'
#' # Squared Riemannian distance matrix of the covariance matrices of all populations
#' eigen.phen.r <- mat.sq.dist(S.phen.pop, dist. = "Riemannian")
#'
#' # Squared Euclidean distance matrix of the covariance matrices of all populations
#' eigen.phen.e <- mat.sq.dist(S.phen.pop, dist. = "Euclidean")
#'
#' @export
mat.sq.dist <-
  function (Sm, dist. = "Riemannian", method = 0, pa = 0) {
    dist. <- match.arg(dist., c("Riemannian", "Euclidean"))
    k <- dim(Sm)[[3]]
    tol <- .Machine$double.eps * k  # machine tolerance
    V <- matrix(0, nrow = k, ncol = k)
    for (l in 1:k) {
      for (m in 1:k) {
        if (m != l) {
          if (dist. == "Euclidean") {
            E_lm <- euclidean.dist(Sm[, , l], Sm[, , m])
          }
          if (dist. == "Riemannian") {
            E_lm <- relative.eigen(Sm[, , l], Sm[, , m], method, pa)$distCov
          }
          if ((E_lm) ^ 2 > tol) {
            V[l, m] <- (E_lm) ^ 2
          }
        }
      }
    }
    rownames(V) <- dimnames(Sm)[[3]]
    colnames(V) <- dimnames(Sm)[[3]]
    return(V)
  }
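# Illustrative check (a sketch, not part of the package): the entries are
# squared pairwise distances, so they should match euclidean.dist() directly,
# and the matrix should be symmetric with a zero diagonal.
set.seed(2)
A <- cov(matrix(rnorm(40), nrow = 10))
B <- cov(matrix(rnorm(40), nrow = 10))
Sm <- array(c(A, B), dim = c(4, 4, 2), dimnames = list(NULL, NULL, c("A", "B")))
D2 <- mat.sq.dist(Sm, dist. = "Euclidean")
stopifnot(isTRUE(all.equal(D2["A", "B"], euclidean.dist(A, B)^2)))
stopifnot(isSymmetric(D2), all(diag(D2) == 0))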
## File: vcvComp/R/minv.R
#' Matrix pseudoinverse
#'
#' @description Computes the inverse or the pseudoinverse of a matrix
#'
#' @param M a numeric matrix (square matrix)
#' @param method an integer for the method of inversion.
#' If method = 0, only the nonzero eigenvalues are kept;
#' if method = 1, only the eigenvalues above a threshold are kept;
#' if method = 2, only the first 'pa' eigenvalues are kept;
#' if method = 3, a Tikhonov regularization (= ridge regression) is performed.
#' @param pa an integer for the parameter of inversion.
#' If method = 1, pa is the threshold below which the eigenvalues are not kept;
#' if method = 2, pa is a positive integer giving the number of eigenvalues that are kept;
#' if method = 3, pa is the scaling factor for the identity matrix
#'
#' @return A numeric matrix corresponding to the pseudoinverse of M
#'
#' @examples
#'
#' # Data matrix of 2D landmark coordinates
#' data("Tropheus.IK.coord")
#' coords <- which(names(Tropheus.IK.coord) == "X1"):which(names(Tropheus.IK.coord) == "Y19")
#' proc.coord <- as.matrix(Tropheus.IK.coord[coords])
#'
#' # Covariance matrix of each population
#' S.phen.pop <- cov.group(proc.coord, groups = Tropheus.IK.coord$POP.ID)
#'
#' # Pseudo-inversion of a square matrix (covariance matrix of the population IKS5)
#' S2 <- S.phen.pop[, , "IKS5"]
#' invS2 <- minv(S2, method = 0, pa = 0) # Pseudoinverse keeping non-zero eigenvalues
#' invS2 <- minv(S2, method = 1, pa = 10^-8) # Pseudoinverse keeping eigenvalues above 10^-8
#' invS2 <- minv(S2, method = 2, pa = 5) # Pseudoinverse keeping the first five eigenvalues
#' invS2 <- minv(S2, method = 3, pa = 0.5) # Ridge regression with Tikhonov factor of 0.5
#'
#' @export
minv <-
  function (M, method = 0, pa = 0) {
    # Checks
    if (length(dim(M)) > 2L || !is.numeric(M))
      stop("'M' must be a numeric matrix")
    if (!is.matrix(M))
      M <- as.matrix(M)
    if (!method %in% c(0, 1, 2, 3))
      stop("'method' must be 0, 1, 2 or 3")
    # Pseudoinverse of M via eigendecomposition
    if (method %in% c(0, 1, 2)) {
      E <- eigen(M)
      D <- E$values
      D0 <- rep(0, length(D))
      # Keeps only the eigenvalues above the threshold 'pa'
      if (method == 0 || method == 1) {
        if (method == 0) { pa <- .Machine$double.eps * max(dim(M)) * max(D) }  # machine tolerance
        for (i in 1:length(D0)) {
          if (abs(D[i]) > pa) { D0[i] <- 1 / (D[i]) }
        }
      }
      # Keeps only the first 'pa' eigenvalues
      if (method == 2) {
        if (pa > length(D)) { pa <- length(D) }
        if (pa >= 1) {
          for (i in 1:pa) { D0[i] <- 1 / (D[i]) }
        }
      }
    }
    # Ridge regression = Tikhonov regularization: invert M + pa * I instead of M
    if (method == 3) {
      ID <- diag(nrow(M))  # identity matrix
      M0 <- pa * ID + M
      E <- eigen(M0)
      D <- E$values
      D0 <- 1 / D
    }
    V <- E$vectors
    MF <- V %*% diag(D0) %*% t(V)
    invisible(MF)
  }
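# Illustrative check (a sketch, not part of the package): for a full-rank
# symmetric matrix, method = 0 should agree with solve(), and method = 3
# should invert the regularized matrix M + pa * I.
set.seed(3)
M <- crossprod(matrix(rnorm(25), 5, 5))  # symmetric positive definite 5 x 5
stopifnot(isTRUE(all.equal(minv(M), solve(M))))
stopifnot(isTRUE(all.equal(minv(M, method = 3, pa = 0.5), solve(M + 0.5 * diag(5)))))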
## File: vcvComp/R/pr.coord.R
#' Principal coordinates ordination
#'
#' @description Performs a principal coordinates analysis of a distance matrix
#'
#' @param V a square distance matrix
#'
#' @return
#' A list containing the following named components:
#' \item{k}{the number of groups (value)}
#' \item{vectors}{the eigenvectors of the centered inner product matrix (matrix)}
#' \item{values}{the eigenvalues of the centered inner product matrix (vector)}
#' \item{PCoords}{the principal coordinates = scaled eigenvectors (matrix)}
#' \item{Variance}{a dataframe containing the following named variables:
#' \describe{
#' \item{eigenvalues}{eigenvalues of the centered inner product matrix}
#' \item{variance}{variance of each principal coordinate}
#' \item{exVar}{proportion of the total variation accounted for by each principal coordinate}
#' \item{cumVar}{cumulative proportion of the total variation accounted for by the principal coordinates}
#' }
#' }
#'
#' @examples
#'
#' # Data matrix of 2D landmark coordinates
#' data("Tropheus.IK.coord")
#' coords <- which(names(Tropheus.IK.coord) == "X1"):which(names(Tropheus.IK.coord) == "Y19")
#' proc.coord <- as.matrix(Tropheus.IK.coord[coords])
#'
#' # Data reduction
#' phen.pca <- prcomp(proc.coord, rank. = 5, tol = sqrt(.Machine$double.eps))
#' pc.scores <- phen.pca$x
#'
#' # Covariance matrix of each population
#' S.phen.pop <- cov.group(pc.scores, groups = Tropheus.IK.coord$POP.ID)
#'
#' # Squared distance matrix of the covariance matrices of all populations
#' eigen.phen.pop <- mat.sq.dist(S.phen.pop, dist. = "Riemannian") # Riemannian distances
#'
#' # Ordination of the squared distance matrix
#' prcoa.pop <- pr.coord(eigen.phen.pop)
#'
#' # Visualization
#' plot(prcoa.pop$PCoords[, 1], prcoa.pop$PCoords[, 2])
#' abline(h = 0) ; abline(v = 0)
#' text(prcoa.pop$PCoords[, 1], prcoa.pop$PCoords[, 2], labels = rownames(prcoa.pop$PCoords))
#'
#' @export
pr.coord <-
  function (V) {
    if (is.data.frame(V))
      V <- as.matrix(V)
    else if (!is.matrix(V))
      stop("'V' must be a matrix or a data frame")
    if (!all(is.finite(V)))
      stop("'V' must contain finite values only")
    if (dim(V)[1] != dim(V)[2])
      stop("'V' must be a square matrix")
    # Centered inner product matrix
    k <- dim(V)[1]
    H <- diag(k) - matrix((1 / k), nrow = k, ncol = k)  # centering matrix
    D <- - 0.5 * H %*% V %*% H
    # Number of principal coordinates
    max_pc <- k - 1
    # Eigenanalysis
    E <- eigen(D)
    vectors <- E$vectors[, 1:max_pc]
    rownames(vectors) <- rownames(V)
    colnames(vectors) <- paste("PCo", 1:max_pc, sep = "")
    L <- E$values[1:max_pc]
    L0 <- rep(0, length(L))
    # Keeps only the nonzero eigenvalues (above tol or below -tol)
    tol <- .Machine$double.eps * max(dim(D)) * max(L)  # machine tolerance
    for (i in 1:length(L)) {
      if (abs(L[i]) > tol) {
        L0[i] <- L[i]
      }
    }
    values <- L0
    PCoords <- vectors %*% diag(sqrt(values))
    colnames(PCoords) <- paste("PCo", 1:max_pc, sep = "")
    variance <- values / max_pc
    exVar <- values / sum(values)
    cumVar <- cumsum(exVar)
    Variance <- data.frame("eigenvalues" = values, "variance" = variance,
                           "exVar" = exVar, "cumVar" = cumVar)
    prCoord <- list("k" = k, "vectors" = vectors, "values" = values,
                    "PCoords" = PCoords, "Variance" = Variance)
    return(prCoord)
  }
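# Illustrative check (a sketch, not part of the package): on a squared
# Euclidean distance matrix, pr.coord() should recover the same eigenvalues
# as classical multidimensional scaling (stats::cmdscale).
set.seed(4)
X <- matrix(rnorm(12), nrow = 4)   # 4 points in 3 dimensions
V <- as.matrix(dist(X))^2          # squared Euclidean distances
pco <- pr.coord(V)
mds <- cmdscale(dist(X), k = 3, eig = TRUE)
stopifnot(isTRUE(all.equal(pco$values, mds$eig[1:3])))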
## File: vcvComp/R/prop.vcv.test.R
#' Proportionality test of two variance-covariance matrices
#'
#' @description Tests the proportionality of two variance-covariance matrices
#'
#' @param n the sample size(s), given as a number or a vector of length 2
#' @param S1 a variance-covariance matrix
#' @param S2 a variance-covariance matrix
#' @param method an integer for the method of matrix inversion (see function 'minv')
#' @param pa an integer for the parameter of matrix inversion (see function 'minv')
#'
#' @return The P-value for the test of proportionality between two variance-covariance matrices
#'
#' @importFrom stats pchisq
#'
#' @seealso \code{\link{relative.eigen}} for the computation of relative eigenvalues
#' @seealso \code{\link{minv}} for the method and the parameter used for the matrix inversion
#' @seealso \code{\link[stats:Chisquare]{pchisq}} for the chi-squared distribution
#'
#' @references Mardia KV, Kent JT, Bibby JM (1979)
#' \emph{Multivariate analysis}. Academic Press, London.
#'
#' @examples
#'
#' # Data matrix of 2D landmark coordinates
#' data("Tropheus.IK.coord")
#' coords <- which(names(Tropheus.IK.coord) == "X1"):which(names(Tropheus.IK.coord) == "Y19")
#' proc.coord <- as.matrix(Tropheus.IK.coord[coords])
#'
#' # Data reduction
#' phen.pca <- prcomp(proc.coord, rank. = 5, tol = sqrt(.Machine$double.eps))
#' pc.scores <- phen.pca$x
#'
#' # Covariance matrix of each population
#' S.phen.pop <- cov.group(pc.scores, groups = Tropheus.IK.coord$POP.ID)
#'
#' # Maximum likelihood test of proportionality between 2 covariance matrices
#' # (IKA1 relative to IKS5) - 71 and 75 are the sample sizes
#' prop.vcv.test(n = c(71, 75), S.phen.pop[,,"IKA1"], S.phen.pop[,,"IKS5"])
#'
#' @export
prop.vcv.test <-
  function (n, S1, S2, method = 0, pa = 0) {
    if (is.null(n))
      stop("supply the sample size 'n'")
    if (!is.vector(n) || !is.numeric(n))
      stop("supply the sample size 'n' as a number or a numeric vector")
    if (length(n) < 1 || length(n) > 2)
      stop("supply the sample size 'n' as a single number or a vector of length 2")
    if (length(n) == 2)
      n <- 2 / (1 / n[1] + 1 / n[2])  # harmonic mean
    if (is.data.frame(S2))
      S2 <- as.matrix(S2)
    if (is.data.frame(S1))
      S1 <- as.matrix(S1)
    if (is.null(S1) || is.null(S2))
      stop("supply both 'S1' and 'S2'")
    if (!is.matrix(S1) || !is.matrix(S2))
      stop("'S1' and 'S2' must be matrices or data frames")
    if (!isTRUE(all.equal(dim(S1), dim(S2))))
      stop("'S1' and 'S2' must be square matrices of the same dimensions")
    relValues <- relative.eigen(S1, S2, method = method, pa = pa)$relValues  # relative eigenvalues
    p <- length(relValues)  # number of relative eigenvalues
    if (n < 10 * p) {
      warning("The sample size is not very large compared to the number of relative eigenvalues.")
    }
    a <- mean(relValues)  # arithmetic mean
    g <- prod(relValues) ^ (1 / p)  # geometric mean
    val <- 0.5 * n * p * log(a / g)  # likelihood-ratio statistic
    ddl <- (p - 1) * (p + 2) / 2  # degrees of freedom
    pValue <- pchisq(q = val, df = ddl, lower.tail = FALSE)
    return(pValue)
  }
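# Illustrative check (a sketch, not part of the package): proportional
# matrices give equal relative eigenvalues, a zero test statistic, and
# hence a p-value of 1.
set.seed(5)
S <- cov(matrix(rnorm(300), ncol = 3))
stopifnot(isTRUE(all.equal(prop.vcv.test(n = 100, S, 2 * S), 1)))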
## File: vcvComp/R/relGV.multi.R
#' Ratio of generalized variances
#'
#' @description Computes the (log-transformed) ratios of the generalized variances
#' of a set of covariance matrices
#'
#' @param Sm a (p x p x m) array of covariance matrices,
#' where p is the number of variables and m the number of groups.
#' @param logGV a logical argument to indicate if the ratios should be log-transformed
#'
#' @return The matrix of the (log-transformed) ratios of the generalized variances.
#' For each row, the ratio corresponds to the group of the row
#' relative to the group of a column.
#'
#' @examples
#'
#' # Data matrix of 2D landmark coordinates
#' data("Tropheus.IK.coord")
#' coords <- which(names(Tropheus.IK.coord) == "X1"):which(names(Tropheus.IK.coord) == "Y19")
#' proc.coord <- as.matrix(Tropheus.IK.coord[coords])
#'
#' # Data reduction
#' phen.pca <- prcomp(proc.coord, rank. = 5, tol = sqrt(.Machine$double.eps))
#' pc.scores <- phen.pca$x
#'
#' # Covariance matrix of each population
#' S.phen.pop <- cov.group(pc.scores, groups = Tropheus.IK.coord$POP.ID)
#'
#' # Ratio of the generalized variances of 2 populations (IKA1 and IKS5)
#' relGV.multi(S.phen.pop[, , c("IKA1", "IKS5")], logGV = FALSE)
#'
#' @export
relGV.multi <-
  function (Sm, logGV = TRUE) {
    k <- dim(Sm)[[3]]
    detS <- numeric(k)
    for (l in 1:k) {
      detS[l] <- det(Sm[, , l])  # generalized variance of each group
    }
    V <- matrix(1, nrow = k, ncol = k)
    for (l in 1:k) {
      for (m in 1:k) {
        if (m != l) {
          V[l, m] <- detS[l] / detS[m]
        }
      }
    }
    if (isTRUE(logGV)) {
      V <- log(V)
    }
    rownames(V) <- dimnames(Sm)[[3]]
    colnames(V) <- dimnames(Sm)[[3]]
    return(V)
  }
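# Illustrative check (a sketch, not part of the package): each entry is the
# ratio of generalized variances, i.e. of determinants.
set.seed(6)
A <- cov(matrix(rnorm(90), ncol = 3))
B <- cov(matrix(rnorm(90), ncol = 3))
Sm <- array(c(A, B), dim = c(3, 3, 2), dimnames = list(NULL, NULL, c("A", "B")))
stopifnot(isTRUE(all.equal(relGV.multi(Sm, logGV = FALSE)["A", "B"], det(A) / det(B))))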
## File: vcvComp/R/relative.eigen.R
#' Relative eigenanalysis
#'
#' @description Computes the Riemannian distance between two variance-covariance matrices of the same dimensions and the relative eigenvectors and eigenvalues of S1 with respect to S2
#'
#' @param S1 a variance-covariance matrix
#' @param S2 a variance-covariance matrix
#' @param method an integer for the method of matrix inversion (see function 'minv')
#' @param pa an integer for the parameter of matrix inversion (see function 'minv')
#'
#' @return
#' A list containing the following named components:
#' \item{relValues}{the vector of relative eigenvalues}
#' \item{relVectors}{the matrix of relative eigenvectors}
#' \item{distCov}{the distance between the two covariance matrices}
#' \item{relGV}{the product of the nonzero relative eigenvalues = the ratio of the generalized variances.
#' The generalized variance corresponds to the determinant of the covariance matrix.}
#' \item{logGV}{the log ratio of the generalized variances}
#' \item{q}{the number of nonzero eigenvalues}
#'
#' @seealso See \code{\link{minv}} for the method and the parameter used for the matrix inversion
#'
#' @references Bookstein F, Mitteroecker P (2014)
#' Comparing covariance matrices by relative eigenanalysis, with applications to organismal biology.
#' \emph{Evolutionary Biology 41}: 336-350.
#' \url{https://doi.org/10.1007/s11692-013-9260-5}
#'
#' @examples
#'
#' # Data matrix of 2D landmark coordinates
#' data("Tropheus.IK.coord")
#' coords <- which(names(Tropheus.IK.coord) == "X1"):which(names(Tropheus.IK.coord) == "Y19")
#' proc.coord <- as.matrix(Tropheus.IK.coord[coords])
#'
#' # Data reduction
#' phen.pca <- prcomp(proc.coord, rank. = 5, tol = sqrt(.Machine$double.eps))
#' pc.scores <- phen.pca$x
#'
#' # Covariance matrix of each population
#' S.phen.pop <- cov.group(pc.scores, groups = Tropheus.IK.coord$POP.ID)
#'
#' # Relative PCA = relative eigenanalysis between 2 covariance matrices
#' # (population IKA1 relative to IKS5)
#' relEigen.a1s5 <- relative.eigen(S.phen.pop[, , "IKA1"], S.phen.pop[, , "IKS5"])
#'
#' @export
relative.eigen <-
  function (S1, S2, method = 0, pa = 0) {
    if (is.data.frame(S2))
      S2 <- as.matrix(S2)
    if (is.data.frame(S1))
      S1 <- as.matrix(S1)
    if (is.null(S1) || is.null(S2))
      stop("supply both 'S1' and 'S2'")
    if (!is.matrix(S1) || !is.matrix(S2))
      stop("'S1' and 'S2' must be matrices or data frames")
    if (!isTRUE(all.equal(dim(S1), dim(S2))))
      stop("'S1' and 'S2' must be square matrices of the same dimensions")
    # Computation of S2^-1 S1
    invS2 <- minv(S2, method, pa)
    M <- invS2 %*% S1
    # Eigenvectors and eigenvalues
    eigenM <- eigen(M)
    relVectors <- Re(eigenM$vectors)
    rownames(relVectors) <- colnames(S1)
    D <- Re(eigenM$values)
    D0 <- rep(0, length(D))
    # Keeps only the nonzero eigenvalues (above tol or below -tol)
    tol <- .Machine$double.eps * max(dim(M)) * max(D)  # machine tolerance
    for (i in 1:length(D)) {
      if (abs(D[i]) > tol) {
        D0[i] <- D[i]
        q <- i  # index of the last nonzero eigenvalue
      }
    }
    relValues <- D0
    # Riemannian distance between S1 and S2
    sqLogVal <- (log(relValues[1:q])) ^ 2
    distCov <- sqrt(sum(sqLogVal))
    # Product of the nonzero relative eigenvalues = ratio of the generalized variances
    relGV <- prod(relValues[1:q])
    logGV <- log(relGV)
    relEigen <- list("relValues" = relValues,
                     "relVectors" = relVectors,
                     "distCov" = distCov,
                     "relGV" = relGV,
                     "logGV" = logGV,
                     "q" = q)
    return(relEigen)
  }
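# Illustrative check (a sketch, not part of the package): the Riemannian
# distance is symmetric in its arguments, and relGV equals det(S1) / det(S2)
# for full-rank matrices.
set.seed(7)
S1 <- cov(matrix(rnorm(120), ncol = 4))
S2 <- cov(matrix(rnorm(120), ncol = 4))
re12 <- relative.eigen(S1, S2)
re21 <- relative.eigen(S2, S1)
stopifnot(isTRUE(all.equal(re12$distCov, re21$distCov)))
stopifnot(isTRUE(all.equal(re12$relGV, det(S1) / det(S2))))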
## File: vcvComp/R/scaling.BW.R
#' Scaling factor between two matrices
#'
#' @description Computes the maximum-likelihood estimate
#' of the scaling factor between two proportional covariance matrices.
#' Note that the scaling factor between the two matrices
#' is equal to the arithmetic mean of their relative eigenvalues.
#'
#' @param S1 a variance-covariance matrix
#' @param S2 a variance-covariance matrix
#' @param method an integer for the method of matrix inversion (see function 'minv')
#' @param pa an integer for the parameter of matrix inversion (see function 'minv')
#'
#' @return The scaling factor between the two matrices.
#'
#' @seealso See \code{\link{minv}} for the method and the parameter used for the matrix inversion
#'
#' @examples
#'
#' # Data matrix of 2D landmark coordinates
#' data("Tropheus.IK.coord")
#' coords <- which(names(Tropheus.IK.coord) == "X1"):which(names(Tropheus.IK.coord) == "Y19")
#' proc.coord <- as.matrix(Tropheus.IK.coord[coords])
#'
#' # Between-group (B) and within-group (W) covariance matrices for all populations
#' B <- cov.B(proc.coord, groups = Tropheus.IK.coord$POP.ID, sex = Tropheus.IK.coord$Sex)
#' W <- cov.W(proc.coord, groups = Tropheus.IK.coord$POP.ID, sex = Tropheus.IK.coord$Sex)
#'
#' # ML estimate of the scaling factor between B and W
#' sc <- scaling.BW(B, W)
#'
#' # Scaling of B to W
#' Bsc <- B / sc
#'
#' @export
scaling.BW <-
  function (S1, S2, method = 0, pa = 0) {
    if (is.data.frame(S2))
      S2 <- as.matrix(S2)
    if (is.data.frame(S1))
      S1 <- as.matrix(S1)
    if (is.null(S1) || is.null(S2))
      stop("supply both 'S1' and 'S2'")
    if (!is.matrix(S1) || !is.matrix(S2))
      stop("'S1' and 'S2' must be matrices or data frames")
    if (!isTRUE(all.equal(dim(S1), dim(S2))))
      stop("'S1' and 'S2' must be square matrices of the same dimensions")
    p <- dim(S1)[1]
    M <- minv(S2, method = method, pa = pa) %*% S1
    trM <- sum(diag(M))  # trace of S2^-1 S1 = sum of the relative eigenvalues
    k <- trM / p  # arithmetic mean of the relative eigenvalues
    return(k)
  }
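# Illustrative check (a sketch, not part of the package): the scaling factor
# is tr(S2^-1 S1) / p, i.e. the arithmetic mean of the relative eigenvalues.
set.seed(8)
S1 <- cov(matrix(rnorm(120), ncol = 4))
S2 <- cov(matrix(rnorm(120), ncol = 4))
stopifnot(isTRUE(all.equal(scaling.BW(S1, S2), mean(relative.eigen(S1, S2)$relValues))))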
---
title: 'vcvComp: worked example'
author: "Anne Le Maitre and Philipp Mitteroecker"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{vcvComp: worked example}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---
The `vcvComp` package comprises a data frame (`Tropheus`) of 723 observations of 57 variables extracted from a freely available dataset, downloaded from the Dryad digital repository (https://doi.org/10.5061/dryad.fc02f). The observations correspond to cichlid fishes of the species *Tropheus moorii* (color morphs 'Kaiser' and 'Kirschfleck') and *T. polli* collected from eight locations of Lake Tanganyika (Kerschbaumer et al., 2014). The main numerical variables provided are the 2D Cartesian coordinates of 19 landmarks quantifying the external body morphology of adult fishes (Herler et al., 2010) and the genotypes for 6 microsatellite markers.
## Case study
To illustrate the application of relative eigenanalysis by means of the `vcvComp` package, we studied variation of body shape within and between different fish populations of the cichlid genus *Tropheus*. We used 511 specimens of the sample from Kerschbaumer et al. (2014), consisting of six populations of the color morph 'Kaiser' of the species *Tropheus moorii*. Three of these populations (IKS3, IKS4, IKS5) live in sympatry with the cichlid species *T. polli*, whereas the three other populations (IKA1, IKA2, IKA3) live alone. As the allopatric and sympatric populations differ in trophic niche and thus presumably also in their selective regime, we investigated if and how they differ in phenotypic variance-covariance structure. We also explored differences in variance pattern between female and male specimens, because these populations show significant sexual dimorphism in mean head shape (cichlids are maternal mouthbrooders; Herler et al., 2010; Kerschbaumer et al., 2014). Finally, we searched for signs of stabilizing and divergent selection among the six *Tropheus* populations by contrasting within- and between-group covariance matrices.
First, we loaded the `vcvComp` package and the data.
```{r load, echo = TRUE}
library("vcvComp")
data("Tropheus")
```
Five specimens were outliers for landmark 2 and were excluded from the sample. After selecting the subsample, we created a new variable combining population and sex.
```{r var_SexPop}
outliers <- c(18, 56, 155, 351, 624)
Tropheus.IK <- Tropheus[- outliers, ]
# Sample reduced to six populations
Tropheus.IK <- subset(Tropheus.IK, subset = POP.ID %in% levels(POP.ID)[1:6])
Tropheus.IK$POP.ID <- factor(Tropheus.IK$POP.ID)
# New variable combining population and sex
Tropheus.IK$SexPop <- paste(Tropheus.IK$POP.ID, Tropheus.IK$Sex, sep = "_")
Tropheus.IK$SexPop <- as.factor(Tropheus.IK$SexPop)
```
The landmark coordinates were extracted to create a matrix.
```{r LM}
PHEN <- as.matrix(Tropheus.IK[which(names(Tropheus.IK) == "X1"):
which(names(Tropheus.IK) == "Y19")])
rownames(PHEN) <- Tropheus.IK$List_TropheusData_ID
```
Then, we performed a generalized Procrustes superimposition (Rohlf and Slice, 1990) of the landmark coordinates using the function `gpagen` of the `geomorph` package.
```{r GPA}
library("geomorph") # load packages geomorph, rgl and RRPP
# conversion matrix -> array (19 landmarks, 2 dimensions)
PHEN_array <- arrayspecs(PHEN, p = 19, k = 2)
# Procrustes superimposition
phen.gpa <- gpagen(PHEN_array, print.progress = FALSE)
# conversion array -> matrix of Procrustes coordinates
proc.coord <- two.d.array(phen.gpa$coords)
colnames(proc.coord) <- colnames(PHEN)
```
We reduced the Procrustes shape coordinates to the first five principal components to avoid collinearities and to guarantee a sufficient excess of cases over variables in the further analyses.
```{r PCA}
phen.pca <- prcomp(proc.coord, rank. = 5, tol = sqrt(.Machine$double.eps))
pc.scores <- phen.pca$x
```
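As an optional check (not part of the original analysis), the amount of shape variation retained by these five components can be inspected via `summary`:
```{r PCA_check}
summary(phen.pca)$importance[, 1:5]
```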
## Population comparison
Because the population samples were not balanced regarding sex, we computed the pooled population covariance matrices as unweighted averages of the corresponding male and female covariance matrices.
```{r Pop vcv}
S.phen.pooled <- cov.group(pc.scores, groups = Tropheus.IK$POP.ID, sex = Tropheus.IK$Sex)
```
To explore the heterogeneity of variance-covariance structure in body shape across populations, we performed an ordination analysis of the six pooled within-sex covariance matrices.
```{r Pop_PCoA}
eigen.phen <- mat.sq.dist(S.phen.pooled, dist. = "Riemannian") # Riemannian distances
prcoa <- pr.coord(eigen.phen) # ordination
prcoa$Variance # variance explained
```
The first three principal coordinates together accounted for 88% of the total variance (Fig. 1); this also equals the fraction of summed squared Riemannian distances explained by the summed squared Euclidean distances within the first three principal coordinates.
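This 88% figure is simply the sum of the first three entries of the `exVar` column and can be reproduced directly (an optional check):
```{r Pop_PCoA_check}
sum(prcoa$Variance$exVar[1:3])
```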
```{r Pop_PCoA_screePlot, fig.height = 3, fig.width = 4, fig.cap = paste("**Figure 1**", "Fraction of variance explained by each principal coordinate in the ordination of the six *Tropheus moorii* populations.")}
# Visualization of the variance explained by each dimension (fig. 1)
barplot(prcoa$Variance$exVar, las = 1, col = "darkblue",
names.arg = 1:nrow(prcoa$Variance), cex.axis = 0.8, cex = 0.8,
xlab = "Dimensions", ylab = "Variance explained")
```
The populations living in sympatry (IKS3, IKS4 and IKS5) were separated from the allopatric populations (IKA1, IKA2, IKA3) along the third principal coordinate (Fig. 2).
```{r Pop_PCoA_ordination, fig.height = 3.8, fig.width = 4.5, fig.cap = paste("**Figure 2**", "Scatterplot of the first three principal coordinates (PCoord) in the ordination of the six *Tropheus moorii* populations. Populations living in sympatry are shown in dark blue, allopatric populations in light blue.")}
# Visualization of PCo1, PCo2 and PCo3 (fig. 2)
coul.pop <- c(rep("blue", 3), rep("darkblue", 3)) # colors
pch.pop <- c(rep(19, 3), rep(15, 3)) # symbols
pco3d <- c(1, 2, 3) # dimensions
xyzlab <- c(paste("PCoord", pco3d[1]),
paste("PCoord", pco3d[2]),
paste("PCoord", pco3d[3]))
s3d <- scatterplot3d::scatterplot3d(prcoa$PCoords[, pco3d[1:3]],
xlab = xyzlab[1], ylab = xyzlab[2], zlab = xyzlab[3],
color = coul.pop, pch = 19, angle = 55,
type = "h", lty.hplot = 3,
cex.symbols = 1, cex.axis = 0.8)
s3d.coords <- s3d$xyz.convert(prcoa$PCoords[, pco3d[1:3]])
text(s3d.coords$x, s3d.coords$y,
labels = row.names(prcoa$PCoords),
pos = 4, cex = 0.7, col = coul.pop)
```
To investigate the actual differences in variance-covariance pattern between sympatric and allopatric populations, we compared the populations IKA1 and IKS5, but other pairs of sympatric and allopatric populations led to very similar results. The ML test indicated that the covariance matrices of IKA1 and IKS5 deviate from proportionality at *p* = 0.02.
```{r IKA1-IKS5_ML_test}
table(Tropheus.IK$POP.ID) # sample sizes
prop.vcv.test(n = c(69, 75), S.phen.pooled[, , "IKA1"], S.phen.pooled[, , "IKS5"]) # ML test
```
The generalized variance of IKA1 was only 18% less than that of IKS5, but the relative PCA showed that the various shape features deviate strongly in their variational properties across populations (Fig. 3). The first relative PC was roughly twice as variable in IKA1 as in IKS5 (the first relative eigenvalue was 2.3), whereas the variance of the last relative PC in IKA1 was only half of that in IKS5 (the last relative eigenvalue was 0.49).
```{r IKA1-IKS5_relPCA, fig.height = 3, fig.width = 4, fig.cap = paste("**Figure 3**", "Relative eigenvalues (on a log scale) of the population IKA1 relative to IKS5.")}
# Ratio of generalized variances of IKA1 and IKS5
relGV.multi(S.phen.pooled[, , c("IKA1", "IKS5")], logGV = FALSE)
# Relative PCA = relative eigenanalysis
relEigen.a1s5 <- relative.eigen(S.phen.pooled[, , "IKA1"], S.phen.pooled[, , "IKS5"])
relEigen.a1s5$relValues # relative eigenvalues
# Visualization of the relative eigenvalues (fig. 3)
plot(relEigen.a1s5$relValues[1:relEigen.a1s5$q],
log = "y", las = 1, col = "blue", type = "b",
main = "IKA1 relative to IKS5", cex = 0.8,
cex.main = 1, cex.axis = 0.8, cex.sub = 0.7,
sub = paste("Relative generalized variance =", relEigen.a1s5$relGV),
xlab = NA, ylab = "Relative eigenvalues")
abline(h = 1)
```
The shape patterns depicted by each relative eigenvector can be visualized by deformations of the average shape along the positive and the negative directions of the corresponding vector (Fig. 4). Note that when the initial variables are reduced to the first components, as is the case here, the loadings of the eigenvectors must be multiplied by the loadings of the principal components to get shape patterns.
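In matrix terms, if $\mathbf{P}$ denotes the $38 \times 5$ matrix of PC loadings (`phen.pca$rotation`, with $38 = 2 \times 19$ Procrustes coordinates) and $\mathbf{v}$ a relative eigenvector in the five-dimensional PC space, the corresponding shape pattern in landmark space is the vector $\mathbf{P}\mathbf{v}$; this is what the multiplication `phen.pca$rotation %*% relEigen.a1s5$relVectors` in the chunk below computes.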
```{r IKA1-IKS5_vcvPattern, fig.height = 3, fig.width = 7, fig.cap = paste("**Figure 4**", "Visualization as thin-plate spline (TPS) deformation grids (Bookstein, 1991) of the shape pattern corresponding to the first relative PC, which has the maximal excess of variance in IKA1 relative to IKS5.")}
# Shape patterns corresponding to the relative eigenvectors
a1s5 <- c(which(Tropheus.IK$POP.ID %in% "IKA1"),
which(Tropheus.IK$POP.ID %in% "IKS5")) # specimens
REF.A1S5 <- mshape(phen.gpa$coords[, , a1s5]) # average shape
A.A1S5 <- arrayspecs(t(phen.pca$rotation %*% relEigen.a1s5$relVectors),
p = 19, k = 2) # loadings
# Graphical parameters
WF <- cbind(c(1, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 1, 12, 14, 14),
c(19, 18, 18, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 11, 15))
gp3 <- gridPar(grid.col = "grey", tar.link.col = "blue",
tar.pt.size = 0.7, tar.pt.bg = "blue")
# Visualization of the first dimension (fig. 4)
par(new = FALSE, mfrow = c(1, 2), mar = c(0.5, 0.5, 0.5, 0.5))
plotRefToTarget(REF.A1S5, (REF.A1S5 - 0.01 * A.A1S5[, , 1]),
mag = 7, method = "TPS", gridPar = gp3, links = WF)
title(main = "-", line = -1)
plotRefToTarget(REF.A1S5, (REF.A1S5 + 0.01 * A.A1S5[, , 1]),
mag = 7, method = "TPS", gridPar = gp3, links = WF)
title(main = "+", line = -1)
title("First relative eigenvector", outer = TRUE, line = - 1)
```
The shape features captured by relative PC 1 were head shape, relative eye size, and body depth (maximum distance between dorsal and ventral parts); these were the features with maximal excess of variance in allopatric populations relative to sympatric populations (Fig. 4). In other words, these were the shape features maximally canalized in the populations living in sympatry.
In Lake Tanganyika, allopatric populations of *Tropheus moorii* live in the whole water column, whereas populations in sympatry with *T. polli* usually are forced to live at greater water depth (Kerschbaumer et al., 2014). The broader trophic niche and larger environmental heterogeneity in allopatric populations may account for the larger variance in body depth and head shape, but the higher competition and harder living conditions in sympatric populations may also impose a stronger stabilizing selection regime than in allopatric populations.
## Comparison between sexes
We separated males and females and performed a principal coordinates analysis of the 12 sex-specific variance-covariance matrices in order to investigate deviations in variance-covariance structure between the sexes.
```{r Sex_Pop_PCoA}
S.phen.mf <- cov.group(pc.scores, groups = Tropheus.IK$SexPop) # covariance matrices
eigen.phen.mf <- mat.sq.dist(S.phen.mf, dist. = "Riemannian") # Riemannian distances
prcoa.mf <- pr.coord(eigen.phen.mf) # ordination
prcoa.mf$Variance # variance explained
```
The first two components together accounted for 62% of total variance (Fig. 5) and showed that for all populations, except IKA3, males had higher values than females for the first principal coordinate (Fig. 6).
```{r Sex_Pop_PCoA_screePlot, fig.height = 3, fig.width = 4, fig.cap = paste("**Figure 5**", "Fraction of variance explained by each principal coordinate of the 12 sex-specific covariance matrices.")}
# Visualization of the variance explained by each dimension (fig. 5)
barplot(prcoa.mf$Variance$exVar, las = 1, col = "darkblue",
names.arg = 1:nrow(prcoa.mf$Variance), cex.axis = 0.8, cex = 0.8,
xlab = "Dimensions", ylab = "Variance explained")
```
```{r Sex_Pop_PCoA_ordination, fig.height = 3.5, fig.width = 6, fig.cap = paste("**Figure 6**", "Principal coordinates ordination of the 12 sex-specific covariance matrices. Males in blue, females in red. Populations living in sympatry with *Tropheus polli* in dark colors.")}
# Visualization of PCo1 and PCo2 (fig. 6)
coul.mf <- c(rep(c("red", "blue"), 3), rep(c("darkred", "darkblue"), 3)) # colors
pch.mf <- c(rep(19, 6), rep(15, 6)) # symbols
pco <- c(1, 2) # dimensions
plot(prcoa.mf$PCoords[, pco[1]], prcoa.mf$PCoords[, pco[2]],
xlab = paste("Principal coordinate", pco[1]),
ylab = paste("Principal coordinate", pco[2]),
asp = 1, las = 1, pch = pch.mf, col = coul.mf, cex.axis = 0.8)
abline(h = 0) ; abline(v = 0)
text(prcoa.mf$PCoords[, pco[1]], prcoa.mf$PCoords[, pco[2]],
labels = rownames(prcoa.mf$PCoords),
adj = 1.5, cex = 0.6, col = coul.mf)
```
To explore this shared sex difference in variance-covariance pattern, we performed a relative PCA of the females relative to the males of IKA1.
```{r Sex_IKA1_relPCA}
pop.ika1 <- grep("IKA1", levels(Tropheus.IK$SexPop))
relEigen.ika1 <- relative.eigen(S.phen.mf[, , pop.ika1[1]], S.phen.mf[, , pop.ika1[2]])
relEigen.ika1$relGV # ratio of generalized variances
relEigen.ika1$relValues # relative eigenvalues
```
Overall, females were approximately half as variable as males (the ratio of generalized variances was 0.57), but pooling over all dimensions was again misleading here. In fact, the first relative PC was 4.6 times more variable in females than in males, whereas the other dimensions were all more variable in males (Fig. 7).
```{r Sex_IKA1_relVal, fig.height = 3, fig.width = 4, fig.cap = paste("**Figure 7**", "Relative eigenvalues (maximal ratios of variance) of females relative to males for the population IKA1.")}
# Visualization of the relative eigenvalues (fig. 7)
plot(relEigen.ika1$relValues[1:relEigen.ika1$q],
log = "y", las = 1, col = "blue", type = "b",
main = "IKA1: females / males", cex.main = 1, cex.axis = 0.8,
xlab = NA, ylab = "Relative eigenvalues")
abline(h = 1)
```
The first relative PC mainly corresponded to the relative size of the head, whereas the last three relative PCs were all related to the shape and relative orientation of the head and mouth (Fig. 8).
```{r Sex_IKA1_vcvPattern, fig.height = 5, fig.width = 7, fig.cap = paste("**Figure 8**", "Visualization as thin-plate spline (TPS) deformation grids (Bookstein, 1991) of the shape patterns corresponding to the first relative PC, which has the maximal excess of variance in females relative to males for the population IKA1 (*top*), and the last relative PC, which has the maximal excess of variance in males relative to females (*bottom*).")}
# Population IKA1: average shape and loadings
ika1 <- which(Tropheus.IK$POP.ID %in% "IKA1") # specimens
REF.IKA1 <- mshape(phen.gpa$coords[, , ika1]) # average shape
A.IKA1 <- arrayspecs(t(phen.pca$rotation %*% relEigen.ika1$relVectors),
p = 19, k = 2) # loadings
# Graphical parameters
WF <- cbind(c(1, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 1, 12, 14, 14),
c(19, 18, 18, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 11, 15))
gp3 <- gridPar(grid.col = "grey", tar.link.col = "blue",
tar.pt.size = 0.7, tar.pt.bg = "blue")
par(new = FALSE, mfrow = c(2, 2), mar = c(0.5, 0.5, 1, 0.5))
# Visualization of the first dimension (fig. 8: top)
plotRefToTarget(REF.IKA1, (REF.IKA1 - 0.01 * A.IKA1[, , 1]),
mag = 7, method = "TPS", gridPar = gp3, links = WF)
title(main = "-", line = - 1)
plotRefToTarget(REF.IKA1, (REF.IKA1 + 0.01 * A.IKA1[, , 1]),
mag = 7, method = "TPS", gridPar = gp3, links = WF)
title(main = "+", line = - 1)
title("First relative eigenvector", outer = TRUE, line = - 1)
# Visualization of the last dimension (fig. 8: bottom)
plotRefToTarget(REF.IKA1, (REF.IKA1 - 0.01 * A.IKA1[, , 5]),
mag = 7, method = "TPS", gridPar = gp3, links = WF)
title(main = "-", line = - 1)
plotRefToTarget(REF.IKA1, (REF.IKA1 + 0.01 * A.IKA1[, , 5]),
mag = 7, method = "TPS", gridPar = gp3, links = WF)
title(main = "+", line = - 1)
title("Last relative eigenvector", outer = TRUE, line = -16)
```
Cichlids are mouth brooders and females typically have a larger head and mouth than males. This pattern of sexual dimorphism in body shape was also found for the present *Tropheus moorii* sample (Herler et al., 2010; Kerschbaumer et al., 2014). The increased variance in relative head size likely is a direct consequence of the enlarged head in females, whereas other aspects of head morphology, such as the relative position and orientation of the mouth, seem to be more canalized in females than in males.
## Stabilizing versus divergent selection of cichlid body shape
Under idealized assumptions, the expected amount of phenotypic change due to genetic drift is proportional to the amount of additive genetic variation in the ancestral population. Extending this model of neutral evolution to multiple traits leads to the expectation that the between-group covariance matrix for a set of related populations is proportional to the additive genetic covariance matrix of their common ancestral population (Lande, 1979). This rationale has inspired statistical tests for natural selection by contrasting the covariance matrix of population means with the pooled phenotypic within-population covariance matrix (as an estimate of the ancestral genetic covariance matrix; e.g. Martin et al., 2008): deviations from proportionality are signs of stabilizing or divergent selection. Most of these approaches, however, only rely on statistical significance tests of proportionality of the between- and within-population covariance matrices (**B** and **W**, respectively). Relative PCA ideally complements these approaches as an exploratory tool to identify the specific trait combinations that deviate from the null model of neutral evolution (Bookstein and Mitteroecker, 2014). If both divergent and stabilizing selection acted in a set of populations, the first relative PCs of **B** with respect to **W** will reveal the trait combinations that were affected by divergent selection (the features with maximal between-population variance relative to within-population variance), and the last relative PCs will show the trait combinations under stabilizing selection (least between-population variance relative to within-population variance).
We computed **B** and **W** (pooled by sex) for the *Tropheus* populations based on the first five PCs of the full Procrustes data. As we have only six populations in this example, **B** is estimated with great uncertainty and the chi-square approximation in the proportionality test is questionable; results have to be interpreted with care. The ML test suggested a deviation from proportionality between **B** and **W** (*p* = 0.034) and thus the action of selective forces.
```{r BW_ML_test}
# Computation of B and W (pooled by sex)
B <- cov.B(pc.scores, groups = Tropheus.IK$POP.ID, sex = Tropheus.IK$Sex)
W <- cov.W(pc.scores, groups = Tropheus.IK$POP.ID, sex = Tropheus.IK$Sex)
# Proportionality test between B and W = ML test
prop.vcv.test(n = c(6, 511), B, W) # 6 groups, 511 specimens
```
However, this test does not specify the *magnitude* of deviation from proportionality, and especially with the small number of populations in this example, the interpretation of the *p*-value alone is not sufficient. We thus performed an ordination of the six population covariance matrices (pooled by sex), together with **W** and **B** (scaled to fit **W** using the mean of their relative eigenvalues).
```{r BW_Pop_PCoA}
Bsc <- B / scaling.BW(B, W) # scale B to W
# Create an array of group covariance matrices, B and W
S.bw <- array(c(S.phen.pooled, W, Bsc),
dim = c(dim(S.phen.pooled)[[1]],
dim(S.phen.pooled)[[2]],
dim(S.phen.pooled)[[3]] + 2))
dimnames(S.bw) <- list(dimnames(S.phen.pooled)[[1]],
dimnames(S.phen.pooled)[[2]],
c(dimnames(S.phen.pooled)[[3]], "W", "B"))
# Ordination
eigen.phen.bw <- mat.sq.dist(S.bw, dist. = "Riemannian")
prcoa.bw <- pr.coord(eigen.phen.bw)
```
Relative to the heterogeneity of population covariance matrices, **B** clearly deviates from **W** along the first principal coordinate (Fig. 9). The interpretation of the relative PCs of **B** and **W** thus seems warranted.
```{r BW_Pop_PCoA_ordination, fig.height = 3.9, fig.width = 5, fig.cap = paste("**Figure 9**", "Principal coordinates ordination of the six populations (males and females pooled), along with their between-group (**B**) and their within-group (**W**) covariance matrices.")}
# Visualization of PCo1, PCo2 and PCo3 (fig. 9)
coul.bw <- c(coul.pop, rep("darkgreen", 2)) # colors
pco3d <- c(1, 2, 3) # dimensions
xyzlab <- c(paste("PCoord", pco3d[1]),
paste("PCoord", pco3d[2]),
paste("PCoord", pco3d[3]))
s3d <- scatterplot3d::scatterplot3d(prcoa.bw$PCoords[, pco3d[1:3]],
xlab = xyzlab[1], ylab = xyzlab[2], zlab = xyzlab[3],
color = coul.bw, pch = 19, angle = 55,
type = "h", lty.hplot = 3,
cex.symbols = 1, cex.axis = 0.8)
s3d.coords <- s3d$xyz.convert(prcoa.bw$PCoords[, pco3d[1:3]])
text(s3d.coords$x, s3d.coords$y, labels = row.names(prcoa.bw$PCoords),
pos = 4, cex = 0.7, col = coul.bw)
```
We performed a relative PCA of **B** with respect to **W** (Fig. 10). The first relative eigenvalue is more than 5 times larger than the second one, which is significant at *p* < 0.05, and similarly for the last relative eigenvalue. This supports an interpretation of the first and last relative PC, even though the absolute relative eigenvalues cannot be evolutionarily interpreted without knowing the variance ratio expected under neutral evolution, which depends on the number of generations since divergence as well as effective population size (Lande, 1979). One way to estimate this threshold is based on genetic data using the $F_{ST}$ statistic (Holsinger and Weir, 2009). Under pure genetic drift, $\mathbf{B}=F_{ST}/(1-F_{ST})\mathbf{W}$ (Lynch and Walsh, 1998; Martin et al., 2008). Kerschbaumer et al. (2014) reported $F_{ST}$ values for these populations ranging from 0.033 to 0.085, which translates into ratios of between- and within-population variance of 0.034 to 0.093. The first relative eigenvalue (2.06) clearly exceeded this threshold and suggests strong divergent selection. The last relative eigenvalue (0.025) may indicate weak stabilizing selection.
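The translation from $F_{ST}$ to the expected variance ratio $F_{ST}/(1-F_{ST})$ is simple arithmetic and can be reproduced directly (an optional check):
```{r Fst_check}
fst <- c(0.033, 0.085)
fst / (1 - fst)  # expected B/W variance ratios under pure drift
```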
```{r BW_relPCA, fig.height = 3, fig.width = 4, fig.cap = paste("**Figure 10**", "Relative eigenvalues of the between-group covariance matrix versus the within-group covariance matrix for the six *Tropheus* populations.")}
# Relative PCA of B with respect to W
relEigenBW <- relative.eigen(B, W)
relEigenBW$relValues # relative eigenvalues
# Test differences between two successive relative eigenvalues
eigen.test(n = c(6, 511), relValues = relEigenBW$relValues)
# Visualization of the relative eigenvalues (fig. 10)
plot(relEigenBW$relValues[1:relEigenBW$q],
log = "y", las = 1, col = "blue", type = "b",
main = "B relative to W", cex.main = 1, cex.axis = 0.8,
xlab = NA, ylab = "Relative eigenvalues")
abline(h = 1)
```
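As a plausibility check of this threshold, the expected variance ratios can be computed directly from the published $F_{ST}$ range; a minimal sketch (the values are those reported by Kerschbaumer et al., 2014):
```{r BW_FST_threshold}
# Expected ratio of between- to within-population variance under pure drift:
# B = Fst / (1 - Fst) * W
Fst <- c(0.033, 0.085) # Fst range reported by Kerschbaumer et al. (2014)
round(Fst / (1 - Fst), 3) # approx. 0.034 and 0.093
```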
The first relative PC corresponded to overall body depth and the positions of caudal, dorsal and ventral fins (Fig. 11). Apparently, these features, which determine the hydrodynamics and swimming ability of the fish, were under strong divergent selection in the studied *Tropheus* populations: their inter-population variance strongly exceeds the variation expected for neutral evolution.
The shape pattern corresponding to the last relative eigenvector mainly involved the shape of the head, especially the position and orientation of the mouth (Fig. 11). These features, which crucially affect feeding performance, likely were under stabilizing selection in the *Tropheus* populations. This is supported by our finding that females show little variation in mouth position and orientation despite a large and variable head size (Fig. 8).
```{r BW_vcvPattern, fig.height = 5, fig.width = 7, fig.cap = paste("**Figure 11**", "Visualization of the shape patterns corresponding to the first relative PC, which has the maximal excess of variance between populations relative to that within populations (*top*), and the last relative PC, which has the maximal excess of variance within populations relative to that between populations (*bottom*).")}
# Shape patterns corresponding to the relative eigenvectors
REF <- mshape(phen.gpa$coords) # average shape
A <- arrayspecs(t(phen.pca$rotation %*% relEigenBW$relVectors), p = 19, k = 2) # loadings
# Graphical parameters
WF <- cbind(c(1, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 1, 12, 14, 14),
c(19, 18, 18, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 11, 15))
gp3 <- gridPar(grid.col = "grey", tar.link.col = "blue",
tar.pt.size = 0.7, tar.pt.bg = "blue")
par(new = FALSE, mfrow = c(2, 2), mar = c(0.5, 0.5, 1, 0.5))
# Visualization of the first dimension (fig. 11: top)
plotRefToTarget(REF, (REF - 0.01 * A[, , 1]),
mag = 7, method = "TPS", gridPar = gp3, links = WF)
title(main = "-", line = - 1)
plotRefToTarget(REF, (REF + 0.01 * A[, , 1]),
mag = 7, method = "TPS", gridPar = gp3, links = WF)
title(main = "+", line = - 1)
title("First relative eigenvector", outer = TRUE, line = - 1)
# Visualization of the last dimension (fig. 11: bottom)
plotRefToTarget(REF, (REF - 0.01 * A[, , 5]),
mag = 7, method = "TPS", gridPar = gp3, links = WF)
title(main = "-", line = - 1)
plotRefToTarget(REF, (REF + 0.01 * A[, , 5]),
mag = 7, method = "TPS", gridPar = gp3, links = WF)
title(main = "+", line = - 1)
title("Last relative eigenvector", outer = TRUE, line = - 16)
```
## References
Bookstein F (1991) *Morphometric tools for landmark data: geometry and biology.* Cambridge University Press, Cambridge (UK); New York.
Bookstein F, Mitteroecker P (2014) Comparing covariance matrices by relative eigenanalysis, with applications to organismal biology. *Evolutionary Biology 41*:336--350. https://doi.org/10.1007/s11692-013-9260-5
Herler J, Kerschbaumer M, Mitteroecker P, et al. (2010) Sexual dimorphism and population divergence in the Lake Tanganyika cichlid fish genus *Tropheus*. *Frontiers in Zoology 7*:4. https://doi.org/10.1186/1742-9994-7-4
Holsinger KE, Weir BS (2009) Genetics in geographically structured populations: defining, estimating and interpreting $F_{ST}$. *Nature Reviews Genetics 10(9)*:639--650. https://doi.org/10.1038/nrg2611
Kerschbaumer M, Mitteroecker P, Sturmbauer C (2014) Evolution of body shape in sympatric versus non-sympatric *Tropheus* populations of Lake Tanganyika. *Heredity 112(2)*:89--98. https://doi.org/10.1038/hdy.2013.78
Kerschbaumer M, Mitteroecker P, Sturmbauer C (2013) Data from: Evolution of body shape in sympatric versus non-sympatric *Tropheus* populations of Lake Tanganyika. *Dryad Digital Repository*. https://doi.org/10.5061/dryad.fc02f
Lande R (1979) Quantitative genetic analysis of multivariate evolution, applied to brain:body size allometry. *Evolution 33(1 part 2)*:402--416. https://doi.org/10.1111/j.1558-5646.1979.tb04694.x
Lynch M, Walsh B (1998) *Genetics and Analysis of Quantitative Traits*. Sinauer Associates, Sunderland, MA.
Martin G, Chapuis E, Goudet J (2008) Multivariate $Q_{ST}$-$F_{ST}$ comparisons: a neutrality test for the evolution of the G matrix in structured populations. *Genetics 180(4)*:2135--2149. https://doi.org/10.1534/genetics.107.080820
Rohlf FJ, Slice DE (1990) Extensions of the Procrustes method for the optimal superimposition of landmarks. *Systematic Zoology 39(1)*:40--59. https://doi.org/10.2307/2992207
|
/scratch/gouwar.j/cran-all/cranData/vcvComp/inst/doc/vcvComp-worked-example.Rmd
|
#' Simulated true data
#'
#' @description A data set of 200 simulated 'true' values, from which the observations are derived, with two observed variables and two groups; non-compositional
#'
#' @format A data frame with 200 rows and 3 columns
#' \describe{
#' \item{Var1}{simulated variable}
#' \item{Var2}{simulated variable}
#' \item{Group}{Factor with levels 'Group 1' and 'Group 2'}
#' }
"datatrue"
#' Simulated true compositional data
#'
#' @description A data set of 200 simulated 'true' values, from which the observations are derived, with three observed variables and two groups; compositional
#'
#' @format A data frame with 200 rows and 4 columns
#' \describe{
#' \item{Var1}{simulated variable, compositional}
#' \item{Var2}{simulated variable, compositional}
#' \item{Var3}{simulated variable, compositional}
#' \item{Group}{Factor with levels 'Group 1' and 'Group 2'}
#' }
"datatrue_coda"
#' Simulated observation data
#'
#' @description A data set of 200 simulated observations with two observed variables and two groups, non-compositional
#'
#' @format A data frame with 200 rows and 3 columns
#' \describe{
#' \item{Var1}{simulated observed variable}
#' \item{Var2}{simulated observed variable}
#' \item{Group}{Factor with levels 'Group 1' and 'Group 2'}
#' }
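#'
#' @examples
#' # Quick inspection; the rows of 'uncertainties' are paired row-wise with
#' # 'dataobs' (cf. the usage in the examples of vqda() and vlda()):
#' data("dataobs")
#' data("uncertainties")
#' head(dataobs)
#' head(uncertainties)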
"dataobs"
#' Simulated observation of compositional data
#'
#' @description A data set of 200 simulated observations with three observed variables and two groups, compositional
#'
#' @format A data frame with 200 rows and 4 columns
#' \describe{
#' \item{Var1}{simulated observed variable, compositional}
#' \item{Var2}{simulated observed variable, compositional}
#' \item{Var3}{simulated observed variable, compositional}
#' \item{Group}{Factor with levels 'Group 1' and 'Group 2'}
#' }
"dataobs_coda"
#' Simulated observation uncertainties
#'
#' @description A data set of 200 simulated uncertainties with two variables and two groups, non-compositional
#'
#' @format A data frame with 200 rows and 3 columns
#' \describe{
#' \item{Var1}{simulated uncertainty for the observed variable Var1}
#' \item{Var2}{simulated uncertainty for the observed variable Var2}
#' \item{Group}{Factor with levels 'Group 1' and 'Group 2'}
#' }
"uncertainties"
#' Simulated observation uncertainties of compositional data
#'
#' @description A data set of 200 simulated uncertainties with three variables and two groups, compositional
#'
#' @format A data frame with 200 rows and 4 columns
#' \describe{
#' \item{Var1}{simulated uncertainty for the observed variable Var1, compositional}
#' \item{Var2}{simulated uncertainty for the observed variable Var2, compositional}
#' \item{Var3}{simulated uncertainty for the observed variable Var3, compositional}
#' \item{Group}{Factor with levels 'Group 1' and 'Group 2'}
#' }
"uncertainties_coda"
|
/scratch/gouwar.j/cran-all/cranData/vdar/R/data.R
|
#' Generalized mean
#'
#' @author Solveig Pospiech, K. Gerald v.d. Boogaart
#'
#' @description Calculates the generalized mean of a data set by using a given group variance and individual, observation-wise variances for each observation of the data set
#'
#' @param x a matrix containing the data for which the mean should be calculated
#' @param ... not implemented
#'
#' @return vector of length ncol(x) containing the generalized means
#'
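#' @examples
#' # A minimal sketch with assumed toy inputs (illustrative values,
#' # not taken from the package data):
#' set.seed(1)
#' x <- matrix(rnorm(20), ncol = 2)      # 10 observations, 2 variables
#' v <- diag(2)                          # (estimated true) group variance
#' u <- matrix(0.1, nrow = 10, ncol = 2) # cell-wise variances (diagonal entries)
#' generalized_mean(x, var = v, individual_var = u)
#'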
#' @export
generalized_mean <- function(x, ...) {
UseMethod("generalized_mean", x)
}
#' @describeIn generalized_mean for class matrix or data.frame
#' @param var a matrix containing the corrected (estimated true) variance of the data set
#' @param individual_var a matrix containing individual variances. Default is a 0 - matrix with the dimensions of x, can be used for implementing the individual uncertainties of each observation
#' @export
generalized_mean.default <- function(x, var, individual_var = matrix(0, nrow = nrow(x), ncol = ncol(x)), ...) {
# These two helpers are used in all generalized_mean methods and replicate gsi.Diag resp. gsi.Inv of the package 'compositions'.
# They are redefined here because the gsi functions of 'compositions' are internal and cannot be used outside that package.
gsiDiag <- function(d) {
if (length(d) > 1)
return(diag(d))
return( structure(d,dim = c(1,1)))
}
gsiInv <- function(A, tol = 1E-15) {
with(svd(A), v %*% gsiDiag(ifelse(abs(d)/max(d) > tol, 1/d, 0)) %*% t(u))
}
if (is.null(dim(x)))
stop("'x' is not a matrix")
x <- as.matrix(x)
if (any(!is.finite(x)))
stop("infinite, NA or NaN values in 'x'")
if (is.null(dim(individual_var)))
stop("'individual_var' is not a matrix")
individual_var <- as.matrix(individual_var)
if (any(!is.finite(individual_var)))
stop("infinite, NA or NaN values in 'individual_var'")
if (dim(individual_var)[2] == 1) { # if only one variable exists:
sigmasum <- apply(individual_var, 1, function(s) var + s)
sigmaInv = 1/sigmasum
normalization = sum(sigmaInv)
aux = sapply(1:nrow(x), function(i) sigmaInv[i] * x[i,] )
aux_sum = sum(aux)
} else {# for more than one variable
sigmasum <- apply(individual_var, 1, function(s) force_posdef(var + diag(s))) # diag because uncertainties are expected to have only entries in the diagonal
sigmaInv <- lapply(1:ncol(sigmasum), function(i) gsiInv(matrix(sigmasum[,i], ncol = ncol(x))))
zw <- unlist(sigmaInv)
zw[is.na(zw)] <- 0
normalization = matrix(rowSums(matrix(zw, ncol = nrow(x), byrow = F)), ncol = ncol(x))
aux = sapply(1:nrow(x), function(i) sigmaInv[[i]] %*% x[i,] )
aux_sum = rowSums(aux)
}
# erg <- svdInv(normalisation) %*% tensorA::margin(mapply(invMul, Sigmas, xi), 1) # implemented differently below
erg <- solve(normalization) %*% aux_sum
attr(erg,"Sigma") <- normalization
return(erg)
}
#' @describeIn generalized_mean for class rmult of package 'compositions'
#' @param var a matrix containing the corrected (estimated true) group variances
#' @param individual_var a matrix containing individual variances. Default is a 0 - matrix with the dimensions of x, can be used for implementing the individual uncertainties
#' @export
generalized_mean.rmult <- function(x, var, individual_var = matrix(0, nrow = nrow(x), ncol = ncol(x)^2), ...) {
# These two helpers are used in all generalized_mean methods and replicate gsi.Diag resp. gsi.Inv of the package 'compositions'.
# They are redefined here because the gsi functions of 'compositions' are internal and cannot be used outside that package.
gsiDiag <- function(d) {
if (length(d) > 1)
return(diag(d))
return( structure(d,dim = c(1,1)))
}
gsiInv <- function(A, tol = 1E-15) {
with(svd(A), v %*% gsiDiag(ifelse(abs(d)/max(d) > tol, 1/d, 0)) %*% t(u))
}
if (is.null(dim(x)))
stop("'x' is not a matrix")
x <- as.matrix(x)
if (any(!is.finite(x)))
stop("infinite, NA or NaN values in 'x'")
if (is.null(dim(individual_var)))
stop("'individual_var' is not a matrix")
individual_var <- as.matrix(individual_var)
if (ncol(individual_var) != ncol(x)^2) stop("The individual variances do not seem to have the right format.
Please make sure that your uncertainties are also converted into the respective log-ratio space and that all entries of the resulting variance-covariance matrix are in one row.")
if (any(!is.finite(individual_var)))
stop("infinite, NA or NaN values in 'individual_var'")
if (dim(individual_var)[2] == 1) { # if only one variable exists:
sigmasum <- apply(individual_var, 1, function(s) var + s)
sigmaInv = 1/sigmasum
normalization = sum(sigmaInv)
aux = sapply(1:nrow(x), function(i) sigmaInv[i] * as.vector(x[i,] )) # has to be "as.vector" because x is still rmult class
aux_sum = sum(aux)
} else {# for more than one variable
sigmasum <- apply(individual_var, 1, function(s) force_posdef(var + matrix(s, ncol = ncol(x)))) # matrix because the uncertainties are expected to also have covariance entries
sigmaInv <- lapply(1:ncol(sigmasum), function(i) gsiInv(matrix(sigmasum[,i], ncol = ncol(x))))
zw <- unlist(sigmaInv)
zw[is.na(zw)] <- 0
normalization = matrix(rowSums(matrix(zw, ncol = nrow(x), byrow = F)), ncol = ncol(x))
aux = sapply(1:nrow(x), function(i) sigmaInv[[i]] %*% as.vector(x[i,] )) # has to be "as.vector" because x is still rmult class
aux_sum = rowSums(aux)
}
erg <- gsiInv(normalization) %*% aux_sum
attr(erg,"Sigma") <- normalization
return(erg)
}
#' Estimate true group variance
#'
#' @author Solveig Pospiech, K. Gerald v.d. Boogaart
#'
#' @description Estimation of true group variance incorporating observation wise variances.
#' The function uses the data from x and the individual variances of each observation, for example derived from uncertainties, to estimate the 'true' group variance.
#' The observed variance of the matrix is corrected by subtracting the average of the individual variances, i.e. their column sums divided by the number of rows of the matrix.
#'
#' @param x a matrix of data
#' @param ... further arguments, currently not used
#'
#' @return matrix of corrected group variance
#'
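#' @examples
#' # A minimal sketch with assumed toy inputs (illustrative values, not taken
#' # from the package data): the observed variance of 'x' is corrected by
#' # subtracting the average cell-wise variance
#' set.seed(1)
#' x <- matrix(rnorm(40, sd = 2), ncol = 2) # 20 observations, 2 variables
#' u <- matrix(0.1, nrow = 20, ncol = 2)    # cell-wise variances
#' calc_estimate_true_var(x, individual_var = u)
#'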
#' @export
calc_estimate_true_var <- function(x, ...) {
UseMethod("calc_estimate_true_var", x)
}
#' @describeIn calc_estimate_true_var for class matrix or data.frame
#' @param individual_var a matrix of cell-wise uncertainties, corresponding to the entries of 'x'
#' @param force_pos_def force positive definiteness of the new group variances, default TRUE
#' @export
calc_estimate_true_var.default <- function(x,
individual_var,
force_pos_def = TRUE, ...) {
if (is.null(dim(x)))
stop("'x' is not a matrix")
if (is.null(dim(individual_var)))
stop("'individual_var' is not a matrix")
# average the individual individual_var:
averaged_sigmas = colSums(individual_var)/nrow(x)
newgroupsigma = compositions::var(x) - diag(averaged_sigmas) # diag because uncertainties are expected to have only entries in the diagonal
if (force_pos_def) {
message("Checking positive definiteness of corrected group variances...")
newgroupsigma = force_posdef(newgroupsigma)
message("Checking done")
}
return(newgroupsigma)
}
#' @describeIn calc_estimate_true_var for class rmult
#' @param individual_var a matrix of cell-wise uncertainties, corresponding to the entries of 'x'
#' @param force_pos_def force positive definiteness of the new group variances, default TRUE
#' @export
calc_estimate_true_var.rmult <- function(x, individual_var, force_pos_def = TRUE, ...) {
if (is.null(dim(x)))
stop("'x' is not a matrix")
if (is.null(dim(individual_var)))
stop("'individual_var' is not a matrix")
# average the individual variances (individual_var):
averaged_sigmas = colSums(individual_var)/nrow(x)
newgroupsigma = compositions::var(x) - matrix(averaged_sigmas, ncol = ncol(x)) # matrix because the uncertainties are expected to also have covariance entries
if (force_pos_def) {
message("Checking positive definiteness of corrected group variances...")
newgroupsigma = force_posdef(newgroupsigma)
message("Checking done")
}
return(newgroupsigma)
}
#' Force positive definiteness
#'
#' @description Function to force positive definiteness on a matrix.
#'
#' @author Solveig Pospiech
#'
#' @param x matrix
#' @param verbose logical, default TRUE. Should the function print the corrected eigenvalues?
#'
#' @return positive definite matrix
#'
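#' @examples
#' \dontrun{
#' # Illustrative sketch (not run, since the function is not exported):
#' m <- matrix(c(1, 2, 2, 1), ncol = 2) # eigenvalues 3 and -1
#' force_posdef(m)                      # the negative eigenvalue is set to zero
#' }
#'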
# #' @export
force_posdef <- function(x, verbose = TRUE) {
# test for positive definiteness
myeigen = eigen(x)
if (sum(zw <- myeigen$values < 0) > 0 ) {
message("Matrix is not positive definite. Eigenvalues are forced to non-negativeness.")
if (verbose) print.noquote(paste("The eigenvalues:", myeigen$values))
myeigen$values[zw] <- 0
x = myeigen$vectors %*% diag(myeigen$values) %*% t(myeigen$vectors)
}
return(x)
}
|
/scratch/gouwar.j/cran-all/cranData/vdar/R/subfunctions.R
|
#' Weighted Quadratic Discriminant Analysis
#'
#' @author Solveig Pospiech, package 'MASS'
# #' Raimon Tolosana-Delgado, K. Gerald v.d. Boogaart
#'
#' @description Extension of the qda() of package 'MASS' to calculate a QDA incorporating individual, cell-wise uncertainties,
#' e.g. if the uncertainties are expressed as individual variances for each measurand.
#'
#' @details Uncertainties can be considered in a statistical analysis either by each measured variable, by each observation or by using the individual, cell-wise uncertainties.
#' There are several methods for incorporating variable-wise or observation-wise uncertainties into a QDA, most of them using the uncertainties as weights for the variables or observations of the data set.
#' The term 'cell-wise uncertainties' describes a data set of $d$ analysed variables in which each observation has an individual uncertainty for each of the $d$ variables composing it.
#' Hence, a data set of $n \\times d$ data values has an associated data set of $n \\times d$ individual uncertainties.
#' Instead of weighting the columns or rows of the data set, the vqda() function uses the uncertainties to recalculate better estimates of the group variances and group means.
#' If the presence of uncertainties is not accounted for, the decision rules are based on the group variances calculated from the given data set.
#' But this observed group variance might deviate notably from the group variance that can be estimated when the uncertainties are included.
#' This methodological framework not only allows incorporating cell-wise uncertainties, but would also largely remain valid if information about the co-dependency between uncertainties within each observation were reported.
#'
#' @references Pospiech, S., R. Tolosana-Delgado and K.G. van den Boogaart (2020) Discriminant Analysis for Compositional Data Incorporating Cell-Wise Uncertainties, Mathematical Geosciences
#'
#' @param x data frame or matrix containing the data to be discriminated
#' @param uncertainties data frame or matrix containing the values for uncertainties per cell. Uncertainties should be relative errors, e.g. the relative standard deviation of the measurand
#' @param grouping a factor or character vector specifying the group for each observation (row).
#' @param prior the prior probabilities of class membership. If unspecified, the class proportions for the training set are used. If present, the probabilities should be specified in the order of the factor levels.
#'
#' @examples
#' # for non-compositional data:
#' data("dataobs")
#' data("uncertainties")
#' myqda = vqda(x = dataobs[, 1:2], uncertainties = uncertainties[, 1:2], grouping = dataobs$Group)
#' mypred = predict(myqda, newdata = dataobs[, 1:2], newerror = uncertainties[, 1:2])
#' forplot = cbind(dataobs, LG1 = mypred$posterior[,1])
#' if (require("ggplot2")) {
#' scatter_plot = ggplot(data = forplot, aes(x = Var1, y = Var2)) +
#' geom_point(aes(shape = Group, color = LG1))
#' if (require("ggthemes")) {
#' scatter_plot = scatter_plot +
#' scale_color_gradientn(colours = colorblind_pal()(5))
#' }
#' scatter_plot
#' }
#'
#' # for compositional data
#' data("dataobs_coda")
#' data("uncertainties_coda")
#' require(compositions)
#' # generate ilr-transformation (from package 'compositions')
#' data_ilr = ilr(dataobs_coda[, 1:3])
#' uncert_ilr = t(simplify2array(apply(uncertainties_coda[, 1:3],1,
#' function(Delta) clrvar2ilr(diag(Delta)))))
#' uncert_ilr = compositions::rmult(uncert_ilr) # change class into rmult from package 'compositions'
#' myqda_coda = vqda(x = data_ilr, uncertainties = uncert_ilr, grouping = dataobs_coda$Group)
#' mypred_coda = predict(myqda_coda, newdata = data_ilr, newerror = uncert_ilr)
#' forplot_coda = cbind(dataobs_coda, LG1 = mypred_coda$posterior[,1])
#' # if 'ggtern' is installed, you can plot via ggtern:
#' # if (require("ggtern")) {
#' # ternary_plot = ggtern(data = forplot_coda, aes(x = Var1, y = Var2, z = Var3)) +
#' # geom_point(aes(shape = Group, color = LG1))
#' # if (require("ggthemes")) {
#' # ternary_plot = ternary_plot +
#' # scale_color_gradientn(colours = colorblind_pal()(5))
#' # }
#' # ternary_plot
#' # }
#'
#' @return object of class 'vqda' containing the following components:
#' \code{prior} the prior probabilities used.
#' \code{counts} counts per group.
#' \code{means} the group means.
#' \code{generalizedMeans} the group means calculated by the function \code{\link{generalized_mean}}
#' \code{groupVarCorrected} the group variances calculated by the function \code{\link{calc_estimate_true_var}}
#' \code{lev} the levels of the grouping factor.
#' \code{grouping} the factor specifying the class for each observation.
#'
#' @export
vqda <- function(x,
uncertainties,
grouping,
prior) {
# not implemented yet: formula, ....
UseMethod("vqda", x)
}
#' @export
vqda.default <- function(x,
uncertainties,
grouping,
prior = proportions) {
# prepare data ------------------------------------------------------------
# copied from MASS::qda.default
if (is.null(dim(x)))
stop("'x' is not a matrix")
x <- as.matrix(x)
if (any(!is.finite(x)))
stop("infinite, NA or NaN values in 'x'")
n <- nrow(x)
p <- ncol(x)
if (n != length(grouping))
stop("nrow(x) and length(grouping) are different")
g <- droplevels(as.factor(grouping))
lev <- levels(g)
counts <- as.vector(table(g))
names(counts) <- lev
if (any(counts < p + 1))
stop("some group is too small for 'qda'")
proportions <- counts/length(g)
ng <- length(proportions)
if (any(prior < 0) || round(sum(prior), 5) != 1)
stop("invalid 'prior'")
if (length(prior) != ng)
stop("'prior' is of incorrect length")
names(prior) <- lev
group.means <- tapply(unclass(x), list(rep(g, ncol(x)), col(x)), mean)
# scaling <- array(dim = c(p, p, ng))
# ldet <- numeric(ng)
# copy end
# vdar new code --------
# check if uncertainties and x have same class
if (class(uncertainties) == "rmult") {
warning("'uncertainties' has class 'rmult' but 'x' has not. Are you sure this is correct?")
# check the format of the uncertainties matrix, because then the uncertainties are stored as arrays in the rows.
if (ncol(uncertainties) != p^2) stop("transformed uncertainties are expected to be the outcome of 't(apply( <original_uncertainties> , 1, function(Delta) clrvar2ilr(diag(Delta))))'. Please make sure the input of the transformed uncertainties is correct.")
}
Zg = split(x, g) # split handles the rmult class fine, yielding matrix-like entries in the list
# if the class is not rmult, split needs a data.frame to produce matrix-like entries in the list;
# to avoid the cost of a data.frame, and to avoid making rmult mandatory for this function, the list entries are reshaped here:
Zg = lapply(Zg, function(y) matrix(y, ncol = ncol(x)))
# split errors into the groups, now they are vectors
sigmaIg <- split(compositions::rmult(uncertainties), g)
# for following line see comment for Zg
sigmaIg = lapply(sigmaIg, function(y) matrix(y, ncol = ncol(uncertainties)))
# generate a new sigma: the sigmasums have to be normalized to nrow (per group) and subtracted:
sigmacorrected <- mapply(calc_estimate_true_var, Zg, sigmaIg, SIMPLIFY = FALSE)
meancorrected <- mapply(generalized_mean, Zg, sigmacorrected, sigmaIg, SIMPLIFY = FALSE)
structure(list(
prior = prior,
counts = counts,
means = group.means,
generalizedMeans = meancorrected,
groupVarCorrected = sigmacorrected,
lev = lev,
grouping = grouping
)
,class = "vqda")
}
#' @export
vqda.rmult <- function(x,
uncertainties,
grouping,
prior = proportions) {
# prepare data ------------------------------------------------------------
# copied from MASS::qda.default
if (is.null(dim(x)))
stop("'x' is not a matrix")
x <- as.matrix(x)
if (any(!is.finite(x)))
stop("infinite, NA or NaN values in 'x'")
n <- nrow(x)
p <- ncol(x)
if (n != length(grouping))
stop("nrow(x) and length(grouping) are different")
g <- droplevels(as.factor(grouping))
lev <- levels(g)
counts <- as.vector(table(g))
names(counts) <- lev
if (any(counts < p + 1))
stop("some group is too small for 'qda'")
proportions <- counts/length(g)
ng <- length(proportions)
if (any(prior < 0) || round(sum(prior), 5) != 1)
stop("invalid 'prior'")
if (length(prior) != ng)
stop("'prior' is of incorrect length")
names(prior) <- lev
group.means <- tapply(unclass(x), list(rep(g, ncol(x)), col(x)), mean)
# scaling <- array(dim = c(p, p, ng))
# ldet <- numeric(ng)
# copy end
# vdar new code --------
# check if uncertainties and x have same class
if (class(uncertainties) != "rmult") {
warning("'x' has class 'rmult' but 'uncertainties' has not. Are you sure this is correct?")
message("The 'uncertainties' are expected to be the outcome of 't(apply( <original coda uncertainties> , 1, function(Delta) clrvar2ilr(diag(Delta))))'. Please make sure 'uncertainties' has the correct input format and transformation(s).")
}
# check the format of the uncertainties matrix: for rmult input, each row stores a flattened variance-covariance matrix.
if (ncol(uncertainties) != p^2) stop("transformed uncertainties are expected to be the outcome of 't(apply( <original_uncertainties> , 1, function(Delta) clrvar2ilr(diag(Delta))))'. Please make sure 'uncertainties' has the correct input format and transformation(s)")
Zg = split(compositions::rmult(x), g) # split handles the rmult class fine, yielding matrix-like entries in the list
# split errors into the groups, now they are vectors
sigmaIg <- split(compositions::rmult(uncertainties), g)
# generate a new sigma: the sigmasums have to be normalized to nrow(per group) and subtracted:
sigmacorrected <- mapply(calc_estimate_true_var, Zg, sigmaIg, SIMPLIFY = FALSE)
meancorrected <- mapply(generalized_mean, Zg, sigmacorrected, sigmaIg, SIMPLIFY = FALSE)
structure(list(
prior = prior,
counts = counts,
means = group.means,
generalizedMeans = meancorrected,
groupVarCorrected = sigmacorrected,
lev = lev,
grouping = grouping
)
,class = "vqda")
}
#' Weighted Linear Discriminant Analysis
#'
#' @author Solveig Pospiech, package 'MASS'
# #' Raimon Tolosana-Delgado, K. Gerald v.d. Boogaart
#'
#' @description Extension of the qda() of package 'MASS' (not the lda() function) to calculate an LDA incorporating individual, cell-wise uncertainties,
#' e.g. if the uncertainties are expressed as individual variances for each measurand.
#'
#' @details Uncertainties can be considered in a statistical analysis either by each measured variable, by each observation or by using the individual, cell-wise uncertainties.
#' There are several methods for incorporating variable-wise or observation-wise uncertainties into a discriminant analysis, most of them using the uncertainties as weights for the variables or observations of the data set.
#' The term 'cell-wise uncertainties' describes a data set of $d$ analysed variables in which each observation has an individual uncertainty for each of the $d$ variables composing it.
#' Hence, a data set of $n \\times d$ data values has an associated data set of $n \\times d$ individual uncertainties.
#' Instead of weighting the columns or rows of the data set, the vlda() function uses the uncertainties to recalculate better estimates of the group variances and group means.
#' It is internally very similar to the \code{\link{vqda}} function, but uses a group variance averaged over all groups.
#' If the presence of uncertainties is not accounted for, the decision rules are based on the group variances calculated from the given data set.
#' But this observed group variance might deviate notably from the group variance that can be estimated when the uncertainties are included.
#' This methodological framework not only allows incorporating cell-wise uncertainties, but would also largely remain valid if information about the co-dependency between uncertainties within each observation were reported.
#'
#' @references Pospiech, S., R. Tolosana-Delgado and K.G. van den Boogaart (2020) Discriminant Analysis for Compositional Data Incorporating Cell-Wise Uncertainties, Mathematical Geosciences
#'
#' @param x data frame or matrix containing the data to be discriminated
#' @param uncertainties data frame or matrix containing the values for uncertainties per cell. Uncertainties should be relative errors, e.g. the relative standard deviation of the measurand
#' @param grouping a factor or character vector specifying the group for each observation (row).
#' @param prior the prior probabilities of class membership. If unspecified, the class proportions for the training set are used. If present, the probabilities should be specified in the order of the factor levels.
#'
#' @examples
#' # for non-compositional data:
#' data("dataobs")
#' data("uncertainties")
#' mylda = vlda(x = dataobs[, 1:2], uncertainties = uncertainties[, 1:2], grouping = dataobs$Group)
#' mypred = predict(mylda, newdata = dataobs[, 1:2], newerror = uncertainties[, 1:2])
#' forplot = cbind(dataobs, LG1 = mypred$posterior[,1])
#' if (require("ggplot2")) {
#' scatter_plot = ggplot(data = forplot, aes(x = Var1, y = Var2)) +
#' geom_point(aes(shape = Group, color = LG1))
#' if (require("ggthemes")) {
#' scatter_plot = scatter_plot +
#' scale_color_gradientn(colours = colorblind_pal()(5))
#' }
#' scatter_plot
#' }
#'
#' # for compositional data
#' data("dataobs_coda")
#' data("uncertainties_coda")
#' require(compositions)
#' # generate ilr-transformation (from package 'compositions')
#' data_ilr = ilr(dataobs_coda[, 1:3])
#' uncert_ilr = t(simplify2array(apply(uncertainties_coda[, 1:3],1,
#' function(Delta) clrvar2ilr(diag(Delta)))))
#' uncert_ilr = compositions::rmult(uncert_ilr) # change class into rmult from package 'compositions'
#' mylda_coda = vlda(x = data_ilr, uncertainties = uncert_ilr, grouping = dataobs_coda$Group)
#' mypred_coda = predict(mylda_coda, newdata = data_ilr, newerror = uncert_ilr)
#' forplot_coda = cbind(dataobs_coda, LG1 = mypred_coda$posterior[,1])
#' # if 'ggtern' is installed, you can plot via ggtern:
#' # if (require("ggtern")) {
#' # ternary_plot = ggtern(data = forplot_coda, aes(x = Var1, y = Var2, z = Var3)) +
#' # geom_point(aes(shape = Group, color = LG1))
#' # if (require("ggthemes")) {
#' # ternary_plot = ternary_plot +
#' # scale_color_gradientn(colours = colorblind_pal()(5))
#' # }
#' # ternary_plot
#' # }
#'
#' @return object of class 'vlda' containing the following components:
#' \code{prior} the prior probabilities used.
#' \code{counts} counts per group.
#' \code{means} the group means.
#' \code{generalizedMeans} the group means calculated by the function \code{\link{generalized_mean}}.
#' \code{groupVarCorrected} the group variances calculated by the function \code{\link{calc_estimate_true_var}}.
#' \code{lev} the levels of the grouping factor.
#' \code{grouping} the factor specifying the class for each observation.
#' @export
vlda <- function(x,
uncertainties,
grouping,
prior) {
# not implemented yet: formula, ....
UseMethod("vlda", x)
}
#' @export
vlda.default <- function(x,
uncertainties,
grouping,
prior = proportions) {
# prepare data ------------------------------------------------------------
# copied from MASS::qda.default
if (is.null(dim(x)))
stop("'x' is not a matrix")
x <- as.matrix(x)
if (any(!is.finite(x)))
stop("infinite, NA or NaN values in 'x'")
n <- nrow(x)
p <- ncol(x)
if (n != length(grouping))
stop("nrow(x) and length(grouping) are different")
g <- droplevels(as.factor(grouping))
lev <- levels(g)
counts <- as.vector(table(g))
names(counts) <- lev
if (any(counts < p + 1))
stop("some group is too small for 'qda'")
proportions <- counts/length(g)
ng <- length(proportions)
if (any(prior < 0) || round(sum(prior), 5) != 1)
stop("invalid 'prior'")
if (length(prior) != ng)
stop("'prior' is of incorrect length")
names(prior) <- lev
group.means <- tapply(unclass(x), list(rep(g, ncol(x)), col(x)), mean)
# scaling <- array(dim = c(p, p, ng))
# ldet <- numeric(ng)
# copy end
# vdar new code --------
# check if uncertainties and x have same class
if (class(uncertainties) == "rmult") {
warning("'uncertainties' has class 'rmult' but 'x' has not. Are you sure this is correct?")
# check the format of the uncertainties matrix, because then the uncertainties are stored as arrays in the rows.
if (ncol(uncertainties) != p^2) stop("transformed uncertainties are expected to be the outcome of 't(apply( <original_uncertainties> , 1, function(Delta) clrvar2ilr(diag(Delta))))'. Please make sure the input of the transformed uncertainties is correct.")
}
Zg = split(x, g) # split works perfectly fine for rmult-class to get in the lists matrix.
# if class is not rmult, split needs a data.frame to put it into matrix-like entries in the list-entries
# to avoid the costs of data.frame but also to avoid that rmult is mandatory for running this function, here comes the reforming of the list- entries:
Zg = lapply(Zg, function(y) matrix(y, ncol = ncol(x)))
# split errors into the groups, now they are vectors
sigmaIg <- split(compositions::rmult(uncertainties), g)
# for following line see comment for Zg
sigmaIg = lapply(sigmaIg, function(y) matrix(y, ncol = ncol(uncertainties)))
# sum up all group variances and normalize by DF (maybe not the cleanest code, because the var is still there)
averaged_variance = Reduce("+", lapply(Zg, function(y) compositions::var(y)*(nrow(y) - 1)))/(n - ng)
  # sum the uncertainties within each group, then average over all n observations
averaged_uncertainties = Reduce("+", lapply(sigmaIg, colSums))/n
message("Checking positive definiteness of corrected variance for all groups...")
if ("rmult" %in% class(uncertainties)) {
sigmacorrected_t <- force_posdef(averaged_variance - matrix(averaged_uncertainties, ncol = ncol(averaged_variance)))
} else {
sigmacorrected_t <- force_posdef(averaged_variance - diag(averaged_uncertainties))
}
message("Checking done")
  # generate a list with the sigmacorrected for each group, to mimic the group variances of QDA
sigmacorrected = rep(list(sigmacorrected_t), ng)
meancorrected <- mapply(generalized_mean, Zg, sigmacorrected, sigmaIg, SIMPLIFY = FALSE)
structure(list(
prior = prior,
counts = counts,
means = group.means,
generalizedMeans = meancorrected,
groupVarCorrected = sigmacorrected,
lev = lev,
grouping = grouping
)
,class = "vlda")
}
#' @export
vlda.rmult <- function(x,
uncertainties,
grouping,
prior = proportions) {
# prepare data ------------------------------------------------------------
# copied from MASS::qda.default
if (is.null(dim(x)))
stop("'x' is not a matrix")
x <- as.matrix(x)
if (any(!is.finite(x)))
stop("infinite, NA or NaN values in 'x'")
n <- nrow(x)
p <- ncol(x)
if (n != length(grouping))
stop("nrow(x) and length(grouping) are different")
g <- droplevels(as.factor(grouping))
lev <- levels(g)
counts <- as.vector(table(g))
names(counts) <- lev
if (any(counts < p + 1))
stop("some group is too small for 'qda'")
proportions <- counts/length(g)
ng <- length(proportions)
if (any(prior < 0) || round(sum(prior), 5) != 1)
stop("invalid 'prior'")
if (length(prior) != ng)
stop("'prior' is of incorrect length")
names(prior) <- lev
group.means <- tapply(unclass(x), list(rep(g, ncol(x)), col(x)), mean)
# scaling <- array(dim = c(p, p, ng))
# ldet <- numeric(ng)
# copy end
# vdar new code --------
# check if uncertainties and x have same class
if (class(uncertainties) != "rmult") {
warning("'x' has class 'rmult' but 'uncertainties' has not. Are you sure this is correct?")
message("The 'uncertainties' are expected to be the outcome of 't(apply( <original coda uncertainties> , 1, function(Delta) clrvar2ilr(diag(Delta))))'. Please make sure 'uncertainties' has the correct input format and transformation(s).")
}
# check the format of the uncertainties matrix, because then the uncertainties are stored as arrays in the rows.
if (ncol(uncertainties) != p^2) stop("transformed uncertainties are expected to be the outcome of 't(apply( <original_uncertainties> , 1, function(Delta) clrvar2ilr(diag(Delta))))'. Please make sure 'uncertainties' has the correct input format and transformation(s)")
Zg = split(compositions::rmult(x), g) # split works perfectly fine for rmult-class to get in the lists matrix.
# split errors into the groups, now they are vectors
sigmaIg <- split(compositions::rmult(uncertainties), g)
# sum up all group variances and normalize by DF (maybe not the cleanest code, because the var is still there)
averaged_variance = Reduce("+", lapply(Zg, function(y) compositions::var(y)*(nrow(y) - 1)))/(n - ng)
  # sum the uncertainties within each group, then average over all n observations
averaged_uncertainties = Reduce("+", lapply(sigmaIg, colSums))/n
message("Checking positive definiteness of corrected variance for all groups...")
if ("rmult" %in% class(uncertainties)) {
sigmacorrected_t <- force_posdef(averaged_variance - matrix(averaged_uncertainties, ncol = ncol(averaged_variance)))
} else {
sigmacorrected_t <- force_posdef(averaged_variance - diag(averaged_uncertainties))
}
message("Checking done")
  # generate a list with the sigmacorrected for each group, to mimic the group variances of QDA
sigmacorrected = rep(list(sigmacorrected_t), ng)
meancorrected <- mapply(generalized_mean, Zg, sigmacorrected, sigmaIg, SIMPLIFY = FALSE)
structure(list(
prior = prior,
counts = counts,
means = group.means,
generalizedMeans = meancorrected,
groupVarCorrected = sigmacorrected,
lev = lev,
grouping = grouping
)
,class = "vlda")
}
#' predict.vqda
#'
#' @author Solveig Pospiech, package 'MASS'
#'
#' @description Classify multivariate observations using a fitted object of class 'vqda' or 'vlda', analogous to the predict methods for qda() and lda() fits in package 'MASS'.
#'
#' @param object object of class 'vqda' or 'vlda'.
#' @param ... additional arguments affecting the predictions produced.
#'
# predict <- function(object, ...) {
# UseMethod("predict", object)
# }
#' @describeIn predict predict() for class 'vqda'
#' @param newdata data frame or matrix of cases to be classified or, if object has a formula, a data frame with columns of the same names as the variables used.
#' A vector will be interpreted as a row vector. If newdata is missing, an attempt will be made to retrieve the data used to fit the qda object.
#' @param newerror data frame or matrix of uncertainties corresponding to the cases in 'newdata'.
#' @param prior the prior probabilities of group membership. If unspecified, the prior of the object are used.
#' @param ... ...
#'
#' @return list containing the following components:
#' \code{class} factor containing the predicted group
#' \code{posterior} matrix of dimension 'number of samples' x 'number of groups', containing the posterior probabilities for each sample to belong to one of the groups
#' \code{likelihood} matrix of dimension 'number of samples' x 'number of groups', containing the likelihood for each sample to belong to one of the groups
#' \code{grouping} original grouping of the samples, copied from the input object
#'
#' @export
predict.vqda <- function(object,
newdata,
newerror,
prior = object$prior, ...) {
# copied and slightly adjusted from predict.qda
if (!inherits(object, "vqda"))
stop("object not of class \"vqda\"")
ngroup <- length(object$prior)
if (!missing(prior)) {
if (any(prior < 0) || round(sum(prior), 5) != 1)
stop("invalid 'prior'")
if (length(prior) != ngroup)
stop("'prior' is of incorrect length")
}
if (!is.null(Terms <- object$terms)) {
if (missing(newdata))
newdata <- stats::model.frame(object)
else {
newdata <- stats::model.frame(stats::as.formula(stats::delete.response(Terms)),
newdata, na.action = function(x) x, xlev = object$xlevels)
}
x <- stats::model.matrix(stats::delete.response(Terms), newdata, contrasts = object$contrasts)
xint <- match("(Intercept)", colnames(x), nomatch = 0L)
if (xint > 0)
x <- x[, -xint, drop = FALSE]
# if (method == "looCV")
# g <- model.response(newdata)
}
else {
if (missing(newdata)) {
if (!is.null(sub <- object$call$subset)) {
newdata <- eval.parent(parse(text = paste(deparse(object$call$x,
backtick = TRUE), "[", deparse(sub, backtick = TRUE),
",]")))
g <- eval.parent(parse(text = paste(deparse(object$call[[3L]],
backtick = TRUE), "[", deparse(sub, backtick = TRUE),
"]")))
}
else {
newdata <- eval.parent(object$call$x)
g <- eval.parent(object$call[[3L]])
}
if (!is.null(nas <- object$call$na.action)) {
df <- data.frame(g = g, X = newdata)
df <- eval(call(nas, df))
g <- df$g
newdata <- df$X
}
g <- as.factor(g)
}
if (is.null(dim(newdata)))
dim(newdata) <- c(1, length(newdata))
x <- as.matrix(newdata)
}
# own code ----
# check if uncertainties and x have same class
  if (!identical(class(newdata), class(newerror))) {
    if ("rmult" %in% class(newdata) & !"rmult" %in% class(newerror)) warning("'newdata' has class 'rmult' but 'newerror' has not. Are you sure this is correct?")
    if (!"rmult" %in% class(newdata) & "rmult" %in% class(newerror)) warning("'newerror' has class 'rmult' but 'newdata' has not. Are you sure this is correct?")
    if (!"rmult" %in% class(newdata) & !"rmult" %in% class(newerror)) warning("'newdata' and 'newerror' have different classes. This might cause problems at a later stage.")
}
newSigmaIs <- if ("rmult" %in% class(newerror)) lapply(1:nrow(newerror), function(i) matrix(newerror[i,], ncol = ncol(newdata)))
else lapply(1:nrow(newerror), function(i) diag(newerror[i,]))
  newZs <- lapply(1:nrow(newdata), function(i) newdata[i,]) # put each observation into its own list element
barmug = lapply(object$generalizedMeans, c)
groupVarCorrected = object$groupVarCorrected
LgsFunc <- function(Z0, Sigma0) {
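    # quadratic discriminant score per group: Gaussian log-likelihood (up to a
    # constant) with the uncertainty-inflated covariance barSigma + Sigma0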
AuxFunc <- function(mug, barSigma) {
SigmaSimp = barSigma + Sigma0
-0.5*determinant(SigmaSimp, logarithm = T)$modulus - 0.5*c(as.numeric(Z0 - mug) %*% solve(SigmaSimp) %*% as.numeric(Z0 - mug))
}
mapply(AuxFunc, barmug, groupVarCorrected)
}
  L <- mapply(LgsFunc, newZs, newSigmaIs) # result is a matrix of dimension [number of groups x n]; transposed to [n x number of groups] below
p = t(exp(L))
posterior = compositions::clo(p)
cl <- factor(max.col(posterior), levels = seq_along(object$lev),
labels = object$lev)
dimnames(posterior) <- list(rownames(x), object$lev)
#structure( # not used at the moment
return(list(class = cl,
posterior = posterior,
likelihood = L,
grouping = object$grouping)
#,class = "predicted_weighted")
)
}
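# Minimal numeric sketch of the score computed in LgsFunc above (illustrative
# only; all values are hypothetical toy inputs, not package objects):
if (FALSE) {
  mug       <- c(0, 0)                 # a group mean
  barSigma  <- diag(2)                 # corrected group variance
  Sigma0    <- diag(c(0.1, 0.2))       # cell-wise uncertainty of the observation
  Z0        <- c(0.5, -0.3)            # the new observation
  SigmaSimp <- barSigma + Sigma0
  -0.5 * determinant(SigmaSimp, logarithm = TRUE)$modulus -
    0.5 * c((Z0 - mug) %*% solve(SigmaSimp) %*% (Z0 - mug))
}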
#' @describeIn predict predict() for class 'vlda'
#' @param newdata data frame or matrix of cases to be classified or, if object has a formula, a data frame with columns of the same names as the variables used.
#' A vector will be interpreted as a row vector. If newdata is missing, an attempt will be made to retrieve the data used to fit the qda object.
#' @param newerror data frame or matrix of uncertainties corresponding to the cases in 'newdata'.
#' @param prior the prior probabilities of group membership. If unspecified, the prior of the object are used.
#' @param ... ...
#'
#' @return list containing the following components:
#' \code{class} factor containing the predicted group
#' \code{posterior} matrix of dimension 'number of samples' x 'number of groups', containing the posterior probabilities for each sample to belong to one of the groups
#' \code{likelihood} matrix of dimension 'number of samples' x 'number of groups', containing the likelihood for each sample to belong to one of the groups
#' \code{grouping} original grouping of the samples, copied from the input object
#'
#' @export
predict.vlda <- function(object,
newdata,
newerror,
prior = object$prior, ...) {
# copied and slightly adjusted from predict.qda
if (!inherits(object, "vlda"))
stop("object not of class \"vlda\"")
ngroup <- length(object$prior)
if (!missing(prior)) {
if (any(prior < 0) || round(sum(prior), 5) != 1)
stop("invalid 'prior'")
if (length(prior) != ngroup)
stop("'prior' is of incorrect length")
}
if (!is.null(Terms <- object$terms)) {
if (missing(newdata))
newdata <- stats::model.frame(object)
else {
newdata <- stats::model.frame(stats::as.formula(stats::delete.response(Terms)),
newdata, na.action = function(x) x, xlev = object$xlevels)
}
x <- stats::model.matrix(stats::delete.response(Terms), newdata, contrasts = object$contrasts)
xint <- match("(Intercept)", colnames(x), nomatch = 0L)
if (xint > 0)
x <- x[, -xint, drop = FALSE]
# if (method == "looCV")
# g <- model.response(newdata)
}
else {
if (missing(newdata)) {
if (!is.null(sub <- object$call$subset)) {
newdata <- eval.parent(parse(text = paste(deparse(object$call$x,
backtick = TRUE), "[", deparse(sub, backtick = TRUE),
",]")))
g <- eval.parent(parse(text = paste(deparse(object$call[[3L]],
backtick = TRUE), "[", deparse(sub, backtick = TRUE),
"]")))
}
else {
newdata <- eval.parent(object$call$x)
g <- eval.parent(object$call[[3L]])
}
if (!is.null(nas <- object$call$na.action)) {
df <- data.frame(g = g, X = newdata)
df <- eval(call(nas, df))
g <- df$g
newdata <- df$X
}
g <- as.factor(g)
}
if (is.null(dim(newdata)))
dim(newdata) <- c(1, length(newdata))
x <- as.matrix(newdata)
}
# own code ----
# check if uncertainties and x have same class
  if (!identical(class(newdata), class(newerror))) {
    if ("rmult" %in% class(newdata) & !"rmult" %in% class(newerror)) warning("'newdata' has class 'rmult' but 'newerror' has not. Are you sure this is correct?")
    if (!"rmult" %in% class(newdata) & "rmult" %in% class(newerror)) warning("'newerror' has class 'rmult' but 'newdata' has not. Are you sure this is correct?")
    if (!"rmult" %in% class(newdata) & !"rmult" %in% class(newerror)) warning("'newdata' and 'newerror' have different classes. This might cause problems at a later stage.")
}
newSigmaIs <- if ("rmult" %in% class(newerror)) lapply(1:nrow(newerror), function(i) matrix(newerror[i,], ncol = ncol(newdata)))
else lapply(1:nrow(newerror), function(i) diag(newerror[i,]))
  newZs <- lapply(1:nrow(newdata), function(i) newdata[i,]) # put each observation into its own list element
barmug = lapply(object$generalizedMeans, c)
groupVarCorrected = object$groupVarCorrected
LgsFunc <- function(Z0, Sigma0) {
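    # linear discriminant score: with a common group variance the quadratic term
    # in Z0 is identical across groups and cancels, leaving
    # Z0' SigmaSimp^{-1} mug - 0.5 * mug' SigmaSimp^{-1} mug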
AuxFunc <- function(mug, barSigma) {
SigmaSimp = barSigma + Sigma0
as.numeric(Z0) %*% solve(SigmaSimp) %*% mug - 0.5*c(mug %*% solve(SigmaSimp) %*% mug)
}
mapply(AuxFunc, barmug, groupVarCorrected)
}
  L <- mapply(LgsFunc, newZs, newSigmaIs) # result is a matrix of dimension [number of groups x n]; transposed to [n x number of groups] below
  if (mean(L) > 500) L = L/10 # crude rescaling to avoid overflow to Inf in exp() below
p = t(exp(L)) * object$prior
posterior = compositions::clo(p)
cl <- factor(max.col(posterior), levels = seq_along(object$lev),
labels = object$lev)
dimnames(posterior) <- list(rownames(x), object$lev)
#structure( # not used at the moment
return(list(class = cl,
posterior = posterior,
likelihood = L,
grouping = object$grouping)
#,class = "predicted_weighted")
)
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vdar/R/weighted_DA.R ----

#' Design from the Vdgraph package
#'
#' This data frame is taken verbatim from the (now archived) \pkg{Vdgraph} package. See
#' that package for the original reference.
#'
#' @name D310
#' @docType data
#' @usage D310
#' @format a data frame of 10 runs in three variables.
#' @references Lawson J, Vining G (2014). \emph{Vdgraph: Variance dispersion graphs and Fraction of design space plots for response surface designs}. R package version 2.2-2, \url{https://CRAN.R-project.org/package=Vdgraph}.
NULL
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vdg/R/D310.R ----

#' Design from the Vdgraph package
#'
#' This data frame is taken verbatim from the (now archived) \pkg{Vdgraph} package. See
#' that package for the original reference.
#'
#' @name D416B
#' @docType data
#' @usage D416B
#' @format a data frame of 16 runs in four variables.
#' @references Lawson J, Vining G (2014). \emph{Vdgraph: Variance dispersion graphs and Fraction of design space plots for response surface designs}. R package version 2.2-2, \url{https://CRAN.R-project.org/package=Vdgraph}.
NULL
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vdg/R/D416B.R ----

#' Design from the Vdgraph package
#'
#' This data frame is taken verbatim from the (now archived) \pkg{Vdgraph} package. See
#' that package for the original reference.
#'
#' @name D416C
#' @docType data
#' @usage D416C
#' @format a data frame of 16 runs in four variables.
#' @references Lawson J, Vining G (2014). \emph{Vdgraph: Variance dispersion graphs and Fraction of design space plots for response surface designs}. R package version 2.2-2, \url{https://CRAN.R-project.org/package=Vdgraph}.
NULL
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vdg/R/D416C.R ----

#' Design from Goos & Jones
#'
#' This data frame contains the design of Table 5.4 in Goos & Jones (2011).
#'
#' @name GJ54
#' @docType data
#' @usage GJ54
#' @format a data frame of 15 runs in two variables: Time (seconds) and Temperature (Kelvin)
#' @references Goos, P., & Jones, B. (2011). Optimal design of experiments: a case study approach. John Wiley & Sons.
NULL
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vdg/R/GJ54.R ----

#' Latin Hypercube Sampling
#'
#' Different versions of latin hypercube sampling (LHS): ordinary LHS, midpoint LHS, symmetric LHS or randomized symmetric LHS. LHS is a method
#' for constructing space-filling designs. They can be more efficient for hypercuboidal design regions than other sampling methods.
#'
#' @aliases LHS MLHS SLHS RSLHS
#' @param n number of design points to generate
#' @param m number of design factors
#' @param lim limits of the coordinates in all dimensions
#' @return Matrix with samples as rows.
#' @author Pieter C. Schoonees
#' @references
#' Pieter C. Schoonees, Niel J. le Roux, Roelof L.J. Coetzer (2016). Flexible Graphical Assessment of
#' Experimental Designs in R: The vdg Package. \emph{Journal of Statistical Software}, 74(3), 1-22.
#' \doi{10.18637/jss.v074.i03}.
#' @examples
#'
#' set.seed(1234)
#' pts <- seq(-1, 1, length = 11)
#'
#' # Ordinary LHS
#' samp <- LHS(n = 10, m = 2)
#' plot(samp, main = "LHS")
#' abline(h = pts, v = pts, col = "lightgrey")
#'
#' # Midpoint LHS
#' samp <- MLHS(n = 10, m = 2)
#' plot(samp, main = "MLHS")
#' abline(h = pts, v = pts, col = "lightgrey")
#'
#' # Symmetric LHS
#' samp <- SLHS(n = 10, m = 2)
#' plot(samp, main = "SLHS")
#' abline(h = pts, v = pts, col = "lightgrey")
#'
#' # Randomized Symmetric LHS
#' samp <- RSLHS(n = 10, m = 2)
#' plot(samp, main = "RSLHS")
#' abline(h = pts, v = pts, col = "lightgrey")
#' @keywords design
#' @export
LHS <-
function (n, m = 3, lim = c(-1, 1))
{
pts <- seq(from = lim[1], to = lim[2], length = n + 1)
pts <- pts[-1]
samp <- matrix(0, nrow = n, ncol = m)
for(i in 1:m)
samp[,i] <- pts[sample(1:n, n)]
umat <- matrix(runif(m*n), nrow = n, ncol = m)
samp <- samp - (lim[2] - lim[1])*umat/n
samp
}
#' @rdname LHS
#' @export
MLHS <-
function (n, m = 3, lim = c(-1, 1))
{
pts <- seq(from = lim[1], to = lim[2], length = n + 1)
pts <- pts[-1]
samp <- matrix(0, nrow = n, ncol = m)
for(i in 1:m)
samp[,i] <- pts[sample(1:n, n)]
samp <- samp - (lim[2] - lim[1])*0.5/n
samp
}
#' @rdname LHS
#' @export
SLHS <-
function (n, m = 3, lim = c(-1, 1))
{
k <- n/2
if(n %% 2 != 0) stop("Not a even number of points - a symmetric LHD cannot be constructed.")
pts <- seq(from = lim[1], to = lim[2], length = n + 1)
pts <- pts[-1]
samp <- matrix(0, nrow = k, ncol = m)
for(i in 1:m)
samp[,i] <- sample(1:n, k)
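  # mirror the sampled levels: pairing level i with level n + 1 - i makes the
  # design symmetric about the centre of the region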
samp <- rbind(samp, n + 1 - samp)
for(i in 1:m)
samp[,i] <- pts[samp[,i]]
samp <- samp - (lim[2] - lim[1])*0.5/n
samp
}
#' @rdname LHS
#' @export
RSLHS <-
function (n, m = 3, lim = c(-1, 1))
{
k <- n/2
if(n %% 2 != 0) stop("Not a even number of points - a symmetric LHD cannot be constructed.")
pts <- seq(from = lim[1], to = lim[2], length = n + 1)
pts <- pts[-1]
samp <- matrix(0, nrow = k, ncol = m)
for(i in 1:m)
samp[,i] <- sample(1:n, k)
samp <- rbind(samp, n + 1 - samp)
# samp2 <- samp/(n/2)-1
for(i in 1:m)
samp[,i] <- pts[samp[,i]]
umat <- matrix(runif(n*m), nrow = n, ncol = m)
samp <- samp - (lim[2] - lim[1])*umat/n
samp
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vdg/R/LHS.R ----

#' Design from the Vdgraph package
#'
#' This data frame is taken verbatim from the (now archived) \pkg{Vdgraph} package. See
#' that package for the original reference.
#'
#' @name SCDDL5
#' @docType data
#' @usage SCDDL5
#' @format a data frame of 23 runs in five variables.
#' @references Lawson J, Vining G (2014). \emph{Vdgraph: Variance dispersion graphs and Fraction of design space plots for response surface designs}. R package version 2.2-2, \url{https://CRAN.R-project.org/package=Vdgraph}.
NULL
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vdg/R/SCDDL5.R ----

#' Design from the Vdgraph package
#'
#' This data frame is taken verbatim from the (now archived) \pkg{Vdgraph} package. See
#' that package for the original reference.
#'
#' @name SCDH5
#' @docType data
#' @usage SCDH5
#' @format a data frame of 28 runs in five variables.
#' @references Lawson J, Vining G (2014). \emph{Vdgraph: Variance dispersion graphs and Fraction of design space plots for response surface designs}. R package version 2.2-2, \url{https://CRAN.R-project.org/package=Vdgraph}.
NULL
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vdg/R/SCDH5.R ----

#' Compute Mean Spherical SPV
#'
#' Computes the matrix of spherical region moments for a given model formula and a vector of radii, and uses this to
#' calculate the mean spherical SPV for each of the radii. The function \code{expmat} calculates
#' the matrix containing the exponents of each model factor within each model term as columns.
#' Only simple formulae are supported: calls to \code{\link{I}} should contain only
#' products of terms, powers should be written with the \code{\link{^}} operator rather
#' than \code{\link{sqrt}}, and models should contain only monomial terms.
#'
#' @param formula model formula
#' @param radii numeric vector of radii at which to calculate the matrix of spherical region moments
#' @param FtF.inv inverse of F'F, where F is the design matrix
#' @param n integer giving the number of design runs
# ' @return a matrix of variables by terms containing the exponents of the variables in every term
#' @author Pieter C. Schoonees
#' @references
#' Pieter C. Schoonees, Niel J. le Roux, Roelof L.J. Coetzer (2016). Flexible Graphical Assessment of
#' Experimental Designs in R: The vdg Package. \emph{Journal of Statistical Software}, 74(3), 1-22.
#' \doi{10.18637/jss.v074.i03}.
#' @examples
#' f1 <- formula(~ x1*x2)
#' expmat(f1)
#' f2 <- update(f1, ~ . + I(x1^2) + I(x2^2))
#' expmat(f2)
#' f3 <- update(f2, ~ . + I(x2^0.4))
#' expmat(f3)
#' f4 <- update(f3, ~ . + I(x1^2):I(x2^2))
#' expmat(f4)
#' f5 <- update(f4, ~ . + I(x1^3*x2^0.5))
#' expmat(f5)
meanspv <- function(formula, radii, FtF.inv, n){
expmat <- expmat(formula = formula)
nterms <- ncol(expmat)
sigfun <- function(delta, r){
if(any(delta %% 2 != 0)) return(rep(0, length(r)))
# if(all(delta == 0)) return(rep(1, length(r)))
m <- length(delta)
sdelta <- sum(delta)
out <- r^sdelta * gamma(m/2) * prod(gamma((delta + 1)/2)) / (pi^(m/2) *
gamma((sdelta + m)/2))
return(out)
}
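  # assemble the matrix of spherical region moments: off-diagonal entries use the
  # sum of the exponent vectors of each pair of terms, diagonal entries use twice
  # the exponents of a single term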
lowers <- t(apply(combn(nterms, 2), 2, function(x) sigfun(expmat[,x[1]] +
expmat[,x[2]], r = radii)))
diags <- t(apply(2*expmat, 2, sigfun, r = radii))
smom <- mapply(as.data.frame(lowers), as.data.frame(diags),
FUN = function(x, y){tmp <- matrix(0, length(y), length(y));
tmp[lower.tri(tmp)] <- x;
tmp <- tmp + t(tmp);
diag(tmp) <- y;
dimnames(tmp) <- rep(list(colnames(expmat)), 2);
return(tmp)
}, SIMPLIFY = FALSE, USE.NAMES = FALSE)
mspv <- mapply(FUN = function(x, y) n * sum(diag(x %*% y)), smom,
list(FtF.inv))
out <- structure(list(Radius = radii, SPV = mspv),
class = c("meanspv", "list"))
return(out)
}
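# Illustrative check (not part of the package): sigfun() above implements the
# uniform moment E[prod_j x_j^delta_j] over the sphere of radius r. For m = 2
# and delta = c(2, 0) the closed form reduces to r^2 / 2, which a simple Monte
# Carlo estimate on the circle reproduces:
if (FALSE) {
  r <- 1.5
  theta <- runif(1e5, 0, 2 * pi)
  mean((r * cos(theta))^2)                                    # Monte Carlo, ~1.125
  r^2 * gamma(1) * gamma(3/2) * gamma(1/2) / (pi * gamma(2))  # closed form, 1.125
}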
#' @rdname meanspv
#' @export
expmat <- function(formula){
vars <- all.vars(formula)
terms <- terms(formula)
factors <- attr(terms, "factors")
test <- tnames <- colnames(factors)
for(i in vars) test <- gsub(i, replacement = "", x = test)
test <- gsub("[[:digit:]^()I.:* -]", "", test)
if(!all(test == ""))
stop("Characters other that variable names and the regexp class '[[:digit:]^()I.:* -]' found in formula")
varrows <- na.omit(match(vars, rownames(factors)))
Irows <- seq_len(nrow(factors))[-varrows]
Inames <- rownames(factors)[Irows]
expmat <- factors[varrows, , drop = FALSE]
expmat[expmat == 2] <- 1
getExp <- function(term, var){
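    # extract the exponent of 'var' from a single monomial term such as
    # "I(x1^2*x2)": 0 if the variable is absent, 1 if it appears untransformed,
    # otherwise the power given after '^'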
if(!grepl(var, term)) return(0L)
sterm <- gsub("^I\\(", "", term)
sterm <- gsub(")$", "", sterm)
sterm <- unlist(strsplit(sterm, "*", fixed = TRUE))
sterm <- sterm[grepl(var, sterm)]
if(length(sterm) != 1) stop("A variable should only occur once in each term")
tmatch <- match(var, sterm)
if(!is.na(tmatch)) if(tmatch == 1) return(1L)
sterm <- gsub("[()]", "", sterm)
sterm <- unlist(strsplit(sterm, "^", fixed = TRUE))
if(length(sterm) != 2) stop("Unkown use of '^' operator")
return(as.numeric(sterm[2]))
}
whichI <- match(Inames, tnames)
for(i in whichI) for(j in seq_along(vars[varrows]))
expmat[j, i] <- getExp(tnames[i], vars[varrows][j])
whichInt <- setdiff(seq_along(tnames), match(rownames(factors), tnames))
whichIntI <- whichInt[grep("I\\(", tnames[whichInt])]
nrInt <- length(whichIntI)
if(nrInt){
sint <- strsplit(tnames[whichIntI], ":", fixed = TRUE)
intPos <- lapply(sint, match, Inames)
for(i in seq_along(intPos)){
index <- intPos[[i]][!is.na(intPos[[i]])]
expmat[, whichIntI[i]] <- expmat[, whichIntI[i]] +
rowSums(expmat[, Inames[index], drop = FALSE])
}
}
if(attr(terms, "intercept")) expmat <- cbind("(Intercept)" = 0, expmat)
return(expmat)
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vdg/R/meanspv.R ----

#' Plot VDGs or FDS plots
#'
#' Produce Variance Dispersion Graphs and/or Fraction of Design Space plots for
#' experimental designs. There are methods for the S3 classes \code{spv},
#' \code{spvlist}, \code{spvforlist} and \code{spvlistforlist} -- see
#' \code{\link{spv}}.
#'
#' @aliases plot.spv plot.spvlist plot.spvforlist plot.spvlistforlist
#' @param x an object of type \code{spv} for a single experimental design or an
#' object of type \code{spvlist} for multiple designs.
#' @param which either a numeric vector of integers or a character vector
#' indicating which plots to produce. The possible plots are:
#' \describe{
#' \item{\code{1} or \code{"fds"}}{A (variance ratio) FDS plot}
#' \item{\code{2} or \code{"vdgsim"}}{A VDG with only the simulated prediction variance points plotted}
#' \item{\code{3} or \code{"vdgquantile"}}{A VDG with only the quantile regression lines corresponding to \code{tau} shown}
#' \item{\code{4} or \code{"vdgboth"}}{A combination of \code{2} and \code{3}}
#' \item{\code{5} or \code{"boxplots"}}{Parallel boxplots of the prediction variance}
#' }
#' @param np scalar; the number of points to use for calculating the fraction of design space criterion.
#' @param alpha the alpha transparency coefficient for the plots
#' @param points.colour colour for points in scatterplot of SPV against the radius
#' @param points.size size for points in scatterplot of SPV against the radius
#' @param tau the tau parameter for \code{\link[quantreg]{rq}} (quantile
#' regression)
#' @param radii either a numeric vector containing the radii to use for
#' calculating the mean spherical SPV over the spherical design space, or an integer
#' (length one vector) giving the number of radii to use for calculationg
#' the mean spherical SPV. If missing, the mean spherical SPV is not used.
#' @param hexbin logical indicating whether hexagonal binning should be used to display
#' density instead of colour transparency
#' @param bins argument passed to \code{\link{stat_binhex}} to determine the
#' number of hexagons used for binning.
#' @param VRFDS logical indicating whether to construct a variance ratio FDS plot or not (only for class \code{spvlist}). The
#' first design is used as reference design in case of \code{VRFDS} is \code{TRUE}
#' @param df degrees-of-freedom parameter passed to \code{\link{bs}}
#' @param lines.size line size passed to \code{\link[ggplot2]{geom_line}}
#' @param origin numeric vector specifying the origin of the design space
#' @param method optional; passed to \code{\link[proxy]{dist}} to overwrite
#' defaults of "Euclidean" for spherical regions or "supremum" for cubiodal
#' regions
#' @param arrange Logical indicating whether to return a single graphical object arranging the
#' resulting plots in a single plot window via \code{\link{grid.arrange}}, or whether to return the
#' list of graphical objects containing the plots.
#' @param \dots additional arguments passed to \code{\link[proxy]{dist}}
#' @return Returns a list of \code{\link{ggplot}} graphical objects (or grobs) with names corresponding
#' to the character version of \code{which}. These plot objects can be manipulated by changing plot
#' aesthetics and theme elements.
#' @keywords hplot
#' @author Pieter C. Schoonees
#' @references
#' Pieter C. Schoonees, Niel J. le Roux, Roelof L.J. Coetzer (2016). Flexible Graphical Assessment of
#' Experimental Designs in R: The vdg Package. \emph{Journal of Statistical Software}, 74(3), 1-22.
#' \doi{10.18637/jss.v074.i03}.
#' @method plot spv
#' @export
#' @import ggplot2
#' @import quantreg
#' @importFrom proxy dist
#' @importFrom splines bs
#' @importFrom gridExtra grid.arrange
#' @examples
#'
#' # Single design (class 'spv')
#' # Larger n should be used in actual cases
#' library(rsm)
#' bbd3 <- as.data.frame(bbd(3)[,3:5])
#' colnames(bbd3) <- paste0("x", 1:3)
#' quad.3f <- formula(~ x1*x2*x3 - x1:x2:x3 + I(x1^2) + I(x2^2) + I(x3^2))
#' set.seed(1234)
#' out <- spv(n = 1000, design = bbd3, type = "spherical", formula = quad.3f)
#' out
#' plot(out)
#'
#' # List of designs (class 'spvlist')
#' \dontrun{
#' data(SCDH5); data(SCDDL5)
#' des.list <- list(SCDH5 = SCDH5, SCDDL5 = SCDDL5)
#' quad.5f <- formula(~ x1 + x2 + x3 + x4 + x5 + x1:x2 + x1:x3 + x1:x4 + x1:x5
#' + x2:x3 + x2:x4 + x2:x5 + x3:x4 + x3:x5 + x4:x5
#' + I(x1^2) + I(x2^2) + I(x3^2) + I(x4^2) + I(x5^2))
#' out2 <- spv(n = 500, design = des.list, type = "spherical", formula = quad.5f)
#' out2
#' plot(out2)
#' }
#'
#' # List of formulae (class 'spvforlist')
#' \dontrun{
#' fact3 <- expand.grid(x1 = c(-1,1), x2 = c(-1, 1), x3 = c(-1,1))
#' lin.3f <- formula(~ x1 + x2 + x3)
#' int.3f <- formula(~ (x1+x2+x3)^2)
#' set.seed(4312)
#' out3 <- spv(n = 500, design = fact3, type = "cuboidal",
#' formula = list(linear = lin.3f, interaction = int.3f))
#' out3
#' plot(out3)
#' }
#'
#' # List of formulae and designs (class 'spvlistforlist')
#' \dontrun{
#' fact3.n <- rbind(fact3, 0, 0, 0)
#' set.seed(4312)
#' out4 <- spv(n = 200, design = list(factorial = fact3, factorial.with.cntr = fact3.n),
#' type = "cuboidal", formula = list(linear = lin.3f, interaction = int.3f))
#' out4
#' plot(out4)
#' }
plot.spv <- function (x, which = c("fds", "vdgsim", "vdgquantile", "vdgboth", "boxplots"),
np = 50, alpha = 7/sqrt(length(x$spv)), points.colour = "#39BEB1",
points.size = 2, tau = c(0.05, 0.95), radii = 21, hexbin = FALSE, bins = 80,
df = 10, lines.size = 1, origin = rep(0, ncol(x$sample)), method,
arrange = FALSE, ...) {
# Avoid global variable notes for R CMD check and ggplot2
Radius <- SPV <- Fraction <- Location <- NULL
# Handle which depending on whether it is numeric or character (gets transformed to numeric)
pnms <- c("fds", "vdgsim", "vdgquantile", "vdgboth", "boxplots")
show <- rep(FALSE, 5)
if (is.character(which)) {
which <- match.arg(which, several.ok = TRUE)
which <- sort(match(which, pnms))
}
if (!is.numeric(which)) stop("Argument 'which' is of incorrect type.")
show[which] <- TRUE
type <- x$type
if (x$at && show[1L]){
show[1L] <- FALSE
which <- which[!(which %in% 1L)]
message("Plot 1 = 'fds' cannot be produced: 'at' is TRUE (inaccurate FDS plot)")
}
add.meanspv <- x$type == "spherical" && !is.null(radii)
if (is.null(tau) & !add.meanspv){
if(any(3L:4L %in% which))
message("Plots 3 = 'vdgquantile' and/or 4 = 'vdgboth' cannot be produced:
'tau' is NULL and mean SPV not requested/possible")
show[3L:4L] <- FALSE
which <- which[!(which %in% 3L:4L)]
}
if (!x$at && show[5L]){
show[5L] <- FALSE
which <- which[!(which %in% 5L)]
message("Plot 5 = 'boxplots' cannot be produced: 'at' is FALSE")
}
pnms <- pnms[show]
if (missing(method)) method <- switch(type, spherical = "Euclidean", cuboidal = "supremum",
lhs = "supremum", mlhs = "supremum", slhs = "supremum",
rslhs = "supremum")
xvec <- proxy::dist(x$sample, matrix(origin, nrow = 1, ncol = ncol(x$sample)), method = method, ...)
method <- attr(xvec, "method")
xvec <- as.numeric(xvec)
if (add.meanspv){
if(length(radii) == 1) radii <- seq(from = 0, to = max(xvec), length.out = radii)
mspv <- meanspv(formula = x$formula, radii = radii, FtF.inv = x$FtF.inv,
n = ifelse(x$unscaled, 1, x$ndes))
tmp3 <- as.data.frame(mspv)
tmp3$Location <- "Mean"
}
if (any(show[-1L])) tmp1 <- data.frame(Radius = xvec, SPV = x$spv)
if (show[1L]){
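    # FDS curve: the empirical quantile function of the simulated SPV values,
    # evaluated at np + 1 equally spaced fractions of the design space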
maxmin <- range(x$spv)
pts <- 0:np/np
tmp2 <- data.frame(Fraction = pts, SPV = quantile(x$spv, probs = pts, type = 1))
}
if (show[3L] || show[4L]){
if (!is.null(tau)){
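      # fit one quantile regression of SPV on a B-spline basis of the radius per
      # tau, then predict on an equally spaced grid of radii to obtain smooth
      # quantile curves for the VDG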
pts <- seq(from = min(tmp1$Radius), to = max(tmp1$Radius), length = np)
fits <- lapply(tau, function(x, data) quantreg::rq(SPV ~ bs(Radius, df = df), tau = x,
data = data), data = tmp1)
newdf <- data.frame(Radius = rep(pts, length(tau)), SPV = as.numeric(
sapply(fits, predict, newdata = data.frame(Radius = pts))),
Location = rep(paste("tau =", tau), each = np))
if (exists("tmp3", inherits = FALSE)) tmp3 <- rbind(tmp3, newdf)
else tmp3 <- newdf
}
}
if (exists("tmp3", inherits = FALSE)) tmp3$Location <- as.factor(tmp3$Location)
if (show[1L]){
plot1 <- ggplot(tmp2, aes(x = Fraction, y = SPV)) + ggtitle("Fraction of Design Space Plot") +
xlab("Fraction of Design Space") +
geom_line(size = lines.size, colour = points.colour) +
theme(plot.title = element_text(vjust = 1))
}
if (show[2L]) {
plot2 <- ggplot(tmp1, aes(x = Radius, y = SPV)) + ggtitle("Variance Dispersion Graph") +
geom_point(alpha = alpha, colour = points.colour, size = points.size) +
theme(plot.title = element_text(vjust = 1)) +
xlab(paste0("Distance to Origin (", method,")"))
if (hexbin) {
plot2 <- plot2 + geom_hex(bins = bins) +
scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA)
}
}
if (show[3L]) {
plot3 <- ggplot(tmp1, aes(x = Radius, y = SPV)) + ggtitle("Variance Dispersion Graph") +
theme(plot.title = element_text(vjust = 1), legend.text.align = 0.5)
if(hexbin){
plot3 <- plot3 + geom_hex(bins = bins) +
scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA)
}
plot3 <- plot3 + geom_line(mapping = aes(x = Radius, y = SPV, linetype = Location), data = tmp3,
size = lines.size) + xlab(paste0("Distance to Origin (", method,")"))
}
if (show[4L]) {
plot4 <- ggplot(tmp1, aes(x = Radius, y = SPV)) + ggtitle("Variance Dispersion Graph") +
geom_point(alpha = alpha, colour = points.colour, size = points.size) +
theme(plot.title = element_text(vjust = 1), legend.text.align = 0.5)
if (hexbin) plot4 <- plot4 + geom_hex(bins = bins) +
scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA)
plot4 <- plot4 + geom_line(mapping = aes(x = Radius, y = SPV, linetype = Location), data = tmp3,
size = lines.size) + xlab(paste0("Distance to Origin (", method,")"))
}
if (show[5L]) {
plot5 <- ggplot(tmp1, aes(x = Radius, y = SPV)) + ggtitle("Boxplots") +
geom_boxplot(aes(x = as.factor(round(Radius, getOption("digits"))))) +
xlab(paste0("Distance to Origin (", method,")")) +
theme(plot.title = element_text(vjust = 1), legend.text.align = 0.5)
}
if (length(which)) {
out <- mget(paste0("plot", which))
names(out) <- pnms
if(x$unscaled) out <- lapply(out, '+', ylab("Unscaled Prediction Variance (UPV)"))
else out <- lapply(out, '+', ylab("Scaled Prediction Variance (SPV)"))
if (arrange) do.call(gridExtra::grid.arrange, out)
else return(out)
}
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vdg/R/plot.spv.R ----

#' @rdname plot.spv
#' @method plot spvforlist
#' @export
plot.spvforlist <- function (x, which = c("fds", "vdgsim", "vdgquantile", "vdgboth", "boxplots"),
np = 50, alpha = 7/sqrt(length(x[[1]]$spv)), points.colour = "#39BEB1",
points.size = 2, tau = c(0.05, 0.95), radii = 21, hexbin = FALSE, bins = 80,
df = 10, lines.size = 1, origin = rep(0, ncol(x[[1]]$sample)), method,
arrange = FALSE, ...) {
# Avoid global variable notes for R CMD check and ggplot2
Radius <- SPV <- Formula <- Fraction <- Location <- NULL
# Handle which depending on whether it is numeric or character (gets transformed to numeric)
pnms <- c("fds", "vdgsim", "vdgquantile", "vdgboth", "boxplots")
show <- rep(FALSE, 5)
if (is.character(which)) {
which <- match.arg(which, several.ok = TRUE)
which <- sort(match(which, pnms))
}
if (!is.numeric(which)) stop("Argument 'which' is of incorrect type.")
show[which] <- TRUE
type <- x[[1]]$type
if (x[[1]]$at && show[1L]){
show[1L] <- FALSE
which <- which[!(which %in% 1L)]
message("Plot 1 = 'fds' cannot be produced: 'at' is TRUE (inaccurate FDS plot)")
}
add.meanspv <- x[[1]]$type == "spherical" && !is.null(radii)
if (is.null(tau) & !add.meanspv){
    if (any(3L:4L %in% which))
      message("Plots 3 = 'vdgquantile' and/or 4 = 'vdgboth' cannot be produced: 'tau' is NULL and mean SPV not requested/possible")
show[3L:4L] <- FALSE
which <- which[!(which %in% 3L:4L)]
}
if (!x[[1]]$at && show[5L]){
show[5L] <- FALSE
which <- which[!(which %in% 5L)]
message("Plot 5 = 'boxplots' cannot be produced: 'at' is FALSE")
}
pnms <- pnms[show]
if (missing(method)) method <- switch(type, spherical = "Euclidean", cuboidal = "supremum",
lhs = "supremum", mlhs = "supremum", slhs = "supremum", rslhs = "supremum")
xvec <- proxy::dist(x[[1]]$sample, matrix(origin, nrow = 1, ncol = ncol(x[[1]]$sample)), method = method, ...)
method <- attr(xvec, "method")
xvec <- as.numeric(xvec)
ndes <- length(x)
nspv <- length(xvec)
ntau <- length(tau)
spvmat <- sapply(x, '[[', "spv")
nms <- names(x)
if (is.null(nms)) nms <- seq_along(x)
if (add.meanspv){
if (length(radii) == 1) radii <- seq(from = 0, to = max(xvec), length.out = radii)
mspv <- lapply(x, function(y) as.data.frame(meanspv(formula = y$formula, radii = radii, FtF.inv = y$FtF.inv, n = ifelse(y$unscaled, 1, y$ndes))))
tmp3 <- do.call(rbind, mspv)
tmp3$Formula <- rep(names(x), each = length(radii))
tmp3$Location <- "Mean"
}
if (any(show[-1L])){
tmp1 <- data.frame(Radius = rep(xvec, ndes), SPV = as.numeric(spvmat),
Formula = factor(rep(nms, each = nspv)))
}
if (show[1L]){
maxmin <- range(sapply(x, '[[', "spv"))
pts <- 0:np/np
quantmat <- sapply(x, function(xx) quantile(xx$spv, probs = pts, type = 1))
tmp2 <- data.frame(Fraction = rep(pts, ndes), SPV = as.numeric(quantmat),
Formula = factor(rep(colnames(quantmat), each = length(pts))))
}
if (show[3L] || show[4L]){
if (!is.null(tau)){
pts <- seq(from = min(tmp1$Radius), to = max(tmp1$Radius), length = np)
aggfun <- function(x){
fits <- lapply(tau, function(y) quantreg::rq(SPV ~ bs(Radius, df = df), tau = y, data = x))
sapply(fits, predict, newdata = data.frame(Radius = pts))
}
preds <- sapply(split(tmp1, tmp1$Formula), aggfun)
newdf <- data.frame(Radius = rep(pts, ntau*ndes), SPV = as.numeric(preds),
Location = rep(rep(paste("tau =", tau), each = np), ndes),
Formula = factor(rep(colnames(preds), each = ntau*np)))
if (exists("tmp3", inherits = FALSE)) tmp3 <- rbind(tmp3, newdf)
else tmp3 <- newdf
}
}
if (exists("tmp3", inherits = FALSE)) tmp3$Location <- as.factor(tmp3$Location)
if (show[1L]) {
plot1 <- ggplot(tmp2, aes(x = Fraction, y = SPV, colour = Formula)) + ggtitle("Fraction of Design Space Plot") +
xlab("Fraction of Design Space") + geom_line(size = lines.size, ...) +
theme(plot.title = element_text(vjust = 1))
}
if (show[2L]) {
plot2 <- ggplot(tmp1, aes(x = Radius, y = SPV, order = Formula)) +
ggtitle("Variance Dispersion Graph") +
geom_point(alpha = alpha, colour = points.colour, size = points.size) +
theme(plot.title = element_text(vjust = 1)) +
guides(colour = guide_legend(override.aes = list(alpha = 1))) + xlab(paste0("Distance to Origin (", method,")")) +
facet_wrap(~ Formula)
if (hexbin){
plot2 <- plot2 + geom_hex(bins = bins) +
scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA)
}
}
if (show[3L]) {
plot3 <- ggplot(tmp3, aes(x = Radius, y = SPV)) + ggtitle("Variance Dispersion Graph") +
theme(plot.title = element_text(vjust = 1)) +
geom_line(aes(group = interaction(Location, Formula), linetype = Formula, colour = Location),
size = lines.size) + theme(legend.text.align = 0.5) + xlab(paste0("Distance to Origin (", method,")"))
}
if (show[4L]) {
plot4 <- ggplot(tmp1, aes(x = Radius, y = SPV, order = Formula)) +
ggtitle("Variance Dispersion Graph") +
geom_point(alpha = alpha, colour = points.colour, size = points.size) +
theme(plot.title = element_text(vjust = 1)) +
guides(colour = guide_legend(override.aes = list(alpha = 1))) + xlab(paste0("Distance to Origin (", method,")"))
if (hexbin){
plot4 <- plot4 + geom_hex(bins = bins) +
scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA)
}
plot4 <- plot4 + geom_line(data = tmp3, aes(x = Radius, y = SPV, linetype = Location,
order = Formula), size = lines.size) + facet_wrap(~ Formula)
}
if (show[5L]) {
if (!exists("tmp1", inherits = FALSE)){
tmp1 <- data.frame(Radius = rep(xvec, ndes), SPV = as.numeric(spvmat),
Formula = factor(rep(nms, each = nspv)))
}
plot5 <- ggplot(tmp1, aes(x = Radius, y = SPV)) + ggtitle("Boxplots") +
theme(plot.title = element_text(vjust = 1),
legend.text.align = 0.5) +
geom_boxplot(aes(x = as.factor(round(Radius, getOption("digits"))))) + xlab(paste0("Distance to Origin (", method,")")) +
facet_wrap(~ Formula)
}
if (length(which)) {
out <- mget(paste0("plot", which))
names(out) <- pnms
if (x[[1]]$unscaled) out <- lapply(out, '+', ylab("Unscaled Prediction Variance (UPV)"))
else out <- lapply(out, '+', ylab("Scaled Prediction Variance (SPV)"))
if (arrange) do.call(gridExtra::grid.arrange, out)
else return(out)
}
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vdg/R/plot.spvforlist.R ----

#' @rdname plot.spv
#' @method plot spvlist
#' @export
plot.spvlist <- function (x, which = c("fds", "vdgsim", "vdgquantile", "vdgboth", "boxplots"),
np = 50, alpha = 7/sqrt(length(x[[1]]$spv)), points.colour = "#39BEB1",
points.size = 2, tau = c(0.05, 0.95), radii = 21, hexbin = FALSE,
bins = 80, VRFDS = FALSE, df = 10, lines.size = 1,
origin = rep(0, ncol(x[[1]]$sample)), method,
arrange = FALSE, ...) {
# Avoid global variable notes for R CMD check and ggplot2
Radius <- SPV <- Design <- Fraction <- Location <- NULL
# Handle which depending on whether it is numeric or character (gets transformed to numeric)
pnms <- c("fds", "vdgsim", "vdgquantile", "vdgboth", "boxplots")
show <- rep(FALSE, 5)
if (is.character(which)) {
which <- match.arg(which, several.ok = TRUE)
which <- sort(match(which, pnms))
}
if (!is.numeric(which)) stop("Argument 'which' is of incorrect type.")
show[which] <- TRUE
type <- x[[1]]$type
if (x[[1]]$at && show[1L]){
show[1L] <- FALSE
which <- which[!(which %in% 1L)]
message("Plot 1 = 'fds' cannot be produced: 'at' is TRUE (inaccurate FDS plot)")
}
add.meanspv <- x[[1]]$type == "spherical" && !is.null(radii)
if (is.null(tau) & !add.meanspv){
if (any(3L:4L %in% which))
message("Plots 3 = 'vdgquantile' and/or 4 = 'vdgboth' cannot be produced: 'tau' is NULL
and mean SPV not requested/possible")
show[3L:4L] <- FALSE
which <- which[!(which %in% 3L:4L)]
}
if (!x[[1]]$at && show[5L]){
show[5L] <- FALSE
which <- which[!(which %in% 5L)]
message("Plot 5 = 'boxplots' cannot be produced: 'at' is FALSE")
}
pnms <- pnms[show]
if (missing(method)) method <- switch(type, spherical = "Euclidean", cuboidal = "supremum",
lhs = "supremum", mlhs = "supremum", slhs = "supremum", rslhs = "supremum")
xvec <- proxy::dist(x[[1]]$sample, matrix(origin, nrow = 1, ncol = ncol(x[[1]]$sample)), method = method, ...)
method <- attr(xvec, "method")
xvec <- as.numeric(xvec)
ndes <- length(x)
nspv <- length(xvec)
ntau <- length(tau)
spvmat <- sapply(x, '[[', "spv")
nms <- names(x)
if (is.null(nms)) nms <- seq_along(x)
if (add.meanspv){
if (length(radii) == 1) radii <- seq(from = 0, to = max(xvec), length.out = radii)
mspv <- lapply(x, function(y) as.data.frame(meanspv(formula = y$formula, radii = radii,
FtF.inv = y$FtF.inv, n = ifelse(y$unscaled, 1, y$ndes))))
tmp3 <- do.call(rbind, mspv)
tmp3$Design <- rep(names(x), each = length(radii))
tmp3$Location <- "Mean"
}
if (any(show[-1L])){
tmp1 <- data.frame(Radius = rep(xvec, ndes), SPV = as.numeric(spvmat),
Design = factor(rep(nms, each = nspv)))
}
if (show[1L]){
maxmin <- range(sapply(x, '[[', "spv"))
pts <- 0:np/np
if (VRFDS) {
quantmat <- sapply(x, function(xx) quantile(log(xx$spv/x[[1]]$spv), probs = pts, type = 1))
} else quantmat <- sapply(x, function(xx) quantile(xx$spv, probs = pts, type = 1))
tmp2 <- data.frame(Fraction = rep(pts, ndes), SPV = as.numeric(quantmat),
Design = factor(rep(colnames(quantmat), each = length(pts))))
}
if (show[3L] || show[4L]){
if (!is.null(tau)){
pts <- seq(from = min(tmp1$Radius), to = max(tmp1$Radius), length = np)
aggfun <- function(x){
fits <- lapply(tau, function(y) quantreg::rq(SPV ~ bs(Radius, df = df), tau = y, data = x))
sapply(fits, predict, newdata = data.frame(Radius = pts))
}
preds <- sapply(split(tmp1, tmp1$Design), aggfun)
newdf <- data.frame(Radius = rep(pts, ntau*ndes), SPV = as.numeric(preds),
Location = rep(rep(paste("tau =", tau), each = np), ndes),
Design = factor(rep(colnames(preds), each = ntau*np)))
if (exists("tmp3", inherits = FALSE)) tmp3 <- rbind(tmp3, newdf)
else tmp3 <- newdf
}
}
if (exists("tmp3", inherits = FALSE)) tmp3$Location <- as.factor(tmp3$Location)
if (show[1L]) {
plot1 <- ggplot(tmp2, aes(x = Fraction, y = SPV, colour = Design)) + ggtitle("Fraction of Design Space Plot") +
xlab("Fraction of Design Space") + geom_line(size = lines.size) +
theme(plot.title = element_text(vjust = 1))
if (VRFDS) plot1 <- plot1 + ggtitle("Variance Ratio FDS Plot") + ylab(expression(log(SPV[x] / SPV[ref])))
}
if (show[2L]) {
plot2 <- ggplot(tmp1, aes(x = Radius, y = SPV, order = Design)) +
ggtitle("Variance Dispersion Graph") +
geom_point(alpha = alpha, colour = points.colour, size = points.size) +
theme(plot.title = element_text(vjust = 1)) +
guides(colour = guide_legend(override.aes = list(alpha = 1))) + xlab(paste0("Distance to Origin (", method,")")) +
facet_wrap(~ Design)
if (hexbin){
plot2 <- plot2 + geom_hex(bins = bins) +
scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA)
}
}
if (show[3L]) {
plot3 <- ggplot(tmp3, aes(x = Radius, y = SPV)) + ggtitle("Variance Dispersion Graph") +
theme(plot.title = element_text(vjust = 1)) +
geom_line(aes(group = interaction(Location, Design), linetype = Location, colour = Design),
size = lines.size) + theme(legend.text.align = 0.5) +
xlab(paste0("Distance to Origin (", method,")"))
}
if (show[4L]) {
plot4 <- ggplot(tmp1, aes(x = Radius, y = SPV, order = Design)) +
ggtitle("Variance Dispersion Graph") +
geom_point(alpha = alpha, colour = points.colour, size = points.size) +
theme(plot.title = element_text(vjust = 1)) +
guides(colour = guide_legend(override.aes = list(alpha = 1))) + xlab(paste0("Distance to Origin (", method,")")) +
facet_wrap(~ Design)
if (hexbin){
plot4 <- plot4 + geom_hex(bins = bins) +
scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA)
}
plot4 <- plot4 + geom_line(data = tmp3, aes(x = Radius, y = SPV, linetype = Location),
# order = Design),
size = lines.size, colour = 1)
}
if (show[5L]) {
plot5 <- ggplot(tmp1, aes(x = Radius, y = SPV)) + ggtitle("Boxplots") +
geom_boxplot(aes(x = as.factor(round(Radius, getOption("digits"))))) + xlab(paste0("Distance to Origin (", method,")")) +
theme(plot.title = element_text(vjust = 1),
legend.text.align = 0.5) + facet_wrap(~ Design)
}
if (length(which)) {
out <- mget(paste0("plot", which))
if (VRFDS) {
if (show[1L]) out$plot1 <- out$plot1 + ylab(expression(log(SPV[x] / SPV[ref])))
} else {
if (x[[1]]$unscaled) out <- lapply(out, '+', ylab("Unscaled Prediction Variance (UPV)"))
else out <- lapply(out, '+', ylab("Scaled Prediction Variance (SPV)"))
}
names(out) <- pnms
if (arrange) do.call(gridExtra::grid.arrange, out)
else return(out)
}
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vdg/R/plot.spvlist.R ----

#' @rdname plot.spv
#' @method plot spvlistforlist
#' @export
plot.spvlistforlist <- function (x, which =c("fds", "vdgsim", "vdgquantile", "vdgboth", "boxplots"),
np = 50, alpha = 7/sqrt(length(x[[1]][[1]]$spv)), points.colour = "#39BEB1",
points.size = 2, tau = c(0.05, 0.95), radii = 21, hexbin = FALSE, bins = 80,
df = 10, lines.size = 1, origin = rep(0, ncol(x[[1]][[1]]$sample)), method,
arrange = FALSE, ...) {
# Avoid global variable notes for R CMD check and ggplot2
Radius <- SPV <- Design <- Formula <- Fraction <- Location <- NULL
# Handle which depending on whether it is numeric or character (gets transformed to numeric)
pnms <- c("fds", "vdgsim", "vdgquantile", "vdgboth", "boxplots")
show <- rep(FALSE, 5)
if (is.character(which)) {
which <- match.arg(which, several.ok = TRUE)
which <- sort(match(which, pnms))
}
if (!is.numeric(which)) stop("Argument 'which' is of incorrect type.")
show[which] <- TRUE
type <- x[[1]][[1]]$type
if (x[[1]][[1]]$at && show[1L]){
show[1L] <- FALSE
which <- which[!(which %in% 1L)]
message("Plot 1 = 'fds' cannot be produced: 'at' is TRUE (inaccurate FDS plot)")
}
add.meanspv <- x[[1]][[1]]$type == "spherical" && !is.null(radii)
if (is.null(tau) & !add.meanspv){
if (any(3L:4L %in% which))
message("Plots 3 = 'vdgquantile' and/or 4 = 'vdgboth' cannot be produced: 'tau' is NULL and mean SPV not requested/possible")
show[3L:4L] <- FALSE
which <- which[!(which %in% 3:4)]
}
if (!x[[1]][[1]]$at && show[5L]){
show[5L] <- FALSE
which <- which[!(which %in% 5L)]
message("Plot 5 = 'boxplots' cannot be produced: 'at' is FALSE")
}
pnms <- pnms[show]
if (missing(method)) method <- switch(type, spherical = "Euclidean", cuboidal = "supremum",
lhs = "supremum", mlhs = "supremum", slhs = "supremum", rslhs = "supremum")
xvec <- proxy::dist(x[[1]][[1]]$sample, matrix(origin, nrow = 1, ncol = ncol(x[[1]][[1]]$sample)), method = method, ...)
method <- attr(xvec, "method")
xvec <- as.numeric(xvec)
nfor <- length(x)
ndes <- length(x[[1]])
nspv <- length(xvec)
ntau <- length(tau)
spvmat <- do.call(cbind, lapply(x, function(y) do.call(cbind, lapply(y, "[[", "spv"))))
fornms <- names(x)
desnms <- names(x[[1]])
  colnames(spvmat) <- paste(rep(desnms, nfor), rep(fornms, each = ndes), sep = ".")
if (add.meanspv){
if (length(radii) == 1) radii <- seq(from = 0, to = max(xvec), length.out = radii)
mspv <- lapply(x, function(y) lapply(y, function(z) as.data.frame(meanspv(formula = z$formula, radii = radii,
FtF.inv = z$FtF.inv, n = ifelse(z$unscaled, 1, z$ndes)))))
tmp3 <- do.call(rbind, lapply(mspv, function(x) do.call(rbind, x)))
tmp3$Formula <- rep(fornms, each = ndes * length(radii))
tmp3$Design <- rep(rep(desnms, each = length(radii)), nfor)
tmp3$Location <- "Mean"
}
if (any(show[-1L])){
tmp1 <- data.frame(Radius = rep(xvec, ndes*nfor), SPV = as.numeric(spvmat),
Design = factor(rep(rep(desnms, each = nspv), nfor)), Formula = factor(rep(fornms, each = ndes*nspv)))
}
if (show[1L]){
maxmin <- range(spvmat)
pts <- 0:np/np
quantmat <- apply(spvmat, 2, function(xx) quantile(xx, probs = pts, type = 1))
tmp2 <- data.frame(Fraction = rep(pts, ndes*nfor), SPV = as.numeric(quantmat),
Formula = factor(rep(fornms, each = ndes*(np + 1))),
Design = rep(rep(desnms, each = np + 1), nfor))
}
if (show[3L] || show[4L]){
if (!is.null(tau)){
if (!exists("tmp1", inherits = FALSE)){
tmp1 <- data.frame(Radius = rep(xvec, ndes*nfor), SPV = as.numeric(spvmat),
                         Design = factor(rep(rep(desnms, each = nspv), nfor)), Formula = factor(rep(fornms, each = ndes*nspv)))
}
pts <- seq(from = min(tmp1$Radius), to = max(tmp1$Radius), length = np)
aggfun <- function(x){
fits <- lapply(tau, function(y) quantreg::rq(SPV ~ bs(Radius, df = df), tau = y, data = x))
sapply(fits, predict, newdata = data.frame(Radius = pts))
}
preds <- sapply(split(tmp1, list(tmp1$Formula, tmp1$Design)), aggfun)
newdf <- data.frame(Radius = rep(pts, ntau*ndes*nfor), SPV = as.numeric(preds),
Location = rep(rep(paste("tau =", tau), each = np), nfor*ndes),
Formula = factor(rep(rep(sort(fornms), each = ntau*np), ndes)),
Design = factor(rep(sort(desnms), each = ntau*np*nfor)))
if (exists("tmp3", inherits = FALSE)) tmp3 <- rbind(tmp3, newdf)
else tmp3 <- newdf
}
}
if (exists("tmp3", inherits = FALSE)) tmp3$Location <- as.factor(tmp3$Location)
if (show[1L]){
plot1 <- ggplot(tmp2, aes(x = Fraction, y = SPV, colour = Design)) + ggtitle("Fraction of Design Space Plot") +
xlab("Fraction of Design Space") + geom_line(size = lines.size) +
theme(plot.title = element_text(vjust = 1)) + facet_wrap(~ Formula)
}
if (show[2L]) {
plot2 <- ggplot(tmp1, aes(x = Radius, y = SPV, order = Formula)) +
ggtitle("Variance Dispersion Graph") +
geom_point(alpha = alpha, colour = points.colour, size = points.size) +
theme(plot.title = element_text(vjust = 1)) +
guides(colour = guide_legend(override.aes = list(alpha = 1))) + facet_wrap(~ Design + Formula) +
xlab(paste0("Distance to Origin (", method,")"))
if (hexbin){
plot2 <- plot2 + geom_hex(bins = bins) +
scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA)
}
}
if (show[3L]) {
plot3 <- ggplot(tmp3, aes(x = Radius, y = SPV, colour = Design)) + ggtitle("Variance Dispersion Graph") +
theme(plot.title = element_text(vjust = 1)) +
geom_line(aes(group = interaction(Location, Formula, Design), linetype = Location),
size = lines.size) + theme(legend.text.align = 0.5) + xlab(paste0("Distance to Origin (", method,")")) +
facet_wrap(~ Formula)
}
if (show[4L]) {
plot4 <- ggplot(tmp1, aes(x = Radius, y = SPV, order = Formula)) +
ggtitle("Variance Dispersion Graph") +
geom_point(alpha = alpha, colour = points.colour, size = points.size) +
theme(plot.title = element_text(vjust = 1)) +
guides(colour = guide_legend(override.aes = list(alpha = 1))) + facet_wrap(~ Design + Formula) +
xlab(paste0("Distance to Origin (", method,")"))
if (hexbin){
plot4 <- plot4 + geom_hex(bins = bins) +
scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA)
}
plot4 <- plot4 + geom_line(data = tmp3, aes(group = interaction(Location, Design, Formula), linetype = Location,
order = Design), size = lines.size, colour = 1) +
theme(legend.text.align = 0.5) + xlab(paste0("Distance to Origin (", method,")")) + facet_wrap(~ Design + Formula)
}
if (show[5L]) {
plot5 <- ggplot(tmp1, aes(x = Radius, y = SPV)) + ggtitle("Boxplots") +
theme(plot.title = element_text(vjust = 1),
legend.text.align = 0.5) +
geom_boxplot(mapping = aes(group = Radius)) + xlab(paste0("Distance to Origin (", method,")")) +
facet_wrap(~ Design + Formula)
}
if (length(which)) {
out <- mget(paste0("plot", which))
names(out) <- pnms
if (x[[1]][[1]]$unscaled) out <- lapply(out, '+', ylab("Unscaled Prediction Variance (UPV)"))
else out <- lapply(out, '+', ylab("Scaled Prediction Variance (SPV)"))
if (arrange) do.call(gridExtra::grid.arrange, out)
else return(out)
}
}
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/plot.spvlistforlist.R
|
#' Print Method for S3 \code{spv} classes
#'
#' Simple print methods for S3 classes \code{spv}, \code{spvlist}, \code{spvforlist} and \code{spvlistforlist}. See
#' \code{\link{plot.spv}} for examples.
#'
#' @aliases print.spv print.spvlist print.spvforlist print.spvlistforlist
#' @param x Object of class \code{spv} or \code{spvlist}
#' @param \dots Unimplemented
#' @author Pieter C. Schoonees
#' @references
#' Pieter C. Schoonees, Niel J. le Roux, Roelof L.J. Coetzer (2016). Flexible Graphical Assessment of
#' Experimental Designs in R: The vdg Package. \emph{Journal of Statistical Software}, 74(3), 1-22.
#' \doi{10.18637/jss.v074.i03}.
#' @export
#' @keywords print
print.spv <- function(x, ...){
cat("\nObject of class 'spv'\n")
cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n\n", sep = "")
cat("Sample dimensions:\n", nrow(x$sample), " columns and ", ncol(x$sample), " rows\n\n", sep = "")
if(!is.null(as.list(x$call)$type)){
    if(as.list(x$call)$type %in% c("s", "S", "sphere", "spherical")) stype <- "Spherical"
else stype <- "Cuboidal"
cat("Design space type:\n", stype, "\n\n", sep = "")
}
cat("Summary of", ifelse(x$unscaled, "Unscaled Prediction Variance (UPV):\n", "Scaled Prediction Variance (SPV):\n"))
print(summary(x$spv))
}
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/print.spv.R
|
#' @rdname print.spv
#' @method print spvforlist
#' @export
print.spvforlist <- function(x, ...){
nms <- names(x)
if(is.null(nms)) nms <- seq_along(x)
cat("\nObject of class 'spvforlist'")
cat("\n\nCall:\n", paste(deparse(x[[1]]$call), sep = "\n", collapse = "\n"), sep = "")
cat("\n\nSample dimensions:\n", nrow(x[[1]]$sample), " columns and ", ncol(x[[1]]$sample), " rows\n\n", sep = "")
  if(!is.null(as.list(x[[1]]$call)$type)){
    if(as.list(x[[1]]$call)$type %in% c("s", "S", "sphere", "spherical")) stype <- "Spherical"
else stype <- "Cuboidal"
cat("Design space type:\n", stype, "\n\n", sep = "")
}
cat("Summary of", ifelse(x[[1]]$unscaled, "Unscaled Prediction Variance (UPV):\n", "Scaled Prediction Variance (SPV):\n"))
spv_df <- do.call(cbind, lapply(x, "[[", "spv"))
colnames(spv_df) <- nms
cat("\n\tFormulae:\n")
print(summary(spv_df))
}
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/print.spvforlist.R
|
#' @rdname print.spv
#' @method print spvlist
#' @export
print.spvlist <- function(x, ...){
nms <- names(x)
if(is.null(nms)) nms <- seq_along(x)
cat("\nObject of class 'spvlist'\n")
cat("\nDesign names: \n", paste0(nms, collapse = ", "), sep = "")
cat("\n\nCall:\n", paste(deparse(x[[1]]$call), sep = "\n", collapse = "\n"), sep = "")
cat("\n\nSample dimensions:\n", nrow(x[[1]]$sample), " columns and ", ncol(x[[1]]$sample), " rows\n\n", sep = "")
  if(!is.null(as.list(x[[1]]$call)$type)){
    if(as.list(x[[1]]$call)$type %in% c("s", "S", "sphere", "spherical")) stype <- "Spherical"
else stype <- "Cuboidal"
cat("Design space type:\n", stype, "\n\n", sep = "")
}
cat("Summary of", ifelse(x[[1]]$unscaled, "Unscaled Prediction Variance (UPV):\n", "Scaled Prediction Variance (SPV):\n"))
spv_df <- do.call(cbind, lapply(x, "[[", "spv"))
colnames(spv_df) <- nms
cat("\n\tDesigns:\n")
print(summary(spv_df))
}
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/print.spvlist.R
|
#' @rdname print.spv
#' @method print spvlistforlist
#' @export
print.spvlistforlist <- function(x, ...){
fornms <- names(x)
desnms <- names(x[[1]])
cat("Object of class 'spvlistforlist'\n")
cat("\nDesign names: \n", paste0(desnms, collapse = ", "), sep = "")
cat("\n\nFormulae: \n", paste0(fornms, collapse = ", "), sep = "")
cat("\n\nCall:\n", paste(deparse(x[[1]][[1]]$call), sep = "\n", collapse = "\n"), sep = "")
cat("\n\nSample dimensions:\n", nrow(x[[1]][[1]]$sample), " columns and ", ncol(x[[1]][[1]]$sample), " rows\n\n", sep = "")
cat("Summary of", ifelse(x[[1]][[1]]$unscaled, "Unscaled Prediction Variance (UPV):\n", "Scaled Prediction Variance (SPV):\n"))
spv_lst <- lapply(x, function(y) do.call(cbind, lapply(y, "[[", "spv")))
for(i in seq_along(fornms)){
cat("\n\tFormula:", fornms[i], "\n")
cat("\tDesigns:\n")
print(summary(spv_lst[[i]]))
}
}
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/print.spvlistforlist.R
|
#' @rdname runif_sphere
#' @export
#' @examples
#'
#' set.seed(1234)
#' runif_cube(n = 10)
runif_cube <- function (n, m = 2, max.dist = 1, at = FALSE, nr.dist = 21) {
if(at){
spec.dist <- seq(from = 0, to = max.dist, length = nr.dist)
# nper <- ceiling(n/nr.dist)
nper <- ceiling(n * spec.dist / sum(spec.dist))
nper[1] <- 1
n <- sum(nper)
out <- matrix(runif(n*m, min = -max.dist, max = max.dist), nrow = n,
ncol = m)
spec.dist <- rep(spec.dist, nper)
out <- sweep(out, MARGIN = 1, STATS = apply(out, 1, function(x) max(abs(x))),
FUN = "/")
out <- spec.dist*out
} else out <- matrix(runif(n*m, min = -max.dist, max = max.dist), nrow = n,
ncol = m)
out
}
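## A small usage sketch (illustrative, assumed usage): with `at = TRUE`
## every point lands on one of `nr.dist` concentric hypercubes, which can
## be verified from the supremum (L-infinity) distances.
if (FALSE) {
  set.seed(42)
  cube_samp <- runif_cube(n = 500, m = 2, at = TRUE, nr.dist = 5)
  table(round(apply(abs(cube_samp), 1, max), 3))  # 5 distinct distances
  plot(cube_samp, asp = 1)
}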
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/runif_cube.R
|
#' Sampling for hyperspheres/hypercubes
#'
#' Sample uniformly in or on a hypersphere or hypercube.
#'
#' @param n number of points to sample
#' @param m number of design factors
#' @param max.dist maximum distance from origin (L-infinity norm/supremum distance) for the hypercuboidal
#' design region (enveloping hypercube)
#' @param max.radius maximum radius of the hyperspherical design region (enveloping hypersphere)
#' @param at logical indicating whether to sample on concentric hyperspheres/hypercubes or not. With this
#' option \code{n} is distributed proportionally across radii / supremum distances so that the density
#' of samples on each concentric hypercube / hypersphere is uniform across the different hyperspheres / hypercubes.
#' @param nr.dist the number of concentric hypercubes to sample on in case \code{at} is \code{TRUE}
#' @param nr.rad the number of concentric hyperspheres to sample on in case \code{at} is \code{TRUE}
#' @author Pieter C. Schoonees
#' @references
#' Pieter C. Schoonees, Niel J. le Roux, Roelof L.J. Coetzer (2016). Flexible Graphical Assessment of
#' Experimental Designs in R: The vdg Package. \emph{Journal of Statistical Software}, 74(3), 1-22.
#' \doi{10.18637/jss.v074.i03}.
#' @export
#' @examples
#'
#' set.seed(1234)
#' samp <- runif_sphere(n = 500, at = TRUE)
#' plot(samp, asp = 1)
runif_sphere <- function (n, m = 2, max.radius = sqrt(m), at = FALSE, nr.rad = 21) {
if(m > 2){
bvec <- (m - 1:(m - 2))/2
bvec <- beta(bvec, bvec)
} else bvec <- 1
if(at){
spec.dist <- seq(from = 0, to = max.radius, length = nr.rad)
nprop <- gamma(m/2 + 1) * 2^((m - 1) * (m - 2)/2 + 1) * spec.dist^(m - 1) *
bvec / (m * pi^(m / 2) * max.radius^m)
ns <- ceiling(n * nprop / sum(nprop))
ns[1] <- 1
rvec <- rep(spec.dist, ns)
n <- length(rvec)
}
uvec <- runif(n)
tmp <- matrix(rnorm(n*m), nrow = n, ncol = m)
if(!at) rvec <- max.radius * (m * pi^(m/2 - 1) * uvec/(gamma(m/2 + 1) * 2 ^
((m - 1)*(m - 2)/2 + 1) * prod(bvec)))^(1/m)
lng <- sqrt(apply(tmp, 1, crossprod))
out <- tmp*rvec/lng
out
}
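## A small sketch (illustrative, assumed usage): with `at = FALSE` the
## points fill the ball uniformly, so for m = 2 the radii behave like
## max.radius * sqrt(u), i.e. their density increases linearly.
if (FALSE) {
  set.seed(1)
  ball <- runif_sphere(n = 2000, m = 2, max.radius = 1)
  hist(sqrt(rowSums(ball^2)), main = "Radii of a uniform disk sample")
}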
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/runif_sphere.R
|
#' Sampler Function
#'
#' This is a wrapper for the sampling functions of the \pkg{vdg} package. It extracts design properties from the
#' design passed to it to take appropriate samples.
#'
#' @param n number of points to sample
#' @param design design for which the sample is required (either a matrix or data frame)
#' @param type type of design region/sampling method. One of "spherical", "cuboidal",
#' "lhs", "mlhs", "slhs", "rslhs" or "custom". Option "custom" requires \code{custom.fun} to be
#' non-\code{NULL}.
#' @param at logical; should sampling be done on the surface of hyperspheres or hypercubes?
#' Not used for LHS methods.
#' @param custom.fun A custom sampling function, used in conjunction with \code{type = "custom"}. The
#' first and second arguments must be the sample size and dimension respectively.
#' @param \dots other arguments passed to the underlying sampling functions.
#' @return Matrix with samples as rows, with S3 class \code{smpl}
#' @seealso \code{\link{runif_sphere}}, \code{\link{runif_cube}}, \code{\link{LHS}},
#' \code{\link{MLHS}}, \code{\link{SLHS}}, \code{\link{RSLHS}}
#' @author Pieter C. Schoonees
#' @references
#' Pieter C. Schoonees, Niel J. le Roux, Roelof L.J. Coetzer (2016). Flexible Graphical Assessment of
#' Experimental Designs in R: The vdg Package. \emph{Journal of Statistical Software}, 74(3), 1-22.
#' \doi{10.18637/jss.v074.i03}.
#' @export
#' @examples
#' ## Default spherical design region
#' set.seed(1896)
#' samp1 <- sampler(n = 100, design = expand.grid(x = -1:1, y = -1:1))
#' plot(samp1)
#'
#' ## Supplying a custom sampling function based on lhs::improvedLHS()
#' library("lhs")
#' sfun <- function(n, k, dup = 1) 2 * improvedLHS(n, k, dup = dup) - 1
#' samp2 <- sampler(n = 100, design = expand.grid(x = -1:1, y = -1:1),
#' type = "custom", custom.fun = sfun)
#' plot(samp2)
sampler <- function(n, design, type = c("spherical", "cuboidal", "lhs", "mlhs", "slhs", "rslhs", "custom"),
at = FALSE, custom.fun = NULL, ...){
m <- ncol(design)
type <- tolower(type)
type <- match.arg(arg = type)
if (type == "custom" && !is.function(custom.fun))
stop("A custom sampling function must be supplied as argument 'custom.fun'.")
samp <- switch(type, spherical = runif_sphere(n = n, m = m, at = at, ...),
cuboidal = runif_cube(n = n, m = m, at = at, ...),
lhs = LHS(n = n, m = m, ...),
mlhs = MLHS(n = n, m = m, ...),
slhs = SLHS(n = n, m = m, ...),
rslhs = RSLHS(n = n, m = m, ...),
custom = custom.fun(n, m, ...))
colnames(samp) <- colnames(design)
class(samp) <- c("smpl", "matrix")
samp
}
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/sampler.R
|
#' Calculate the Scaled Prediction Variance (or SPV)
#'
#' Calculates the SPV for a sample of points in a design region of specified type. Sampling is done
#' by calling \code{\link{sampler}}.
#'
#' @param n number of samples to take
#' @param design a design or list of designs. Each design must be either a matrix or a data.frame or coercible to a data.frame.
#' @param type type of sampling passed to \code{\link{sampler}}
#' @param formula either a single model formula or a list of formulae
#' @param at only used when type is \code{'spherical'} or \code{'cuboidal'}
#' @param keepfun optional; a function taking a matrix with the same number of columns as the design and returning a logical vector
#' indicating whether each sampled point should be kept. Useful for rejection sampling over nonstandard design regions.
#' @param sample optional; if not missing it should contain a matrix or data.frame containing points sampled over the required design region. If it is not
#' missing, no further sampling will be done: the SPV is simply evaluated at these points.
#' @param unscaled logical indicating whether to use the unscaled prediction variance (UPV) instead of the scaled prediction variance (SPV)
#' @param \dots additional arguments passed to \code{\link{sampler}}. This enables the use of
#' user-specified sampling functions via the \code{custom.fun} argument to \code{\link{sampler}}.
#' @return Object of class 'spv', 'spvlist', 'spvforlist' or 'spvlistforlist', depending on whether single designs/formulas
#' are passed or lists of these.
#' @author Pieter C. Schoonees
#' @seealso \code{\link{plot.spv}} for more examples
#' @keywords multivariate
#' @export
#' @import parallel
#' @references
#' Pieter C. Schoonees, Niel J. le Roux, Roelof L.J. Coetzer (2016). Flexible Graphical Assessment of
#' Experimental Designs in R: The vdg Package. \emph{Journal of Statistical Software}, 74(3), 1-22.
#' \doi{10.18637/jss.v074.i03}.
#' @examples
#'
#' # Single design (class 'spv')
#' library(rsm)
#' bbd3 <- as.data.frame(bbd(3)[,3:5])
#' colnames(bbd3) <- paste0("x", 1:3)
#' quad.3f <- formula(~(x1 + x2 + x3)^2 - x1:x2:x3)
#' out <- spv(n = 1000, design = bbd3, type = "spherical", formula = quad.3f)
#' out
#' @rdname spv
#' @export
spv <- function(n, design, type = "spherical", formula, at = FALSE, keepfun, sample, unscaled = FALSE, ...){
UseMethod("spv", design)
}
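## A hedged sketch continuing the roxygen example above (reusing the
## assumed 'bbd3' design and 'quad.3f' formula): lists of designs and/or
## formulae yield 'spvlist', 'spvforlist' or 'spvlistforlist' objects,
## all plotted with the corresponding plot() methods.
if (FALSE) {
  out2 <- spv(n = 1000, design = list(BBD = bbd3, Small = bbd3[-1, ]),
              formula = list(Main = ~ x1 + x2 + x3, Quadratic = quad.3f))
  class(out2)  # "spvlistforlist"
  plot(out2, which = "fds")
}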
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/spv.R
|
#' @rdname spv
#' @method spv data.frame
#' @export
spv.data.frame <- function(n, design, type = c("spherical", "cuboidal", "lhs", "mlhs", "slhs", "rslhs", "custom"),
formula, at = FALSE, keepfun, sample, unscaled = FALSE, ...){
cll <- match.call()
type <- tolower(type)
type <- match.arg(type)
if (missing(sample)){
sample <- sampler(n = n, design = design, type = type, at = at, ...)
if (!missing(keepfun)) {
repeat{
keep <- keepfun(sample)
cnt <- sum(keep)
sample <- sample[keep, ]
if (cnt >= n) break
rate <- cnt/n
cat("Retained samples:", round(cnt, digits = 2),
"-- Adding some more...\n")
addsample <- sampler(n = max(ceiling((n - cnt)/rate), ceiling(n/10)),
design = design, type = type, at = at, ...)
sample <- rbind(sample, addsample)
}
cat("Final sample of size", nrow(sample), "\n")
}
}
ndes <- nrow(design)
n <- nrow(sample)
m <- ncol(design)
if (is(formula, "formula")){
formula <- as.formula(paste("~", paste(attr(terms(formula, data = sample), "term.labels"),
collapse = " + ")))
mat <- model.matrix(formula, data = as.data.frame(sample))
mod.mat <- model.matrix(formula, data = design)
p <- ncol(mod.mat)
FtF.inv <- solve(crossprod(mod.mat))
tmp <- .Fortran("fds", as.integer(p), as.integer(n), as.integer(ndes),
as.double(FtF.inv), as.double(mat), double(n),
PACKAGE = "vdg")
spv <- tmp[[6]]
if (unscaled) spv <- spv / ndes
out <- list(spv = spv, sample = sample, type = type, call = cll,
formula = formula, at = at, FtF.inv = FtF.inv, ndes = ndes,
unscaled = unscaled)
class(out) <- c("spv", "list")
return(out)
}
if (is.list(formula)){
formula <- lapply(formula, function(x)
as.formula(paste("~", paste(attr(terms(x, data = design[[1]]), "term.labels"), collapse = " + "))))
nr <- length(formula)
nms <- names(formula)
if (is.null(nms)) nms <- paste0("formula", seq_along(formula))
if (length(unique(nms)) != nr) stop("Formula names must be unique.")
spvformula <- function(formula, design, sample, call, unscaled){
ndes <- nrow(design)
n <- nrow(sample)
mat <- model.matrix(formula, data = as.data.frame(sample))
m <- ncol(design)
mod.mat <- model.matrix(formula, data = as.data.frame(design))
p <- ncol(mod.mat)
FtF.inv <- solve(crossprod(mod.mat))
tmp <- .Fortran("fds", as.integer(p), as.integer(n), as.integer(ndes),
as.double(FtF.inv), as.double(mat), double(n),
PACKAGE = "vdg")
spv <- tmp[[6]]
if (unscaled) spv <- spv / ndes
out <- list(spv = spv, sample = sample, type = type, call = call,
formula = formula, at = at, FtF.inv = FtF.inv, ndes = ndes,
unscaled = unscaled)
class(out) <- c("spv", "list")
out
}
cl <- makeCluster(getOption("cl.cores", min(detectCores() - 1, nr)))
clusterEvalQ(cl, library(vdg))
out <- parLapply(cl, formula, spvformula, design = design, sample = sample,
call = cll, unscaled = unscaled)
stopCluster(cl)
names(out) <- nms
class(out) <- c("spvforlist", "list")
return(out)
}
}
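## A hedged sketch of rejection sampling via 'keepfun' (assumed usage,
## with a hypothetical half-space constraint on the first factor; 'bbd3'
## and 'quad.3f' are from the roxygen example for spv()).
if (FALSE) {
  keep_left <- function(x) x[, 1] <= 0
  out_left <- spv(n = 500, design = bbd3, formula = quad.3f,
                  type = "spherical", keepfun = keep_left)
  plot(out_left, which = "fds")
}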
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/spv.data.frame.R
|
#' @rdname spv
#' @method spv list
#' @export
spv.list <- function(n, design, type = c("spherical", "cuboidal", "lhs", "mlhs", "slhs", "rslhs", "custom"),
formula, at = FALSE, keepfun, sample, unscaled = FALSE, ...){
cll <- match.call()
type <- tolower(type)
type <- match.arg(type)
nr <- length(design)
desnms <- names(design)
if (is.null(desnms)) desnms <- paste0("design", seq_along(design))
if (length(unique(desnms)) != nr) stop("Design names must be unique.")
if (missing(sample)){
sample <- sampler(n = n, design = design[[1]], type = type, at = at, ...)
if (!missing(keepfun)) {
repeat{
keep <- keepfun(sample)
cnt <- sum(keep)
sample <- sample[keep, ]
if (cnt >= n) break
rate <- cnt/n
message("Retained samples:", round(cnt, digits = 2),
"-- Adding some more...")
addsample <- sampler(n = max(ceiling((n - cnt)/rate), ceiling(n/10)),
design = design[[1]], type = type, at = at, ...)
sample <- rbind(sample, addsample)
}
message("Final sample of size", nrow(sample), "\n")
}
}
spvdesign <- function(design, sample, formula, call, unscaled){
ndes <- nrow(design)
n <- nrow(sample)
mat <- model.matrix(formula, data = as.data.frame(sample))
m <- ncol(design)
mod.mat <- model.matrix(formula, data = as.data.frame(design))
p <- ncol(mod.mat)
FtF.inv <- solve(crossprod(mod.mat))
tmp <- .Fortran("fds", as.integer(p), as.integer(n), as.integer(ndes),
as.double(FtF.inv), as.double(mat), double(n),
PACKAGE = "vdg")
spv <- tmp[[6]]
if (unscaled) spv <- spv / ndes
out <- list(spv = spv, sample = sample, type = type, call = call, at = at,
FtF.inv = FtF.inv, formula = formula, ndes = ndes,
unscaled = unscaled)
class(out) <- c("spv", "list")
out
}
cl <- makeCluster(getOption("cl.cores", min(detectCores() - 1, nr)))
on.exit(stopCluster(cl))
if (is(formula, "formula")){
formula <- as.formula(paste("~", paste(attr(terms(formula, data = design[[1]]), "term.labels"),
collapse = " + ")))
out <- parLapply(cl, design, spvdesign, sample = sample, formula = formula,
call = cll, unscaled = unscaled)
names(out) <- desnms
class(out) <- c("spvlist", "list")
return(out)
}
if (is.list(formula)){
formula <- lapply(formula, function(x)
as.formula(paste("~", paste(attr(terms(x, data = design[[1]]), "term.labels"), collapse = " + "))))
nf <- length(formula)
fornms <- names(formula)
if (is.null(fornms)) fornms <- paste0("formula", seq_along(formula))
if (length(unique(fornms)) != nf) stop("Formula names must be unique.")
out <- lapply(formula, function(y) {
out <- parLapply(cl, design, spvdesign, sample = sample, formula = y,
call = cll, unscaled = unscaled)
names(out) <- desnms
class(out) <- c("spvlist", "list")
return(out)
})
names(out) <- fornms
class(out) <- c("spvlistforlist", "list")
return(out)
}
}
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/spv.list.R
|
#' @rdname spv
#' @method spv matrix
#' @export
spv.matrix <- function(n, design, type = c("spherical", "cuboidal", "lhs", "mlhs", "slhs", "rslhs", "custom"),
formula, at = FALSE, keepfun, sample, unscaled = FALSE, ...){
cll <- match.call()
type <- tolower(type)
type <- match.arg(type)
if (missing(sample)){
sample <- sampler(n = n, design = design, type = type, at = at, ...)
if (!missing(keepfun)) {
repeat{
keep <- keepfun(sample)
cnt <- sum(keep)
sample <- sample[keep, ]
if (cnt >= n) break
rate <- cnt/n
cat("Retained samples:", round(cnt, digits = 2),
"-- Adding some more...\n")
addsample <- sampler(n = max(ceiling((n - cnt)/rate), ceiling(n/10)),
design = design, type = type, at = at, ...)
sample <- rbind(sample, addsample)
}
cat("Final sample of size", nrow(sample), "\n")
}
}
ndes <- nrow(design)
n <- nrow(sample)
m <- ncol(design)
if (is(formula, "formula")){
formula <- as.formula(paste("~", paste(attr(terms(formula, data = sample), "term.labels"),
collapse = " + ")))
mat <- model.matrix(formula, data = as.data.frame(sample))
mod.mat <- model.matrix(formula, data = as.data.frame(design))
p <- ncol(mod.mat)
FtF.inv <- solve(crossprod(mod.mat))
tmp <- .Fortran("fds", as.integer(p), as.integer(n), as.integer(ndes),
as.double(FtF.inv), as.double(mat), double(n),
PACKAGE = "vdg")
spv <- tmp[[6]]
if (unscaled) spv <- spv / ndes
out <- list(spv = spv, sample = sample, type = type, call = cll, at = at,
formula = formula, FtF.inv = FtF.inv, ndes = ndes,
unscaled = unscaled)
class(out) <- c("spv", "list")
return(out)
}
if (is.list(formula)){
formula <- lapply(formula, function(x)
as.formula(paste("~", paste(attr(terms(x, data = design[[1]]), "term.labels"), collapse = " + "))))
nr <- length(formula)
nms <- names(formula)
if (is.null(nms)) nms <- paste0("formula", seq_along(formula))
if (length(unique(nms)) != nr) stop("Formula names must be unique.")
spvformula <- function(formula, design, sample, call, unscaled){
ndes <- nrow(design)
n <- nrow(sample)
mat <- model.matrix(formula, data = as.data.frame(sample))
m <- ncol(design)
mod.mat <- model.matrix(formula, data = as.data.frame(design))
p <- ncol(mod.mat)
FtF.inv <- solve(crossprod(mod.mat))
tmp <- .Fortran("fds", as.integer(p), as.integer(n), as.integer(ndes),
as.double(FtF.inv), as.double(mat), double(n),
PACKAGE = "vdg")
spv <- tmp[[6]]
if (unscaled) spv <- spv / ndes
out <- list(spv = spv, sample = sample, type = type, call = call,
formula = formula, at = at, FtF.inv = FtF.inv, ndes = ndes,
unscaled = unscaled)
class(out) <- c("spv", "list")
out
}
cl <- makeCluster(getOption("cl.cores", min(detectCores() - 1, nr)))
clusterEvalQ(cl, library(vdg))
out <- parLapply(cl, formula, spvformula, design = design, sample = sample,
call = cll, unscaled = unscaled)
stopCluster(cl)
names(out) <- nms
class(out) <- c("spvforlist", "list")
return(out)
}
}
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/spv.matrix.R
|
#' Standardize or Unstandardize the Column Range
#'
#' Simple functions for rescaling a data matrix to a coded design and back. \code{stdrange} converts
#' the design in actual measurements into a coded design, while \code{ustdrange} reverses the process
#' (if the correct arguments are given).
#' @aliases ustdrange
#' @param x matrix containing the design, or an object coercible to a matrix.
#' @param mins vector of original values, one for each column, which should be recoded to the value -1;
#' or which have already been recoded to -1. This and the next argument are both recycled if not of the correct length.
#' @param maxs vector of original values which should be recoded as 1, or which have already been recoded to 1.
#' @export
#' @author Pieter C. Schoonees
stdrange <- function(x, mins = apply(x, 2, min), maxs = apply(x, 2, max)){
x <- as.matrix(x)
nr <- nrow(x)
nc <- ncol(x)
mins <- matrix(mins, nr, nc, byrow = TRUE)
maxs <- matrix(maxs, nr, nc, byrow = TRUE)
out <- 2*(x - mins)/(maxs - mins) - 1
return(out)
}
#' @rdname stdrange
ustdrange <- function(x, mins, maxs){
x <- as.matrix(x)
nr <- nrow(x)
nc <- ncol(x)
mins <- matrix(mins, nr, nc, byrow = TRUE)
maxs <- matrix(maxs, nr, nc, byrow = TRUE)
out <- mins + 0.5*(1 + x) * (maxs - mins)
return(out)
}
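## A small round-trip sketch (illustrative, assumed usage): coding a
## design to [-1, 1] and mapping it back to the original units.
if (FALSE) {
  des <- expand.grid(Time = c(360, 540, 720), Temperature = c(520, 535, 550))
  coded <- stdrange(des)
  range(coded)  # -1 and 1
  back <- ustdrange(coded, mins = apply(des, 2, min), maxs = apply(des, 2, max))
  all.equal(back, as.matrix(des))  # TRUE
}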
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/stdrange.R
|
#' Variance Dispersion Graphs, Fraction-of-Design-Space Plots and Variants
#'
#' This package provides functionality for producing variance dispersion graphs (VDGs),
#' fraction-of-design-space (FDS) plots and related graphics for assessing the prediction variance
#' properties of experimental designs. Random sampling is used to assess the distribution of the
#' prediction variance throughout the design region. Multiple design and/or model formulae
#' can be assessed at the same time. Graphics are produced by the \pkg{ggplot2} package.
#'
#' @details The workhorse function in the package is \code{\link{spv}}, which takes lists of
#' experimental designs and / or model formulae and produces samples throughout the design region
#' at which the prediction variance is evaluated. Depending on the type of input for the
#' \code{design} and \code{formula} arguments, \code{\link{spv}} creates output objects of S3 classes
#' \code{spv}, \code{spvlist}, \code{spvforlist} or \code{spvlistforlist}. The graphical output is
#' obtained with the \code{\link{plot}} methods of these classes, and the \code{which} argument
#' can be used to control the type of plots produced.
#'
#' The design regions allowed for are typically spherical or cuboidal, but the
#' \code{keepfun} argument to \code{\link{spv}} can be used for rejection sampling, so that
#' nonstandard design regions can also be handled. See also the \code{type} argument of \code{\link{spv}}.
#' The output from the \code{\link{plot}} methods for objects created by \code{\link{spv}} is
#' typically a named list of graphical objects created by \pkg{ggplot2}. These are best stored in an
#' object and recreated by printing the required plot. Storing such graphical objects also enables
#' post-hoc manipulation of the plots, such as changing the background colour by using
#' \pkg{ggplot2}'s \code{\link{theme}} function.
#'
#' @seealso \code{\link{spv}}, \code{\link{plot.spv}}, and \code{vignette(topic = "vdg")}.
#'
#' @name vdg-package
#' @aliases vdg-package vdg
#' @docType package
#' @author Pieter C. Schoonees <[email protected]>
#' @references
#' Pieter C. Schoonees, Niel J. le Roux, Roelof L.J. Coetzer (2016). Flexible Graphical Assessment of
#' Experimental Designs in R: The vdg Package. \emph{Journal of Statistical Software}, 74(3), 1-22.
#' \doi{10.18637/jss.v074.i03}.
#' @useDynLib vdg, fds, .registration = TRUE
#' @keywords package
#' @importFrom grDevices topo.colors
#' @importFrom methods is
#' @importFrom stats as.formula model.matrix na.omit predict quantile rnorm runif terms
#' @importFrom utils combn
NULL
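## A hedged illustration of the post-hoc manipulation mentioned above
## (assumed usage; 'spv1' stands for any stored spv-type object):
if (FALSE) {
  p <- plot(spv1, which = "fds")
  p$fds + ggplot2::theme_bw()
}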
|
/scratch/gouwar.j/cran-all/cranData/vdg/R/vdg-package.R
|
## ----knitr-setup, include=FALSE,cache=FALSE,echo=FALSE--------------
library(knitr)
opts_chunk$set(comment = NA, size = 'normalsize', prompt = TRUE, highlight = FALSE,
cache = TRUE, crop = FALSE, concordance = FALSE, fig.align='center',
fig.path='paper-figures/Paper-', out.width="0.4\\textwidth", fig.lp = "F:",
background = "#FFFFFF")
opts_knit$set(out.format = "latex")
knit_hooks$set(crop = hook_pdfcrop)
options(width = 70, prompt = "R> ", continue = "+ ", digits = 3, useFancyQuotes = FALSE)
thm <- knit_theme$get('default')
# Set default font colour (fgcolor) to black
thm$highlight <- "\\definecolor{fgcolor}{rgb}{0, 0, 0}\n
\\newcommand{\\hlnum}[1]{\\textcolor[rgb]{0.686,0.059,0.569}{#1}}%\n
\\newcommand{\\hlstr}[1]{\\textcolor[rgb]{0.192,0.494,0.8}{#1}}%\n
\\newcommand{\\hlcom}[1]{\\textcolor[rgb]{0.678,0.584,0.686}{\\textit{#1}}}%\n
\\newcommand{\\hlopt}[1]{\\textcolor[rgb]{0,0,0}{#1}}%\n
\\newcommand{\\hlstd}[1]{\\textcolor[rgb]{0.345,0.345,0.345}{#1}}%\n
\\newcommand{\\hlkwa}[1]{\\textcolor[rgb]{0.161,0.373,0.58}{\\textbf{#1}}}%\n\\newcommand{\\hlkwb}[1]{\\textcolor[rgb]{0.69,0.353,0.396}{#1}}%\n
\\newcommand{\\hlkwc}[1]{\\textcolor[rgb]{0.333,0.667,0.333}{#1}}%\n
\\newcommand{\\hlkwd}[1]{\\textcolor[rgb]{0.737,0.353,0.396}{\\textbf{#1}}}%"
thm$background <- "#FFFFFF"
thm$fontstyle <- "italic"
knit_theme$set(thm)
## Specific to vignette
options(cl.cores = 1)
## ----spv-example, echo=FALSE, fig.width=5, fig.height=5, fig.cap = "An example of a variance dispersion graph.",out.width="0.35\\textwidth", message=FALSE----
library("vdg")
data("D310")
set.seed(1)
vdgex <- spv(n = 100000, design = D310, formula = ~.^2, at = TRUE)
plot(vdgex, which = "vdgquantile", tau = c(0, 1))[[1]] + theme(legend.position = "none")
## ----qp-ex, echo = FALSE, fig.width = 6, fig.height=4.5, fig.cap = "An example of a quantile plot, corresponding to the example in Figure~\\ref{F:spv-example}.",out.width="0.35\\textwidth"----
set.seed(1)
qpex <- spv(n = 50000, design = D310, formula = ~.^2, at = TRUE, nr.rad = 6)
my_ecdf <- function(x) {
xs <- sort(x)
xun <- unique(xs)
n <- length(xun)
prop <- rep(NA, n)
for (i in seq_along(xun))
prop[i] <- sum(xs <= xun[i]) / n
return(cbind(x = xun, y = prop))
}
ds <- formatC(sqrt(rowSums(qpex$sample^2)), format = "f", digits = 3)
lst <- split(qpex$spv[-1], f = factor(ds[-1]))
ecd <- lapply(lst, my_ecdf)
df <- as.data.frame(do.call(rbind, ecd))
df$Radius <- factor(ds[-1])
df$Distance <- sqrt(rowSums(qpex$sample^2))[-1]
ggplot(data = df, mapping = aes(y = x, x = y, group = Radius, colour = Radius)) + geom_line(size = 1) + ylab("SPV Quantile") + xlab("Proportion")
## ----fds-example, echo = FALSE, fig.width = 5.5, fig.height=5.5, out.width="0.35\\textwidth", fig.cap = "An example of an FDS plot, corresponding to Figures~\\ref{F:spv-example} and \\ref{F:qp-ex}.",results='hide'----
set.seed(1)
fdsex <- spv(n = 50000, design = D310, formula = ~.^2)
plot(fdsex, which = "fds", np = 100)
## ----lhs,fig.height=5.5,fig.width=5.5,fig.cap="An example of an LHS of 10 points in a two-dimensional design space.",results='hide'----
library("vdg")
set.seed(8745)
samp <- LHS(n = 10, m = 2, lim = c(-1, 1))
plot(samp, main = "", pty = "s", pch = 16, ylim = c(-1, 1),
asp = 1, xlab = expression(X[1]), ylab = expression(X[2]))
abline(h = seq(-1, 1, length.out = 10),
v = seq(-1, 1, length.out = 10), lty = 3, col = "grey")
## ----vign, eval=FALSE-----------------------------------------------
# vignette(topic = "vdg", package = "vdg")
## ----load-roq-------------------------------------------------------
data("D416B")
data("D416C")
## ----vdgroq,fig.width=9, fig.height=5.5, results='hide', fig.cap="A VDG for Roquemore's hybrid designs D416B and D416C for a full quadratic model.",out.width="0.7\\textwidth"----
quad4 <- formula( ~ (x1 + x2 + x3 + x4)^2 + I(x1^2) + I(x2^2) +
I(x3^2) + I(x4^2))
set.seed(1234)
spv1 <- spv(n = 5000, design = list(D416B = D416B,
D416C = D416C), formula = quad4)
plot(spv1, which = "vdgboth")
## ----quad4, eval=FALSE----------------------------------------------
# quad4 <- formula( ~ .^2 + I(x1^2) + I(x2^2) + I(x3^2) + I(x4^2))
## ----ex1-bothroqfds,fig.width=6, fig.height=5, results='hide',fig.cap="A standard and variance ratio FDS plot for Roquemore's hybrid designs D416B and D416C for a full quadratic model.",fig.show='hold'----
plot(spv1, which = "fds")
plot(spv1, which = "fds", VRFDS = TRUE, np = 100)
## ----vdgroq-theme,fig.width=7, fig.height=4, results='hide', fig.cap="A second version of Figure~\\ref{F:vdgroq}.",out.width="0.5\\textwidth"----
p <- plot(spv1, which = "vdgboth")
p$vdgboth + theme_bw() + theme(panel.grid = element_blank())
## ----make-ccd3------------------------------------------------------
library("rsm")
ccd3 <- as.data.frame(ccd(basis = 3, n0 = 4,
alpha = "spherical", oneblock = TRUE))[, 3:5]
## ----algdes-cand,results='hide'-------------------------------------
set.seed(8619)
cand <- runif_sphere(n = 10000, m = 3)
colnames(cand) <- colnames(ccd3)
## ----algdes-AD------------------------------------------------------
quad3 <- formula( ~ (x1 + x2 + x3)^2 + I(x1^2) + I(x2^2) + I(x3^2))
library("AlgDesign")
set.seed(3476)
desD <- optFederov(quad3, data = cand, nTrials = 22, criterion = "D")
desA <- optFederov(quad3, data = cand, nTrials = 22, criterion = "A")
## ----ex2-spv,results='hide',fig.show='hide'-------------------------
spv2 <- spv(n = 10000, formula = quad3,
design = list(CCD = ccd3, D = desD$design, A = desA$design))
plot(spv2, which = 2:3)
## ----ccdfds,include=FALSE,echo=FALSE,fig.height=5,fig.width=6,fig.show='hide'----
plot(spv2, which = 2)[[1]] + theme(plot.title = element_text(size = 16), legend.position = "none")
## ----ccdvdg,include=FALSE,echo=FALSE,fig.height=5,fig.width=8,fig.show='hide'----
plot(spv2, which = 3)[[1]] + theme(plot.title = element_text(size = 16))
## ----GJdesreg,echo=FALSE,fig.width=5,fig.height=5,fig.cap="The design region and D-optimal design of \\cite{Goo2011}. Some runs are replicated."----
df <- data.frame(Time = c(360, 420, 720, 720, 660, 360, 360), Temperature = c(550, 550, 523, 520, 520, 529, 550))
p <- ggplot(data = df, aes(x = Time, y = Temperature)) + geom_path() + geom_point(data = GJ54)
p
## ----keepfun--------------------------------------------------------
keepfun <- function(x) apply(x >= -1 & x <= 1, 1, all) &
(x[, 2] <= -1.08 * x[, 1] + 0.28) & (x[, 2] >= -0.36 * x[, 1] - 0.76)
## ----ex3-for--------------------------------------------------------
cube2 <- formula( ~ (Time + Temperature)^2 + I(Time^2) +
I(Temperature^2) + I(Time^3) + I(Temperature^3) +
Time:I(Temperature^2) + I(Time^2):Temperature)
GJmod <- update(cube2, ~ . - I(Time^3) - I(Time^2):Temperature)
## ----ex3-spv--------------------------------------------------------
spv3 <- spv(n = 10000, design = stdrange(GJ54), type = "lhs",
formula = list(Cubic = cube2, GoosJones = GJmod),
keepfun = keepfun)
## ----vdggj-noplot,eval=FALSE----------------------------------------
# plot(spv3, which = 1, points.size = 2)
## ----vdggj,fig.width=8,fig.height=4,results='hide',echo=FALSE,fig.cap="VDG for the D-optimal design of \\citet{Goo2011}, for the two models.",out.width="0.5\\textwidth"----
plot(spv3, which = 1, points.size = 2)[[1]] + theme(plot.title = element_text(size = 16))
## ----rgl-code,eval=FALSE--------------------------------------------
# library("rgl")
# with(spv3$Cubic, plot3d(x = sample[, "Time"],
# y = sample[, "Temperature"], z = spv))
|
/scratch/gouwar.j/cran-all/cranData/vdg/inst/doc/vdg.R
|
# Generated by cpp11: do not edit by hand
compare_files <- function(expected, test) {
.Call(`_vdiffr_compare_files`, expected, test)
}
svglite_ <- function(file, bg, width, height, pointsize, standalone, always_valid) {
.Call(`_vdiffr_svglite_`, file, bg, width, height, pointsize, standalone, always_valid)
}
svgstring_ <- function(env, bg, width, height, pointsize, standalone) {
.Call(`_vdiffr_svgstring_`, env, bg, width, height, pointsize, standalone)
}
get_svg_content <- function(p) {
.Call(`_vdiffr_get_svg_content`, p)
}
set_engine_version <- function(version) {
invisible(.Call(`_vdiffr_set_engine_version`, version))
}
|
/scratch/gouwar.j/cran-all/cranData/vdiffr/R/cpp11.R
|
svglite_path <- function(...) {
file.path("R", "svglite", ...)
}
for (file in list.files(svglite_path(), pattern = "*.R")) {
source(svglite_path(file), local = TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/vdiffr/R/embed-svglite.R
|
#' Does a figure look like its expected output?
#'
#' `expect_doppelganger()` is a testthat expectation for graphical
#' plots. It generates SVG snapshots that you can review graphically
#' with [testthat::snapshot_review()]. You will find more information
#' about snapshotting in the [testthat snapshots
#' vignette](https://testthat.r-lib.org/articles/snapshotting.html).
#'
#' @param title A brief description of what is being tested in the
#' figure. For instance: "Points and lines overlap".
#'
#' If a ggplot2 figure doesn't have a title already, `title` is
#' applied to the figure with `ggtitle()`.
#'
#' The title is also used as file name for storing SVG (in a
#'   sanitized form, with special characters converted to `"-"`).
#' @param fig A figure to test. This can be a ggplot object, a
#' recordedplot, or more generally any object with a `print` method.
#'
#' If you need to test a plot with non-printable objects (e.g. base
#' plots), `fig` can be a function that generates and prints the
#' plot, e.g. `fig = function() plot(1:3)`.
#' @param path,... `r lifecycle::badge('deprecated')`.
#' @param writer A function that takes the plot, a target SVG file,
#' and an optional plot title. It should transform the plot to SVG
#' in a deterministic way and write it to the target file. See
#' [write_svg()] (the default) for an example.
#' @param cran If `FALSE` (the default), mismatched snapshots only
#' cause a failure when you run tests locally or in your CI (Github
#' Actions or any platform that sets the `CI` environment variable).
#' If `TRUE`, failures may also occur on CRAN machines.
#'
#' Failures are disabled on CRAN by default because testing the
#' appearance of a figure is inherently fragile. Changes in the R
#' graphics engine or in ggplot2 may cause subtle differences in the
#' aspect of a plot, such as a slightly smaller or larger margin.
#' These changes will cause spurious failures because you need to
#' update your snapshots to reflect the upstream changes.
#'
#' It would be distracting for both you and the CRAN maintainers if
#' such changes systematically caused failures on CRAN. This is why
#' snapshot expectations do not fail on CRAN by default and should
#' be treated as a monitoring tool that allows you to quickly check
#' how the appearance of your figures changes over time, and to
#' manually assess whether changes reflect actual problems in your
#' package.
#'
#' Internally, this argument is passed to
#' [testthat::expect_snapshot_file()].
#'
#'
#' @section Debugging:
#' It is sometimes difficult to understand the cause of a failure.
#' This usually indicates that the plot is not created
#' deterministically. Potential culprits are:
#'
#' * Some of the plot components depend on random variation. Try
#' setting a seed.
#'
#' * The plot depends on some system library. For instance sf plots
#' depend on libraries like GEOS and GDAL. It might not be possible
#' to test these plots with vdiffr.
#'
#' To help you understand the causes of a failure, vdiffr
#' automatically logs the SVG diff of all failures when run under R
#' CMD check. The log is located in `tests/vdiffr.Rout.fail` and
#' should be displayed on Travis.
#'
#' You can also set the `VDIFFR_LOG_PATH` environment variable with
#' `Sys.setenv()` to unconditionally (also interactively) log failures
#' in the file pointed by the variable.
#'
#' @examples
#' if (FALSE) { # Not run
#'
#' library("ggplot2")
#'
#' test_that("plots have known output", {
#' disp_hist_base <- function() hist(mtcars$disp)
#' expect_doppelganger("disp-histogram-base", disp_hist_base)
#'
#' disp_hist_ggplot <- ggplot(mtcars, aes(disp)) + geom_histogram()
#' expect_doppelganger("disp-histogram-ggplot", disp_hist_ggplot)
#' })
#'
#' }
#' @export
expect_doppelganger <- function(title,
fig,
path = deprecated(),
...,
writer = write_svg,
cran = FALSE) {
testthat::local_edition(3)
fig_name <- str_standardise(title)
file <- paste0(fig_name, ".svg")
# Announce snapshot file before touching `fig` in case evaluation
# causes an error. This allows testthat to restore the files
# (see r-lib/testthat#1393).
testthat::announce_snapshot_file(name = file)
testcase <- make_testcase_file(fig_name)
writer(fig, testcase, title)
if (!missing(...)) {
lifecycle::deprecate_soft(
"1.0.0",
"vdiffr::expect_doppelganger(... = )",
)
}
if (lifecycle::is_present(path)) {
lifecycle::deprecate_soft(
"1.0.0",
"vdiffr::expect_doppelganger(path = )",
)
}
if (is_graphics_engine_stale()) {
testthat::skip(paste_line(
"The R graphics engine is too old.",
"Please update to R 4.1.0 and regenerate the vdiffr snapshots."
))
}
withCallingHandlers(
testthat::expect_snapshot_file(
testcase,
name = file,
cran = cran,
compare = testthat::compare_file_text
),
expectation_failure = function(cnd) {
if (is_snapshot_stale(title, testcase)) {
testthat::skip(paste_line(
"SVG snapshot generated under a different vdiffr version.",
"i" = "Please update your snapshots."
))
}
if (!is_null(snapshotter <- get_snapshotter())) {
path_old <- snapshot_path(snapshotter, file)
path_new <- snapshot_path(snapshotter, paste0(fig_name, ".new.svg"))
if (all(file.exists(path_old, path_new))) {
push_log(fig_name, path_old, path_new)
}
}
}
)
}
# From testthat
get_snapshotter <- function() {
x <- getOption("testthat.snapshotter")
if (is.null(x)) {
return()
}
if (!x$is_active()) {
return()
}
x
}
snapshot_path <- function(snapshotter, file) {
file.path(snapshotter$snap_dir, snapshotter$file, file)
}
is_graphics_engine_stale <- function() {
getRversion() < "4.1.0"
}
str_standardise <- function(s, sep = "-") {
stopifnot(is_scalar_character(s))
s <- gsub("[^a-z0-9]", sep, tolower(s))
s <- gsub(paste0(sep, sep, "+"), sep, s)
s <- gsub(paste0("^", sep, "|", sep, "$"), "", s)
s
}
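## A small sketch of the sanitisation (illustrative, assumed behaviour):
if (FALSE) {
  str_standardise("Points & Lines!")   # "points-lines"
  str_standardise("Histogram (base)")  # "histogram-base"
}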
is_snapshot_stale <- function(title, testcase) {
if (is_null(snapshotter <- get_snapshotter())) {
return(FALSE)
}
file <- paste0(str_standardise(title), ".svg")
path <- snapshot_path(snapshotter, file)
if (!file.exists(path)) {
return(FALSE)
}
lines <- readLines(path)
match <- regexec(
"data-engine-version='([0-9.]+)'",
lines
)
match <- Filter(length, regmatches(lines, match))
# Old vdiffr snapshot that doesn't embed a version
if (!length(match)) {
return(TRUE)
}
if (length(match) > 1) {
abort("Found multiple vdiffr engine versions in SVG snapshot.")
}
snapshot_version <- match[[1]][[2]]
svg_engine_ver() != snapshot_version
}
|
/scratch/gouwar.j/cran-all/cranData/vdiffr/R/expect-doppelganger.R
|
make_testcase_file <- function(fig_name) {
file <- tempfile(fig_name, fileext = ".svg")
structure(file, class = "vdiffr_testcase")
}
#' Default SVG writer
#'
#' This is the default SVG writer for vdiffr test cases. It uses
#' embedded versions of [svglite](https://svglite.r-lib.org),
#' [harfbuzz](https://harfbuzz.github.io/), and the Liberation and
#' Symbola fonts in order to create deterministic SVGs.
#'
#' @param plot A plot object to convert to SVG. Can be a ggplot2 object,
#' a [recorded plot][grDevices::recordPlot], or any object with a
#' [print()][base::print] method.
#' @param file The file to write the SVG to.
#' @param title An optional title for the test case.
#'
#' @export
write_svg <- function(plot, file, title = "") {
svglite(file)
on.exit(grDevices::dev.off())
print_plot(plot, title)
}
print_plot <- function(p, title = "") {
UseMethod("print_plot")
}
#' @export
print_plot.default <- function(p, title = "") {
print(p)
}
#' @export
print_plot.ggplot <- function(p, title = "") {
if (title != "" && !"title" %in% names(p$labels)) {
p <- p + ggplot2::ggtitle(title)
}
if (!length(p$theme)) {
p <- p + ggplot2::theme_test()
}
print(p)
}
#' @export
print_plot.grob <- function(p, title) {
grid::grid.draw(p)
}
#' @export
print_plot.recordedplot <- function(p, title) {
grDevices::replayPlot(p)
}
#' @export
print_plot.function <- function(p, title) {
p()
}
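## A hedged usage sketch for the default writer (assumed example):
if (FALSE) {
  path <- tempfile(fileext = ".svg")
  write_svg(function() plot(1:10), path, title = "Ten points")
  readLines(path, n = 3)  # deterministic SVG header and content
}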
|
/scratch/gouwar.j/cran-all/cranData/vdiffr/R/svg.R
|
#' An SVG Graphics Driver
#'
#' This function produces graphics compliant with the current w3 SVG XML
#' standard. The driver output currently does NOT specify a DOCTYPE DTD.
#'
#' svglite provides two ways of controlling fonts: system fonts
#' aliases and user fonts aliases. Supplying a font alias has two
#' effects. First it determines the \code{font-family} property of all
#' text anchors in the SVG output. Secondly, the font is used to
#' determine the dimensions of graphical elements and has thus an
#' influence on the overall aspect of the plots. This means that for
#' optimal display, the font must be available on both the computer
#' used to create the svg, and the computer used to render the
#' svg. See the \code{fonts} vignette for more information.
#'
#' @param filename The file where output will appear.
#' @param height,width Height and width in inches.
#' @param bg Default background color for the plot (defaults to "white").
#' @param pointsize Default point size.
#' @param standalone Produce a standalone svg file? If \code{FALSE}, omits
#' xml header and default namespace.
#' @param always_valid Should the SVG file remain valid at all times while it is
#'   being written to? Setting this to `TRUE` will incur a considerable performance
#' hit (>50% additional rendering time) so this should only be set to `TRUE`
#' if the file is being parsed while it is still being written to.
#' @param file Identical to `filename`. Provided for backward compatibility.
#' @references \emph{W3C Scalable Vector Graphics (SVG)}:
#' \url{http://www.w3.org/Graphics/SVG/}
#' @author This driver was written by T Jake Luciani
#'   \email{jakeluciani@@yahoo.com} in 2012, and updated by Matthieu Decorde
#' \email{matthieu.decorde@@ens-lyon.fr}
#' @seealso \code{\link{pictex}}, \code{\link{postscript}}, \code{\link{Devices}}
#'
#' @examples
#' # Save to file
#' svglite(tempfile(fileext = ".svg"))
#' plot(1:11, (-5:5)^2, type = 'b', main = "Simple Example")
#' dev.off()
#'
#' @keywords device
#' @useDynLib svglite, .registration = TRUE
#' @importFrom systemfonts match_font
#' @export
svglite <- function(filename = "Rplot%03d.svg", width = 10, height = 8,
bg = "white", pointsize = 12, standalone = TRUE,
always_valid = FALSE, file) {
if (!missing(file)) {
filename <- file
}
if (invalid_filename(filename))
stop("invalid 'file': ", filename)
invisible(svglite_(filename, bg, width, height, pointsize, standalone, always_valid))
}
#' Access current SVG as a string.
#'
#' This is a variation on \code{\link{svglite}} that makes it easy to access
#' the current value as a string.
#'
#' See \code{\link{svglite}()} documentation for information about
#' specifying fonts.
#'
#' @return A function with no arguments: call the function to get the
#' current value of the string.
#' @examples
#' s <- svgstring(); s()
#'
#' plot.new(); s();
#' text(0.5, 0.5, "Hi!"); s()
#' dev.off()
#'
#' s <- svgstring()
#' plot(rnorm(5), rnorm(5))
#' s()
#' dev.off()
#' @inheritParams svglite
#' @export
svgstring <- function(width = 10, height = 8, bg = "white",
pointsize = 12, standalone = TRUE) {
env <- new.env(parent = emptyenv())
string_src <- svgstring_(env, width = width, height = height, bg = bg,
pointsize = pointsize, standalone = standalone)
function() {
svgstr <- env$svg_string
if(!env$is_closed) {
svgstr <- c(svgstr, get_svg_content(string_src))
}
structure(svgstr, class = "svg")
}
}
#' @export
print.svg <- function(x, ...) cat(x, sep = "\n")
|
/scratch/gouwar.j/cran-all/cranData/vdiffr/R/svglite/SVG.R
|
#' Run plotting code and view svg in RStudio Viewer or web browser.
#'
#' This is useful primarily for testing. Requires the \code{htmltools}
#' package.
#'
#' @param code Plotting code to execute.
#' @param ... Other arguments passed on to \code{\link{svglite}}.
#' @keywords internal
#' @export
#' @examples
#' if (interactive() && require("htmltools")) {
#' htmlSVG(plot(1:10))
#' htmlSVG(hist(rnorm(100)))
#' }
htmlSVG <- function(code, ...) {
svg <- inlineSVG(code, ...)
htmltools::browsable(
htmltools::HTML(svg)
)
}
#' Run plotting code and return svg
#'
#' This is useful primarily for testing. Requires the \code{xml2} package.
#'
#' @return A \code{xml2::xml_document} object.
#' @inheritParams htmlSVG
#' @inheritParams svglite
#' @keywords internal
#' @export
#' @examples
#' if (require("xml2")) {
#' x <- xmlSVG(plot(1, axes = FALSE))
#' x
#' xml_find_all(x, ".//text")
#' }
xmlSVG <- function(code, ..., standalone = FALSE, height = 7, width = 7) {
plot <- inlineSVG(code, ...,
standalone = standalone,
height = height,
width = width
)
xml2::read_xml(plot)
}
#' Run plotting code and open svg in OS/system default svg viewer or editor.
#'
#' This is useful primarily for testing or post-processing the SVG.
#'
#' @inheritParams htmlSVG
#' @inheritParams svglite
#' @keywords internal
#' @export
#' @examples
#' if (interactive()) {
#' editSVG(plot(1:10))
#' editSVG(contour(volcano))
#' }
editSVG <- function(code, ..., width = NA, height = NA) {
dim <- plot_dim(c(width, height))
tmp <- tempfile(fileext = ".svg")
svglite(tmp, width = dim[1], height = dim[2], ...)
tryCatch(code,
finally = grDevices::dev.off()
)
system(sprintf("open %s", shQuote(tmp)))
}
#' Run plotting code and return svg as string
#'
#' This is useful primarily for testing but can be used as an
#' alternative to \code{\link{svgstring}()}.
#'
#' @inheritParams htmlSVG
#' @keywords internal
#' @export
#' @examples
#' stringSVG(plot(1:10))
stringSVG <- function(code, ...) {
svg <- inlineSVG(code, ...)
structure(svg, class = "svg")
}
inlineSVG <- function(code, ..., width = NA, height = NA) {
dim <- plot_dim(c(width, height))
svg <- svgstring(width = dim[1], height = dim[2], ...)
tryCatch(code,
finally = grDevices::dev.off()
)
out <- svg()
class(out) <- NULL
out
}
|
/scratch/gouwar.j/cran-all/cranData/vdiffr/R/svglite/inlineSVG.R
|
mini_plot <- function(...) graphics::plot(..., axes = FALSE, xlab = "", ylab = "")
plot_dim <- function(dim = c(NA, NA)) {
if (any(is.na(dim))) {
if (length(grDevices::dev.list()) == 0) {
default_dim <- c(10, 8)
} else {
default_dim <- grDevices::dev.size()
}
dim[is.na(dim)] <- default_dim[is.na(dim)]
dim_f <- prettyNum(dim, digits = 3)
message("Saving ", dim_f[1], "\" x ", dim_f[2], "\" image")
}
dim
}
vapply_chr <- function(.x, .f, ...) {
vapply(.x, .f, character(1), ...)
}
vapply_lgl <- function(.x, .f, ...) {
vapply(.x, .f, logical(1), ...)
}
lapply_if <- function(.x, .p, .f, ...) {
if (!is.logical(.p)) {
.p <- vapply_lgl(.x, .p)
}
.x[.p] <- lapply(.x[.p], .f, ...)
.x
}
keep <- function(.x, .p, ...) {
.x[vapply_lgl(.x, .p, ...)]
}
compact <- function(x) {
Filter(length, x)
}
`%||%` <- function(x, y) {
if (is.null(x)) y else x
}
is_scalar_character <- function(x) {
is.character(x) && length(x) == 1
}
names2 <- function(x) {
names(x) %||% rep("", length(x))
}
ilapply <- function(.x, .f, ...) {
idx <- names(.x) %||% seq_along(.x)
out <- Map(.f, names(.x), .x, ...)
names(out) <- names(.x)
out
}
ilapply_if <- function(.x, .p, .f, ...) {
if (!is.logical(.p)) {
.p <- vapply_lgl(.x, .p)
}
.x[.p] <- ilapply(.x[.p], .f, ...)
.x
}
set_names <- function(x, nm = x) {
stats::setNames(x, nm)
}
zip <- function(.l) {
fields <- set_names(names(.l[[1]]))
lapply(fields, function(i) {
lapply(.l, .subset2, i)
})
}
svglite_manual_tests <- new.env()
register_manual_test <- function(file) {
testthat_dir <- getwd()
testfile <- file.path(testthat_dir, file)
assign(file, testfile, svglite_manual_tests)
}
init_manual_tests <- function() {
remove(list = names(svglite_manual_tests), envir = svglite_manual_tests)
}
open_manual_tests <- function() {
lapply(names(svglite_manual_tests), function(test) {
utils::browseURL(svglite_manual_tests[[test]])
})
}
invalid_filename <- function(filename) {
if (!is.character(filename) || length(filename) != 1)
return(TRUE)
  # strip double occurrences of %
stripped_file <- gsub("%{2}", "", filename)
# filename is fine if there are no % left
if (!grepl("%", stripped_file))
return(FALSE)
# remove first allowed pattern, % followed by digits followed by [diouxX]
stripped_file <- sub("%[#0 ,+-]*[0-9.]*[diouxX]", "", stripped_file)
# matching leftover % indicates multiple patterns or a single incorrect pattern (e.g., %s)
return(grepl("%", stripped_file))
}
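## A small sketch of the accepted patterns (illustrative, assumed behaviour):
if (FALSE) {
  invalid_filename("Rplot%03d.svg")  # FALSE: one C-style integer pattern
  invalid_filename("Rplot%s.svg")    # TRUE:  '%s' is not allowed
  invalid_filename("100%%done.svg")  # FALSE: '%%' escapes the percent
}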
#' Convert an svg file to svgz, overwriting the old file
#' @param file the path to the file to convert
#' @keywords internal
#' @export
create_svgz <- function(file) {
svg <- readLines(file)
out <- gzfile(file, "w")
writeLines(svg, out)
close(out)
invisible(NULL)
}
|
/scratch/gouwar.j/cran-all/cranData/vdiffr/R/svglite/utils.R
|
package_version <- function(pkg) {
as.character(utils::packageVersion(pkg))
}
cat_line <- function(..., trailing = TRUE, file = "") {
cat(paste_line(..., trailing = trailing), file = file)
}
paste_line <- function(..., trailing = FALSE) {
lines <- paste(chr(...), collapse = "\n")
if (trailing) {
lines <- paste0(lines, "\n")
}
lines
}
push_log <- function(name, old_path, new_path) {
log_path <- Sys.getenv("VDIFFR_LOG_PATH")
# If no envvar is set, check if we are running under R CMD check. In
# that case, always push a log file.
if (!nzchar(log_path)) {
if (!is_checking_remotely()) {
return(invisible(FALSE))
}
log_path <- testthat::test_path("..", "vdiffr.Rout.fail")
}
log_exists <- file.exists(log_path)
file <- file(log_path, "a")
on.exit(close(file))
if (!log_exists) {
cat_line(
file = file,
"Environment:",
vdiffr_info(),
""
)
}
diff_lines <- diff_lines(name, old_path, new_path)
cat_line(file = file, "", !!!diff_lines, "")
}
is_checking_remotely <- function() {
nzchar(Sys.getenv("CI")) || !nzchar(Sys.getenv("NOT_CRAN"))
}
diff_lines <- function(name,
before_path,
after_path) {
before <- readLines(before_path)
after <- readLines(after_path)
diff <- diffobj::diffChr(
before,
after,
format = "raw",
# For reproducibility
disp.width = 80
)
lines <- as.character(diff)
paste_line(
glue("Failed doppelganger: {name} ({before_path})"),
"",
!!!lines
)
}
vdiffr_info <- function() {
glue(
"- vdiffr-svg-engine: { SVG_ENGINE_VER }
- vdiffr: { utils::packageVersion('vdiffr') }"
)
}
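## A hedged sketch (assumed usage, hypothetical path): forcing failure
## logging outside R CMD check via the environment variable read above.
if (FALSE) {
  Sys.setenv(VDIFFR_LOG_PATH = "vdiffr.Rout.fail")
}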
|
/scratch/gouwar.j/cran-all/cranData/vdiffr/R/utils.R
|
#' @import rlang
#' @importFrom glue glue
#' @useDynLib vdiffr, .registration = TRUE
#' @keywords internal
"_PACKAGE"
SVG_ENGINE_VER <- "2.0"
svg_engine_ver <- function() {
as.numeric_version(SVG_ENGINE_VER)
}
.onLoad <- function(lib, pkg) {
set_engine_version(SVG_ENGINE_VER)
}
## usethis namespace: start
#' @importFrom lifecycle deprecated
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/vdiffr/R/vdiffr-package.R
|
# This code creates "src/glyph_dims.h" which inlines the glyph dimensions of
# Liberation and Symbola for all unicode points up until 50000.
#
# Changes in the entries of "src/glyph_dims.h" will result in potential failures
# of visual tests since it will change string dimension calculations. Only
# update it for very good reasons!
# Create vector of unicode characters
char_num <- seq_len(50000)
res <- 1e4
chars <- eval(
parse(text = paste0(
'c("',
paste(sprintf("\\U%04X", char_num), collapse = '","'),
'")')
)
)
# Get font files from fontquiver (old vdiffr dependency)
liberation <- fontquiver::font("Liberation", 'Sans', 'Regular')$ttf
symbola <- fontquiver::font("Symbola", 'Symbols', 'Regular')$ttf
# Extract glyph dimensions from Liberation
liberation_glyphs <- systemfonts::glyph_info(chars, res = res, path = liberation)
liberation_glyphs$num <- char_num
liberation_glyphs <- liberation_glyphs[!duplicated(liberation_glyphs$index), ]
liberation_glyphs <- data.frame(
char = liberation_glyphs$num,
width = liberation_glyphs$x_advance * 72 / res,
ascent = vapply(liberation_glyphs$bbox, `[`, numeric(1), 4) * 72 / res,
descent = -vapply(liberation_glyphs$bbox, `[`, numeric(1), 3) * 72 / res
)
# Extract glyph dimensions from Symbola
symbola_glyphs <- systemfonts::glyph_info(chars, res = res, path = symbola)
symbola_glyphs$num <- char_num
symbola_glyphs <- symbola_glyphs[!duplicated(symbola_glyphs$index), ]
symbola_glyphs <- data.frame(
char = symbola_glyphs$num,
width = symbola_glyphs$x_advance * 72 / res,
ascent = vapply(symbola_glyphs$bbox, `[`, numeric(1), 4) * 72 / res,
descent = -vapply(symbola_glyphs$bbox, `[`, numeric(1), 3) * 72 / res
)
# Write "src/glyph_dims.h"
def <- c(
"// Generated by inst/create_glyph_dims.R — Do not edit by hand",
"#pragma once",
"#include <unordered_map>",
"struct Dim {",
" double width;",
" double ascent;",
" double descent;",
"};",
"const std::unordered_map<uint32_t, Dim> LIBERATION_DIM = {",
paste(sprintf(
" {%i, {%.4f, %.4f, %.4f}}",
liberation_glyphs$char,
liberation_glyphs$width,
liberation_glyphs$ascent,
liberation_glyphs$descent
), collapse = ",\n"),
"};",
"const std::unordered_map<uint32_t, Dim> SYMBOLA_DIM = {",
paste(sprintf(
" {%i, {%.4f, %.4f, %.4f}}",
symbola_glyphs$char,
symbola_glyphs$width,
symbola_glyphs$ascent,
symbola_glyphs$descent
), collapse = ",\n"),
"};"
)
writeLines(def, "src/glyph_dims.h")
|
/scratch/gouwar.j/cran-all/cranData/vdiffr/inst/create_glyph_dims.R
|
VERSION <- commandArgs(TRUE)
if(!file.exists(sprintf("../windows/harfbuzz-%s/include/png.h", VERSION))){
if(getRversion() < "3.3.0") setInternet2()
download.file(sprintf("https://github.com/rwinlib/harfbuzz/archive/v%s.zip", VERSION), "lib.zip", quiet = TRUE)
dir.create("../windows", showWarnings = FALSE)
unzip("lib.zip", exdir = "../windows")
unlink("lib.zip")
}
|
/scratch/gouwar.j/cran-all/cranData/vdiffr/tools/winlibs.R
|
### Constructor (Accepts either controlPoints or parameters)
"AffineTransformation" <- function(controlPoints=data.frame(), parameters=numeric()) {
if (missing(parameters))
return(new("AffineTransformation", controlPoints=controlPoints))
if (missing(controlPoints))
return(new("AffineTransformation", parameters=parameters))
new("AffineTransformation", controlPoints=controlPoints, parameters=parameters)
}
### Calculate parameters from control points
### Modifies the original object
### Arguments:
### - object is an "AffineTransformation" object
###
if (!isGeneric("calculateParameters"))
setGeneric("calculateParameters",function(object){standardGeneric ("calculateParameters")})
setMethod(f="calculateParameters",signature("AffineTransformation"),
definition=function(object){
if (ncol(object@controlPoints) == 0)
stop("Control points were not provided. You could access the parameters directly by calling 'getParameters'.")
newObject <- deparse(substitute(object))
names(object@controlPoints) <- c('X_Source','Y_Source','X_Target','Y_Target')
linMod <- lm(formula = cbind(X_Target, Y_Target) ~ X_Source + Y_Source,
data=object@controlPoints)
object@parameters <- as.vector(rbind(coef(linMod)[2:3,],coef(linMod)[1,]))
object@residuals <- as.matrix(linMod$residuals)
        object@rmse <- sqrt(sum(object@residuals^2)/nrow(object@residuals))
assign(newObject,object,envir=parent.frame())
return(invisible())
}
)
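## A hedged sketch (illustrative, assumed usage): recovering a known
## affine map (scale by 2, translate by (3, 4)) from four control points.
if (FALSE) {
  src <- matrix(c(0, 0, 1, 0, 0, 1, 1, 1), ncol = 2, byrow = TRUE)
  cp <- data.frame(xs = src[, 1], ys = src[, 2],
                   xt = 2 * src[, 1] + 3, yt = 2 * src[, 2] + 4)
  at <- AffineTransformation(controlPoints = cp)
  calculateParameters(at)
  getParameters(at)  # a = 2, b = 0, c = 3, d = 0, e = 2, f = 4
}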
|
/scratch/gouwar.j/cran-all/cranData/vec2dtransf/R/AffineTransformation-methods.R
|
setMethod("show","Cartesian2DCoordinateTransformation",
function(object){
cat("\n*** Object of the class '", class(object)[1], "' *** \n")
nrowShow <- min(10,nrow(object@controlPoints))
if(length(object@controlPoints) != 0){
cat("\n* Slot 'controlPoints' (display limited to 10 rows): \n")
colnames(object@controlPoints) = c("X source", "Y source", "X target", "Y target")
print(object@controlPoints[1:nrowShow,],quote=FALSE)
}else{
cat("\n* Slot 'controlPoints' is empty \n")
}
if(length(object@parameters) == 0){
cat("\n* Slot 'parameters' is empty \n");
}else{
names(object@parameters) = letters[seq( from = 1, to = length(object@parameters) )]
cat("\n* Slot 'parameters': \n"); print (object@parameters)
}
cat("\n")
}
)
### Getter for slot parameters
setGeneric("getParameters",function(object){standardGeneric ("getParameters")})
setMethod("getParameters","Cartesian2DCoordinateTransformation",
function(object){
if (length(object@parameters) == 0)
return("Transformation parameters are unknown. You may need to call 'calculateParameters' first.")
names(object@parameters) = letters[seq( from = 1, to = length(object@parameters) )]
return(object@parameters)
}
)
### Getter for slot residuals
setGeneric("getResiduals",function(object){standardGeneric ("getResiduals")})
setMethod("getResiduals","Cartesian2DCoordinateTransformation",
function(object){
if (nrow(object@residuals) == 0 && ncol(object@controlPoints) == 0)
return("Residuals cannot be calculated. You must provide control points first.")
if (nrow(object@residuals) == 0 && ncol(object@controlPoints) > 0)
return("Residuals are unknown. You may need to call 'calculateParameters' first.")
if (all(object@residuals == 0) && length(object@parameters) > 0)
return("Residuals are all zero. There are not redundant control points to apply Least Squares.")
return(object@residuals)
}
)
### Getter for slot rmse
setGeneric("getRMSE",function(object){standardGeneric ("getRMSE")})
setMethod("getRMSE","Cartesian2DCoordinateTransformation",
function(object){
if (is.null(object@rmse) && ncol(object@controlPoints) == 0)
return("Root Mean Square Error cannot be calculated. You must provide control points first.")
if (is.null(object@rmse) && ncol(object@controlPoints) > 0)
return("Root Mean Square Error is unknown. You may need to call 'calculateParameters' first.")
if (object@rmse == 0 && length(object@parameters) > 0)
return("Root Mean Square Error is zero. There are not redundant control points to apply Least Squares.")
return(object@rmse)
}
)
### Transform coordinates using calculated parameters
### Arguments:
### - coords is a matrix containing x and y values
### - object is either an "AffineTransformation" or "SimilarityTransformation" object
### Returns:
### A matrix containing transformed x and y values
###
"transformCoordinates" <- function(coords, object) {
X <- object@parameters
if (is(object, "AffineTransformation"))
newCoords <- vapply(1:nrow(coords),
FUN=function(x) c(c(coords[x,],1)%*%X[1:3],c(coords[x,],1)%*%X[4:6]),
FUN.VALUE=c(0,0)
)
if (is(object, "SimilarityTransformation"))
newCoords <- vapply(1:nrow(coords),
FUN=function(x) c(c(coords[x,],1)%*%X[1:3],c(coords[x,],1)%*%c(-X[2],X[1],X[4])),
FUN.VALUE=c(0,0)
)
t(newCoords)
}
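### Illustrative sketch (invented parameter values): applying a known affine
### parameter vector c(a, b, c, d, e, f) directly, without control points.
### Guarded so it does not run when the file is sourced:
if (FALSE) {
    at <- AffineTransformation(parameters = c(2, 0, 10, 0, 2, 5))
    xy <- cbind(x = c(0, 1), y = c(0, 1))
    transformCoordinates(xy, at)  # rows: (10, 5) and (12, 7)
}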
### Plot a transformed grid by using the transformation parameters
### Arguments:
### - object is either a "AffineTransformation" or "SimilarityTransformation" object
### - bbox is an SP bbox object, i.e. a 2x2 matrix with coordinates
### - numberOfPoints is the number of points to represent the grid
###
setGeneric("plotGridTransformation",function(object, bbox, numberOfPoints){
standardGeneric ("plotGridTransformation")})
setMethod(f="plotGridTransformation",signature(object="Cartesian2DCoordinateTransformation"),
definition=function(object, bbox, numberOfPoints){
if (missing(object))
stop("Please provide a transformation object as first argument.")
if (missing(bbox))
stop("Please provide a bounding box (bbox).")
if (missing(numberOfPoints))
stop("Please provide a number of points for representing the grid.")
# Adapted from SP package (Class-Spatial.R)
if (!is.matrix(bbox))
stop("bbox should be a matrix")
if (any(is.na(bbox)))
stop("bbox should never contain NA values")
if (any(!is.finite(bbox)))
stop("bbox should never contain infinite values")
if (any(bbox[,"max"] < bbox[,"min"]))
stop("invalid bbox: max < min")
# end of "Adapted from SP package"
if (!isTRUE(all.equal(numberOfPoints,as.integer(numberOfPoints))))
stop("numberOfPoints should be an integer")
if (numberOfPoints <= 0)
stop("numberOfPoints should be greater than zero")
if (length(object@parameters) == 0)
stop("Parameters have to be calculated before. Call 'calculateParameters' and try again.")
offset=as.integer(sqrt(numberOfPoints))
# Adapted from http://casoilresource.lawr.ucdavis.edu/drupal/node/433
x <- seq(bbox[1,1],bbox[1,2], length.out=offset)
y <- seq(bbox[2,1],bbox[2,2], length.out=offset)
g = expand.grid(X=x, Y=y)
ng = transformCoordinates(as.matrix(g), object)
plot(g, cex=0.3, main='Transformed grid', col='red')
points(ng, cex=.3, col='green')
legend(bbox[1,1],bbox[2,2],legend=c("Original point", "Transformed point"),
pch=c(16,16),pt.cex=.6,col=c('red', 'green'))
# End of "Adapted from http://casoilresource.lawr.ucdavis.edu/drupal/node/433"
}
)
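### Illustrative sketch (invented values): note that the bbox matrix must
### carry "min"/"max" column names, as checked above. Guarded so it does not
### run when the file is sourced:
if (FALSE) {
    at <- AffineTransformation(parameters = c(2, 0, 10, 0, 2, 5))
    bb <- matrix(c(0, 0, 10, 10), nrow = 2,
                 dimnames = list(c("x", "y"), c("min", "max")))
    plotGridTransformation(at, bb, numberOfPoints = 100)
}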
### Apply the transformation to an SP object
### Arguments:
### - object is either a "AffineTransformation" or "SimilarityTransformation" object
### - sp.object is an object of type: SpatialPoints, SpatialPointsDataFrame,
### SpatialLines, SpatialLinesDataFrame, SpatialPolygons or SpatialPolygonsDataFrame
### Returns:
### A transformed sp.object
###
setGeneric("applyTransformation",function(object, sp.object){
standardGeneric ("applyTransformation")})
setMethod(f="applyTransformation",signature(object="Cartesian2DCoordinateTransformation"),
definition=function(object, sp.object){
if (missing(object))
stop("Please provide a transformation object as first argument.")
if (missing(sp.object))
stop("Please provide an SP object as second argument.")
if (!class(sp.object) %in% c('SpatialPoints', 'SpatialPointsDataFrame',
'SpatialLines', 'SpatialLinesDataFrame', 'SpatialPolygons',
'SpatialPolygonsDataFrame'))
stop('Transformation can be applied on objects of type SpatialPoints, SpatialPointsDataFrame, SpatialLines, SpatialLinesDataFrame, SpatialPolygons or SpatialPolygonsDataFrame.')
if (length(object@parameters) == 0)
stop("Parameters have to be calculated before. Call 'calculateParameters' and try again.")
if (!is.na(proj4string(sp.object)) && !is.projected(sp.object))
stop("The SP object cannot have a geographic Coordinate Reference System (CRS).")
        bDataFrame = class(sp.object)[1] %in% c('SpatialPointsDataFrame', 'SpatialLinesDataFrame',
                'SpatialPolygonsDataFrame')
if (bDataFrame)
df = sp.object@data
rs = CRS(proj4string(sp.object))
# Do transform!
if (is(sp.object, 'SpatialPoints')){
newCoords = transformCoordinates(coordinates(sp.object), object)
if (bDataFrame){
newSPObject=SpatialPointsDataFrame(coords=newCoords, data=df, proj4string=rs)
}else{
newSPObject=SpatialPoints(coords=newCoords, proj4string=rs)
}
}
if (is(sp.object, 'SpatialLines')){
newLines = lapply( sp.object@lines,
function(objLines) Lines(lapply( objLines@Lines,
function(objLine) Line(transformCoordinates(objLine@coords, object))),
ID=objLines@ID)
)
newSPObject = SpatialLines(newLines, proj4string=rs)
if (bDataFrame)
newSPObject = SpatialLinesDataFrame(newSPObject, data=df)
}
if (is(sp.object, 'SpatialPolygons')){
newPolygons = lapply( sp.object@polygons,
function(objPolygons) Polygons(lapply( objPolygons@Polygons,
function(objPolygon) Polygon(transformCoordinates(objPolygon@coords, object), hole=objPolygon@hole)),
ID=objPolygons@ID)
)
newSPObject = SpatialPolygons(newPolygons, pO=sp.object@plotOrder, proj4string=rs)
if (bDataFrame)
newSPObject = SpatialPolygonsDataFrame(newSPObject, data=df)
}
return(newSPObject)
}
)
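### Illustrative sketch (invented coordinates; assumes the 'sp' package is
### attached). Guarded so it does not run when the file is sourced:
if (FALSE) {
    library(sp)
    at  <- AffineTransformation(parameters = c(2, 0, 10, 0, 2, 5))
    pts <- SpatialPoints(cbind(x = c(0, 1), y = c(0, 1)))
    applyTransformation(at, pts)  # points move to (10, 5) and (12, 7)
}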
|
/scratch/gouwar.j/cran-all/cranData/vec2dtransf/R/Cartesian2DCoordinateTransformation-methods.R
|
### AffineTransformation class ###
## Affine transformations can be written as:
## x' = ax + by + c
## y' = dx + ey + f
setClass("AffineTransformation",
representation(),
contains = "Cartesian2DCoordinateTransformation",
validity = function(object) {
if (length(object@parameters) == 0){
if (nrow(object@controlPoints) < 3)
stop("At least 3 control points (rows in the data.frame 'controlPoints') are required for the affine transformation")
}
else if (length(object@parameters) != 6){
stop("Affine transformations require 6 parameters!")
}
return(TRUE)
}
)
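## When stored in the 'parameters' slot, the six values are laid out as
## c(a, b, c, d, e, f), matching the two equations above (see
## 'transformCoordinates' in Cartesian2DCoordinateTransformation-methods.R).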
|
/scratch/gouwar.j/cran-all/cranData/vec2dtransf/R/Class-AffineTransformation.R
|
setClassUnion("numericOrNULL", c("numeric", "NULL"))
setClass("Cartesian2DCoordinateTransformation",
representation(controlPoints = "data.frame", parameters="numeric",
residuals="matrix", rmse="numericOrNULL", "VIRTUAL"),
prototype(controlPoints=data.frame(),parameters=numeric(),
residuals=matrix(nrow=0,ncol=0),rmse=NULL),
validity = function(object) {
if (!inherits(object@controlPoints, "data.frame"))
stop("controlPoints should be of class data.frame")
if (ncol(object@controlPoints) == 0 && length(object@parameters) == 0 )
stop("Either 'controlPoints' or 'parameters' must be provided!")
if ( length(object@parameters) == 0 ){
if (ncol(object@controlPoints) != 4)
stop("'controlPoints' must have four (4) columns: X source, Y source, X target, Y target")
}
return(TRUE)
}
)
|
/scratch/gouwar.j/cran-all/cranData/vec2dtransf/R/Class-Cartesian2DCoordinateTransformation.R
|
### SimilarityTransformation class ###
## Similarity transformations can be written as:
## x' = ax + by + c
## y' = ay - bx + d
setClass("SimilarityTransformation",
representation(),
contains = "Cartesian2DCoordinateTransformation",
validity = function(object) {
if (length(object@parameters) == 0){
if (nrow(object@controlPoints) < 2)
stop("At least 2 control points (rows in the data.frame 'controlPoints') are required for the similarity transformation")
}
else if (length(object@parameters) != 4){
stop("Similarity transformations require 4 parameters!")
}
return(TRUE)
}
)
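## When stored in the 'parameters' slot, the four values are laid out as
## c(a, b, c, d). Since a similarity transformation preserves angles, its
## scale factor can be recovered as sqrt(a^2 + b^2), and its rotation angle
## from atan2(b, a) (up to the sign convention chosen for the angle).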
|
/scratch/gouwar.j/cran-all/cranData/vec2dtransf/R/Class-SimilarityTransformation.R
|
### Constructor (Accepts either controlPoints or parameters)
"SimilarityTransformation" <- function(controlPoints=data.frame(), parameters=numeric()) {
if (missing(parameters))
return(new("SimilarityTransformation", controlPoints=controlPoints))
if (missing(controlPoints))
return(new("SimilarityTransformation", parameters=parameters))
new("SimilarityTransformation", controlPoints=controlPoints, parameters=parameters)
}
### Calculate parameters from control points
### Modifies the original object
### Arguments:
### - object is an "SimilarityTransformation" object
###
if (!isGeneric("calculateParameters"))
setGeneric("calculateParameters",function(object){standardGeneric ("calculateParameters")})
setMethod(f="calculateParameters",signature(object="SimilarityTransformation"),
definition=function(object){
if (ncol(object@controlPoints) == 0)
stop("Control points were not provided. You could access the parameters directly by calling 'getParameters'.")
newObject <- deparse(substitute(object))
x1=c(object@controlPoints[,1],object@controlPoints[,2])
x2=c(object@controlPoints[,2],-object@controlPoints[,1])
ones = rep(1,nrow(object@controlPoints))
zeros = rep(0,nrow(object@controlPoints))
x3=c(ones,zeros)
x4=c(zeros,ones)
linMod=lm(formula=c(object@controlPoints[,3],object@controlPoints[,4]) ~ x1 + x2 + x3 + x4 - 1)
object@parameters <- as.vector(coef(linMod))
object@residuals <- matrix(linMod$residuals,nrow(object@controlPoints))
object@rmse <- sqrt(sum(object@residuals**2)/nrow(object@residuals))
assign(newObject,object,envir=parent.frame())
return(invisible())
}
)
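### Illustrative usage sketch (invented control points; two points are the
### minimum for a similarity transformation). Guarded so it does not run
### when the file is sourced:
if (FALSE) {
    cp <- data.frame(X_source = c(0, 1), Y_source = c(0, 0),
                     X_target = c(10, 12), Y_target = c(5, 5))
    st <- SimilarityTransformation(controlPoints = cp)
    calculateParameters(st)
    getParameters(st)  # named a..d; here c(2, 0, 10, 5): scale 2, no rotation
}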
|
/scratch/gouwar.j/cran-all/cranData/vec2dtransf/R/SimilarityTransformation-methods.R
|
#' Compare all combinations of vectors using set operations
#'
#' @inheritParams render.venn.diagram
#' @param named_list_of_vectors_to_compare A named list of vectors to compare (see, for example, \code{\link{example.vectors.list}}). Duplicate values in a given vector will only be counted once (for example, c("a", "a", "b", "c") will be treated identically to c("a", "b", "c")).
#' @param degrees_of_comparison_to_include A number or vector of numbers indicating which degrees of comparison to print (for example, 'c(2, 5)' would print only 2- and 5-way vector comparisons).
#' @param draw_venn_diagrams A logical (TRUE/FALSE) indicator whether to draw Venn diagrams for all 2- through 5-way comparisons of vectors.
#' @param vector_colors_for_venn_diagrams An optional vector of color names for Venn diagrams (if \code{draw_venn_diagrams} is \code{TRUE}). Color names are applied to the named vectors in \code{named_list_of_vectors_to_compare} in their order in \code{named_list_of_vectors_to_compare}. If this is blank, a random color will be selected for each vector. Either way, each vector will have a consistent color across the Venn diagrams in which it appears.
#' @param save_venn_diagram_files A logical (TRUE/FALSE) indicator whether to save Venn diagrams as PNG files.
#' @param location_for_venn_diagram_files An optional string giving a directory into which to save Venn diagram PNG files (if \code{save_venn_diagram_files} is \code{TRUE}). This location must already exist on the filesystem.
#' @param prefix_for_venn_diagram_files An optional string giving a prefix to prepend to saved Venn diagram PNG files (if \code{save_venn_diagram_files} is \code{TRUE}).
#' @param saved_venn_diagram_resolution_ppi An optional number giving a resolution (PPI) for saved Venn diagrams (if \code{save_venn_diagram_files} is \code{TRUE}).
#' @param saved_venn_diagram_dimension_units An optional string giving units for specifying \code{saved_venn_diagram_width} and \code{saved_venn_diagram_height} (if \code{save_venn_diagram_files} is \code{TRUE}). Can be \code{px} (pixels), \code{in} (inches, the default), \code{cm}, or \code{mm}.
#' @param saved_venn_diagram_width The width (in \code{saved_venn_diagram_dimension_units} units) for saved Venn diagrams (if \code{save_venn_diagram_files} is \code{TRUE}).
#' @param saved_venn_diagram_height The height (in \code{saved_venn_diagram_dimension_units} units) for saved Venn diagrams (if \code{save_venn_diagram_files} is \code{TRUE}).
#'
#' @param suppress_messages A logical (TRUE/FALSE) indicator whether to suppress messages. Even if this is \code{TRUE}, warnings will still be printed.
#'
#' @return A list, with one object for each comparison of vectors. The list contains the following elements:
#' \describe{
#' \item{elements_involved}{The vector names involved in the comparison.}
#' \item{union_of_elements}{A vector of all (deduplicated) items involved in the comparison, across all of the vectors.}
#' \item{overlap_of_elements}{A vector of the deduplicated elements that occurred in all of the compared vectors.}
#' \item{elements_unique_to_first_element}{This element will have a sub-element named for each vector being compared (i.e., for each of the names in \code{$elements_involved}). Each sub-element contains the (deduplicated) items that were unique to that vector (i.e., that did not overlap with any other vector in the comparison).}
#' \item{venn_diagram}{If \code{draw_venn_diagrams} is \code{TRUE}, and the comparison is of 2 through 5 vectors, a Venn diagram object produced using the \pkg{VennDiagram} package. This diagram can be rendered using \code{\link{render.venn.diagram}}.}
#' }
#'
#' To compile this list object into a Markdown report, use \code{\link{compare.vectors.and.return.text.analysis.of.overlap}}. For an example of this usage, see the \code{Veccompare Overlap Report} RMarkdown template for RStudio that is installed as part of the \pkg{veccompare} package.
#' @export compare.vectors
#'
#' @examples
#' example <- veccompare::compare.vectors(veccompare::example.vectors.list)
#'
#' # To extract similar elements across list items:
#' veccompare::extract.compared.vectors(
#' example,
#' elements_of_output = "elements_involved"
#' )
#'
#' # To extract all comparisons that involve "vector_a":
#' veccompare::extract.compared.vectors(
#' example,
#' vector_names = "vector_a"
#' )
#'
#' # To find all comparisons that were about "vector_a" and "vector_c":
#' veccompare::extract.compared.vectors(
#' example,
#' vector_names = c("vector_a", "vector_c"),
#' only_match_vector_names = TRUE
#' )
#'
#' # To get all elements that did a two-way comparison:
#' veccompare::extract.compared.vectors(
#' example,
#' degrees_of_comparison = 2
#' )
compare.vectors <- function(
named_list_of_vectors_to_compare,
degrees_of_comparison_to_include = NULL,
	draw_venn_diagrams = FALSE, # Whether we should draw Venn diagrams for 2- to 5-way comparisons (the VennDiagram package can only draw up to five-way comparisons).
vector_colors_for_venn_diagrams = NULL,
save_venn_diagram_files = FALSE,
location_for_venn_diagram_files = "",
prefix_for_venn_diagram_files = "",
saved_venn_diagram_resolution_ppi = 300,
saved_venn_diagram_dimension_units = "in",
saved_venn_diagram_width = 8,
saved_venn_diagram_height = 6,
viewport_npc_width_height_for_images = 1.0,
suppress_messages = FALSE
){
vector_names <- names(named_list_of_vectors_to_compare)
degrees_of_comparison_to_include_after_figuring_venn_diagrams <- degrees_of_comparison_to_include # We'll update this as necessary as we go below.
degrees_of_comparison_for_venn_diagrams <- NULL # We'll also update this as necessary as we go below.
# If we're generating Venn diagrams, we'll create a consistent color to use for each vector:
if(draw_venn_diagrams == TRUE){
# Generate the combinations of vectors to use for Venn diagram drawing: ------------------------
# Figure out which degrees of comparison we need to calculate, especially if we're to draw Venn diagrams (which will require that, e.g., if we're drawing a 5-way comparison, we've also calculated all of the 1- to 4-way comparisons)
if(!is.null(degrees_of_comparison_to_include)){
maximum_degree_of_comparison_calculated <- max(degrees_of_comparison_to_include)
minimum_degree_of_comparison_calculated <- min(degrees_of_comparison_to_include)
if(draw_venn_diagrams == TRUE & maximum_degree_of_comparison_calculated >= 2 & minimum_degree_of_comparison_calculated <= 5){
degrees_of_comparison_for_venn_diagrams <- seq(
from = 2,
to = min(maximum_degree_of_comparison_calculated, 5) # We can only draw up to 5-way comparisons
)
degrees_of_comparison_to_include_after_figuring_venn_diagrams <- union(degrees_of_comparison_to_include, degrees_of_comparison_for_venn_diagrams)
degrees_of_comparison_to_include_after_figuring_venn_diagrams <- degrees_of_comparison_to_include_after_figuring_venn_diagrams[order(degrees_of_comparison_to_include_after_figuring_venn_diagrams)]
# Tell the user if there are elements that need to be computed in order to draw Venn diagrams but that weren't asked for:
degrees_of_comparison_not_asked_for_but_needed_for_diagrams <- degrees_of_comparison_for_venn_diagrams[which(! degrees_of_comparison_for_venn_diagrams %in% degrees_of_comparison_to_include)]
if(length(degrees_of_comparison_not_asked_for_but_needed_for_diagrams) > 0){
if(suppress_messages != TRUE){
message("Note: We need to calculate all combinations of degree(s) ", veccompare::vector.print.with.and(degrees_of_comparison_not_asked_for_but_needed_for_diagrams), " in addition to the degrees you asked for (", veccompare::vector.print.with.and(degrees_of_comparison_to_include), "), in order to draw Venn diagrams. Proceeding with calculating all of those...")
}
}
}
} else { # If degrees_of_comparison_to_include IS NULL:
maximum_degree_of_comparison_calculated <- length(named_list_of_vectors_to_compare)
if(draw_venn_diagrams == TRUE & maximum_degree_of_comparison_calculated >= 2){
degrees_of_comparison_for_venn_diagrams <- 2:(min(maximum_degree_of_comparison_calculated, 5)) # We can only draw up to 5-way comparisons
}
} # End of if(!is.null(degrees_of_comparison_to_include))
# Generate Venn diagram colors ------------------------
if(length(degrees_of_comparison_for_venn_diagrams) > 0){
if(!is.null(vector_colors_for_venn_diagrams)){
if(length(vector_colors_for_venn_diagrams) != length(vector_names)){
stop("The number of colors for Venn diagrams (", length(vector_colors_for_venn_diagrams), ") does not match the number of vectors we are comparing (", length(vector_names), ").")
} else {
vector_colors <- as.list(vector_colors_for_venn_diagrams)
names(vector_colors) <- vector_names
}
} else { # If we've not been given colors to use, we'll generate random ones:
vector_colors <- as.list(veccompare::generate.random.colors(length(vector_names)))
names(vector_colors) <- vector_names
}
} # End of if length(degrees_of_comparison_for_venn_diagrams) > 0
} # End of if draw_venn_diagrams == TRUE
combinations_of_vector_names <- as.data.frame(
gtools::combinations(
length(vector_names), # size of the source vector
length(vector_names), # size of the target vectors
vector_names,
set = TRUE, # Remove duplicates from the output
repeats.allowed = TRUE # Do all permutations of starting orders (vs. just combinations, where order doesn't matter). Setting this to TRUE will, when the items are chunked down, give us all 1:length(vector_names) sized combinations of items.
)
)
# Remove duplicate combinations, assessed after chunking down unique items in each row (so that, e.g., "a a a b" and "a a b b" are seen as the same thing, and only one of them is retained, since they both contain just elements "a" and "b"):
# The 'as.data.frame()' below is so that we know we're always dealing with a data.frame here -- if there's more than one combination of vector names, this would be a data.frame anyway, but if there's just one vector (and thus no combinations), this would otherwise be a factor. So setting this to be consistently a data.frame simplifies the steps below by allowing more consistency.
combinations_of_vector_names_chunked_for_unique_items <- as.data.frame(
combinations_of_vector_names[
!duplicated(
apply(
combinations_of_vector_names,
1, # iterate over rows
unique
)
)
, # Use all columns
]
)
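	# Illustration (hypothetical): for two vectors named "a" and "b", gtools::combinations()
	# returns the rows ("a","a"), ("a","b"), ("b","b"); chunking each row down to its unique
	# items leaves the comparison sets {"a"}, {"a","b"}, and {"b"}, i.e., the 1- and 2-way comparisons.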
# As a check of our work, this should usually be TRUE, unless length(vector_names) <= 2:
# nrow(combinations_of_vector_names_chunked_for_unique_items) < nrow(combinations_of_vector_names)
if(nrow(combinations_of_vector_names_chunked_for_unique_items) > 1){
rownames(combinations_of_vector_names_chunked_for_unique_items) <- NULL # This is just for aesthetic purposes; it makes debugging easier.
}
	# If we *have not* been told which comparisons (e.g., 2-way, 3-way, etc.) to include, we keep all of them by default.
	if(!is.null(degrees_of_comparison_to_include)){ # If we *have* been told which comparisons to include, we'll filter down to those here:
if(!is.numeric(degrees_of_comparison_to_include)){
stop("The argument 'degrees_of_comparison_to_include' is expected to be numeric.")
} else { # If we are dealing with a numeric argument, as expected.
combinations_of_vector_names_chunked_for_unique_items <- combinations_of_vector_names_chunked_for_unique_items[
which(
apply(
combinations_of_vector_names_chunked_for_unique_items,
1, # Iterate over rows
function(x){length(unique(x)) %in% degrees_of_comparison_to_include_after_figuring_venn_diagrams}
)
)
, # Use all columns
]
if(suppress_messages != TRUE){
message("Calculating only the following degree(s) of comparison: ", veccompare::vector.print.with.and(degrees_of_comparison_to_include_after_figuring_venn_diagrams), "...")
}
}
}
combination_set_operations <- apply(
combinations_of_vector_names_chunked_for_unique_items,
1, # Iterate over rows
function(row){
unique_names_in_row <- as.character(unique(row))
if(nrow(combinations_of_vector_names_chunked_for_unique_items) == 1){
vector_items_for_unique_names_in_row <- lapply(named_list_of_vectors_to_compare[unique_names_in_row], unique)
} else {
vector_items_for_unique_names_in_row <- named_list_of_vectors_to_compare[unique_names_in_row]
}
# Run setdiff() on with each element being first in turn (since setdiff gives the unique items for whichever item is printed first)
list_of_elements_unique_to_vectors = list() # We'll fill this in below.
for(vector_name in unique_names_in_row){
list_of_elements_unique_to_vectors[[vector_name]] = Reduce(
setdiff,
c(
vector_items_for_unique_names_in_row[vector_name], # Put vector_name first (since setdiff gives the unique items for whichever item is printed first)
vector_items_for_unique_names_in_row[-which( names(vector_items_for_unique_names_in_row) %in% vector_name)]
)
)
}
list_to_return <- list(
"elements_involved" = as.character(unique_names_in_row),
# The Reduce() approach below comes from https://www.r-bloggers.com/intersect-for-multiple-vectors-in-r/:
"union_of_elements" = Reduce(union, vector_items_for_unique_names_in_row),
"overlap_of_elements" = Reduce(intersect, vector_items_for_unique_names_in_row),
"elements_unique_to_first_element" = list_of_elements_unique_to_vectors
)
return(list_to_return)
}
)
# str(combination_set_operations)
# Draw Venn diagrams -------------------------------------------------
if(draw_venn_diagrams == TRUE){
# We will now create Venn diagrams for each level of comparison (e.g., 2-way, 3-way, etc.), from 2 to the maximum level of comparison (up to 5-way, since that's the most that the VennDiagram package I'm using can draw):
# Define a sub-function to make it easier to query overlaps between elements
get_overlap_of_elements_from_combination_set_operations <- function(
... # This should be a list of element names. "..." is R's way of accepting an arbitrary number of arguments.
){
element_names <- unlist(list(...)) # Parse the "..." arbitrary number of arguments into a vector.
overlap_value <- length(
combination_set_operations[[ # sapply() below should only bring back one matching element, since above we made sure we weren't calculating repeats, so this (just assuming that there will only be one matching element) seems safe to do.
which(
sapply(
purrr::map(combination_set_operations, "elements_involved"),
function(x){setequal(x, c(element_names))}
)
)
]]$overlap_of_elements
)
return(overlap_value)
} # End of sub-function definition
if(maximum_degree_of_comparison_calculated >= 2){
if(maximum_degree_of_comparison_calculated >= 6){
if(suppress_messages != TRUE){
message("Note: We can only draw up to 5-way diagrams. Thus, combinations of greater than 5 degrees (i.e., 6+ - way comparisons) will not be drawn...")
}
}
valid_venn_diagram_degrees_actually_asked_for_by_the_user <- degrees_of_comparison_for_venn_diagrams[degrees_of_comparison_for_venn_diagrams %in% degrees_of_comparison_to_include]
if(length(valid_venn_diagram_degrees_actually_asked_for_by_the_user) > 0){
if(suppress_messages != TRUE){
message("Drawing only the following degree(s) of comparison, following the options entered by the user: ", veccompare::vector.print.with.and(valid_venn_diagram_degrees_actually_asked_for_by_the_user), "...")
}
}
for(degree_of_comparison in valid_venn_diagram_degrees_actually_asked_for_by_the_user){ # The Venn Diagram package can only draw up to 5-way comparisons, so we won't go above 5 when drawing Venn-Diagrams.
if(suppress_messages != TRUE){
message("Calculating Venn diagram for all ", degree_of_comparison, "-way comparisons...", sep = "")
}
combination_set_elements_relevant_for_current_degree_of_comparison <- which(
sapply(
purrr::map(combination_set_operations, "elements_involved"),
function(x) {length(x) == degree_of_comparison}
)
)
for(combination_set_element_number in combination_set_elements_relevant_for_current_degree_of_comparison){
names_of_elements_in_this_comparison_set <- combination_set_operations[[combination_set_element_number]]$elements_involved
if(suppress_messages != TRUE){
message("Drawing comparison between ", veccompare::vector.print.with.and(names_of_elements_in_this_comparison_set), "...", sep = "")
}
if(length(names_of_elements_in_this_comparison_set) == 2){
venn_diagram <- VennDiagram::draw.pairwise.venn(
area1 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[1]]])),
area2 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[2]]])),
cross.area = length(combination_set_operations[[combination_set_element_number]]$overlap_of_elements),
category = names_of_elements_in_this_comparison_set,
lty = rep("blank", 2), # Line dash pattern of the circles
fill = unlist(vector_colors[names_of_elements_in_this_comparison_set]),
alpha = rep(0.5, 2),
cat.pos = rep(0, 2), # Category position around the circles (in degrees)
cat.dist = rep(0.025, 2), # Category names' distances from the edges of the circles (can be negative)
scaled = TRUE,
margin = 0,
cex = rep(1.0, 3),
cat.cex = rep(1.0, 2),
ind = FALSE # Do not automatically draw the diagram
)
} else if(length(names_of_elements_in_this_comparison_set) == 3){
venn_diagram <- VennDiagram::draw.triple.venn(
area1 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[1]]])),
area2 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[2]]])),
area3 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[3]]])),
n12 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2]),
n23 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3]),
n13 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[3]),
n123 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3]),
category = names_of_elements_in_this_comparison_set,
lty = rep("blank", 3), # Line dash pattern of the circles
fill = unlist(vector_colors[names_of_elements_in_this_comparison_set]),
alpha = rep(0.5, 3),
cat.pos = c(315, 45, 180), # Category position around the circles (in degrees)
cat.dist = rep(0.025, 3), # Category names' distances from the edges of the circles (can be negative)
scaled = TRUE,
margin = 0,
cex = rep(1.0, 7),
cat.cex = rep(1.0, 3),
ind = FALSE # Do not automatically draw the diagram
)
} else if(length(names_of_elements_in_this_comparison_set) == 4){
venn_diagram <- VennDiagram::draw.quad.venn(
area1 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[1]]])),
area2 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[2]]])),
area3 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[3]]])),
area4 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[4]]])),
n12 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2]),
n13 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[3]),
n14 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[4]),
n23 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3]),
n24 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[4]),
n34 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[4]),
n123 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3]),
n124 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[4]),
n134 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[4]),
n234 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[4]),
n1234 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[4]),
category = names_of_elements_in_this_comparison_set,
lty = rep("blank", 4), # Line dash pattern of the circles
fill = unlist(vector_colors[names_of_elements_in_this_comparison_set]),
alpha = rep(0.5, 4),
#cat.pos = c(0, 0, 180), # Category position around the circles (in degrees)
#cat.dist = rep(0.025, 3), # Category names' distances from the edges of the circles (can be negative)
scaled = TRUE,
margin = 0,
cex = rep(1.0, 15),
cat.cex = rep(1.0, 4),
ind = FALSE # Do not automatically draw the diagram
)
} else if(length(names_of_elements_in_this_comparison_set) == 5){
venn_diagram <- VennDiagram::draw.quintuple.venn(
area1 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[1]]])),
area2 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[2]]])),
area3 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[3]]])),
area4 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[4]]])),
area5 = length(unique(named_list_of_vectors_to_compare[[names_of_elements_in_this_comparison_set[5]]])),
n12 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2]),
n13 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[3]),
n14 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[4]),
n15 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[5]),
n23 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3]),
n24 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[4]),
n25 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[5]),
n34 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[4]),
n35 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[5]),
n45 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[4], names_of_elements_in_this_comparison_set[5]),
n123 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3]),
n124 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[4]),
n125 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[5]),
n134 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[4]),
n135 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[5]),
n145 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[4], names_of_elements_in_this_comparison_set[5]),
n234 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[4]),
n235 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[5]),
n245 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[4], names_of_elements_in_this_comparison_set[5]),
n345 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[4], names_of_elements_in_this_comparison_set[5]),
n1234 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[4]),
n1235 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[5]),
n1245 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[4], names_of_elements_in_this_comparison_set[5]),
n1345 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[4], names_of_elements_in_this_comparison_set[5]),
n2345 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[4], names_of_elements_in_this_comparison_set[5]),
n12345 = get_overlap_of_elements_from_combination_set_operations(names_of_elements_in_this_comparison_set[1], names_of_elements_in_this_comparison_set[2], names_of_elements_in_this_comparison_set[3], names_of_elements_in_this_comparison_set[4], names_of_elements_in_this_comparison_set[5]),
category = names_of_elements_in_this_comparison_set,
lty = rep("blank", 5), # Line dash pattern of the circles
fill = unlist(vector_colors[names_of_elements_in_this_comparison_set]),
alpha = rep(0.5, 5),
#cat.pos = c(0, 0, 180), # Category position around the circles (in degrees)
#cat.dist = rep(0.025, 3), # Category names' distances from the edges of the circles (can be negative)
scaled = TRUE,
margin = 0,
cex = rep(1.0, 31),
cat.cex = rep(1.0, 5),
ind = FALSE # Do not automatically draw the diagram
)
} # End of if statement over length of elements
combination_set_operations[[combination_set_element_number]]$venn_diagram <- venn_diagram
if(save_venn_diagram_files == TRUE){
filename_to_use <- make.names(
paste(
paste(combination_set_operations[[combination_set_element_number]]$elements_involved, sep = "_vs_", collapse = "_vs_"),
"_venn_diagram.png",
sep = "",
collapse = ""
)
)
# Sanitize user inputs:
if(location_for_venn_diagram_files != ""){
location_for_venn_diagram_files_to_use <- as.character(location_for_venn_diagram_files)
} else {
location_for_venn_diagram_files_to_use <- getwd()
}
# Sanitize user inputs:
if(prefix_for_venn_diagram_files != ""){
prefix_for_venn_diagram_files_to_use <- as.character(prefix_for_venn_diagram_files)
} else {
prefix_for_venn_diagram_files_to_use <- ""
}
final_path_for_venn_diagram <- file.path(location_for_venn_diagram_files_to_use, paste(prefix_for_venn_diagram_files_to_use, filename_to_use, sep = ""))
if(suppress_messages != TRUE){
message("Saving Venn diagram to '", final_path_for_venn_diagram, "'...")
}
viewport_npc_width_height_for_images_value <- viewport_npc_width_height_for_images
# Begin pumping output into a PNG file:
png(
filename = final_path_for_venn_diagram,
res = saved_venn_diagram_resolution_ppi,
units = saved_venn_diagram_dimension_units,
width = saved_venn_diagram_width,
height = saved_venn_diagram_height
#, bg = "transparent" # Create a transparent background
)
veccompare::render.venn.diagram(venn_diagram, viewport_npc_width_height_for_images = viewport_npc_width_height_for_images_value)
# Stop sending output to a PNG
dev.off()
} # End of if(save_venn_diagram_files == TRUE)
} # End of for loop over combination_set_element_number
} # End of for loop over degree_of_comparison
} # End of if statement re: draw_venn_diagrams
} # End of if statement re: whether length(maximum_degree_of_comparison_calculated) > 1
# End of Draw Venn diagrams -------------------------------------------------
# To test the last-drawn diagram:
# grid::grid.newpage()
# grid::grid.draw(venn_diagram)
return(combination_set_operations)
} # End of function definition
|
/scratch/gouwar.j/cran-all/cranData/veccompare/R/compare.vectors.R
|
#' Create a Markdown report from the output of \code{\link{compare.vectors}}
#'
#' This function is a wrapper for \code{\link{compare.vectors}}. It creates a Markdown report of all degrees of set comparisons between a named list of vectors.
#'
#' Use of this function is illustrated with the \code{Veccompare Overlap Report} RMarkdown template for RStudio that is installed as part of the \pkg{veccompare} package.
#' @inheritParams compare.vectors
#' @inheritParams render.venn.diagram
#' @param cat_immediately A logical (TRUE/FALSE) indicator whether to immediately print the output, as in an RMarkdown document.
#' @param base_heading_level_to_use An integer indicating the highest-level heading to print. Defaults to \code{1} (i.e., start by using first-level headings); \code{1} is also the minimum value used.
#'
#' @return A string of Markdown (and Venn diagrams, if \code{draw_venn_diagrams} is \code{TRUE}).
#'
#' If \code{cat_immediately} is \code{TRUE}, nothing is returned by the function; rather, the output Markdown is printed immediately (for example, as part of a Knitted RMarkdown document, or to the console).
#'
#' If \code{cat_immediately} is \code{FALSE}, the output can be saved to an object (as in the example below). This object can then be printed using \code{cat()}.
#'
#' NOTE WELL: If \code{cat_immediately} is \code{FALSE}, the output \emph{should} be saved to an object. If it is not, R will give an error message when printing to the console, because of unescaped special characters (which work correctly when \code{cat()} is used).
#'
#' @export compare.vectors.and.return.text.analysis.of.overlap
#'
#' @examples
#' example <- compare.vectors.and.return.text.analysis.of.overlap(
#' veccompare::example.vectors.list,
#' cat_immediately = FALSE,
#' draw_venn_diagrams = FALSE
#' )
#' cat(example)
compare.vectors.and.return.text.analysis.of.overlap <- function(
named_list_of_vectors_to_compare,
degrees_of_comparison_to_include = NULL, # By default, all degrees of comparison will be included (e.g., for three vectors, all 1-, 2-, and 3-way comparisons). If you only want to include 2- and 3-way comparisons, for example, you can use 'c(2, 3)' here.
cat_immediately = FALSE, # Whether to immediately print to the console using cat(). This needs to be true if venn diagrams are to be drawn.
	draw_venn_diagrams = FALSE, # Whether we should draw Venn diagrams for 2- to 5-way comparisons (the VennDiagram package can only draw up to five-way comparisons).
viewport_npc_width_height_for_images = 1.0,
vector_colors_for_venn_diagrams = NULL,
save_venn_diagram_files = FALSE,
location_for_venn_diagram_files = "",
prefix_for_venn_diagram_files = "",
saved_venn_diagram_resolution_ppi = 300,
saved_venn_diagram_dimension_units = "in",
saved_venn_diagram_width = 8,
saved_venn_diagram_height = 6,
base_heading_level_to_use = 1
){
if(!is.numeric(base_heading_level_to_use)){
stop("'base_heading_level_to_use' is expected to be an integer (for example, 1, 2, 3, etc.).")
} else {
# If base_heading_level_to_use *is* an integer, round it to the nearest whole number, with a minimum of 1
base_heading_level_to_use <- max(round(base_heading_level_to_use, digits = 0), 1)
# We'll then create a heading prefix for use when creating Markdown below:
markdown_base_heading <- paste(rep("#", base_heading_level_to_use), sep = "", collapse = "")
}
# First, we'll escape special Markdown characters:
message("Escaping special Markdown characters (_, *, /)...")
	# gsub() (rather than sub()) is used so that every occurrence is escaped, not just the first:
	named_list_of_vectors_to_compare <- lapply(named_list_of_vectors_to_compare, function(x){gsub("_", "\\\\_", x)})
	named_list_of_vectors_to_compare <- lapply(named_list_of_vectors_to_compare, function(x){gsub("*", "\\\\*", x, fixed = TRUE)})
	named_list_of_vectors_to_compare <- lapply(named_list_of_vectors_to_compare, function(x){gsub("/", "\\\\/", x)})
if(draw_venn_diagrams == TRUE){ # Sanitize the user input
draw_venn_diagrams_value <- TRUE
if(save_venn_diagram_files == TRUE){ # Sanitize the user input
save_venn_diagram_files_value <- TRUE
} else {
save_venn_diagram_files_value <- FALSE
}
if(location_for_venn_diagram_files != ""){ # Sanitize the user input
location_for_venn_diagram_files_value <- as.character(location_for_venn_diagram_files)
} else {
location_for_venn_diagram_files_value <- ""
}
if(prefix_for_venn_diagram_files != ""){ # Sanitize the user input
prefix_for_venn_diagram_files_value <- as.character(prefix_for_venn_diagram_files)
} else {
prefix_for_venn_diagram_files_value <- ""
}
if(saved_venn_diagram_resolution_ppi != ""){ # Sanitize the user input
saved_venn_diagram_resolution_ppi_value <- as.numeric(saved_venn_diagram_resolution_ppi)
} else {
saved_venn_diagram_resolution_ppi_value <- ""
}
if(saved_venn_diagram_dimension_units != ""){ # Sanitize the user input
saved_venn_diagram_dimension_units_value <- as.character(saved_venn_diagram_dimension_units)
} else {
saved_venn_diagram_dimension_units_value <- ""
}
if(saved_venn_diagram_width != ""){ # Sanitize the user input
saved_venn_diagram_width_value <- as.numeric(saved_venn_diagram_width)
} else {
saved_venn_diagram_width_value <- ""
}
if(saved_venn_diagram_height != ""){ # Sanitize the user input
saved_venn_diagram_height_value <- as.numeric(saved_venn_diagram_height)
} else {
saved_venn_diagram_height_value <- ""
}
if(!is.null(vector_colors_for_venn_diagrams)){ # Sanitize the user input
message("Using the following Venn diagram colors: ", veccompare::vector.print.with.and(vector_colors_for_venn_diagrams))
vector_colors_for_venn_diagrams_value <- vector_colors_for_venn_diagrams
} else {
vector_colors_for_venn_diagrams_value <- NULL
}
if(!is.numeric(viewport_npc_width_height_for_images)){
stop("'viewport_npc_width_height_for_images' is expected to be numeric (e.g., 1.0, 0.5, etc.).")
} else {
viewport_npc_width_height_for_images_value <- viewport_npc_width_height_for_images
}
} else {
draw_venn_diagrams_value <- FALSE
}
if(draw_venn_diagrams_value == TRUE & cat_immediately != TRUE){
warning("'draw_venn_diagrams' is TRUE, but 'cat_immediately' is FALSE. 'cat_immediately' needs to be set to TRUE in order for Venn diagrams to be drawn in the output. Therefore, skipping drawing diagrams...")
draw_venn_diagrams_value <- FALSE
}
output_markdown <- NULL # We'll fill this in below.
vector_names <- names(named_list_of_vectors_to_compare)
degrees_of_comparison_to_include_value <- degrees_of_comparison_to_include # This allows us below to avoid passing the argument 'degrees_of_comparison_to_include_value = degrees_of_comparison_to_include_value'
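	# Note: when 'draw_venn_diagrams' is FALSE, the various *_value variables for the Venn
	# options are never defined above. The call below still works because R evaluates function
	# arguments lazily: compare.vectors() only forces those arguments inside its own
	# Venn-diagram branches.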
combination_set_operations <- veccompare::compare.vectors(
named_list_of_vectors_to_compare,
degrees_of_comparison_to_include = degrees_of_comparison_to_include_value,
draw_venn_diagrams = draw_venn_diagrams_value,
vector_colors_for_venn_diagrams = vector_colors_for_venn_diagrams_value,
save_venn_diagram_files = save_venn_diagram_files_value,
location_for_venn_diagram_files = location_for_venn_diagram_files_value,
prefix_for_venn_diagram_files = prefix_for_venn_diagram_files_value,
saved_venn_diagram_resolution_ppi = saved_venn_diagram_resolution_ppi_value,
saved_venn_diagram_dimension_units = saved_venn_diagram_dimension_units_value,
saved_venn_diagram_width = saved_venn_diagram_width_value,
viewport_npc_width_height_for_images = viewport_npc_width_height_for_images_value,
saved_venn_diagram_height = saved_venn_diagram_height_value
)
if(is.null(degrees_of_comparison_to_include)){ # If we *have not* been told which comparisons (e.g., 2-way, 3-way, etc.) to include, we'll use all of them by default:
degrees_of_comparisons <- 1:length(vector_names)
} else { # If we *have* been told which comparisons to include, we'll set that here:
if(!is.numeric(degrees_of_comparison_to_include)){
stop("The argument 'degrees_of_comparison_to_include' is expected to be numeric.")
} else { # If we are dealing with a numeric argument, as expected.
degrees_of_comparisons <- degrees_of_comparison_to_include
}
}
message("Creating output Markdown text...")
for(n_way_comparison in degrees_of_comparisons){
if(n_way_comparison == 1){
addition_to_output_markdown <- paste("\n\n", markdown_base_heading, " Number of Items in Each Element", sep = "", collapse = "")
} else {
addition_to_output_markdown <- paste("\n\n", markdown_base_heading, " ", n_way_comparison, "-Way Comparisons", sep = "", collapse = "")
}
if(cat_immediately == TRUE){
cat(addition_to_output_markdown)
} else {
output_markdown <- paste(output_markdown, addition_to_output_markdown, sep = "", collapse = "")
}
comparisons_at_this_level_of_combination <- combination_set_operations[
sapply(
purrr::map(combination_set_operations, "elements_involved"),
function(x){length(x) == n_way_comparison}
)
]
for(list_element in comparisons_at_this_level_of_combination){
if(length(list_element) == 1){
			list_element <- list_element[[1]] # Unwrap the single-element list so that its sub-elements can be accessed directly.
}
#
# Print the results of the set operations comparing the elements:
#
addition_to_output_markdown <- paste(
"\n\n",
markdown_base_heading,
"# **",
vector.print.with.and(
list_element[["elements_involved"]],
string_to_return_if_vector_is_empty = "(None)"),
"**",
sep = "",
collapse = ""
)
if(cat_immediately == TRUE){
cat(addition_to_output_markdown)
} else {
output_markdown <- paste(output_markdown, addition_to_output_markdown, sep = "", collapse = "")
}
# If we have a venn diagram to draw, go ahead and draw it:
if(draw_venn_diagrams == TRUE & cat_immediately == TRUE){ # Note that we give the user a warning above if draw_venn_diagrams is TRUE but cat_immediately is FALSE
if(!is.null(list_element[["venn_diagram"]])){
cat("\n\n")
veccompare::render.venn.diagram(
list_element[["venn_diagram"]],
viewport_npc_width_height_for_images = viewport_npc_width_height_for_images_value
)
cat("\n\n")
}
}
addition_to_output_markdown <- paste("\n", "- Total number of values (not counting duplicates): ", length(unique(list_element[["union_of_elements"]])), sep = "", collapse = "") # unique() is needed here just for when the total number of elements is 1 (i.e., we're just reporting on the number of elements) -- in that case, this is necessary to count only non-duplicate values (without this, venn diagrams in higher-level comparisons won't add up with the values in the level-1 comparisons (i.e., with the number of elements printed for each vector individually).
if(cat_immediately == TRUE){
cat(addition_to_output_markdown)
} else {
output_markdown <- paste(output_markdown, addition_to_output_markdown, sep = "", collapse = "")
}
if(length(list_element[["elements_involved"]]) > 1){ # If it's not just the element compared with itself:
addition_to_output_markdown <- paste("\n",
"- Total number of elements that **overlap among ",
vector.print.with.and(list_element[["elements_involved"]]), ":** ",
length(list_element[["overlap_of_elements"]]),
" (",
round(
length(list_element$overlap_of_elements)/length(list_element$union_of_elements)*100,
digits = 2
), "% of the total number of values)",
sep = "", collapse = ""
)
if(cat_immediately == TRUE){
cat(addition_to_output_markdown)
} else {
output_markdown <- paste(output_markdown, addition_to_output_markdown, sep = "", collapse = "")
}
addition_to_output_markdown <- paste("\n",
"\t- Items that **overlap among ",
vector.print.with.and(list_element[["elements_involved"]]), ":** *",
vector.print.with.and(
list_element[["overlap_of_elements"]],
string_to_return_if_vector_is_empty = "(None)"
),
"*",
sep = "", collapse = ""
)
if(cat_immediately == TRUE){
cat(addition_to_output_markdown)
} else {
output_markdown <- paste(output_markdown, addition_to_output_markdown, sep = "", collapse = "")
}
for(involved_vector_for_getting_unique_elements in list_element[["elements_involved"]]){
percent_unique_to_involved_vector <- round(length(list_element[["elements_unique_to_first_element"]][[involved_vector_for_getting_unique_elements]])/length(unique(named_list_of_vectors_to_compare[[involved_vector_for_getting_unique_elements]]))*100, 2)
addition_to_output_markdown <- paste(
"\n\n",
markdown_base_heading,
"## Elements Unique to ", involved_vector_for_getting_unique_elements,
"\n\nTotal number of elements that are **unique to ",
involved_vector_for_getting_unique_elements, ":** ",
length(list_element[["elements_unique_to_first_element"]][[involved_vector_for_getting_unique_elements]]),
# Get the percentage equivalent:
" (", percent_unique_to_involved_vector, "% of ", involved_vector_for_getting_unique_elements, "; put differently, ", 100-percent_unique_to_involved_vector , "% of ", involved_vector_for_getting_unique_elements, " is overlapping)",
sep = "", collapse = ""
)
if(cat_immediately == TRUE){
cat(addition_to_output_markdown)
} else {
output_markdown <- paste(output_markdown, addition_to_output_markdown, sep = "", collapse = "")
}
addition_to_output_markdown <- paste("\n",
"\n\nItems that are **unique to ",
involved_vector_for_getting_unique_elements, ":**",
"\n\n> *",
vector.print.with.and(
list_element[["elements_unique_to_first_element"]][[involved_vector_for_getting_unique_elements]],
string_to_return_if_vector_is_empty = "(None)"
),
"*",
sep = "", collapse = ""
)
if(cat_immediately == TRUE){
cat(addition_to_output_markdown)
} else {
output_markdown <- paste(output_markdown, addition_to_output_markdown, sep = "", collapse = "")
}
} # End of if statement re: unique elements
} # End of if statement re: length of elements involved.
} # End of for loop over comparisons_at_this_level_of_combination
} # End of for loop over degree of combinations
if(cat_immediately != TRUE){
return(output_markdown)
}
} # End of function definition
|
/scratch/gouwar.j/cran-all/cranData/veccompare/R/compare.vectors.and.return.text.analysis.of.overlap.R
|
#' Example Vectors List
#'
#' An example dataset containing several named vectors, which can be compared to one another for overlaps, unique elements, etc.
#'
#' @format A list of named vectors.
"example.vectors.list"
|
/scratch/gouwar.j/cran-all/cranData/veccompare/R/example.vectors.list.R
|
#' Extract elements from the output of \code{\link{compare.vectors}}
#'
#' Straightforwardly extract particular elements from the output of \code{\link{compare.vectors}}.
#'
#' @param output_from_compare.vectors The list output of \code{\link{compare.vectors}}.
#' @param vector_names An optional vector of names to extract from the named list (\code{named_list_of_vectors_to_compare}) used with \code{\link{compare.vectors}}.
#' @param only_match_vector_names A logical (TRUE/FALSE) indicator whether to match \strong{only} \code{vector_names}. If \code{vector_names} is \code{c("a", "b")}, for example, and \code{only_match_vector_names} is \code{TRUE}, this function will output only the comparison between \code{a} and \code{b}. If \code{only_match_vector_names} is \code{FALSE}, however, this function will output the comparison between \code{a} and \code{b}, as well as between \code{a}, \code{b}, and \code{c}, etc.
#' @param degrees_of_comparison An optional number or vector of numbers indicating which degrees of comparison to return (for example, 2 will return only two-way comparisons from \code{output_from_compare.vectors}).
#' @param elements_of_output An optional vector of element names from \code{output_from_compare.vectors} to return (for example, "elements_involved"). See the \strong{Value} section of \code{\link{compare.vectors}} for a list of the elements to choose from.
#'
#' @return
#' A winnowed version of \code{output_from_compare.vectors}. Depending on arguments, either a list, a vector, or a string.
#'
#' @export extract.compared.vectors
#'
#' @examples
#' example <- veccompare::compare.vectors(veccompare::example.vectors.list)
#'
#' # To extract similar elements across list items:
#' veccompare::extract.compared.vectors(
#' example,
#' elements_of_output = "elements_involved"
#' )
#'
#' # To extract all comparisons that involve "vector_a":
#' veccompare::extract.compared.vectors(
#' example,
#' vector_names = "vector_a"
#' )
#'
#' # To find all comparisons that were about "vector_a" and "vector_c":
#' veccompare::extract.compared.vectors(
#' example,
#' vector_names = c("vector_a", "vector_c"),
#' only_match_vector_names = TRUE
#' )
#'
#' # To get all elements that did a two-way comparison:
#' veccompare::extract.compared.vectors(
#' example,
#' degrees_of_comparison = 2
#' )
#'
#' # A more complex / specific example:
#' extract.compared.vectors(
#' example,
#' vector_names = c("vector_a", "vector_c"),
#' only_match_vector_names = FALSE,
#' degrees_of_comparison = c(2, 3),
#' elements_of_output = "elements_involved"
#' )
extract.compared.vectors <- function(
output_from_compare.vectors,
vector_names = NULL,
only_match_vector_names = FALSE,
degrees_of_comparison = NULL,
elements_of_output = NULL
){
output <- output_from_compare.vectors # We'll start with output_from_compare.vectors, and winnow it as we go.
if(!is.null(degrees_of_comparison)){
		# Keep only the comparisons whose degree (number of vectors involved) is in degrees_of_comparison:
output <- output[
which(
sapply(
purrr::map(output, "elements_involved"),
function(x){length(x) %in% degrees_of_comparison}
)
)
]
}
if(!is.null(vector_names)){
if(only_match_vector_names == TRUE){
# To find all comparisons that were about the particular vector_names:
output <- output[
sapply(
purrr::map(output, "elements_involved"),
function(x){setequal(x, vector_names)}
)
]
} else {
			# Keep all comparisons that involve every name in vector_names:
output <- output[
sapply(
purrr::map(output, "elements_involved"),
function(x){all(vector_names %in% x)}
)
]
}
}
# Remove NULL elements we possibly created above:
# First, though, check whether there's anything left to remove: The user may have entered options that resulted in just a blank list():
if(length(output) > 0){
output <- output[
sapply(
output,
function(x){!is.null(x)}
)
]
}
if(!is.null(elements_of_output)){
# output <- purrr::map(output, elements_of_output)
# Extract just the elements_of_output elements from across the list items of output:
output <- lapply(output, function(x){
return(x[elements_of_output])
})
}
if(length(output) == 1){
output <- output[[1]]
}
return(output)
}
|
/scratch/gouwar.j/cran-all/cranData/veccompare/R/extract.compared.vectors.R
|
# Save some example vectors into our /data directory, following http://r-pkgs.had.co.nz/data.html:
example.vectors.list <- list(
"vector_a" = c("a", "b", "c", "d", "z", "q", "x"),
"vector_b" = c("a", "a", "b", "e", "f", "z"),
"vector_c" = c("b", "f", "g", "h", "i"),
"vector_d" = c("c", "i", "b", "k", "l"),
"vector_e" = c("a", "g", "h", "k", "i"),
"vector_f" = c("f", "g", "a", "w")
)
# devtools::use_data(example.vectors.list, overwrite = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/veccompare/R/generate.example.data.R
|
#' Generate Random Colors
#'
#' A function to generate a given number of random colors.
#'
#' @param number_of_colors_to_get The number of colors to generate.
#'
#' @return A vector of R color names.
#' @export generate.random.colors
#'
#' @examples
#' generate.random.colors(5)
generate.random.colors <- function(number_of_colors_to_get){
	# sample() avoids the fractional indices (and possible duplicate colors)
	# that indexing colors() with runif() would produce. This assumes that
	# number_of_colors_to_get does not exceed length(colors(distinct = TRUE)).
	return(sample(colors(distinct = TRUE), number_of_colors_to_get))
}
|
/scratch/gouwar.j/cran-all/cranData/veccompare/R/generate.random.colors.R
|
#' Render (Print) a Previously-Computed Venn Diagram
#'
#' A wrapper function for printing a \code{grid}-based image using \code{grid::grid.draw()}.
#'
#' @param venn_diagram_created_with_VennDiagram_package A grid-based diagram object. For example, a Venn diagram previously generated using \code{veccompare::compare.vectors()}.
#' @param viewport_npc_width_height_for_images The scale at which to print an image. If the image is cut off at its edges, for example, this can be set lower than 1.0.
#'
#' @return The function will not return a value; rather, it will print the image.
#' @export render.venn.diagram
#'
#' @examples
#' # Create comparisons across 5 vectors, specifically creating all 4-way venn diagrams from them:
#' example <- veccompare::compare.vectors(
#' veccompare::example.vectors.list[1:5],
#' draw_venn_diagrams = TRUE,
#' suppress_messages = TRUE,
#' degrees_of_comparison_to_include = 4
#' )
#'
#' # Get the first 4-way comparison that includes a diagram:
#' diagram <- veccompare::extract.compared.vectors(
#' example,
#' degrees_of_comparison = 4,
#' elements_of_output = "venn_diagram"
#' )[[1]]$venn_diagram
#'
#' # Print the diagram:
#' veccompare::render.venn.diagram(
#' diagram,
#' viewport_npc_width_height_for_images = .7
#' # Scale the image down to 70%,
#' # in case it otherwise gets cut off at the margins.
#' )
render.venn.diagram <- function(
venn_diagram_created_with_VennDiagram_package,
viewport_npc_width_height_for_images = 1.0
){
grid::grid.newpage()
grid::pushViewport(grid::viewport(
width = grid::unit(viewport_npc_width_height_for_images, "npc"),
height = grid::unit(viewport_npc_width_height_for_images, "npc")
)) # Following https://stackoverflow.com/questions/21234439/how-to-force-the-labels-to-fit-in-venndiagram#comment75690400_22826211, force the output rendering mechanism to be smaller than normal, in order not to cut off diagram names.
grid::grid.draw(venn_diagram_created_with_VennDiagram_package)
}
|
/scratch/gouwar.j/cran-all/cranData/veccompare/R/render.venn.diagram.R
|
#' Summarize Percentage Overlap for Two-Way Comparisons between Vectors
#'
#' @inheritParams compare.vectors
#' @param output_type Either \code{"table"}, \code{"matrix_plot"}, or \code{"network_graph"}. \code{"table"} will return a matrix showing percentage overlap between each pair of vectors. \code{"matrix_plot"} will plot this table, coloring it by the amount of overlap. \code{"network_graph"} will return a network graph image illustrating the overlap percentages between each pair of vectors.
#' @param melt_table A logical (TRUE/FALSE) indicator, when \code{output_type} is \code{"table"}, whether to print the output in \code{\link[reshape2]{melt}ed} form (using the \pkg{reshape2} package).
#' @param network_graph_minimum \code{minimum} argument from \code{\link[qgraph]{qgraph}}, for when \code{output_type} is \code{"network_graph"}.
#' @param margins_for_plot The margins for image output (if \code{output_type} is \code{matrix_plot} or \code{network_graph}). Specified as a vector of numbers, in the form \code{c(bottom, left, top, right)}. If \code{output_type} is \code{matrix_plot}, defaults to \code{c(2, 0, 1, 0)}; if \code{output_type} is \code{network_graph}, defaults to \code{c(3, 3, 3, 0.5)}.
#'
#' @return Either a matrix (if \code{output_type} is \code{"table"}), or an image (if \code{output_type} is \code{"matrix_plot"} or \code{"network_graph"}). If an image is printed, nothing is returned by the function; rather, the output is printed immediately.
#'
#' If \code{output_type} is \code{"table"} and \code{melt_table} is \code{FALSE}, the output will be a matrix with \code{nrow} and \code{ncol} both equal to the number of vectors in \code{named_list_of_vectors_to_compare}. This table shows the decimal percentage overlap (e.g., "0.20" = 20\%) between each combination of vectors. \emph{This table is intended to be read with row names first, in this form:} "[row title] overlaps with [column title] [cell value] percent."
#'
#' If \code{output_type} is \code{"table"} and \code{melt_table} is \code{TRUE}, the output will be a \code{\link[reshape2]{melt}ed} data.frame with three columns: \code{Vector_Name}, \code{Overlaps_With}, and \code{Decimal_Percentage}.
#'
#' @export summarize.two.way.comparisons.percentage.overlap
#'
#' @examples
#' summarize.two.way.comparisons.percentage.overlap(veccompare::example.vectors.list)
#' summarize.two.way.comparisons.percentage.overlap(
#' veccompare::example.vectors.list,
#' output_type = "table",
#' melt_table = TRUE
#' )
#'
#' summarize.two.way.comparisons.percentage.overlap(
#' veccompare::example.vectors.list,
#' output_type = "matrix_plot" # You can also choose "network_graph"
#' )
#'
summarize.two.way.comparisons.percentage.overlap <- function(
named_list_of_vectors_to_compare,
output_type = "table", # c("table", "matrix_plot", "network_graph")
melt_table = FALSE, # Overridden by output_type
network_graph_minimum = 0,
margins_for_plot = NULL
){
if(! output_type %in% c("table", "matrix_plot", "network_graph")){
stop("'output_type' must be one of the following: 'table', 'matrix_plot', 'network_graph'.")
} else {
if(length(output_type) != 1){
stop("'output_type' must have only 1 value.")
}
}
if(is.null(margins_for_plot)){
if(output_type == "matrix_plot"){
margins_for_plot <- c(2, 0, 1, 0) # Increase the margins, so that nothing gets cut off at the top and bottom of the plot.
} else if(output_type == "network_graph"){
margins_for_plot <- c(3, 3, 3, 0.5) # Margins (for the plot, not the legend): c(bottom, left, top, right)
} else { # We shouldn't ever get to this step; I'm just providing a fallback to make future code expansion easier.
margins_for_plot <- c(1, 1, 1, 1)
}
}
# Compute all two-way comparisons:
two_way_comparison_output <- veccompare::compare.vectors(
named_list_of_vectors_to_compare,
degrees_of_comparison_to_include = 2,
suppress_messages = TRUE
)
output_table <- matrix( # We'll fill this in below
nrow = length(named_list_of_vectors_to_compare),
ncol = length(named_list_of_vectors_to_compare)
)
# Set the diagonal to 1, since every element overlaps 100% with itself
diag(output_table) <- 1.00
rownames(output_table) <- names(named_list_of_vectors_to_compare)
colnames(output_table) <- names(named_list_of_vectors_to_compare)
for(list_element in two_way_comparison_output){
for(involved_vector_for_getting_unique_elements in list_element[["elements_involved"]]){
percent_unique_to_involved_vector <- 1.00 - round(length(list_element[["elements_unique_to_first_element"]][[involved_vector_for_getting_unique_elements]])/length(unique(named_list_of_vectors_to_compare[[involved_vector_for_getting_unique_elements]])), 2)
# We specify row and column in this way because it works correctly with reshape2::melt below. The way to view the output is across: "[row title] overlaps with [column title] [cell value] percent."
output_table[
involved_vector_for_getting_unique_elements,
list_element[["elements_involved"]][list_element[["elements_involved"]] != involved_vector_for_getting_unique_elements]
] <- percent_unique_to_involved_vector
} # End of for loop over involved_vector_for_getting_unique_elements
} # End of for loop over list_element
if(output_type == "matrix_plot"){
# See https://cran.r-project.org/web/packages/corrplot/vignettes/corrplot-intro.html for corrplot examples:
message("Drawing plot of table...")
par(xpd=TRUE)
return(
corrplot::corrplot(
output_table,
method="number",
is.corr = FALSE,
tl.col = "black", # Set title color to black
diag = FALSE,
order = "alphabet",
# Widen the legend:
cl.ratio = 0.2,
cl.align = "r",
cl.lim = c(0, 1.0),
tl.cex = 0.8, # Title font size ratio.
mar = margins_for_plot
)
)
} else if(output_type == "table"){
if(melt_table == TRUE){
melted_matrix <- reshape2::melt(as.matrix(output_table))
colnames(melted_matrix) <- c("Vector_Name", "Overlaps_With", "Decimal_Percentage")
# Remove self-directed edges, as they aren't really meaningful here:
melted_matrix <- melted_matrix[
melted_matrix$Vector_Name != melted_matrix$Overlaps_With
, # Use all columns
]
rownames(melted_matrix) <- NULL # For aesthetics, remove row numbers.
return(melted_matrix)
} else {
return(output_table)
}
} else if(output_type == "network_graph") {
message("Drawing network graph...")
list_item_sizes <- sapply(named_list_of_vectors_to_compare[order(names(named_list_of_vectors_to_compare))], length)
list_item_relative_sizes <- list_item_sizes / max(list_item_sizes)*10
return(
qgraph::qgraph(
output_table[order(rownames(output_table)), order(colnames(output_table))], # We're alphabetizing row and column name orders here to more easily match up with the values for the 'nodeNames' argument below.
esize = 5,
directed = TRUE,
theme = "gray",
edge.labels = TRUE,
shape = "circle",
vsize = list_item_relative_sizes, # Get the size of each list object in the order it appears in the melted table (this is from using 'vsize = c(1,2,3,4,5,6)' and noting that the size increases were in line with the output of 'unique(melted_matrix$Vector_Name)')
minimum = network_graph_minimum,
threshold = -1, # Set this lower than 0, to effectively turn it off.
DoNotPlot = FALSE,
layout = "circle",
legend = TRUE,
labels = c(1:length(named_list_of_vectors_to_compare)), # names(named_list_of_vectors_to_compare)[order(names(named_list_of_vectors_to_compare))], # Note: There seems to be a bug with this package when using an edge list and this option, which is why I'm using output_table here above.
label.scale = TRUE,
label.scale.equal = FALSE,
label.cex = 2,
#groups = names(named_list_of_vectors_to_compare)[order(names(named_list_of_vectors_to_compare))], # Cause the nodes to be colored based on their names.
nodeNames = names(named_list_of_vectors_to_compare)[order(names(named_list_of_vectors_to_compare))],
legend.cex = 0.3,
mar = margins_for_plot
)
)
# return(qgraph_output)
} else {
stop("No 'output_type' selected.") # This shouldn't happen, but just in case it does, we'll throw an error message here.
}
} # End of function definition
|
/scratch/gouwar.j/cran-all/cranData/veccompare/R/summarize.two.way.comparisons.percentage.overlap.R
|
#' veccompare: Automatically Generate All n-Wise Set Comparisons on Vectors
#'
#' The \pkg{veccompare} package contains functions for automating set operations. Given a named list of 5 vectors, for example, \pkg{veccompare} can calculate all 2-, 3-, 4-, and 5-way comparisons between those vectors, recording information for each comparison about the set "union" (combined elements), "intersection" (overlap / shared elements), and complements (which elements are unique to each vector involved in the comparison).
#'
#' The veccompare package contains functions for automating set operations (i.e., comparisons of overlap) between multiple vectors.
#'
#' The package also contains a function for automating reporting in RMarkdown, by generating markdown output for easy analysis, as well as an RMarkdown template for use with RStudio.
#'
#' The primary function from \pkg{veccompare} is \code{\link{compare.vectors}}. Complementarily, \code{\link{compare.vectors.and.return.text.analysis.of.overlap}} will call \code{\link{compare.vectors}} and generate Markdown-style output from it (for example, for use within an RMarkdown file).
#'
#' An RMarkdown template illustrating several of \pkg{veccompare}'s features can be used from within RStudio by clicking \code{File -> New File -> R Markdown... -> From Template -> Veccompare Overlap Report}.
#'
#' \pkg{veccompare} also provides a function, \code{\link{summarize.two.way.comparisons.percentage.overlap}}, that can create correlation-plot-style images and network graphs for all two-way comparisons between vectors. This function is also demonstrated in the \code{Veccompare Overlap Report} described above.
#'
# The below were recommended after using Ctrl + Shift + E in RStudio to do CRAN-like checks on the package.
#' @importFrom "grDevices" "colors"
#' @importFrom "grDevices" "png"
#' @importFrom "grDevices" "dev.off"
#' @importFrom "graphics" "par"
#' @importFrom "stats" "runif"
#' @importFrom "utils" "head"
#' @importFrom "utils" "tail"
"_PACKAGE"
|
/scratch/gouwar.j/cran-all/cranData/veccompare/R/veccompare.R
|
#' Print a vector with commas and a final "and".
#'
#' @param vector_to_print A vector of strings (or elements able to be coerced into strings) to print.
#' @param string_to_return_if_vector_is_empty If \code{vector_to_print} is empty, the string that should be returned (for example, "", "(None)", etc.)
#' @param use_oxford_comma A logical (TRUE/FALSE) value indicating whether to use an Oxford comma ("One, two, and three" vs. "One, two and three").
#'
#' @return A single string that concatenates the input, separating with commas and adding "and" before the final item.
#' @export vector.print.with.and
#'
#' @examples
#' vector.print.with.and(c("One", "Two", "Three", "Four"))
#' vector.print.with.and(c("One", "Two", "Three", "Four"), use_oxford_comma = FALSE)
#' vector.print.with.and(c("One", "Two"))
#' vector.print.with.and(c("One"))
#' vector.print.with.and(c(), string_to_return_if_vector_is_empty = "(None)") # Outputs "(None)"
#' vector.print.with.and(c(""), string_to_return_if_vector_is_empty = "(None)") # Outputs ""
vector.print.with.and <- function(
vector_to_print,
string_to_return_if_vector_is_empty = "",
use_oxford_comma = TRUE
){
if(length(vector_to_print) == 0){
return(string_to_return_if_vector_is_empty)
} else if(length(vector_to_print) == 1){
return(vector_to_print)
} else if(length(vector_to_print) == 2){
return(paste(vector_to_print, sep = " and ", collapse = " and "))
} else {
return(
paste(
paste(
head(
vector_to_print,
-1 # Remove the last element, so that we can add "and" before it : )
),
collapse = ", "
),
if(use_oxford_comma == TRUE){","},
" and ",
tail(vector_to_print, n = 1),
sep = ""
)
)
}
}
|
/scratch/gouwar.j/cran-all/cranData/veccompare/R/vector.print.with.and.R
|
#' Which of One Set is not in Another
#'
#' This function is a wrapper for \code{\link{setdiff}}. It makes it easier to remember which vector is being subtracted from the other, by displaying an explicit message.
#'
#' @param set_1 A vector to be subtracted from.
#' @param set_2 A vector to subtract from \code{set_1}.
#' @param suppress_messages A logical (TRUE/FALSE) indicator whether to suppress messages.
#'
#' @return A vector of the values of \code{set_1} that are not present in \code{set_2}. Put differently, a vector resulting from subtracting \code{set_2} from \code{set_1}.
#' @export which.of.one.set.is.not.in.another
#'
#' @examples
#' veccompare::which.of.one.set.is.not.in.another(
#' veccompare::example.vectors.list$vector_a,
#' veccompare::example.vectors.list$vector_b
#' )
#'
#' veccompare::which.of.one.set.is.not.in.another(
#' veccompare::example.vectors.list$vector_b,
#' veccompare::example.vectors.list$vector_a
#' )
#'
which.of.one.set.is.not.in.another <- function(
set_1,
set_2,
suppress_messages = FALSE
){
if(suppress_messages != TRUE){
message("Displaying the elements of the first set that are not present in the second set (i.e., subtracting the second set from the first set)...")
}
return(setdiff(set_1, set_2))
}
|
/scratch/gouwar.j/cran-all/cranData/veccompare/R/which.of.one.set.is.not.in.another.R
|
---
title: "Untitled"
author: "Author Name"
date: "January 1, 1970"
output: html_document
---
```{r setup, echo = FALSE, eval = TRUE}
# Set the default code chunk options for this document:
knitr::opts_chunk$set(echo = FALSE) # Don't print code itself.
knitr::opts_chunk$set(message = FALSE) # Don't print output messages.
knitr::opts_chunk$set(include = TRUE) # Do run code chunks.
knitr::opts_chunk$set(fig.width = 8) # Set the default image width wide enough so that it doesn't get cut off.
knitr::opts_chunk$set(fig.height = 6)
knitr::opts_chunk$set(fig.align="center")
knitr::opts_chunk$set(dev = 'png') # Set the default graphics device to PNG.
options(digits=2) # Use two decimal places by default.
knitr::opts_chunk$set(cache = FALSE)
```
```{r define_sample_data, eval = TRUE}
vectors_to_use <- veccompare::example.vectors.list
```
# Information about the maps
**Note that `results = "asis"` is set in the code chunks below. This is necessary for output that is printed with the `cat_immediately = TRUE` option (or that is printed with `cat(...)` generally).**
```{r print_report_on_one_way_comparisons, results = "asis"}
veccompare::compare.vectors.and.return.text.analysis.of.overlap(
vectors_to_use,
degrees_of_comparison_to_include = c(1),
cat_immediately = TRUE,
viewport_npc_width_height_for_images = 0.7,
base_heading_level_to_use = 2
)
```
# 2-Way Comparisons Graphical and Tabular Summaries
The two-way comparisons can be summarized visually and in tables in several ways.
First, we present a matrix showing decimal percentage overlap (i.e., "0.23" means "23%") between each pair of maps. This table is intended to be read with row names first, in this form: *"[row title] overlaps with \[column title] \[cell value] percent."*
```{r print_matrix_summary_of_two_way_comparisons, results = "asis"}
veccompare::summarize.two.way.comparisons.percentage.overlap(
vectors_to_use,
output_type = "matrix_plot"
)
```
The table above can also be read in the form below (reading across each row: *[column one] overlaps with [column two] [column three] percent.*). As above, we present decimal percentages (i.e., "0.23" means "23%"):
```{r print_melted_table_summary_of_two_way_comparisons, results = "asis"}
pander::pandoc.table(
veccompare::summarize.two.way.comparisons.percentage.overlap(
vectors_to_use,
output_type = "table",
melt_table = TRUE
),
split.cells = 15,
split.tables = Inf,
justify = c('center'),
style = 'multiline'
)
```
Finally, we can create a network graph of all two-way comparisons between maps. Higher overlap is represented with darker lines. Maps are sized based on their relative numbers of items. Relationship lines are directed -- the arrow flows in the direction of overlap (*"[map one] --> overlaps [line label] percent with --> [map two]"*).
This graph only shows relationships at or above 20% overlap.
```{r print_network_graph_summary_of_two_way_comparisons, results = "asis", fig.width=8, fig.height=6, fig.align="center", eval = TRUE}
veccompare::summarize.two.way.comparisons.percentage.overlap(
vectors_to_use,
output_type = "network_graph",
network_graph_minimum = .2
)
```
```{r print_report_on_comparisons, results = "asis", fig.width=8, fig.height=6, fig.align="center", eval = TRUE}
# We can now get all comparisons between the vectors, from 2-way up to 6-way:
veccompare::compare.vectors.and.return.text.analysis.of.overlap(
vectors_to_use,
degrees_of_comparison_to_include = 2:length(vectors_to_use),
cat_immediately = TRUE,
draw_venn_diagrams = TRUE,
viewport_npc_width_height_for_images = 0.65, # If venn diagrams are getting cut off, this number can be lowered (for example, to 0.7)
base_heading_level_to_use = 1
)
```
|
/scratch/gouwar.j/cran-all/cranData/veccompare/inst/rmarkdown/templates/veccompare_report/skeleton/skeleton.Rmd
|
# rev 1.3: fixed crash on zero intersection, thanks to [email protected]
# Rev 1.2: added checks for empty-set inputs
#Jan2021: clean up & speed up
vintersect <- function(x, y, multiple=TRUE){
# base::intersect does as.vector internally
# really can start out with this if() and just return vector() {logical(0)}
xtype <- typeof(x)
if(!length(x) | !length(y)) return( vector(mode = xtype) )
# make output look just like base::intersect
# is there something in the intersection?
if (!length(intersect(x,y))) return( vector(mode = xtype) )
if(multiple) {
x <- as.vector(x)
y <- as.vector(y)
xx <- x[!is.na(x)]
xn <- length(x) - length(xx) #x[is.na(x)]
yy <- y[!is.na(y)]
yn <- length(y) - length(yy) # y[is.na(y)]
#unlike vdiff, here I want the difference in how many NA there are
ndif <- min(xn,yn) #length(xn), length(yn))
intout <- vector()
# but since base::intersect includes "NA", have to jigger it here
trueint = intersect(xx,yy)
for(jj in 1: length(trueint) ) {
# use sum instead of length(which), is 10% faster at least for big vectors
#intout <- c(intout, rep(trueint[jj], min(length(which(trueint[jj]==xx)), length(which(trueint[jj]==yy) ) ) ) )
intout <- c(intout, rep(trueint[jj], min(sum(trueint[jj]==xx),sum(trueint[jj]==yy) ) ) )
}
trueint<-c( intout, rep(NA,ndif))
return(trueint)
} else return(intersect(x,y))
}
|
/scratch/gouwar.j/cran-all/cranData/vecsets/R/Oldvintersect.R
|
# rev 1.4: fixed crash on zero intersection, thanks to [email protected]
# Rev 1.2: added checks for empty-set inputs
#Jan2021: clean up & speed up
vintersect <- function(x, y, multiple=TRUE){
# base::intersect does as.vector internally
# really can start out with this if() and just return vector() {logical(0)}
xtype <- typeof(x)
if(!length(x) | !length(y)) return( vector(mode = xtype) )
# make output look just like base::intersect
# is there something in the intersection?
if (!length(intersect(x,y))) return( vector(mode = xtype) )
if(multiple) {
x <- as.vector(x)
y <- as.vector(y)
xx <- x[!is.na(x)]
xn <- length(x) - length(xx) #x[is.na(x)]
yy <- y[!is.na(y)]
yn <- length(y) - length(yy) # y[is.na(y)]
#unlike vdiff, here I want the difference in how many NA there are
ndif <- min(xn,yn) #length(xn), length(yn))
    intout <- vector()
    # base::intersect() treats NA as a value, so intersect the NA-free
    # vectors here and add the shared NAs back at the end:
    trueint <- intersect(xx, yy)
    for (jj in seq_along(trueint)) {
      # sum() instead of length(which()) is at least 10% faster for big vectors
      intout <- c(intout, rep(trueint[jj], min(sum(trueint[jj] == xx), sum(trueint[jj] == yy))))
    }
    trueint <- c(intout, rep(NA, ndif))
return(trueint)
} else return(intersect(x,y))
}
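
# Illustration of the multiset semantics (hand-worked, not package tests):
# each value is kept min(count in x, count in y) times, and min(#NA in x,
# #NA in y) NA values are appended.
#
# vintersect(c(1, 1, 2, NA), c(1, 1, 1, NA, NA))    # -> c(1, 1, NA)
# vintersect(c(1, 1, 2), c(1, 2), multiple = FALSE) # -> c(1, 2), as base::intersect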
|
/scratch/gouwar.j/cran-all/cranData/vecsets/R/vintersect.R
|
# vperm for vecsets
# hey hey hey: a way to enhance pracma::perms!
#
# disallow 'simplify = FALSE'
vperm <- function(x, m = if(length(x) == 1) x else length(x), FUN = NULL, ...) {
  # m is the number of elements to take at a time
  elist <- list(...)
  # Force simplify = TRUE even if some joker puts 'simplify' into the ellipsis.
  # (Building elist and then calling combn(x, m, FUN, ...) directly, as earlier
  # revisions did, silently discarded this override.)
  elist$simplify <- TRUE
  # combn and pracma::combs return transposes of each other.
  # combn returns all combinations in columns. When FUN returns a single value,
  # the result is a plain vector, and t(vector) turns out to be 1 x N --
  # not what I want; hence the dimension checks below.
  thecomb <- do.call(combn, c(list(x = x, m = m, FUN = FUN), elist))
# combn may return an array, albeit of one dimension. however, ncol(thecomb) will be NA
# the problem is that certain classes of "x" and certain values of "m" can return different
# dimensional results
if (!is.na(ncol(thecomb)) ) {
thecomb <- t(thecomb)
} else {
m = 1 # because no longer have multiple elements
dim(thecomb) <- c(dim(thecomb), 1) #force it to have a column
}
facm <- factorial(m)
# build size of theperm
theperm <- matrix(0,nrow= nrow(thecomb) *facm, ncol = m)
for(jrow in 1: nrow(thecomb)) {
startrow <- 1 + (jrow-1) * facm
endrow <- startrow - 1 + facm
theperm[startrow:endrow,] <- pracma::perms(thecomb[jrow,] )
}
return(invisible(theperm))
}
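
# Illustration (hand-worked, not a package test): all ordered arrangements of
# 2 elements taken from 1:3. The matrix is returned invisibly, so assign it.
#
# p <- vperm(1:3, 2)
# dim(p)  # 6 x 2: choose(3, 2) combinations times factorial(2) orderings each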
|
/scratch/gouwar.j/cran-all/cranData/vecsets/R/vperm.R
|
# Rev 1.2: added checks for empty-set inputs
# Jan 2021 minor speed cleanups
vsetdiff <- function (x, y, multiple=TRUE) {
x <- as.vector(x)
y <- as.vector(y)
# new code to check for empty sets here
if(!multiple) return ( setdiff(x,y))
if(!length(x)) return(NULL)
if(!length(y)) return(x)
xx <- x[!is.na(x)]
xn <- length(x) - length(xx) #x[is.na(x)]
yy <- y[!is.na(y)]
yn <- length(y) - length(yy) # y[is.na(y)]
# if the output of unlist() is length 0
# then difout <- xx (foo[-0] does naughty things)
tapout <- unlist( tapply(yy, yy, function(yyy) head(which(xx == yyy[1]), length(yyy) ) ) )
if(length(tapout)) difout<-xx[-tapout] else difout<- xx
  ndif <- max(0, xn - yn) # xn and yn are NA counts (scalars); the old length() comparison always gave 0, so surplus NAs from x were never restored
difout<- c(difout, rep(NA,ndif) )
return(difout)
}
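
# Illustration of the multiset semantics (hand-worked, not package tests):
# each copy of a value in y cancels at most one matching copy in x.
#
# vsetdiff(c(1, 1, 2, 2, 3), c(1, 2))                   # -> c(1, 2, 3)
# vsetdiff(c(1, 1, 2, 2, 3), c(1, 2), multiple = FALSE) # -> 3, as base::setdiff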
|
/scratch/gouwar.j/cran-all/cranData/vecsets/R/vsetdiff.R
|
vsetequal <- function (x, y, multiple=TRUE) {
x <- as.vector(x)
y <- as.vector(y)
#?? is all(....) any different from base::setequal?
if(!multiple) {
return( setequal(x,y) )
# all(c(match(x, y, 0L) > 0L, match(y, x, 0L) > 0L))
} else {
# Can get away with this 'cause set theory doesn't "allow" floats
# works fine when both x and y are empty
# == takes precedence over && ; parentheses added for clarity
return ( (length(x) == length(y)) && identical(sort(x), sort(y)) )
}
}
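
# Illustration (hand-worked, not package tests): with multiple = TRUE,
# element counts matter, not just membership.
#
# vsetequal(c(1, 1, 2), c(2, 1, 1))                # TRUE  (same counts)
# vsetequal(c(1, 1, 2), c(1, 2))                   # FALSE (counts differ)
# vsetequal(c(1, 1, 2), c(1, 2), multiple = FALSE) # TRUE, as base::setequal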
|
/scratch/gouwar.j/cran-all/cranData/vecsets/R/vsetequal.R
|
vunion <- function (x, y, multiple=TRUE) {
# 'multiple' = FALSE is normal union
xtype <- typeof(x)
if(!length(x) & !length(y)) return( vector(mode = xtype) )
if (multiple) {
    # Building the union from element counts with max() is about 4X faster than
    # trueun <- c(vintersect(x,y), vsetdiff(x,y), vsetdiff(y,x))
    # but NA values must be removed first to avoid disaster.
x <- as.vector(x)
y <- as.vector(y)
xx <- x[!is.na(x)]
xn <- length(x) - length(xx) #x[is.na(x)]
yy <- y[!is.na(y)]
yn <- length(y) - length(yy) # y[is.na(y)]
# here I want the max of how many NA there are
ndif <- max(xn,yn)
uniqs <- sort(unique(c(xx,yy))) #makes output pretty
trueun <- vector()
    for (ju in seq_along(uniqs)) { # seq_along() is safe even when uniqs is empty
trueun <- c(trueun, rep(uniqs[ju],times = max(sum(xx==uniqs[ju]), sum(yy==uniqs[ju]) ) ) )
}
# now put NAs back in
trueun <- c(trueun,rep(NA,ndif))
return(trueun)
} else return(union(x,y))
}
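
# Illustration of the multiset semantics (hand-worked, not package tests):
# each value is kept max(count in x, count in y) times, plus
# max(#NA in x, #NA in y) NA values.
#
# vunion(c(1, 1, 2), c(1, 3))                   # -> c(1, 1, 2, 3)
# vunion(c(1, 1, 2), c(1, 3), multiple = FALSE) # -> c(1, 2, 3), as base::union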
|
/scratch/gouwar.j/cran-all/cranData/vecsets/R/vunion.R
|
# Basic infix operators. For a real implementation with interesting details, see the `bitops` package.
#' @name Infix Bitwise Operators
#' @rdname infix
#' @aliases `%|%`
#' @aliases `%&%`
#' @aliases `%^%`
#' @aliases `%<<%`
#' @aliases `%>>%`
#'
#' @title Infix operators for bitwise operations.
#'
#' @description Basic infix wrapper around the base::bitw_OP_ operations.
#'
#' @param a,b Integer vectors. Numerics are coerced to integers.
#' @param n Non-negative integer vector of values up to 31.
#'
#' @return An integer vector of length of the longer of the arguments, or zero if one of the arguments is zero-length. NA input makes NA output.
NULL
#' @rdname infix
#' @examples
#' 1 %|% 2
#' @return `%|%`: A vector of pairwise ORed values.
#' @export
`%|%` <- function(a, b)
{
bitwOr(a,b)
}
#' @rdname infix
#' @examples
#' 1 %&% 2
#' @return `%&%`: A vector of pairwise ANDed values.
#' @export
`%&%` <- function(a,b)
{
bitwAnd(a,b)
}
#' @rdname infix
#' @examples
#' 1 %^% 2
#' @return `%^%`: A vector of pairwise XORed values.
#' @export
`%^%` <- function(a,b)
{
bitwXor(a,b)
}
#' @rdname infix
#' @examples
#' 1 %<<% 2
#' @return `%<<%`: A vector of the values on the LHS pairwise left-shifted by the RHS value.
#' @export
`%<<%` <- function(a,n)
{
bitwShiftL(a,n)
}
#' @rdname infix
#' @examples
#' 8 %>>% 2
#' @return `%>>%`: A vector of the values on the LHS pairwise right-shifted by the RHS value.
#' @export
`%>>%` <- function(a,n)
{
bitwShiftR(a,n)
}
|
/scratch/gouwar.j/cran-all/cranData/vectorbitops/R/infix.R
|
# These functions take a vector argument and reduce it to scalar by the chosen bitop.
# Essentially providing set operations on the bits - intersection/union, useful for bitmasks.
#' @name Vector Bitops
#' @rdname funs
#' @aliases bit_vector_AND
#' @aliases bit_vector_OR
#' @aliases bit_vector_XOR
#'
#' @title Bitwise Operations along a Vector
#'
#' @description Functions to apply the same bitwise operation sequentially down a vector of integers. A fast way to AND or OR everything together when a single value is required.
#'
#' @param vec A vector of integers. Numeric vectors will be coerced to int.
#'
#' @return A single integer, the result of applying the operation in question along the vector. Input that cannot be coerced to int returns NA. An empty vector returns 0.
NULL
#' @rdname funs
#' @examples
#' bit_vector_AND(c(1,3,5,7,9))
#' @return `bit_vector_AND`: A single integer, the result of ANDing each entry in the input vector together.
#' @export
bit_vector_AND <- function(vec)
{
.Call(vectorbitops_AND, vec)
}
#' @rdname funs
#' @examples
#' bit_vector_OR(c(1,2,4,8,16))
#' @return `bit_vector_OR`: A single integer, the result of ORing each entry in the input vector together.
#' @export
bit_vector_OR <- function(vec)
{
.Call(vectorbitops_OR, vec)
}
#' @rdname funs
#' @examples
#' bit_vector_XOR(c(1,2,3,4,5))
#' @return `bit_vector_XOR`: A single integer, the result of XORing each entry in the input vector together.
#' @export
bit_vector_XOR <- function(vec)
{
.Call(vectorbitops_XOR, vec)
}
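
# A small motivating sketch (the flag names are made up for illustration):
# the vector reductions act like set union / intersection tests on bitmasks.
#
# READ <- 1L; WRITE <- 2L; EXEC <- 4L
# mask <- bit_vector_OR(c(READ, WRITE, EXEC)) # 7: union of the three flag bits
# bit_vector_AND(c(mask, WRITE)) == WRITE     # TRUE: the WRITE bit is set in mask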
|
/scratch/gouwar.j/cran-all/cranData/vectorbitops/R/vector_ops.R
|
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
#' @useDynLib vectorbitops, .registration = TRUE
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/vectorbitops/R/vectorbitops-package.R
|
#' AR1NV - Estimate the parameters for an AR(1) model
#'
#' @param x One dimensional time series vector
#'
#' @return Return a list containing:
#' \item{g}{estimate of the lag-one autocorrelation.}
#' \item{a}{estimate of the noise variance.}
#'
#' @author Tunc Oygur ([email protected])
#'
#' Code based on a cross wavelet and wavelet coherence toolbox MATLAB package written by Eric Breitenberger
#'
#' @references
#' Grinsted, A., J. C. Moore, and S. Jevrejeva. 2004. Application of the cross
#' wavelet transform and wavelet coherence to geophysical time series.
#' \emph{Nonlinear Processes in Geophysics} 11:561-566.
#'
ar1nv <- function(x){
N <- length(x)
m <- mean(x)
x <- x-m
# Lag zero and one covariance estimates:
c0 <- t(x) %*% (x/N)
  c1 <- x[1:(N - 1)] %*% (x[2:N]/(N - 1)) # parenthesized: 1:N-1 evaluates as (1:N)-1 and only worked because x[0] is dropped
g <- c1/c0
a <- sqrt((1-g^2)*c0)
return(list(g=as.numeric(g), a=as.numeric(a)))
}
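
# Sanity-check sketch (illustrative, not a package test): ar1nv() should
# recover the lag-one coefficient of a simulated AR(1) process
# x_t = 0.6 * x_{t-1} + e_t.
#
# set.seed(1)
# x <- as.numeric(arima.sim(list(ar = 0.6), n = 1000))
# ar1nv(x)$g  # approximately 0.6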
|
/scratch/gouwar.j/cran-all/cranData/vectorwavelet/R/ar1nv.R
|
#' Compute multiple wavelet coherence
#'
#' @param y time series 1 in matrix format (\code{m} rows x 2 columns). The
#' first column should contain the time steps and the second column should
#' contain the values.
#' @param x1 time series 2 in matrix format (\code{m} rows x 2 columns). The
#' first column should contain the time steps and the second column should
#' contain the values.
#' @param x2 time series 3 in matrix format (\code{m} rows x 2 columns). The
#' first column should contain the time steps and the second column should
#' contain the values.
#' @param pad pad the time series with zeros to increase the speed of the
#' transform. Default is TRUE.
#' @param dj spacing between successive scales. Default is 1/12.
#' @param s0 smallest scale of the wavelet. Default is \code{2*dt}.
#' @param J1 number of scales - 1.
#' @param max.scale maximum scale. Computed automatically if left unspecified.
#' @param mother type of mother wavelet function to use. Can be set to
#' \code{morlet}, \code{dog}, or \code{paul}. Default is \code{morlet}.
#' Significance testing is only available for \code{morlet} wavelet.
#' @param param nondimensional parameter specific to the wavelet function.
#' @param lag1 vector containing the AR(1) coefficient of each time series.
#' @param sig.level significance level. Default is \code{0.95}.
#' @param sig.test type of significance test. If set to 0, use a regular
#' \eqn{\chi^2} test. If set to 1, then perform a time-average test. If set to
#' 2, then do a scale-average test.
#' @param nrands number of Monte Carlo randomizations. Default is 300.
#' @param quiet Do not display progress bar. Default is \code{FALSE}
#'
#' @return Return a \code{vectorwavelet} object containing:
#' \item{coi}{matrix containing the cone of influence}
#' \item{rsq}{matrix of wavelet coherence}
#' \item{phase}{matrix of phases}
#' \item{period}{vector of periods}
#' \item{scale}{vector of scales}
#' \item{dt}{length of a time step}
#' \item{t}{vector of times}
#' \item{xaxis}{vector of values used to plot xaxis}
#' \item{s0}{smallest scale of the wavelet}
#' \item{dj}{spacing between successive scales}
#' \item{mother}{mother wavelet used}
#' \item{type}{type of \code{vectorwavelet} object created (\code{mwc})}
#' \item{signif}{matrix containing \code{sig.level} percentiles of wavelet coherence
#' based on the Monte Carlo AR(1) time series}
#'
#' @author Tunc Oygur ([email protected])
#'
#' Code based on MWC MATLAB package written by Eric K. W. Ng and Johnny C. L. Chan.
#'
#' @references
#' T. Oygur, G. Unal. 2020. Vector wavelet coherence for multiple time series.
#' \emph{Int. J. Dynam. Control}.
#'
#' T. Oygur, G. Unal. 2017. The large fluctuations of the stock
#' return and financial crises evidence from Turkey: using wavelet
#' coherency and VARMA modeling to forecast stock return.
#' \emph{Fluctuation and Noise Letters}
#'
#' Ng, Eric KW and Chan, Johnny CL. 2012. Geophysical applications of partial
#' wavelet coherence and multiple wavelet coherence. \emph{Journal of Atmospheric
#' and Oceanic Technology} 29-12:1845--1853.
#'
#' @examples
#' old.par <- par(no.readonly=TRUE)
#'
#' t <- (-100:100)
#'
#' y <- sin(t*2*pi)+sin(t*2*pi/4)+sin(t*2*pi/8)+sin(t*2*pi/16)+sin(t*2*pi/32)+sin(t*2*pi/64)
#' x1 <- sin(t*2*pi/8)
#' x2 <- sin(t*2*pi/32)
#'
#' y <- cbind(t,y)
#' x1 <- cbind(t,x1)
#' x2 <- cbind(t,x2)
#'
#' ## Multiple wavelet coherence
#' result <- mwc(y, x1, x2, nrands = 10)
#' \donttest{
#' result <- mwc(y, x1, x2)
#' }
#'
#' ## Plot wavelet coherence and make room to the right for the color bar
#' ## Note: plot function can be used instead of plot.vectorwavelet
#' par(oma = c(0, 0, 0, 1), mar = c(5, 4, 4, 5) + 0.1, pin = c(3,3))
#' plot.vectorwavelet(result, plot.cb = TRUE, main = "Plot multiple wavelet coherence")
#'
#' par(old.par)
#'
#' @keywords wavelet
#' @keywords coherence
#' @keywords continuous wavelet transform
#' @keywords multiple wavelet coherence
#'
#' @importFrom stats sd
#' @importFrom biwavelet check.data wt smooth.wavelet wtc.sig
#' @export
mwc <- function (y, x1, x2, pad = TRUE, dj = 1/12, s0 = 2 * dt, J1 = NULL,
max.scale = NULL, mother = "morlet", param = -1, lag1 = NULL,
sig.level = 0.95, sig.test = 0, nrands = 300, quiet = FALSE) {
mother <- match.arg(tolower(mother), c("morlet", "paul", "dog"))
# Check data format
checked <- check.data(y = y, x1 = x1, x2 = x2)
xaxis <- y[, 1]
dt <- checked$y$dt
t <- checked$y$t
n <- checked$y$n.obs
if (is.null(J1)) {
if (is.null(max.scale)) {
max.scale <- (n * 0.17) * 2 * dt # automatic maxscale
}
J1 <- round(log2(max.scale/s0)/dj)
}
# Get AR(1) coefficients for each time series
if (is.null(lag1)) {
y.ar1 <- ar1nv(y[,2])$g
x1.ar1 <- ar1nv(x1[,2])$g
x2.ar1 <- ar1nv(x2[,2])$g
lag1 <- c(y.ar1, x1.ar1, x2.ar1)
}
# Get CWT of each time series
wt.y <- wt(d = y, pad = pad, dj = dj, s0 = s0, J1 = J1, max.scale = max.scale,
mother = mother, param = param, sig.level = sig.level,
sig.test = sig.test, lag1 = lag1[1])
wt.x1 <- wt(d = x1, pad = pad, dj = dj, s0 = s0, J1 = J1,
max.scale = max.scale, mother = mother, param = param,
sig.level = sig.level, sig.test = sig.test, lag1 = lag1[2])
wt.x2 <- wt(d = x2, pad = pad, dj = dj, s0 = s0, J1 = J1,
max.scale = max.scale, mother = mother, param = param,
sig.level = sig.level, sig.test = sig.test, lag1 = lag1[3])
# Standard deviation for each time series
y.sigma <- sd(y[, 2], na.rm = TRUE)
x1.sigma <- sd(x1[, 2], na.rm = TRUE)
x2.sigma <- sd(x2[, 2], na.rm = TRUE)
s.inv <- 1/t(wt.y$scale)
s.inv <- matrix(rep(s.inv, n), nrow = NROW(wt.y$wave))
smooth.wt_y <- smooth.wavelet(s.inv*(abs(wt.y$wave)^2), dt, dj, wt.y$scale)
smooth.wt_x1 <- smooth.wavelet(s.inv*(abs(wt.x1$wave)^2), dt, dj, wt.x1$scale)
smooth.wt_x2 <- smooth.wavelet(s.inv*(abs(wt.x2$wave)^2), dt, dj, wt.x2$scale)
  coi <- pmin(wt.y$coi, wt.x1$coi, wt.x2$coi, na.rm = TRUE)
# Cross-wavelet computation
cw.yx1 <- wt.y$wave * Conj(wt.x1$wave)
cw.yx2 <- wt.y$wave * Conj(wt.x2$wave)
cw.x2x1 <- wt.x2$wave * Conj(wt.x1$wave)
smooth.cw_yx1 <- smooth.wavelet(s.inv*(cw.yx1), dt, dj, wt.y$scale)
smooth.cw_yx2 <- smooth.wavelet(s.inv*(cw.yx2), dt, dj, wt.y$scale)
smooth.cw_x2x1 <- smooth.wavelet(s.inv*(cw.x2x1), dt, dj, wt.y$scale)
rsq.yx1 <- abs(smooth.cw_yx1)^2/(smooth.wt_y * smooth.wt_x1)
rsq.yx2 <- abs(smooth.cw_yx2)^2/(smooth.wt_y * smooth.wt_x2)
rsq.x2x1 <- abs(smooth.cw_x2x1)^2/(smooth.wt_x2 * smooth.wt_x1)
r.yx1 <- sqrt(rsq.yx1)
r.yx2 <- sqrt(rsq.yx2)
r.x2x1 <- sqrt(rsq.x2x1)
# Wavelet coherence
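  # Multiple wavelet coherence of y on (x1, x2), following Ng & Chan (2012):
  # rsq = (R_yx1^2 + R_yx2^2 - 2 Re(R_yx1 * Conj(R_yx2) * Conj(R_x2x1))) / (1 - R_x2x1^2),
  # i.e. the coherence between y and the two predictors after accounting for
  # the coherence between the predictors themselves.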
norm.rsq <- (1 - r.x2x1^2)
rsq <- (r.yx1^2+r.yx2^2-2*Re(r.yx1*Conj(r.yx2)*Conj(r.x2x1)))/norm.rsq
# Phase difference
phase <- atan2(Im(cw.yx1), Re(cw.yx1))
if (nrands > 0) {
signif <- wtc.sig(nrands = nrands, lag1 = lag1,
dt = dt, n, pad = pad, dj = dj, J1 = J1, s0 = s0,
max.scale = max.scale, mother = mother, sig.level = sig.level,
quiet = quiet)
}
else {
signif <- NA
}
results <- list(coi = coi,
rsq = rsq,
phase = phase,
period = wt.y$period,
scale = wt.y$scale,
dt = dt,
t = t,
xaxis = xaxis,
s0 = s0,
dj = dj,
mother = mother,
type = "mwc",
signif = signif)
class(results) <- "vectorwavelet"
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/vectorwavelet/R/mwc.R
|
#' Check the format of multivariate time series
#'
#' @param y time series y in matrix format (\code{m} rows x 2 columns). The
#' first column should contain the time steps and the second column should
#' contain the values.
#' @param x multivariate time series x in matrix format (\code{m} rows x (1 + (n-1)) columns). The
#' first column should contain the time steps and the other columns should
#' contain the values.
#'
#' @return Returns a named list whose element \code{y} is itself a list containing:
#' \item{t}{time steps}
#' \item{dt}{size of a time step}
#' \item{n.obs}{number of observations}
#'
#' @author Tunc Oygur ([email protected])
#'
#' Code based on biwavelet package written by Tarik C. Gouhier.
#'
#' @examples
#' #Example 1:
#' t1 <- cbind(1:100, rnorm(100))
#' n.check.data(y = t1)
#'
#' #Example 2:
#' t1 <- cbind(1:100, rnorm(100))
#' t2 <- cbind(1:100, rnorm(100), rnorm(100), rnorm(100))
#' n.check.data(y = t1, x = t2)
#'
#'
#' @export
n.check.data <- function (y, x = NULL) {
if (is.null(y)) {
stop("The time series cannot be NULL")
}
y.check <- n.check.datum(y)
if(!is.null(x)) {
for(i in 2:ncol(x)) {
xi <- x[,c(1,i)]
xi.check <- n.check.datum(xi)
if (any(diff(y[, 1]) != diff(xi[, 1]))) {
stop("The time series must have the same step size")
}
if (y.check$n.obs != xi.check$n.obs) {
stop("The time series must have the same length (see merge command)")
}
}
}
return(list(y = y.check))
}
#' Helper function
#' @param x matrix
#' @return list(t, dt, n.obs)
#' @note This function is not exported
n.check.datum <- function (x) {
if (NCOL(x) > 1) {
t <- x[, 1]
diffs <- diff(t)
dt <- as.numeric(diffs[1])
epsilon <- 0.1 * dt
if (any(abs(diff(t) - dt) > epsilon)) {
stop("The step size must be constant ",
"(see approx function to interpolate)")
}
else {
      if (inherits(t, "Date")) { # inherits() is safer than class(t) == "Date" when class(t) has length > 1
t <- seq_len(NROW(t))
dt <- diff(t)[1]
}
}
}
else {
stop("Error: time steps have to be in column 1")
}
return(list(t = t, dt = dt, n.obs = NROW(x)))
}
|
/scratch/gouwar.j/cran-all/cranData/vectorwavelet/R/n.check.data.R
|
#' Plot \code{vectorwavelet} objects
#'
#' Plot \code{vectorwavelet} objects which are multiple wavelet coherence,
#' quadruple wavelet coherence and n-dimensional vector wavelet coherence.
#'
#' @param x \code{vectorwavelet} object generated by \code{mwc}, \code{qmwc}, or
#' \code{vwc}.
#' @param ncol number of colors to use. Default is 1024.
#' @param fill.cols Vector of fill colors to be used. Users can specify color
#' vectors using \code{colorRampPalette} or \code{brewer.pal} from package
#' \code{RColorBrewer}. Default is \code{NULL} and will generate MATLAB's jet
#' color palette.
#' @param xlab xlabel of the figure. Default is "Time"
#' @param ylab ylabel of the figure. Default is "Period"
#' @param tol tolerance level for significance contours. Significance contours
#' will be drawn around all regions of the spectrum where
#' \code{spectrum/percentile >= tol}. Default is \code{1}. If strict
#' \code{i^{th}} percentile regions are desired, then
#' \code{tol} must be set to \code{1}.
#' @param plot.cb plot color bar if TRUE. Default is FALSE.
#' @param plot.coi plot cone of influence (COI) as a semi-transparent polygon if
#' TRUE. Default is TRUE. Areas that fall within the polygon can be affected
#' by edge effects.
#' @param lwd.coi Line width of COI. Default is 1.
#' @param col.coi Color of COI. Default is \code{white}.
#' @param lty.coi Line type of COI. Default is 1 for solid lines.
#' @param alpha.coi Transparency of COI. Range is 0 (full transparency) to 1 (no
#' transparency). Default is 0.5.
#' @param plot.sig plot contours for significance if TRUE. Default is TRUE.
#' @param lwd.sig Line width of significance contours. Default is 4.
#' @param col.sig Color of significance contours. Default is \code{black}.
#' @param lty.sig Line type of significance contours. Default is 1.
#' @param bw plot in black and white if TRUE. Default is FALSE.
#' @param legend.loc legend location coordinates as defined by
#' \code{image.plot}. Default is \code{NULL}.
#' @param legend.horiz plot a horizontal legend if TRUE. Default is FALSE.
#' @param arrow.len size of the arrows. Default is based on plotting region
#' (min(par()$pin[2]/30,par()$pin[1]/40).
#' @param arrow.lwd width/thickness of arrows. Default is arrow.len*0.3.
#' @param arrow.cutoff cutoff value for plotting phase arrows. Phase arrows will
#' be plotted in regions where the significance of the zvalues exceeds \code{arrow.cutoff}.
#' If the object being plotted does not have a significance field, regions
#' whose zvalues exceed the \code{arrow.cutoff} quantile will be plotted. Default is 0.7.
#' @param arrow.col Color of arrows. Default is \code{black}.
#' @param xlim the x limits. The default is \code{NULL}.
#' @param ylim the y limits. The default is \code{NULL}.
#' @param zlim the z limits. The default is \code{NULL}.
#' @param xaxt Add x-axis? The default is \code{s}; use \code{n} for none.
#' @param yaxt Add y-axis? The default is \code{s}; use \code{n} for none.
#' @param form format to use to display dates on the x-axis. Default is '\%Y'
#' for 4-digit year. See \code{?Date} for other valid formats.
#' @param \dots other parameters.
#'
#' @return No return value, shows the objects plot.
#'
#' @author Tunc Oygur ([email protected])
#'
#' Code based on biwavelet package written by Tarik C. Gouhier.
#'
#' @importFrom fields image.plot
#' @importFrom grDevices adjustcolor colorRampPalette
#' @importFrom graphics axTicks axis box contour image par polygon
#' @export
plot.vectorwavelet <- function (x, ncol = 1024, fill.cols = NULL,
xlab = "Time", ylab = "Period",
tol = 1,
plot.cb = FALSE,
plot.coi = TRUE, lwd.coi = 1, col.coi = "white",
lty.coi = 1, alpha.coi = 0.5, plot.sig = TRUE,
lwd.sig = 4, col.sig = "black", lty.sig = 1,
bw = FALSE,
legend.loc = NULL,
legend.horiz = FALSE,
arrow.len = min(par()$pin[2]/30,
par()$pin[1]/40),
arrow.lwd = arrow.len * 0.3,
arrow.cutoff = 0.7,
arrow.col = "black",
xlim = NULL, ylim = NULL, zlim = c(0,1),
xaxt = "s", yaxt = "s", form = "%Y",...) {
if (is.null(fill.cols)) {
if (bw) {
fill.cols <- c("black", "white")
} else {
fill.cols <- c("#00007F", "blue", "#007FFF", "cyan",
"#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")
}
}
col.pal <- colorRampPalette(fill.cols)
fill.colors <- col.pal(ncol)
if (x$type %in% c("mwc","qmwc", "vwc")) {
zvals <- x$rsq
zvals[!is.finite(zvals)] <- 0
zvals[zvals < zlim[1]] <- zlim[1]
zvals[zvals > 1] <- 1
zvals[zvals < 0] <- 0
if (is.null(zlim)) {
zlim <- range(zvals, na.rm = TRUE)
}
locs <- pretty(range(zlim), n = 5)
leg.lab <- locs
}
if (is.null(xlim)) {
xlim <- range(x$t)
}
yvals <- log2(x$period)
if (is.null(ylim)) {
ylim <- range(yvals)
}else {
ylim <- log2(ylim)
}
image(x$t,
yvals,
t(zvals),
zlim = zlim,
xlim = xlim,
ylim = rev(ylim),
xlab = xlab,
ylab = ylab,
yaxt = "n",
xaxt = "n",
col = fill.colors,...)
box()
  if (inherits(x$xaxis, "Date") || inherits(x$xaxis, "POSIXct")) {
if (xaxt != "n") {
xlocs <- pretty(x$t) + 1
axis(side = 1, at = xlocs, labels = format(x$xaxis[xlocs],
form, ...))
}
}
else {
if (xaxt != "n") {
xlocs <- axTicks(1)
axis(side = 1, at = xlocs, ...)
}
}
if (yaxt != "n") {
axis.locs <- axTicks(2)
yticklab <- format(2^axis.locs, dig = 1)
axis(2, at = axis.locs, labels = yticklab, ...)
}
# COI
if (plot.coi) {
polygon(x = c(x$t, rev(x$t)), lty = lty.coi, lwd = lwd.coi,
y = c(log2(x$coi),
rep(max(log2(x$coi), na.rm = TRUE), length(x$coi))),
col = adjustcolor(col.coi, alpha.f = alpha.coi), border = col.coi)
}
# sig.level contour (default is 95%)
if (plot.sig & length(x$signif) > 1) {
if (x$type %in% c("mwc", "qmwc", "vwc")) {
tmp <- x$rsq/x$signif
contour(x$t, yvals, t(tmp), level = tol, col = col.sig,
lwd = lwd.sig, add = TRUE, drawlabels = FALSE)
}
}
box()
## Add color bar: this must happen after everything, otherwise chaos ensues!
if (plot.cb) {
image.plot(x$t,
yvals,
t(zvals),
zlim = zlim,
ylim = rev(range(yvals)),
xlab = xlab,
ylab = ylab,
col = fill.colors,
smallplot = legend.loc,
horizontal = legend.horiz,
legend.only = TRUE,
axis.args =
list(at = locs, labels = format(leg.lab, dig = 2), ...),
xpd = NA)
}
}
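
# Usage sketch (mirrors the examples in ?mwc and ?qmwc): plot a previously
# computed coherence object with a color bar, leaving room for the legend.
#
# result <- mwc(y, x1, x2)
# par(oma = c(0, 0, 0, 1), mar = c(5, 4, 4, 5) + 0.1)
# plot(result, plot.cb = TRUE, main = "Multiple wavelet coherence")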
|
/scratch/gouwar.j/cran-all/cranData/vectorwavelet/R/plot.vectorwavelet.R
|
#' Compute quadruple wavelet coherence
#'
#' @param y time series 1 in matrix format (\code{m} rows x 2 columns). The
#' first column should contain the time steps and the second column should
#' contain the values.
#' @param x1 time series 2 in matrix format (\code{m} rows x 2 columns). The
#' first column should contain the time steps and the second column should
#' contain the values.
#' @param x2 time series 3 in matrix format (\code{m} rows x 2 columns). The
#' first column should contain the time steps and the second column should
#' contain the values.
#' @param x3 time series 4 in matrix format (\code{m} rows x 2 columns). The
#' first column should contain the time steps and the second column should
#' contain the values.
#' @param pad pad the time series with zeros to increase the speed of the
#' transform. Default is TRUE.
#' @param dj spacing between successive scales. Default is 1/12.
#' @param s0 smallest scale of the wavelet. Default is \code{2*dt}.
#' @param J1 number of scales - 1.
#' @param max.scale maximum scale. Computed automatically if left unspecified.
#' @param mother type of mother wavelet function to use. Can be set to
#' \code{morlet}, \code{dog}, or \code{paul}. Default is \code{morlet}.
#' Significance testing is only available for \code{morlet} wavelet.
#' @param param nondimensional parameter specific to the wavelet function.
#' @param lag1 vector containing the AR(1) coefficient of each time series.
#' @param sig.level significance level. Default is \code{0.95}.
#' @param sig.test type of significance test. If set to 0, use a regular
#' \eqn{\chi^2} test. If set to 1, then perform a time-average test. If set to
#' 2, then do a scale-average test.
#' @param nrands number of Monte Carlo randomizations. Default is 300.
#' @param quiet Do not display progress bar. Default is \code{FALSE}
#'
#' @return Return a \code{vectorwavelet} object containing:
#' \item{coi}{matrix containing the cone of influence}
#' \item{rsq}{matrix of wavelet coherence}
#' \item{phase}{matrix of phases}
#' \item{period}{vector of periods}
#' \item{scale}{vector of scales}
#' \item{dt}{length of a time step}
#' \item{t}{vector of times}
#' \item{xaxis}{vector of values used to plot xaxis}
#' \item{s0}{smallest scale of the wavelet }
#' \item{dj}{spacing between successive scales}
#' \item{mother}{mother wavelet used}
#' \item{type}{type of \code{vectorwavelet} object created (\code{qmwc})}
#' \item{signif}{matrix containing \code{sig.level} percentiles of wavelet coherence
#' based on the Monte Carlo AR(1) time series}
#'
#' @author Tunc Oygur ([email protected])
#'
#' @references
#' T. Oygur, G. Unal. 2020. Vector wavelet coherence for multiple time series.
#' \emph{Int. J. Dynam. Control}.
#'
#' T. Oygur, G. Unal. 2017. The large fluctuations of the stock
#' return and financial crises evidence from Turkey: using wavelet
#' coherency and VARMA modeling to forecast stock return.
#' \emph{Fluctuation and Noise Letters}
#'
#' @examples
#' old.par <- par(no.readonly=TRUE)
#'
#' t <- (-100:100)
#'
#' y <- sin(t*2*pi)+sin(t*2*pi/4)+sin(t*2*pi/8)+sin(t*2*pi/16)+sin(t*2*pi/32)+sin(t*2*pi/64)
#' x1 <- sin(t*2*pi/16)
#' x2 <- sin(t*2*pi/32)
#' x3 <- sin(t*2*pi/64)
#'
#' y <- cbind(t,y)
#' x1 <- cbind(t,x1)
#' x2 <- cbind(t,x2)
#' x3 <- cbind(t,x3)
#'
#' ## Quadruple wavelet coherence
#' result <- qmwc(y, x1, x2, x3, nrands = 10)
#' \donttest{
#' result <- qmwc(y, x1, x2, x3)
#' }
#'
#' ## Plot wavelet coherence and make room to the right for the color bar
#' ## Note: plot function can be used instead of plot.vectorwavelet
#' par(oma = c(0, 0, 0, 1), mar = c(5, 4, 4, 5) + 0.1, pin = c(3,3))
#' plot.vectorwavelet(result, plot.cb = TRUE, main = "Plot quadruple wavelet coherence")
#'
#' par(old.par)
#'
#' @keywords wavelet
#' @keywords coherence
#' @keywords continuous wavelet transform
#' @keywords quadruple wavelet coherence
#'
#' @importFrom stats sd
#' @importFrom biwavelet check.data wt smooth.wavelet wtc.sig
#' @export
qmwc <- function (y, x1, x2, x3, pad = TRUE, dj = 1/12, s0 = 2 * dt, J1 = NULL,
max.scale = NULL, mother = "morlet", param = -1, lag1 = NULL,
sig.level = 0.95, sig.test = 0, nrands = 300, quiet = FALSE) {
mother <- match.arg(tolower(mother), c("morlet", "paul", "dog"))
  # Check data format. biwavelet::check.data() accepts at most three series at
  # a time, so x3 is validated against y in a second call:
  checked <- check.data(y = y, x1 = x1, x2 = x2)
  check.data(y = y, x1 = x3)
xaxis <- y[, 1]
dt <- checked$y$dt
t <- checked$y$t
n <- checked$y$n.obs
if (is.null(J1)) {
if (is.null(max.scale)) {
max.scale <- (n * 0.17) * 2 * dt # automatic maxscale
}
J1 <- round(log2(max.scale/s0)/dj)
}
# Get AR(1) coefficients for each time series
if (is.null(lag1)) {
y.ar1 <- ar1nv(y[,2])$g
x1.ar1 <- ar1nv(x1[,2])$g
x2.ar1 <- ar1nv(x2[,2])$g
x3.ar1 <- ar1nv(x3[,2])$g
    lag1 <- c(y.ar1, x1.ar1, x2.ar1, x3.ar1)
}
# Get CWT of each time series
wt.y <- wt(d = y, pad = pad, dj = dj, s0 = s0, J1 = J1, max.scale = max.scale,
mother = mother, param = param, sig.level = sig.level,
sig.test = sig.test, lag1 = lag1[1])
wt.x1 <- wt(d = x1, pad = pad, dj = dj, s0 = s0, J1 = J1,
max.scale = max.scale, mother = mother, param = param,
sig.level = sig.level, sig.test = sig.test, lag1 = lag1[2])
wt.x2 <- wt(d = x2, pad = pad, dj = dj, s0 = s0, J1 = J1,
max.scale = max.scale, mother = mother, param = param,
sig.level = sig.level, sig.test = sig.test, lag1 = lag1[3])
wt.x3 <- wt(d = x3, pad = pad, dj = dj, s0 = s0, J1 = J1,
max.scale = max.scale, mother = mother, param = param,
sig.level = sig.level, sig.test = sig.test, lag1 = lag1[4])
# Standard deviation for each time series
y.sigma <- sd(y[, 2], na.rm = TRUE)
x1.sigma <- sd(x1[, 2], na.rm = TRUE)
x2.sigma <- sd(x2[, 2], na.rm = TRUE)
x3.sigma <- sd(x3[, 2], na.rm = TRUE)
s.inv <- 1/t(wt.y$scale)
s.inv <- matrix(rep(s.inv, n), nrow = NROW(wt.y$wave))
smooth.wt_y <- smooth.wavelet(s.inv*(abs(wt.y$wave)^2), dt, dj, wt.y$scale)
smooth.wt_x1 <- smooth.wavelet(s.inv*(abs(wt.x1$wave)^2), dt, dj, wt.x1$scale)
smooth.wt_x2 <- smooth.wavelet(s.inv*(abs(wt.x2$wave)^2), dt, dj, wt.x2$scale)
smooth.wt_x3 <- smooth.wavelet(s.inv*(abs(wt.x3$wave)^2), dt, dj, wt.x3$scale)
  coi <- pmin(wt.y$coi, wt.x1$coi, wt.x2$coi, wt.x3$coi, na.rm = TRUE)
# Cross-wavelet computation
cw.yx1 <- wt.y$wave * Conj(wt.x1$wave)
cw.yx2 <- wt.y$wave * Conj(wt.x2$wave)
cw.yx3 <- wt.y$wave * Conj(wt.x3$wave)
cw.x1x2 <- wt.x1$wave * Conj(wt.x2$wave)
cw.x1x3 <- wt.x1$wave * Conj(wt.x3$wave)
cw.x2x3 <- wt.x2$wave * Conj(wt.x3$wave)
smooth.cw_yx1 <- smooth.wavelet(s.inv*(cw.yx1), dt, dj, wt.y$scale)
smooth.cw_yx2 <- smooth.wavelet(s.inv*(cw.yx2), dt, dj, wt.y$scale)
smooth.cw_yx3 <- smooth.wavelet(s.inv*(cw.yx3), dt, dj, wt.y$scale)
smooth.cw_x1x2 <- smooth.wavelet(s.inv*(cw.x1x2), dt, dj, wt.y$scale)
smooth.cw_x1x3 <- smooth.wavelet(s.inv*(cw.x1x3), dt, dj, wt.y$scale)
smooth.cw_x2x3 <- smooth.wavelet(s.inv*(cw.x2x3), dt, dj, wt.y$scale)
rsq.yx1 <- abs(smooth.cw_yx1)^2/(smooth.wt_y * smooth.wt_x1)
rsq.yx2 <- abs(smooth.cw_yx2)^2/(smooth.wt_y * smooth.wt_x2)
rsq.yx3 <- abs(smooth.cw_yx3)^2/(smooth.wt_y * smooth.wt_x3)
rsq.x1x2 <- abs(smooth.cw_x1x2)^2/(smooth.wt_x1 * smooth.wt_x2)
rsq.x1x3 <- abs(smooth.cw_x1x3)^2/(smooth.wt_x1 * smooth.wt_x3)
rsq.x2x3 <- abs(smooth.cw_x2x3)^2/(smooth.wt_x2 * smooth.wt_x3)
r.yx1 <- sqrt(rsq.yx1)
r.yx2 <- sqrt(rsq.yx2)
r.yx3 <- sqrt(rsq.yx3)
r.x1x2 <- sqrt(rsq.x1x2)
r.x1x3 <- sqrt(rsq.x1x3)
r.x2x3 <- sqrt(rsq.x2x3)
# Wavelet coherence
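  # Quadruple wavelet coherence, following Oygur & Unal (2020): rsq = 1 - Cd/Cxxd.
  # The expansions below correspond to determinants of complex coherence
  # matrices: Cxxd for the 3 x 3 predictor block (x1, x2, x3) and Cd for the
  # full 4 x 4 matrix including y.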
Cxxd <- 1 - r.x1x2^2 - r.x1x3^2 - r.x2x3^2 + 2 * Re(r.x1x2 * r.x2x3 * Conj(r.x1x3) )
Cd <- 1 - r.yx1^2 - r.yx2^2 - r.yx3^2 - r.x1x2^2 - r.x1x3^2 - r.x2x3^2
Cd <- Cd + (r.yx3^2 * r.x1x2^2)
Cd <- Cd + (r.yx1^2 * r.x2x3^2)
Cd <- Cd + (r.yx2^2 * r.x1x3^2)
Cd <- Cd + 2 * Re(r.yx1 * r.x1x3 * Conj(r.yx3))
Cd <- Cd + 2 * Re(r.yx2 * r.x2x3 * Conj(r.yx3))
Cd <- Cd + 2 * Re(r.yx1 * r.x1x2 * Conj(r.yx2))
Cd <- Cd + 2 * Re(r.x1x2 * r.x2x3 * Conj(r.x1x3))
Cd <- Cd - 2 * Re(r.yx1 * r.x1x2 * r.x2x3 * Conj(r.yx3))
Cd <- Cd - 2 * Re(r.yx2 * Conj(r.yx1) * r.x2x3 * Conj(r.x1x3))
Cd <- Cd - 2 * Re(r.yx2 * Conj(r.yx3) * r.x1x3 * Conj(r.x1x2))
rsq <- 1 - (Cd / Cxxd)
# Phase difference
phase <- atan2(Im(cw.yx1), Re(cw.yx1))
if (nrands > 0) {
signif <- wtc.sig(nrands = nrands, lag1 = lag1,
dt = dt, n, pad = pad, dj = dj, J1 = J1, s0 = s0,
max.scale = max.scale, mother = mother, sig.level = sig.level,
quiet = quiet)
}
else {
signif <- NA
}
results <- list(coi = coi,
rsq = rsq,
phase = phase,
period = wt.y$period,
scale = wt.y$scale,
dt = dt,
t = t,
xaxis = xaxis,
s0 = s0,
dj = dj,
mother = mother,
type = "qmwc",
signif = signif)
class(results) <- "vectorwavelet"
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/vectorwavelet/R/qmwc.R
|
#' @docType package
#' @name vectorwavelet-package
#' @aliases vectorwavelet
#' @exportPattern ^[[:alpha:]]+
#' @importFrom stats as.dist filter qchisq quantile rnorm sd ts var weighted.mean
#' @importFrom utils setTxtProgressBar txtProgressBar
#'
#' @title
#' Vector wavelet coherence for multiple time series
#'
#' @description
#' This package can be used to perform multiple wavelet coherence (mwc),
#' quadruple wavelet coherence (qmwc), and n-dimensional vector wavelet coherence (vwc) analyses.
#'
#' @author Tunc Oygur, Gazanfer Unal
#'
#' Maintainer: Tunc Oygur <[email protected]>
#'
#' Code based on biwavelet package written by Tarik C. Gouhier, Aslak Grinsted, Viliam Simko.
#'
#' @references
#' T. Oygur, G. Unal. Vector wavelet coherence for multiple time series.
#' \emph{Int. J. Dynam. Control (2020).}
#'
#' T. Oygur, G. Unal. The large fluctuations of the stock
#' return and financial crises evidence from Turkey: using wavelet
#' coherency and VARMA modeling to forecast stock return.
#' \emph{Fluctuation and Noise Letters, 2017}
#'
#' T.C. Gouhier, A. Grinsted and V. Simko. 2016. \emph{biwavelet:
#' Conduct univariate and bivariate wavelet analyses (Version 0.20.15).}
#' Available from http://github.com/tgouhier/biwavelet
#'
#' Ng, Eric KW and Chan, Johnny CL. 2012. Geophysical applications of partial
#' wavelet coherence and multiple wavelet coherence. \emph{Journal of Atmospheric
#' and Oceanic Technology} 29-12:1845--1853.
#'
#' Grinsted, A., J. C. Moore, and S. Jevrejeva. 2004. Application of the cross
#' wavelet transform and wavelet coherence to geophysical time series.
#' \emph{Nonlinear Processes in Geophysics} 11:561-566.
#'
#' Torrence, C., and G. P. Compo. 1998. A Practical Guide to Wavelet Analysis.
#' \emph{Bulletin of the American Meteorological Society} 79:61-78.
#'
#' @keywords wavelet
#' @keywords coherence
#' @keywords continuous wavelet transform
#' @keywords multiple wavelet coherence
#' @keywords quadruple wavelet coherence
#' @keywords vector wavelet coherence
NULL
.onAttach <- function(libname, pkgname) {
# just to show a startup message
  msg <- paste("vectorwavelet", utils::packageVersion("vectorwavelet"), "loaded.")
  packageStartupMessage(msg, appendLF = TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/vectorwavelet/R/vectorwavelet-package.R
|
#' Compute n-dimensional vector wavelet coherence
#'
#' @param y time series y in matrix format (\code{m} rows x 2 columns). The
#' first column should contain the time steps and the second column should
#' contain the values.
#' @param x multivariate time series x in matrix format (\code{m} rows x n columns).
#' The first column should contain the time steps and the other columns should
#' contain the values.
#' @param pad pad the values with zeros to increase the speed of the
#' transform. Default is TRUE.
#' @param dj spacing between successive scales. Default is 1/12.
#' @param s0 smallest scale of the wavelet. Default is \code{2*dt}.
#' @param J1 number of scales - 1.
#' @param max.scale maximum scale. Computed automatically if left unspecified.
#' @param mother type of mother wavelet function to use. Can be set to
#' \code{morlet}, \code{dog}, or \code{paul}. Default is \code{morlet}.
#' Significance testing is only available for the \code{morlet} wavelet.
#' @param param nondimensional parameter specific to the wavelet function.
#' @param lag1 vector containing the AR(1) coefficient of each time series.
#' @param sig.level significance level. Default is \code{0.95}.
#' @param sig.test type of significance test. If set to 0, use a regular
#' \eqn{\chi^2} test. If set to 1, then perform a time-average test. If set to
#' 2, then do a scale-average test.
#' @param nrands number of Monte Carlo randomizations. Default is 300.
#' @param quiet Do not display progress bar. Default is \code{FALSE}
#'
#' @return Return a \code{vectorwavelet} object containing:
#' \item{coi}{matrix containing the cone of influence}
#' \item{rsq}{matrix of wavelet coherence}
#' \item{phase}{matrix of phases}
#' \item{period}{vector of periods}
#' \item{scale}{vector of scales}
#' \item{dt}{length of a time step}
#' \item{t}{vector of times}
#' \item{xaxis}{vector of values used to plot xaxis}
#' \item{s0}{smallest scale of the wavelet}
#' \item{dj}{spacing between successive scales}
#' \item{mother}{mother wavelet used}
#' \item{type}{type of \code{vectorwavelet} object created (\code{vwc})}
#' \item{signif}{matrix containing \code{sig.level} percentiles of wavelet coherence
#' based on the Monte Carlo AR(1) time series}
#'
#' @author Tunc Oygur ([email protected])
#'
#' @references
#' T. Oygur, G. Unal. Vector wavelet coherence for multiple time series.
#' \emph{Int. J. Dynam. Control (2020).}
#'
#' @examples
#' old.par <- par(no.readonly=TRUE)
#'
#' t <- (-100:100)
#'
#' y <- sin(t*2*pi)+sin(t*2*pi/4)+sin(t*2*pi/8)+sin(t*2*pi/16)+sin(t*2*pi/32)+sin(t*2*pi/64)
#' x1 <- sin(t*2*pi/8)
#' x2 <- sin(t*2*pi/16)
#' x3 <- sin(t*2*pi/32)
#' x4 <- sin(t*2*pi/64)
#'
#' y <- cbind(t,y)
#' x <- cbind(t,x1,x2,x3,x4)
#'
#' ## n-dimensional multiple wavelet coherence
#' result <- vwc(y, x, nrands = 10)
#' \donttest{
#' result <- vwc(y, x)
#' }
#'
#' ## Plot wavelet coherence and make room to the right for the color bar
#' ## Note: plot function can be used instead of plot.vectorwavelet
#' par(oma = c(0, 0, 0, 1), mar = c(5, 4, 4, 5) + 0.1, pin = c(3,3))
#' plot.vectorwavelet(result, plot.cb = TRUE, main = "Plot n-dimensional vwc (n=5)")
#'
#' par(old.par)
#'
#' @keywords wavelet
#' @keywords coherence
#' @keywords continuous wavelet transform
#' @keywords n-dimensional wavelet coherence
#'
#' @importFrom stats sd
#' @importFrom biwavelet check.data wt smooth.wavelet wtc.sig
#' @export
vwc <- function (y, x, pad = TRUE, dj = 1/12, s0 = 2 * dt, J1 = NULL,
max.scale = NULL, mother = "morlet", param = -1, lag1 = NULL,
sig.level = 0.95, sig.test = 0, nrands = 300, quiet = FALSE) {
mother <- match.arg(tolower(mother), c("morlet", "paul", "dog"))
# Check data format
checked <- n.check.data(y = y, x = x)
xaxis <- y[, 1]
dt <- checked$y$dt
t <- checked$y$t
n <- checked$y$n.obs
if (is.null(J1)) {
if (is.null(max.scale)) {
max.scale <- (n * 0.17) * 2 * dt
}
J1 <- round(log2(max.scale/s0)/dj)
}
# Get AR(1) coefficients
if (is.null(lag1)) {
y.ar1 <- ar1nv(y[, 2])$g
    x.ar1 <- as.numeric(apply(x[, -1], 2, function(col) ar1nv(col)$g))
lag1 <- c(y.ar1, x.ar1)
}
  # Standard deviations (kept for reference; not used downstream)
  y.sigma <- sd(y[, 2], na.rm = TRUE)
  x.sigma <- apply(x[, -1, drop = FALSE], 2, sd, na.rm = TRUE)
df <- cbind(y,x[,-1])
colnames(df) <- c("t","y",paste0("x",1:(ncol(x)-1)))
n_dim <- ncol(df)-1
# Get CWT of each time series
wt.res <- list()
for(i in 2:ncol(df)) {
xi <- df[,c(1,i)]
temp.col <- colnames(df)[i]
wt.res[[temp.col]] <- wt(d = xi, pad = pad, dj = dj, s0 = s0, J1 = J1, max.scale = max.scale, mother = mother,
param = param, sig.level = sig.level,sig.test = sig.test, lag1 = lag1[i-1])
rm(xi);rm(temp.col)
}
rm(i)
s.inv <- 1/t(wt.res[["y"]]$scale)
s.inv <- matrix(rep(s.inv, n), nrow = NROW(wt.res[["y"]]$wave))
s.wt.res <- list()
for(i in 2:ncol(df)) {
temp.col <- colnames(df)[i]
s.wt.res[[temp.col]] <- smooth.wavelet(s.inv*(abs(wt.res[[temp.col]]$wave)^2), dt, dj, wt.res[[temp.col]]$scale)
rm(temp.col)
}
rm(i)
coi.res <- matrix(NA, nrow = n, ncol = (ncol(df)-1))
for(i in 2:ncol(df)) {
temp.col <- colnames(df)[i]
coi.res[,(i-1)] <- wt.res[[temp.col]]$coi
rm(temp.col)
}
rm(i)
  coi <- apply(coi.res, 1, function(ci) min(ci, na.rm = TRUE))
rm(coi.res)
# Cross-wavelet computation
cw <- list()
smooth.cw <- list()
rsq <- list()
r <- list()
for(i in 2:(ncol(df)-1)){
for(j in (i+1):ncol(df)){
temp.col.i <- colnames(df)[i]
temp.col.j <- colnames(df)[j]
temp.cw <- wt.res[[temp.col.i]]$wave * Conj(wt.res[[temp.col.j]]$wave)
temp.smooth.cw <- smooth.wavelet(s.inv*(temp.cw), dt, dj, wt.res[["y"]]$scale)
temp.rsq <- abs(temp.smooth.cw)^2/(s.wt.res[[temp.col.i]] * s.wt.res[[temp.col.j]])
temp.r <- sqrt(temp.rsq)
cw[[paste0(temp.col.i,"-",temp.col.j)]] <- temp.cw
smooth.cw[[paste0(temp.col.i,"-",temp.col.j)]] <- temp.smooth.cw
rsq[[paste0(temp.col.i,"-",temp.col.j)]] <- temp.rsq
r[[paste0(temp.col.i,"-",temp.col.j)]] <- temp.r
rm(temp.col.i);rm(temp.col.j);rm(temp.cw);rm(temp.smooth.cw);rm(temp.rsq);rm(temp.r)
}
rm(j)
}
rm(i)
######################################################################################################################
######################################################################################################################
# Wavelet coherence
r_tilde <- list()
for(i in 1:(ncol(df)-1)) {
for(j in 1:(ncol(df)-1)) {
temp.col.i <- colnames(df)[i+1]
temp.col.j <- colnames(df)[j+1]
if(i==j) {
r_tilde[[paste0(temp.col.i,"-",temp.col.j)]] <- 1
} else if (j > i) {
r_tilde[[paste0(temp.col.i,"-",temp.col.j)]] <- r[[paste0(temp.col.i,"-",temp.col.j)]]
} else {
r_tilde[[paste0(temp.col.i,"-",temp.col.j)]] <- Conj(r[[paste0(temp.col.j,"-",temp.col.i)]])
}
}
rm(j)
}
rm(i)
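  # The n-dimensional coherence generalizes the determinant form used in
  # qmwc(): rsq = 1 - det(R)/det(R11), where R = (r_tilde[i,j]) is the matrix
  # of smoothed pairwise coherences and R11 the minor obtained by deleting
  # its first row and column. cofactor.wavelogy() below carries out the
  # Laplace (cofactor) expansion recursively.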
##Cofactor function ##################################################################################################
cofactor.wavelogy <- function(ii, jj, del_ii, del_jj, level, n_dim,order) {
order.sign <- if(order %% 2 == 1) 1 else -1
temp.col.i <- colnames(df)[ii+1]
temp.col.j <- colnames(df)[jj+1]
res <- order.sign * r_tilde[[paste0(temp.col.i,"-",temp.col.j)]]
ii_vec <- setdiff(c(1:n_dim),del_ii)
jj_vec <- setdiff(c(1:n_dim),del_jj)
if(n_dim-level != 2) {
temp.res <- 0
for(kk in 1:(n_dim-level)) {
temp.res <- temp.res + cofactor.wavelogy(ii=min(ii_vec), jj=jj_vec[kk], del_ii=c(del_ii,min(ii_vec)), del_jj=c(del_jj,jj_vec[kk]), level=level+1, n_dim, order=kk)
}
res <- res * temp.res
} else {
temp.col.i_1 <- colnames(df)[ii_vec[1]+1]
temp.col.j_1 <- colnames(df)[jj_vec[1]+1]
temp.col.i_2 <- colnames(df)[ii_vec[2]+1]
temp.col.j_2 <- colnames(df)[jj_vec[2]+1]
res <- res * (r_tilde[[paste0(temp.col.i_1,"-",temp.col.j_1)]]*r_tilde[[paste0(temp.col.i_2,"-",temp.col.j_2)]] -
r_tilde[[paste0(temp.col.i_2,"-",temp.col.j_1)]]*r_tilde[[paste0(temp.col.i_1,"-",temp.col.j_2)]])
}
return(res)
}
##Cxx ####################################################################################################
Cxx <- 0
for(k in 1:n_dim) {
Cxx <- Cxx + cofactor.wavelogy(ii=1, jj=k, del_ii=1, del_jj=k, level=1, n_dim, order=k)
}
C11 <- cofactor.wavelogy(ii=1, jj=1, del_ii=1, del_jj=1, level=1, n_dim, order=1)
##Rsq ####################################################################################################
rsq <- 1 - (Cxx / C11)
######################################################################################################################
######################################################################################################################
# Phase difference
phase <- atan2(Im(cw[["y-x1"]]), Re(cw[["y-x1"]]))
if (nrands > 0) {
    signif <- wtc.sig(nrands = nrands, lag1 = lag1,
dt = dt, n, pad = pad, dj = dj, J1 = J1, s0 = s0,
max.scale = max.scale, mother = mother, sig.level = sig.level,
quiet = quiet)
}
else {
signif <- NA
}
results <- list(coi = coi,
rsq = rsq,
phase = phase,
period = wt.res[["y"]]$period,
scale = wt.res[["y"]]$scale,
dt = dt,
t = t,
xaxis = xaxis,
s0 = s0,
dj = dj,
mother = mother,
type = "vwc",
signif = signif)
class(results) <- "vectorwavelet"
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/vectorwavelet/R/vwc.R
|
# placeholder bindings to quiet R CMD check notes about undefined globals
vl <- wdgt_png_tf <- NULL
#' General axis settings (x-axis)
#'
#' Axes provide axis lines, ticks and labels to convey how a spatial range represents
#' a data range. Simply put, axes visualize scales. \cr
#' \cr
#' By default, Vega-Lite automatically creates axes for x, y, row, and column channels
#' when they are encoded. Axis can be customized via the axis property of a channel
#' definition.
#'
#' @param vl Vega-Lite object
#' @param axisWidth,layer,offset,grid,labels,labelAngle,labelAlign,labelBaseline
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @param labelMaxLength,shortTimeLabels,subdivide,ticks,tickPadding,tickSize
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @param tickSizeMajor,tickSizeMinor,tickSizeEnd,title,titleOffset,titleMaxLength
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @param characterWidth,orient,format,remove
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/axis.html}{Vega-Lite Axis spec}
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
#' add_filter("datum.year == 2000") %>%
#' calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
#' encode_x("gender", "nominal") %>%
#' encode_y("people", "quantitative", aggregate="sum") %>%
#' encode_color("gender", "nominal") %>%
#' scale_x_ordinal(band_size=6) %>%
#' scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>%
#' facet_col("age", "ordinal", padding=4) %>%
#' axis_x(remove=TRUE) %>%
#' axis_y(title="population", grid=FALSE) %>%
#' axis_facet_col(orient="bottom", axisWidth=1, offset=-8) %>%
#' facet_cell(stroke_width=0) %>%
#' mark_bar()
axis_x <- function(vl, axisWidth=NULL, layer=NULL, offset=NULL, grid=NULL, labels=TRUE,
labelAngle=NULL, labelAlign=NULL, labelBaseline=NULL, labelMaxLength=25,
shortTimeLabels=NULL, subdivide=NULL, ticks=NULL, tickPadding=NULL,
tickSize=NULL, tickSizeMajor=NULL, tickSizeMinor=NULL, tickSizeEnd=NULL,
title="", titleOffset=NULL, titleMaxLength=NULL, characterWidth=6,
orient=NULL, format=NULL, remove=FALSE) {
chnl <- "x"
if (remove) {
vl$x$encoding[[chnl]]$axis <- FALSE
} else {
if (!is.null(axisWidth)) vl$x$encoding[[chnl]]$axis$axisWidth <- axisWidth
if (!is.null(layer)) vl$x$encoding[[chnl]]$axis$layer <- layer
if (!is.null(offset)) vl$x$encoding[[chnl]]$axis$offset <- offset
if (!is.null(grid)) vl$x$encoding[[chnl]]$axis$grid <- grid
if (!is.null(labels)) vl$x$encoding[[chnl]]$axis$labels <- labels
if (!is.null(labelAngle)) vl$x$encoding[[chnl]]$axis$labelAngle <- labelAngle
if (!is.null(labelAlign)) vl$x$encoding[[chnl]]$axis$labelAlign <- labelAlign
if (!is.null(labelBaseline)) vl$x$encoding[[chnl]]$axis$labelBaseline <- labelBaseline
if (!is.null(labelMaxLength)) vl$x$encoding[[chnl]]$axis$labelMaxLength <- labelMaxLength
if (!is.null(shortTimeLabels)) vl$x$encoding[[chnl]]$axis$shortTimeLabels <- shortTimeLabels
if (!is.null(subdivide)) vl$x$encoding[[chnl]]$axis$subdivide <- subdivide
if (!is.null(ticks)) vl$x$encoding[[chnl]]$axis$ticks <- ticks
if (!is.null(tickPadding)) vl$x$encoding[[chnl]]$axis$tickPadding <- tickPadding
if (!is.null(tickSize)) vl$x$encoding[[chnl]]$axis$tickSize <- tickSize
if (!is.null(tickSizeMajor)) vl$x$encoding[[chnl]]$axis$tickSizeMajor <- tickSizeMajor
if (!is.null(tickSizeMinor)) vl$x$encoding[[chnl]]$axis$tickSizeMinor <- tickSizeMinor
if (!is.null(tickSizeEnd)) vl$x$encoding[[chnl]]$axis$tickSizeEnd <- tickSizeEnd
if (!is.null(title)) vl$x$encoding[[chnl]]$axis$title <- title
if (!is.null(titleOffset)) vl$x$encoding[[chnl]]$axis$titleOffset <- titleOffset
if (!is.null(titleMaxLength)) vl$x$encoding[[chnl]]$axis$titleMaxLength <- titleMaxLength
if (!is.null(characterWidth)) vl$x$encoding[[chnl]]$axis$characterWidth <- characterWidth
if (!is.null(orient)) vl$x$encoding[[chnl]]$axis$orient <- orient
if (!is.null(format)) vl$x$encoding[[chnl]]$axis$format <- format
}
vl
}
#' General axis settings (y-axis)
#'
#' Axes provide axis lines, ticks and labels to convey how a spatial range represents
#' a data range. Simply put, axes visualize scales. \cr
#' \cr
#' By default, Vega-Lite automatically creates axes for x, y, row, and column channels
#' when they are encoded. Axis can be customized via the axis property of a channel
#' definition.
#'
#' @param vl Vega-Lite object
#' @param axisWidth,layer,offset,grid,labels,labelAngle,labelAlign,labelBaseline
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @param labelMaxLength,shortTimeLabels,subdivide,ticks,tickPadding,tickSize
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @param tickSizeMajor,tickSizeMinor,tickSizeEnd,title,titleOffset,titleMaxLength
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @param characterWidth,orient,format,remove
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/axis.html}{Vega-Lite Axis spec}
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
#' add_filter("datum.year == 2000") %>%
#' calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
#' encode_x("gender", "nominal") %>%
#' encode_y("people", "quantitative", aggregate="sum") %>%
#' encode_color("gender", "nominal") %>%
#' scale_x_ordinal(band_size=6) %>%
#' scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>%
#' facet_col("age", "ordinal", padding=4) %>%
#' axis_x(remove=TRUE) %>%
#' axis_y(title="population", grid=FALSE) %>%
#' axis_facet_col(orient="bottom", axisWidth=1, offset=-8) %>%
#' facet_cell(stroke_width=0) %>%
#' mark_bar()
axis_y <- function(vl, axisWidth=NULL, layer=NULL, offset=NULL, grid=NULL, labels=TRUE,
labelAngle=NULL, labelAlign=NULL, labelBaseline=NULL, labelMaxLength=25,
shortTimeLabels=NULL, subdivide=NULL, ticks=NULL, tickPadding=NULL,
tickSize=NULL, tickSizeMajor=NULL, tickSizeMinor=NULL, tickSizeEnd=NULL,
title="", titleOffset=NULL, titleMaxLength=NULL, characterWidth=6,
orient=NULL, format=NULL, remove=FALSE) {
chnl <- "y"
if (remove) {
vl$x$encoding[[chnl]]$axis <- FALSE
} else {
if (!is.null(axisWidth)) vl$x$encoding[[chnl]]$axis$axisWidth <- axisWidth
if (!is.null(layer)) vl$x$encoding[[chnl]]$axis$layer <- layer
if (!is.null(offset)) vl$x$encoding[[chnl]]$axis$offset <- offset
if (!is.null(grid)) vl$x$encoding[[chnl]]$axis$grid <- grid
if (!is.null(labels)) vl$x$encoding[[chnl]]$axis$labels <- labels
if (!is.null(labelAngle)) vl$x$encoding[[chnl]]$axis$labelAngle <- labelAngle
if (!is.null(labelAlign)) vl$x$encoding[[chnl]]$axis$labelAlign <- labelAlign
if (!is.null(labelBaseline)) vl$x$encoding[[chnl]]$axis$labelBaseline <- labelBaseline
if (!is.null(labelMaxLength)) vl$x$encoding[[chnl]]$axis$labelMaxLength <- labelMaxLength
if (!is.null(shortTimeLabels)) vl$x$encoding[[chnl]]$axis$shortTimeLabels <- shortTimeLabels
if (!is.null(subdivide)) vl$x$encoding[[chnl]]$axis$subdivide <- subdivide
if (!is.null(ticks)) vl$x$encoding[[chnl]]$axis$ticks <- ticks
if (!is.null(tickPadding)) vl$x$encoding[[chnl]]$axis$tickPadding <- tickPadding
if (!is.null(tickSize)) vl$x$encoding[[chnl]]$axis$tickSize <- tickSize
if (!is.null(tickSizeMajor)) vl$x$encoding[[chnl]]$axis$tickSizeMajor <- tickSizeMajor
if (!is.null(tickSizeMinor)) vl$x$encoding[[chnl]]$axis$tickSizeMinor <- tickSizeMinor
if (!is.null(tickSizeEnd)) vl$x$encoding[[chnl]]$axis$tickSizeEnd <- tickSizeEnd
if (!is.null(title)) vl$x$encoding[[chnl]]$axis$title <- title
if (!is.null(titleOffset)) vl$x$encoding[[chnl]]$axis$titleOffset <- titleOffset
if (!is.null(titleMaxLength)) vl$x$encoding[[chnl]]$axis$titleMaxLength <- titleMaxLength
if (!is.null(characterWidth)) vl$x$encoding[[chnl]]$axis$characterWidth <- characterWidth
if (!is.null(orient)) vl$x$encoding[[chnl]]$axis$orient <- orient
if (!is.null(format)) vl$x$encoding[[chnl]]$axis$format <- format
}
vl
}
#' General axis settings (column facet)
#'
#' Axes provide axis lines, ticks and labels to convey how a spatial range represents
#' a data range. Simply put, axes visualize scales. \cr
#' \cr
#' By default, Vega-Lite automatically creates axes for x, y, row, and column channels
#' when they are encoded. Axis can be customized via the axis property of a channel
#' definition.
#'
#' @param vl Vega-Lite object
#' @param axisWidth,layer,offset,grid,labels,labelAngle,labelAlign,labelBaseline
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @param labelMaxLength,shortTimeLabels,subdivide,ticks,tickPadding,tickSize
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @param tickSizeMajor,tickSizeMinor,tickSizeEnd,title,titleOffset,titleMaxLength
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @param characterWidth,orient,format,remove
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/axis.html}{Vega-Lite Axis spec}
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
#' add_filter("datum.year == 2000") %>%
#' calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
#' encode_x("gender", "nominal") %>%
#' encode_y("people", "quantitative", aggregate="sum") %>%
#' encode_color("gender", "nominal") %>%
#' scale_x_ordinal(band_size=6) %>%
#' scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>%
#' facet_col("age", "ordinal", padding=4) %>%
#' axis_x(remove=TRUE) %>%
#' axis_y(title="population", grid=FALSE) %>%
#' axis_facet_col(orient="bottom", axisWidth=1, offset=-8) %>%
#' facet_cell(stroke_width=0) %>%
#' mark_bar()
axis_facet_col <- function(vl, axisWidth=0, layer=NULL, offset=NULL, grid=FALSE, labels=TRUE,
labelAngle=NULL, labelAlign=NULL, labelBaseline=NULL, labelMaxLength=25,
shortTimeLabels=NULL, subdivide=NULL, ticks=NULL, tickPadding=NULL,
tickSize=0, tickSizeMajor=NULL, tickSizeMinor=NULL, tickSizeEnd=NULL,
title="", titleOffset=NULL, titleMaxLength=NULL, characterWidth=6,
orient=NULL, format=NULL, remove=FALSE) {
  chnl <- "column"
  if (remove) {
    vl$x$encoding[[chnl]]$axis <- FALSE
  } else {
    if (!is.null(axisWidth)) vl$x$encoding[[chnl]]$axis$axisWidth <- axisWidth
    if (!is.null(layer)) vl$x$encoding[[chnl]]$axis$layer <- layer
    if (!is.null(offset)) vl$x$encoding[[chnl]]$axis$offset <- offset
    if (!is.null(grid)) vl$x$encoding[[chnl]]$axis$grid <- grid
    if (!is.null(labels)) vl$x$encoding[[chnl]]$axis$labels <- labels
    if (!is.null(labelAngle)) vl$x$encoding[[chnl]]$axis$labelAngle <- labelAngle
    if (!is.null(labelAlign)) vl$x$encoding[[chnl]]$axis$labelAlign <- labelAlign
    if (!is.null(labelBaseline)) vl$x$encoding[[chnl]]$axis$labelBaseline <- labelBaseline
    if (!is.null(labelMaxLength)) vl$x$encoding[[chnl]]$axis$labelMaxLength <- labelMaxLength
    if (!is.null(shortTimeLabels)) vl$x$encoding[[chnl]]$axis$shortTimeLabels <- shortTimeLabels
    if (!is.null(subdivide)) vl$x$encoding[[chnl]]$axis$subdivide <- subdivide
    if (!is.null(ticks)) vl$x$encoding[[chnl]]$axis$ticks <- ticks
    if (!is.null(tickPadding)) vl$x$encoding[[chnl]]$axis$tickPadding <- tickPadding
    if (!is.null(tickSize)) vl$x$encoding[[chnl]]$axis$tickSize <- tickSize
    if (!is.null(tickSizeMajor)) vl$x$encoding[[chnl]]$axis$tickSizeMajor <- tickSizeMajor
    if (!is.null(tickSizeMinor)) vl$x$encoding[[chnl]]$axis$tickSizeMinor <- tickSizeMinor
    if (!is.null(tickSizeEnd)) vl$x$encoding[[chnl]]$axis$tickSizeEnd <- tickSizeEnd
    if (!is.null(title)) vl$x$encoding[[chnl]]$axis$title <- title
    if (!is.null(titleOffset)) vl$x$encoding[[chnl]]$axis$titleOffset <- titleOffset
    if (!is.null(titleMaxLength)) vl$x$encoding[[chnl]]$axis$titleMaxLength <- titleMaxLength
    if (!is.null(characterWidth)) vl$x$encoding[[chnl]]$axis$characterWidth <- characterWidth
    if (!is.null(orient)) vl$x$encoding[[chnl]]$axis$orient <- orient
    if (!is.null(format)) vl$x$encoding[[chnl]]$axis$format <- format
  }
  vl
}
#' General axis settings (row facets)
#'
#' Axes provide axis lines, ticks and labels to convey how a spatial range represents
#' a data range. Simply put, axes visualize scales. \cr
#' \cr
#' By default, Vega-Lite automatically creates axes for x, y, row, and column channels
#' when they are encoded. Axis can be customized via the axis property of a channel
#' definition.
#'
#' @param vl Vega-Lite object
#' @param axisWidth,layer,offset,grid,labels,labelAngle,labelAlign,labelBaseline
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @param labelMaxLength,shortTimeLabels,subdivide,ticks,tickPadding,tickSize
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @param tickSizeMajor,tickSizeMinor,tickSizeEnd,title,titleOffset,titleMaxLength
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @param characterWidth,orient,format,remove
#' see \href{http://vega.github.io/vega-lite/docs/axis.html}{axis docs} &
#' \href{https://github.com/vega/vega-lite/blob/master/src/axis.ts}{axis base config}
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/axis.html}{Vega-Lite Axis spec}
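#' @examples
#' # A sketch mirroring the axis_facet_col() example; assumes a facet_row()
#' # helper analogous to facet_col().
#' \dontrun{
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
#'   add_filter("datum.year == 2000") %>%
#'   calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
#'   encode_x("gender", "nominal") %>%
#'   encode_y("people", "quantitative", aggregate="sum") %>%
#'   encode_color("gender", "nominal") %>%
#'   facet_row("age", "ordinal") %>%
#'   axis_facet_row(axisWidth=1, offset=-8) %>%
#'   mark_bar()
#' }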
#' @export
axis_facet_row <- function(vl, axisWidth=0, layer=NULL, offset=NULL, grid=FALSE, labels=TRUE,
labelAngle=NULL, labelAlign=NULL, labelBaseline=NULL, labelMaxLength=25,
shortTimeLabels=NULL, subdivide=NULL, ticks=NULL, tickPadding=NULL,
tickSize=0, tickSizeMajor=NULL, tickSizeMinor=NULL, tickSizeEnd=NULL,
title="", titleOffset=NULL, titleMaxLength=NULL, characterWidth=6,
orient=NULL, format=NULL, remove=FALSE) {
  chnl <- "row"
  if (remove) {
    vl$x$encoding[[chnl]]$axis <- FALSE
  } else {
    if (!is.null(axisWidth)) vl$x$encoding[[chnl]]$axis$axisWidth <- axisWidth
    if (!is.null(layer)) vl$x$encoding[[chnl]]$axis$layer <- layer
    if (!is.null(offset)) vl$x$encoding[[chnl]]$axis$offset <- offset
    if (!is.null(grid)) vl$x$encoding[[chnl]]$axis$grid <- grid
    if (!is.null(labels)) vl$x$encoding[[chnl]]$axis$labels <- labels
    if (!is.null(labelAngle)) vl$x$encoding[[chnl]]$axis$labelAngle <- labelAngle
    if (!is.null(labelAlign)) vl$x$encoding[[chnl]]$axis$labelAlign <- labelAlign
    if (!is.null(labelBaseline)) vl$x$encoding[[chnl]]$axis$labelBaseline <- labelBaseline
    if (!is.null(labelMaxLength)) vl$x$encoding[[chnl]]$axis$labelMaxLength <- labelMaxLength
    if (!is.null(shortTimeLabels)) vl$x$encoding[[chnl]]$axis$shortTimeLabels <- shortTimeLabels
    if (!is.null(subdivide)) vl$x$encoding[[chnl]]$axis$subdivide <- subdivide
    if (!is.null(ticks)) vl$x$encoding[[chnl]]$axis$ticks <- ticks
    if (!is.null(tickPadding)) vl$x$encoding[[chnl]]$axis$tickPadding <- tickPadding
    if (!is.null(tickSize)) vl$x$encoding[[chnl]]$axis$tickSize <- tickSize
    if (!is.null(tickSizeMajor)) vl$x$encoding[[chnl]]$axis$tickSizeMajor <- tickSizeMajor
    if (!is.null(tickSizeMinor)) vl$x$encoding[[chnl]]$axis$tickSizeMinor <- tickSizeMinor
    if (!is.null(tickSizeEnd)) vl$x$encoding[[chnl]]$axis$tickSizeEnd <- tickSizeEnd
    if (!is.null(title)) vl$x$encoding[[chnl]]$axis$title <- title
    if (!is.null(titleOffset)) vl$x$encoding[[chnl]]$axis$titleOffset <- titleOffset
    if (!is.null(titleMaxLength)) vl$x$encoding[[chnl]]$axis$titleMaxLength <- titleMaxLength
    if (!is.null(characterWidth)) vl$x$encoding[[chnl]]$axis$characterWidth <- characterWidth
    if (!is.null(orient)) vl$x$encoding[[chnl]]$axis$orient <- orient
    if (!is.null(format)) vl$x$encoding[[chnl]]$axis$format <- format
  }
  vl
}
|
/scratch/gouwar.j/cran-all/cranData/vegalite/R/axis.r
|
#' Group continuous data values (x-axis)
#'
#' The "bin" property is for grouping quantitative, continuous data values of a
#' particular field into a smaller number of “bins” (e.g., for a histogram).
#'
#' @param vl Vega-Lite object
#' @param min the minimum bin value to consider.
#' @param max the maximum bin value to consider.
#' @param base the number base to use for automatic bin determination.
#' @param step an exact step size to use between bins.
#' @param steps an array of allowable step sizes to choose from.
#' @param minstep minimum allowable step size (particularly useful for integer values).
#' @param div Scale factors indicating allowable subdivisions. The default value is
#' [5, 2], which indicates that for base 10 numbers (the default base),
#' the method may consider dividing bin sizes by 5 and/or 2. For example,
#' for an initial step size of 10, the method can check if bin sizes of 2
#' (= 10/5), 5 (= 10/2), or 1 (= 10/(5*2)) might also satisfy the given
#' constraints.
#' @param maxbins the maximum number of allowable bins.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/bin.html}{Vega-Lite Binning}
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/movies.json") %>%
#' encode_x("IMDB_Rating", "quantitative") %>%
#' encode_y("Rotten_Tomatoes_Rating", "quantitative") %>%
#' encode_size("*", "quantitative", aggregate="count") %>%
#' bin_x(maxbins=10) %>%
#' bin_y(maxbins=10) %>%
#' mark_point()
bin_x <- function(vl, min=NULL, max=NULL, base=NULL, step=NULL,
steps=NULL, minstep=NULL, div=NULL, maxbins=NULL) {
chnl <- "x"
if (!is.null(min)) vl$x$encoding[[chnl]]$bin$min <- min
if (!is.null(max)) vl$x$encoding[[chnl]]$bin$max <- max
if (!is.null(base)) vl$x$encoding[[chnl]]$bin$base <- base
  if (!is.null(step)) vl$x$encoding[[chnl]]$bin$step <- step
  if (!is.null(steps)) vl$x$encoding[[chnl]]$bin$steps <- steps
  if (!is.null(minstep)) vl$x$encoding[[chnl]]$bin$minstep <- minstep
  if (!is.null(div)) vl$x$encoding[[chnl]]$bin$div <- div
  if (!is.null(maxbins)) vl$x$encoding[[chnl]]$bin$maxbins <- maxbins
  if (length(vl$x$encoding[[chnl]]$bin) == 0) vl$x$encoding[[chnl]]$bin <- TRUE
vl
}
#' Group continuous data values (y-axis)
#'
#' The "bin" property is for grouping quantitative, continuous data values of a
#' particular field into a smaller number of “bins” (e.g., for a histogram).
#'
#' @param vl Vega-Lite object
#' @param min the minimum bin value to consider.
#' @param max the maximum bin value to consider.
#' @param base the number base to use for automatic bin determination.
#' @param step an exact step size to use between bins.
#' @param steps an array of allowable step sizes to choose from.
#' @param minstep minimum allowable step size (particularly useful for integer values).
#' @param div Scale factors indicating allowable subdivisions. The default value is
#' [5, 2], which indicates that for base 10 numbers (the default base),
#' the method may consider dividing bin sizes by 5 and/or 2. For example,
#' for an initial step size of 10, the method can check if bin sizes of 2
#' (= 10/5), 5 (= 10/2), or 1 (= 10/(5*2)) might also satisfy the given
#' constraints.
#' @param maxbins the maximum number of allowable bins.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/bin.html}{Vega-Lite Binning}
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/movies.json") %>%
#' encode_x("IMDB_Rating", "quantitative") %>%
#' encode_y("Rotten_Tomatoes_Rating", "quantitative") %>%
#' encode_size("*", "quantitative", aggregate="count") %>%
#' bin_x(maxbins=10) %>%
#' bin_y(maxbins=10) %>%
#' mark_point()
bin_y <- function(vl, min=NULL, max=NULL, base=NULL, step=NULL,
steps=NULL, minstep=NULL, div=NULL, maxbins=NULL) {
chnl <- "y"
if (!is.null(min)) vl$x$encoding[[chnl]]$bin$min <- min
if (!is.null(max)) vl$x$encoding[[chnl]]$bin$max <- max
if (!is.null(base)) vl$x$encoding[[chnl]]$bin$base <- base
  if (!is.null(step)) vl$x$encoding[[chnl]]$bin$step <- step
  if (!is.null(steps)) vl$x$encoding[[chnl]]$bin$steps <- steps
  if (!is.null(minstep)) vl$x$encoding[[chnl]]$bin$minstep <- minstep
  if (!is.null(div)) vl$x$encoding[[chnl]]$bin$div <- div
  if (!is.null(maxbins)) vl$x$encoding[[chnl]]$bin$maxbins <- maxbins
  if (length(vl$x$encoding[[chnl]]$bin) == 0) vl$x$encoding[[chnl]]$bin <- TRUE
vl
}
|
/scratch/gouwar.j/cran-all/cranData/vegalite/R/bin.r
|
#' Add cell size to main Vega-Lite spec
#'
#' Short version: set this to control the height and width of a single plot panel.
#' It will also be the size of panels in a faceted/trellis plot, so make sure your
#' viewport height/width (set in the main call to the widget) is as large as you
#' want it to be (otheriwse this will do it's best to calculate it but will probably
#' not be what you ultimately want).
#'
#' At its core, a Vega-Lite specification describes a single plot. When a
#' facet channel is added, the visualization is faceted into a trellis plot,
#' which contains multiple plots. Each plot in either a single plot or a trellis
#' plot is called a cell. Cell configuration allows us to customize each individual
#' single plot and each plot in a trellis plot.
#'
#' @param vl a Vega-Lite object
#' @param width the width of the single plot or each plot in a trellis plot when
#' the visualization has continuous x-scale. (If the plot has ordinal x-scale, the
#' width is determined by the x-scale’s bandSize and the cardinality of the x-scale.
#' If the plot does not have a field on x, the width is derived from scale config’s
#' bandSize for all marks except text and from scale config’s textBandWidth for text mark.)
#' Default value: \code{200}
#' @param height the height of the single plot or each plot in a trellis plot when
#' the visualization has continuous y-scale. (If the visualization has ordinal y-scale,
#' the height is determined by the bandSize and the cardinality of the y-scale. If the
#' plot does not have a field on y, the height is scale config’s bandSize.)
#' Default value: \code{200}
#' @encoding UTF-8
#' @references
#' \href{http://vega.github.io/vega-lite/docs/config.html#cell-config}{Vega-Lite Cell spec}
#' @export
#' @examples
#' vegalite() %>%
#' cell_size(300, 200) %>%
#' add_data("https://vega.github.io/vega-editor/app/data/unemployment-across-industries.json") %>%
#' encode_x("date", "temporal") %>%
#' encode_y("count", "quantitative", aggregate="sum") %>%
#' encode_color("series", "nominal") %>%
#' scale_color_nominal(range="category20b") %>%
#' timeunit_x("yearmonth") %>%
#' scale_x_time(nice="month") %>%
#' axis_x(axisWidth=0, format="%Y", labelAngle=0) %>%
#' mark_area()
cell_size <- function(vl, width=200, height=200) {
vl$x$config$cell$width <- width
vl$x$config$cell$height <- height
vl
}
#' Facet cell aesthetics
#'
#' At its core, a Vega-Lite specification describes a single plot. When a facet
#' channel is added, the visualization is faceted into a trellis plot, which
#' contains multiple plots. Each plot in either a single plot or a trellis plot
#' is called a cell. Cell configuration allows us to customize each individual
#' single plot and each plot in a trellis plot.
#'
#' @param vl Vega-Lite object
#' @param width,height width and height property of the cell configuration determine
#' the width of a visualization with a continuous x-scale and the height of
#' a visualization with a continuous y-scale respectively. Visit the
#' URL in the References section for more information.
#' @param fill fill color
#' @param fill_opacity \code{0.0}-\code{1.0}
#' @param stroke stroke color
#' @param stroke_opacity \code{0.0}-\code{1.0}
#' @param stroke_width stroke of the width in pixels
#' @param stroke_dash an array of alternating stroke, space lengths for creating
#' dashed or dotted lines.
#' @param stroke_dash_offset the offset (in pixels) into which to begin drawing with the stroke dash array.
#' @encoding UTF-8
#' @references
#' \href{http://vega.github.io/vega-lite/docs/config.html#facet-config}{Vega-Lite Facet spec}
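#' @examples
#' # Minimal sketch: as in the axis_x() example above, drop the facet cell
#' # border by setting its stroke width to zero.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
#'   add_filter("datum.year == 2000") %>%
#'   calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
#'   encode_x("gender", "nominal") %>%
#'   encode_y("people", "quantitative", aggregate="sum") %>%
#'   facet_col("age", "ordinal", padding=4) %>%
#'   facet_cell(stroke_width=0) %>%
#'   mark_bar()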
#' @export
facet_cell <- function(vl, width=200, height=200, fill=NULL, fill_opacity=NULL,
stroke=NULL, stroke_opacity=NULL, stroke_width=NULL,
stroke_dash=NULL, stroke_dash_offset=NULL) {
chnl <- "config"
if (!is.null(width)) vl$x[[chnl]]$facet$cell$width <- width
if (!is.null(height)) vl$x[[chnl]]$facet$cell$height <- height
if (!is.null(fill)) vl$x[[chnl]]$facet$cell$fill <- fill
if (!is.null(fill_opacity)) vl$x[[chnl]]$facet$cell$fillOpacity <- fill_opacity
if (!is.null(stroke)) vl$x[[chnl]]$facet$cell$stroke <- stroke
if (!is.null(stroke_opacity)) vl$x[[chnl]]$facet$cell$strokeOpacity <- stroke_opacity
if (!is.null(stroke_width)) vl$x[[chnl]]$facet$cell$strokeWidth <- stroke_width
if (!is.null(stroke_dash)) vl$x[[chnl]]$facet$cell$strokeDash <- stroke_dash
if (!is.null(stroke_dash_offset)) vl$x[[chnl]]$facet$cell$strokeDashOffset <- stroke_dash_offset
vl
}
#' Facet grid aesthetics
#'
#' @param vl Vega-Lite object
#' @param grid_color color of the grid between facets.
#' @param grid_opacity \code{0.0}-\code{1.0}
#' @param grid_offset offset for grid between facets.
#' @encoding UTF-8
#' @references
#' \href{http://vega.github.io/vega-lite/docs/config.html#facet-config}{Vega-Lite Facet spec}
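#' @examples
#' # Minimal sketch: lighten the grid drawn between facets on an existing
#' # faceted spec `vl` built as in the facet_cell() example.
#' \dontrun{
#' vl %>% grid_facet(grid_color="#cccccc", grid_opacity=0.5, grid_offset=2)
#' }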
#' @export
grid_facet <- function(vl, grid_color=NULL, grid_opacity=NULL, grid_offset=NULL) {
chnl <- "config"
if (!is.null(grid_color)) vl$x[[chnl]]$facet$grid$gridColor <- grid_color
if (!is.null(grid_opacity)) vl$x[[chnl]]$facet$grid$gridOpacity <- grid_opacity
if (!is.null(grid_offset)) vl$x[[chnl]]$facet$grid$gridOffset <- grid_offset
vl
}
|
/scratch/gouwar.j/cran-all/cranData/vegalite/R/cell.r
|
#' Opacity config
#'
#' @param vl a Vega-Lite object
#' @param opacity \code{0.0}-\code{1.0}
#' @param fill_opacity \code{0.0}-\code{1.0}
#' @param stroke_opacity \code{0.0}-\code{1.0}
#' @encoding UTF-8
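#' @examples
#' # Minimal sketch: translucent points to reveal overplotting.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   config_opacity(opacity=0.3) %>%
#'   mark_point()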
#' @export
config_opacity <- function(vl, opacity=NULL, fill_opacity=NULL, stroke_opacity=NULL) {
if (!is.null(opacity)) vl$x$config$mark$opacity <- opacity
if (!is.null(fill_opacity)) vl$x$config$mark$fillOpacity <- fill_opacity
if (!is.null(stroke_opacity)) vl$x$config$mark$strokeOpacity <- stroke_opacity
vl
}
#' Stroke config
#'
#' @param vl a Vega-Lite object
#' @param stroke stroke color
#' @param stroke_opacity \code{0.0}-\code{1.0}
#' @param stroke_width stroke of the width in pixels
#' @param stroke_dash an array of alternating stroke, space lengths for creating
#' dashed or dotted lines.
#' @param stroke_dash_offset the offset (in pixels) into which to begin drawing with the stroke dash array.
#' @encoding UTF-8
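#' @examples
#' # Minimal sketch: dashed, semi-transparent strokes for all marks.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   config_stroke(stroke="#4682b4", stroke_dash=c(3, 2), stroke_opacity=0.7) %>%
#'   mark_point()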
#' @export
config_stroke <- function(vl, stroke=NULL, stroke_width=NULL,
stroke_dash=NULL, stroke_dash_offset=NULL,
stroke_opacity=NULL) {
if (!is.null(stroke)) vl$x$config$mark$stroke <- stroke
if (!is.null(stroke_width)) vl$x$config$mark$strokeWidth <- stroke_width
  if (!is.null(stroke_dash)) vl$x$config$mark$strokeDash <- stroke_dash
if (!is.null(stroke_dash_offset)) vl$x$config$mark$strokeDashOffset <- stroke_dash_offset
if (!is.null(stroke_opacity)) vl$x$config$mark$strokeOpacity <- stroke_opacity
vl
}
#' Color config
#'
#' @param vl a Vega-Lite object
#' @param color color of the mark – either fill or stroke color based on the filled mark config.
#' @param fill fill color. This config will be overridden by color channel’s
#' specified or mapped values if filled is true.
#' @param stroke stroke color. This config will be overridden by color channel’s
#' specified or mapped values if filled is false.
#' @encoding UTF-8
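#' @examples
#' # Minimal sketch: a single fill color for every mark.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   config_color(color="#ff7f0e") %>%
#'   mark_point()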
#' @export
config_color <- function(vl, color=NULL, fill=NULL, stroke=NULL) {
  if (!is.null(color)) vl$x$config$mark$color <- color
  if (!is.null(fill)) vl$x$config$mark$fill <- fill
  if (!is.null(stroke)) vl$x$config$mark$stroke <- stroke
  vl
}
#' Text config
#'
#' @param vl a Vega-Lite object
#' @param angle rotation angle of the text, in degrees.
#' @param align horizontal alignment of the text. One of left, right, center.
#' @param baseline vertical alignment of the text. One of top, middle, bottom.
#' @param dx,dy horizontal/vertical in pixels, between the text label and its
#' anchor point. The offset is applied after rotation by the angle property.
#' @param radius polar coordinate radial offset, in pixels, of the text label
#' from the origin determined by the x and y properties.
#' @param theta polar coordinate angle, in radians, of the text label from the
#' origin determined by the x and y properties. Values for theta follow
#' the same convention of arc mark startAngle and endAngle properties:
#' angles are measured in radians, with 0 indicating “north”.
#' @param format formatting pattern for text value. If not defined, this will be
#' determined automatically
#' @param short_time_labels whether month names and weekday names should be abbreviated.
#' @param opacity 0-1
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/mark.html}{Vega-Lite Mark spec}
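#' @examples
#' # Minimal sketch, reusing the encode_text() example pipeline.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   calculate("OriginInitial", "datum.Origin[0]") %>%
#'   encode_text("OriginInitial", "nominal") %>%
#'   config_text(align="center", dy=4) %>%
#'   mark_text()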
#' @export
config_text <- function(vl, angle=NULL, align=NULL, baseline=NULL,
dx=NULL, dy=NULL, radius=NULL, theta=NULL,
format=NULL, short_time_labels=NULL, opacity=NULL) {
if (!is.null(angle)) vl$x$config$mark$angle <- angle
if (!is.null(align)) vl$x$config$mark$align <- align
if (!is.null(baseline)) vl$x$config$mark$baseline <- baseline
if (!is.null(dx)) vl$x$config$mark$dx <- dx
if (!is.null(dy)) vl$x$config$mark$dy <- dy
if (!is.null(radius)) vl$x$config$mark$radius <- radius
if (!is.null(theta)) vl$x$config$mark$theta <- theta
if (!is.null(format)) vl$x$config$mark$format <- format
if (!is.null(short_time_labels)) vl$x$config$mark$shortTimeLabels <- short_time_labels
if (!is.null(opacity)) vl$x$config$mark$opacity <- opacity
return(vl)
}
#' Font config
#'
#' @param vl a Vega-Lite object
#' @param font typeface to set the text in (e.g., Helvetica Neue).
#' @param font_size font size, in pixels. The default value is 10.
#' @param font_style font style (e.g., italic).
#' @param font_weight font weight (e.g., bold).
#' @encoding UTF-8
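#' @examples
#' # Minimal sketch: style the text marks of an existing spec `vl` built as
#' # in the config_text() example.
#' \dontrun{
#' vl %>% config_font(font="Helvetica Neue", font_size=12, font_weight="bold")
#' }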
#' @export
config_font <- function(vl, font=NULL, font_size=NULL, font_style=NULL, font_weight=NULL) {
if (!is.null(font)) vl$x$config$mark$font <- font
if (!is.null(font_size)) vl$x$config$mark$fontSize <- font_size
if (!is.null(font_style)) vl$x$config$mark$fontStyle <- font_style
  if (!is.null(font_weight)) vl$x$config$mark$fontWeight <- font_weight
vl
}
|
/scratch/gouwar.j/cran-all/cranData/vegalite/R/config.r
|
#' Add data to a Vega-Lite spec
#'
#' Vega-Lite is more lightweight than full Vega. However, the spec is
#' flexible enough to support embedded data or using external sources that
#' are in JSON, CSV or TSV format.
#'
#' @param vl a Vega-Lite object
#' @param source you can specify a (fully qualified) URL or an existing
#' \code{data.frame} (or \code{ts}) object or a reference to a local file.
#' For the URL case, the \code{url} component of \code{data} will be set. You can help
#' Vega-Lite out by giving it a hint for the data type with \code{format_type}
#' but it is not required. For the local \code{data.frame} case it will embed
#' the data into the spec. For the case where a local file is specified, it
#' will be read in (either a JSON file, CSV file or TSV file) and converted
#' to a \code{data.frame} and embedded.
#' @param format_type if \code{source} is a URL, this should be one of \code{json},
#' \code{csv} or \code{tsv}). It is not required and it is ignored if \code{source}
#' is not a URL.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/data.html}{Vega-Lite Data spec}
#' @export
#' @examples
#' dat <- jsonlite::fromJSON('[
#' {"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
#' {"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
#' {"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
#' ]')
#'
#' vegalite() %>%
#' add_data(dat) %>%
#' encode_x("a", "ordinal") %>%
#' encode_y("b", "quantitative") %>%
#' mark_bar()
add_data <- function(vl, source, format_type=NULL) {
if (inherits(source, "data.frame")) {
vl$x$data$values <- data.frame(source, stringsAsFactors=FALSE)
} else if (inherits(source, "ts")) {
vl$x$data$values <- as.data.frame(source, stringsAsFactors=FALSE)
} else if (is_url(source)) {
vl$x$data <- list(url=source)
if (!is.null(format_type)) vl$x$data$formatType <- format_type
} else if (file.exists(source)) {
ext <- tools::file_ext(source)
if (ext == "json") {
vl$x$data$values <- jsonlite::fromJSON(source, flatten=TRUE)
} else if (ext == "csv") {
vl$x$data$values <- read.csv(source, stringsAsFactors=FALSE)
} else if (ext == "tsv") {
vl$x$data$values <- read.csv(source, sep="\t", stringsAsFactors=FALSE)
} else {
stop('"source" is not a JSON, CSV or TSV file.', call.=FALSE)
}
} else {
stop('"source" is not a data.frame, URL or local file.', call.=FALSE)
}
vl
}
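# Crude scheme check: anything of the form "scheme://..." (lowercase scheme)
# counts as a URL.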
is_url <- function(x) {
  grepl("^[a-z]+://", x)
}
|
/scratch/gouwar.j/cran-all/cranData/vegalite/R/data.r
|
#' Encode x "channel"
#'
#' Vega-Lite has many "encoding channels". Each channel definition object must
#' describe the data field encoded by the channel and its data type, or a constant
#' value directly mapped to the mark properties. In addition, it can describe the
#' mapped field’s transformation and properties for its scale and guide.
#'
#' @param vl Vega-Lite object created by \code{\link{vegalite}}
#' @param field single element character vector naming the column. Can be \code{*} if using
#' \code{aggregate}.
#' @param type the encoded field’s type of measurement. This can be either a full type
#' name (\code{quantitative}, \code{temporal}, \code{ordinal}, and \code{nominal})
#' or an initial character of the type name (\code{Q}, \code{T}, \code{O}, \code{N}).
#' This property is case insensitive. If \code{auto} is used, the type will
#' be guessed (so you may want to actually specify it if you want consistency).
#' @param aggregate perform aggregation on \code{field}. See
#' \href{http://vega.github.io/vega-lite/docs/aggregate.html}{Supported Aggregation Options} for
#' more info on valid operations. Leave \code{NULL} for no aggregation.
#' @param sort either one of \code{ascending}, \code{descending} or (for ordinal scales)
#' the result of a call to \code{\link{sort_def}}
#' @encoding UTF-8
#' @note right now, \code{type} == "\code{auto}" just assumes "\code{quantitative}". It
#' will eventually get smarter, but you are better off specifying it.
#' @references \href{http://vega.github.io/vega-lite/docs/encoding.html}{Vega-Lite Encoding spec}
#' @export
#' @examples
#' dat <- jsonlite::fromJSON('[
#' {"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
#' {"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
#' {"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
#' ]')
#'
#' vegalite() %>%
#' add_data(dat) %>%
#' encode_x("a", "ordinal") %>%
#' encode_y("b", "quantitative") %>%
#' mark_bar()
encode_x <- function(vl, field, type="auto", aggregate=NULL, sort=NULL) {
type <- tolower(type)
if (type == "auto") type <- "quantitative"
if (!type %in% c("quantitative", "temporal", "ordinal", "nominal", "q", "t", "o", "n")) {
message('"type" is not a valid value for this spec component. Ignoring.')
return(vl)
}
vl$x$encoding$x <- list(field=field, type=type)
if (!is.null(aggregate)) vl$x$encoding$x$aggregate <- aggregate
if (!is.null(sort)) vl$x$encoding$x$sort <- sort
vl
}
#' Encode y "channel"
#'
#' Vega-Lite has many "encoding channels". Each channel definition object must
#' describe the data field encoded by the channel and its data type, or a constant
#' value directly mapped to the mark properties. In addition, it can describe the
#' mapped field’s transformation and properties for its scale and guide.
#'
#' @param vl Vega-Lite object created by \code{\link{vegalite}}
#' @param field single element character vector naming the column
#' @param type the encoded field’s type of measurement. This can be either a full type
#' name (\code{quantitative}, \code{temporal}, \code{ordinal}, and \code{nominal})
#' or an initial character of the type name (\code{Q}, \code{T}, \code{O}, \code{N}).
#' This property is case insensitive. If \code{auto} is used, the type will
#' be guessed (so you may want to actually specify it if you want consistency).
#' @param aggregate perform aggregation on \code{field}. See
#' \href{http://vega.github.io/vega-lite/docs/aggregate.html}{Supported Aggregation Options} for
#' more info on valid operations. Leave \code{NULL} for no aggregation.
#' @param sort either one of \code{ascending}, \code{descending} or (for ordinal scales)
#' the result of a call to \code{\link{sort_def}}
#' @encoding UTF-8
#' @note right now, \code{type} == "\code{auto}" just assumes "\code{quantitative}". It
#' will eventually get smarter, but you are better off specifying it.
#' @export
#' @examples
#' dat <- jsonlite::fromJSON('[
#' {"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
#' {"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
#' {"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
#' ]')
#'
#' vegalite() %>%
#' add_data(dat) %>%
#' encode_x("a", "ordinal") %>%
#' encode_y("b", "quantitative") %>%
#' mark_bar()
encode_y <- function(vl, field, type="auto", aggregate=NULL, sort=NULL) {
type <- tolower(type)
if (type == "auto") type <- "quantitative"
if (!type %in% c("quantitative", "temporal", "ordinal", "nominal", "q", "t", "o", "n")) {
message('"type" is not a valid value for this spec component. Ignoring.')
return(vl)
}
vl$x$encoding$y <- list(field=field, type=type)
if (!is.null(aggregate)) vl$x$encoding$y$aggregate <- aggregate
if (!is.null(sort)) vl$x$encoding$y$sort <- sort
vl
}
#' Encode color "channel"
#'
#' @param vl Vega-Lite object created by \code{\link{vegalite}}
#' @param field single element character vector naming the column
#' @param type the encoded field’s type of measurement. This can be either a full type
#' name (\code{quantitative}, \code{temporal}, \code{ordinal}, and \code{nominal})
#' or an initial character of the type name (\code{Q}, \code{T}, \code{O}, \code{N}).
#' This property is case insensitive. If \code{auto} is used, the type will
#' be guessed (so you may want to actually specify it if you want consistency).
#' @param aggregate perform aggregation on \code{field}. See
#' \href{http://vega.github.io/vega-lite/docs/aggregate.html}{Supported Aggregation Options} for
#' more info on valid operations. Leave \code{NULL} for no aggregation.
#' @param sort either one of \code{ascending}, \code{descending} or (for ordinal scales)
#' the result of a call to \code{\link{sort_def}}
#' @param value scale value
#' @references \href{http://vega.github.io/vega-lite/docs/encoding.html}{Vega-Lite Encoding spec}
#' @note right now, \code{type} == "\code{auto}" just assumes "\code{quantitative}". It
#' will eventually get smarter, but you are better off specifying it.
#' @encoding UTF-8
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", "quantitative") %>%
#' encode_y("Miles_per_Gallon", "quantitative") %>%
#' encode_color("Origin", "nominal") %>%
#' encode_shape("Origin", "nominal") %>%
#' mark_point()
encode_color <- function(vl, field=NULL, type, value=NULL, aggregate=NULL, sort=NULL) {
if (is.null(field) & is.null(value)) {
stop('Either "field" or "value" must be specified', call.=FALSE)
}
if (!is.null(field)) {
type <- tolower(type)
if (type == "auto") type <- "quantitative"
if (!type %in% c("quantitative", "temporal", "ordinal", "nominal", "q", "t", "o", "n")) {
message('"type" is not a valid value for this spec component. Ignoring.')
return(vl)
}
vl$x$encoding$color <- list(field=field, type=type)
} else {
vl$x$encoding$color <- list(value=value)
}
if (!is.null(aggregate)) vl$x$encoding$color$aggregate <- aggregate
if (!is.null(sort)) vl$x$encoding$color$sort <- sort
vl
}
#' Encode shape "channel"
#'
#' @param vl Vega-Lite object created by \code{\link{vegalite}}
#' @param field single element character vector naming the column
#' @param type the encoded field’s type of measurement. This can be either a full type
#' name (\code{quantitative}, \code{temporal}, \code{ordinal}, and \code{nominal})
#' or an initial character of the type name (\code{Q}, \code{T}, \code{O}, \code{N}).
#' This property is case insensitive. If \code{auto} is used, the type will
#' be guessed (so you may want to actually specify it if you want consistency).
#' @param aggregate perform aggregation on \code{field}. See
#' \href{http://vega.github.io/vega-lite/docs/aggregate.html}{Supported Aggregation Options} for
#' more info on valid operations. Leave \code{NULL} for no aggregation.
#' @param value scale value
#' @param sort either one of \code{ascending}, \code{descending} or (for ordinal scales)
#' the result of a call to \code{\link{sort_def}}
#' @references \href{http://vega.github.io/vega-lite/docs/encoding.html}{Vega-Lite Encoding spec}
#' @note right now, \code{type} == "\code{auto}" just assumes "\code{quantitative}". It
#' will eventually get smarter, but you are better off specifying it.
#' @encoding UTF-8
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", "quantitative") %>%
#' encode_y("Miles_per_Gallon", "quantitative") %>%
#' encode_color("Origin", "nominal") %>%
#' encode_shape("Origin", "nominal") %>%
#' mark_point()
encode_shape <- function(vl, field=NULL, type, value=NULL, aggregate=NULL, sort=NULL) {
if (is.null(field) & is.null(value)) {
stop('Either "field" or "value" must be specified', call.=FALSE)
}
if (!is.null(field)) {
type <- tolower(type)
if (type == "auto") type <- "quantitative"
if (!type %in% c("quantitative", "temporal", "ordinal", "nominal", "q", "t", "o", "n")) {
message('"type" is not a valid value for this spec component. Ignoring.')
return(vl)
}
vl$x$encoding$shape <- list(field=field, type=type)
} else {
vl$x$encoding$shape <- list(value=value)
}
if (!is.null(aggregate)) vl$x$encoding$shape$aggregate <- aggregate
if (!is.null(sort)) vl$x$encoding$shape$sort <- sort
vl
}
#' Encode size "channel"
#'
#' @param vl Vega-Lite object created by \code{\link{vegalite}}
#' @param field single element character vector naming the column. Can be \code{*} if using
#' \code{aggregate}.
#' @param value scale value
#' @param type the encoded field’s type of measurement. This can be either a full type
#' name (\code{quantitative}, \code{temporal}, \code{ordinal}, and \code{nominal})
#' or an initial character of the type name (\code{Q}, \code{T}, \code{O}, \code{N}).
#' This property is case insensitive. If \code{auto} is used, the type will
#' be guessed (so you may want to actually specify it if you want consistency).
#' @param aggregate perform aggregation on \code{field}. See
#' \href{http://vega.github.io/vega-lite/docs/aggregate.html}{Supported Aggregation Options} for
#' more info on valid operations. Leave \code{NULL} for no aggregation.
#' @param sort either one of \code{ascending}, \code{descending} or (for ordinal scales)
#' the result of a call to \code{\link{sort_def}}
#' @references \href{http://vega.github.io/vega-lite/docs/encoding.html}{Vega-Lite Encoding spec}
#' @note right now, \code{type} == "\code{auto}" just assumes "\code{quantitative}". It
#' will eventually get smarter, but you are better off specifying it.
#' @encoding UTF-8
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", "quantitative") %>%
#' encode_y("Miles_per_Gallon", "quantitative") %>%
#' encode_size("Acceleration", "quantitative") %>%
#' mark_point()
encode_size <- function(vl, field=NULL, type, value=NULL, aggregate=NULL, sort=NULL) {
if (is.null(field) & is.null(value)) {
stop('Either "field" or "value" must be specified', call.=FALSE)
}
if (!is.null(field)) {
type <- tolower(type)
if (type == "auto") type <- "quantitative"
if (!type %in% c("quantitative", "temporal", "ordinal", "nominal", "q", "t", "o", "n")) {
message('"type" is not a valid value for this spec component. Ignoring.')
return(vl)
}
vl$x$encoding$size <- list(field=field, type=type)
} else {
vl$x$encoding$size <- list(value=value)
}
if (!is.null(aggregate)) vl$x$encoding$size$aggregate <- aggregate
if (!is.null(sort)) vl$x$encoding$size$sort <- sort
vl
}
#' Encode text "channel"
#'
#' @param vl Vega-Lite object created by \code{\link{vegalite}}
#' @param field single element character vector naming the column. Can be \code{*} if using
#' \code{aggregate}.
#' @param type the encoded field’s type of measurement. This can be either a full type
#' name (\code{quantitative}, \code{temporal}, \code{ordinal}, and \code{nominal})
#' or an initial character of the type name (\code{Q}, \code{T}, \code{O}, \code{N}).
#' This property is case insensitive. If \code{auto} is used, the type will
#' be guessed (so you may want to actually specify it if you want consistency).
#' @param value scale value
#' @param aggregate perform aggregation on \code{field}. See
#' \href{http://vega.github.io/vega-lite/docs/aggregate.html}{Supported Aggregation Options} for
#' more info on valid operations. Leave \code{NULL} for no aggregation.
#' @param sort either one of \code{ascending}, \code{descending} or (for ordinal scales)
#' the result of a call to \code{\link{sort_def}}
#' @note right now, \code{type} == "\code{auto}" just assumes "\code{quantitative}". It
#' will eventually get smarter, but you are better off specifying it.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/encoding.html}{Vega-Lite Encoding spec}
#' @export
#' @examples
#' vegalite() %>%
#' cell_size(300, 200) %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", "quantitative") %>%
#' encode_y("Miles_per_Gallon", "quantitative") %>%
#' encode_color("Origin", "nominal") %>%
#' calculate("OriginInitial", "datum.Origin[0]") %>%
#' encode_text("OriginInitial", "nominal") %>%
#' mark_text()
encode_text <- function(vl, field=NULL, type, value=NULL, aggregate=NULL, sort=NULL) {
  if (is.null(field) && is.null(value)) {
stop('Either "field" or "value" must be specified', call.=FALSE)
}
if (!is.null(field)) {
type <- tolower(type)
if (type == "auto") type <- "quantitative"
if (!type %in% c("quantitative", "temporal", "ordinal", "nominal", "q", "t", "o", "n")) {
message('"type" is not a valid value for this spec component. Ignoring.')
return(vl)
}
vl$x$encoding$text <- list(field=field, type=type)
} else {
vl$x$encoding$text <- list(value=value)
}
if (!is.null(aggregate)) vl$x$encoding$text$aggregate <- aggregate
if (!is.null(sort)) vl$x$encoding$text$sort <- sort
vl
}
#' Encode detail "channel"
#'
#' Grouping data is another important operation in visualizing data. For
#' aggregated plots, all encoded fields without aggregate functions are used as
#' grouping fields in the aggregation (similar to fields in GROUP BY in SQL).
#' For line and area marks, mapping a data field to color or shape channel will
#' group the lines and stacked areas by the field.\cr
#' \cr
#' The detail channel provides an additional grouping field (level) for
#' grouping data in aggregation without mapping the data to a specific visual
#' channel.
#'
#' @param vl Vega-Lite object created by \code{\link{vegalite}}
#' @param field single element character vector naming the column
#' @param type the encoded field’s type of measurement. This can be either a full type
#' name (\code{quantitative}, \code{temporal}, \code{ordinal}, and \code{nominal})
#' or an initial character of the type name (\code{Q}, \code{T}, \code{O}, \code{N}).
#' This property is case insensitive. If \code{auto} is used, the type will
#' be guessed (so you may want to actually specify it if you want consistency).
#' @param aggregate perform aggregation on \code{field}. See
#' \href{http://vega.github.io/vega-lite/docs/aggregate.html}{Supported Aggregation Options} for
#' more info on valid operations. Leave \code{NULL} for no aggregation.
#' @param sort either one of \code{ascending}, \code{descending} or (for ordinal scales)
#' the result of a call to \code{\link{sort_def}}
#' @encoding UTF-8
#' @note right now, \code{type} == "\code{auto}" just assumes "\code{quantitative}". It
#' will eventually get smarter, but you are better off specifying it.
#' @references \href{http://vega.github.io/vega-lite/docs/encoding.html}{Vega-Lite Encoding spec}
#' @export
#' @examples
#' vegalite() %>%
#' cell_size(200, 200) %>%
#' add_data("https://vega.github.io/vega-editor/app/data/stocks.csv") %>%
#' encode_x("date", "temporal") %>%
#' encode_y("price", "quantitative") %>%
#' encode_detail("symbol", "nominal") %>%
#' mark_line()
encode_detail <- function(vl, field=NULL, type, aggregate=NULL, sort=NULL) {
  if (is.null(field)) { stop('"field" must be specified', call.=FALSE) }
  type <- tolower(type)
  if (type == "auto") type <- "quantitative"
  if (!type %in% c("quantitative", "temporal", "ordinal", "nominal", "q", "t", "o", "n")) {
    message('"type" is not a valid value for this spec component. Ignoring.')
    return(vl)
  }
  vl$x$encoding$detail <- list(field=field, type=type)
  if (!is.null(aggregate)) vl$x$encoding$detail$aggregate <- aggregate
  if (!is.null(sort)) vl$x$encoding$detail$sort <- sort
vl
}
#' Encode detail "order"
#'
#' Grouping data is another important operation in visualizing data. For
#' aggregated plots, all encoded fields without aggregate functions are used as
#' grouping fields in the aggregation (similar to fields in GROUP BY in SQL).
#' For line and area marks, mapping a data field to color or shape channel will
#' group the lines and stacked areas by the field.\cr
#' \cr
#' The order channel sorts the layer order or stacking order (for stacked
#' charts) of the marks, while the path channel sorts the order of data points
#' in line marks.
#'
#' @param vl Vega-Lite object created by \code{\link{vegalite}}
#' @param field single element character vector naming the column
#' @param type the encoded field’s type of measurement. This can be either a full type
#' name (\code{quantitative}, \code{temporal}, \code{ordinal}, and \code{nominal})
#' or an initial character of the type name (\code{Q}, \code{T}, \code{O}, \code{N}).
#' This property is case insensitive. If \code{auto} is used, the type will
#' be guessed (so you may want to actually specify it if you want consistency).
#' @param aggregate perform aggregation on \code{field}. See
#' \href{http://vega.github.io/vega-lite/docs/aggregate.html}{Supported Aggregation Options} for
#' more info on valid operations. Leave \code{NULL} for no aggregation.
#' @param sort either one of \code{ascending}, \code{descending} or (for ordinal scales)
#' the result of a call to \code{\link{sort_def}}
#' @encoding UTF-8
#' @note right now, \code{type} == "\code{auto}" just assumes "\code{quantitative}". It
#' will eventually get smarter, but you are better off specifying it.
#' @references \href{http://vega.github.io/vega-lite/docs/encoding.html}{Vega-Lite Encoding spec}
#' @export
#' @examples
#' vegalite() %>%
#' cell_size(200, 200) %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", "quantitative") %>%
#' encode_y("Miles_per_Gallon", "quantitative") %>%
#' encode_color("Origin", "nominal") %>%
#' encode_order("Origin", "ordinal", sort="descending") %>%
#' mark_point()
encode_order <- function(vl, field=NULL, type, aggregate=NULL, sort=NULL) {
  if (is.null(field)) { stop('"field" must be specified', call.=FALSE) }
  type <- tolower(type)
  if (type == "auto") type <- "quantitative"
  if (!type %in% c("quantitative", "temporal", "ordinal", "nominal", "q", "t", "o", "n")) {
    message('"type" is not a valid value for this spec component. Ignoring.')
    return(vl)
  }
  vl$x$encoding$order <- list(field=field, type=type)
  if (!is.null(aggregate)) vl$x$encoding$order$aggregate <- aggregate
if (!is.null(sort)) vl$x$encoding$order$sort <- sort
vl
}
#' Encode detail "path"
#'
#' Grouping data is another important operation in visualizing data. For
#' aggregated plots, all encoded fields without aggregate functions are used as
#' grouping fields in the aggregation (similar to fields in GROUP BY in SQL).
#' For line and area marks, mapping a data field to color or shape channel will
#' group the lines and stacked areas by the field.\cr
#' \cr
#' By default, line marks order their points in their paths by the field of
#' channel x or y. However, to show a pattern of data change over time between
#' x & y, use the path channel to sort the points in a particular order
#' (e.g. by time).
#'
#' @param vl Vega-Lite object created by \code{\link{vegalite}}
#' @param field single element character vector naming the column
#' @param type the encoded field’s type of measurement. This can be either a full type
#' name (\code{quantitative}, \code{temporal}, \code{ordinal}, and \code{nominal})
#' or an initial character of the type name (\code{Q}, \code{T}, \code{O}, \code{N}).
#' This property is case insensitive. If \code{auto} is used, the type will
#' be guessed (so you may want to actually specify it if you want consistency).
#' @param aggregate perform aggregation on \code{field}. See
#' \href{http://vega.github.io/vega-lite/docs/aggregate.html}{Supported Aggregation Options} for
#' more info on valid operations. Leave \code{NULL} for no aggregation.
#' @param sort either one of \code{ascending}, \code{descending} or (for ordinal scales)
#' the result of a call to \code{\link{sort_def}}
#' @encoding UTF-8
#' @note right now, \code{type} == "\code{auto}" just assumes "\code{quantitative}". It
#' will eventually get smarter, but you are better off specifying it.
#' @references \href{http://vega.github.io/vega-lite/docs/encoding.html}{Vega-Lite Encoding spec}
#' @export
#' @examples
#' vegalite() %>%
#' cell_size(300, 300) %>%
#' add_data("https://vega.github.io/vega-editor/app/data/driving.json") %>%
#' encode_x("miles", "quantitative") %>%
#' encode_y("gas", "quantitative") %>%
#' encode_path("year", "temporal") %>%
#' scale_x_linear(zero=FALSE) %>%
#' scale_y_linear(zero=FALSE) %>%
#' mark_line()
encode_path <- function(vl, field=NULL, type, aggregate=NULL, sort=NULL) {
  if (is.null(field)) { stop('"field" must be specified', call.=FALSE) }
  type <- tolower(type)
  if (type == "auto") type <- "quantitative"
  if (!type %in% c("quantitative", "temporal", "ordinal", "nominal", "q", "t", "o", "n")) {
    message('"type" is not a valid value for this spec component. Ignoring.')
    return(vl)
  }
  vl$x$encoding$path <- list(field=field, type=type)
  if (!is.null(aggregate)) vl$x$encoding$path$aggregate <- aggregate
if (!is.null(sort)) vl$x$encoding$path$sort <- sort
vl
}
|
/scratch/gouwar.j/cran-all/cranData/vegalite/R/encoding.r
|
#' Create a horizontal ribbon of panels
#'
#' @param vl Vega-Lite object
#' @param field single element character vector naming the column.
#' @param type the encoded field’s type of measurement.
#' @param round round values
#' @param padding facet padding
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/facet.html}{Vega-Lite Faceting}
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
#' add_filter("datum.year == 2000") %>%
#' calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
#' encode_x("gender", "nominal") %>%
#' encode_y("people", "quantitative", aggregate="sum") %>%
#' encode_color("gender", "nominal") %>%
#' scale_x_ordinal(band_size=6) %>%
#' scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>%
#' facet_col("age", "ordinal", padding=4) %>%
#' axis_x(remove=TRUE) %>%
#' axis_y(title="population", grid=FALSE) %>%
#' axis_facet_col(orient="bottom", axisWidth=1, offset=-8) %>%
#' facet_cell(stroke_width=0) %>%
#' mark_bar()
facet_col <- function(vl, field, type, round=TRUE, padding=16) {
vl$x$encoding$column <- list(field=field, type=type)
vl$x$encoding$column$scale <- list(round=round, padding=padding)
vl
}
#' Create a vertical ribbon of panels
#'
#' @param vl Vega-Lite object
#' @param field single element character vector naming the column.
#' @param type the encoded field’s type of measurement.
#' @param round round values
#' @param padding facet padding
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/facet.html}{Vega-Lite Faceting}
#' @export
#' @examples
#' # see facet_col
facet_row <- function(vl, field, type, round=TRUE, padding=16) {
vl$x$encoding$row <- list(field=field, type=type)
vl$x$encoding$row$scale <- list(round=round, padding=padding)
vl
}
|
/scratch/gouwar.j/cran-all/cranData/vegalite/R/facet.r
|
#' Take a JSON Vega-Lite Spec and render as an htmlwidget
#'
#' Vega-Lite is - at the core - a JSON "Grammar of Graphics" specification
#' for how to build a data- & stats-based visualization. While Vega & D3 are
#' the main targets, the use of Vega-Lite does not have to be restricted to just
#' D3. For now, this function takes in a JSON spec (full text or URL) and
#' renders it as an htmlwidget. Data should either be embedded or referenced
#' via an absolute URL.
#'
#' @param spec URL to a Vega-Lite JSON file or the JSON text of a spec
#' @param width,height widget width/height
#' @param renderer the renderer to use for the view. One of \code{canvas} or
#' \code{svg} (the default)
#' @param export if \code{TRUE} the \emph{"Export as..."} link will
#' be displayed with the chart. (Default: \code{FALSE}.)
#' @param source if \code{TRUE} the \emph{"View Source"} link will be displayed
#' with the chart. (Default: \code{FALSE}.)
#' @param editor if \code{TRUE} the \emph{"Open in editor"} link will be
#' displayed with the chart. (Default: \code{FALSE}.)
#' @encoding UTF-8
#' @export
#' @examples
#' from_spec("http://rud.is/dl/embedded.json")
from_spec <- function(spec, width=NULL, height=NULL,
renderer=c("svg", "canvas"),
export=FALSE, source=FALSE, editor=FALSE) {
if (is_url(spec)) { spec <- readLines(spec, warn=FALSE) }
  spec <- paste0(spec, collapse="")
# forward options using x
params <- list(
spec=spec,
renderer=renderer[1],
export=export,
source=source,
editor=editor
)
# create widget
htmlwidgets::createWidget(
name = 'spec',
x = params,
width = width,
height = height,
package = 'vegalite'
)
}
#' Convert a spec created with widget idioms to JSON
#'
#' Takes an htmlwidget object and turns it into a JSON Vega-Lite spec
#'
#' @param vl a Vega-Lite object
#' @param pretty if \code{TRUE} (default) then a "pretty-printed" version of the spec
#' will be returned. Use \code{FALSE} for a more compact version.
#' @param to_cb if \code{TRUE}, will copy the spec to the system clipboard. Default is \code{FALSE}.
#' @return JSON spec
#' @importFrom clipr write_clip
#' @export
#' @encoding UTF-8
#' @examples
#' dat <- jsonlite::fromJSON('[
#' {"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
#' {"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
#' {"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
#' ]')
#'
#' vegalite() %>%
#' add_data(dat) %>%
#' encode_x("a", "ordinal") %>%
#' encode_y("b", "quantitative") %>%
#' mark_bar() -> chart
#'
#' to_spec(chart)
to_spec <- function(vl, pretty=TRUE, to_cb=FALSE) {
tmp <- jsonlite::toJSON(vl$x, pretty=pretty, auto_unbox=TRUE)
if (to_cb) clipr::write_clip(tmp)
tmp
}
#' Scaffold HTML/JavaScript/CSS code from \code{vegalite}
#'
#' Create minimal necessary HTML/JavaScript/CSS code to embed a
#' Vega-Lite spec into a web page. This assumes you have the necessary
#' boilerplate javascript & HTML page shell defined as you see in
#' \href{http://vega.github.io/vega-lite/tutorials/getting_started.html#embed}{the Vega-Lite core example}.
#'
#' If you are generating more than one object to embed into a single web page,
#' you will need to ensure each \code{element_id} is unique. Each Vega-Lite
#' \code{div} is classed with \code{vldiv} so you can provide both a central style
#' (say, \code{display:inline-block; margin-auto;}) and targeted ones that use the
#' \code{div} \code{id}.
#'
#' @param vl a Vega-Lite object
#' @param element_id if you don't specify one, an id will be generated. This should
#'        be descriptive but short, and use valid JavaScript & CSS identifier syntax,
#'        as it is appended to variable names.
#' @param to_cb if \code{TRUE}, will copy the spec to the system clipboard. Default is \code{FALSE}.
#' @encoding UTF-8
#' @export
#' @examples
#' dat <- jsonlite::fromJSON('[
#' {"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
#' {"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
#' {"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
#' ]')
#'
#' vegalite() %>%
#' add_data(dat) %>%
#' encode_x("a", "ordinal") %>%
#' encode_y("b", "quantitative") %>%
#' mark_bar() -> chart
#'
#' embed_spec(chart)
embed_spec <- function(vl, element_id=generate_id(), to_cb=FALSE) {
template <- '<center><div id="%s" class="vldiv"></div></center>
<script>
var spec_%s = JSON.parse(\'%s\');
var embedSpec_%s = { "mode": "vega-lite", "spec": spec_%s, "renderer": spec_%s.embed.renderer, "actions": spec_%s.embed.actions };
vg.embed("#%s", embedSpec_%s, function(error, result) {});
</script>'
tmp <- sprintf(template,
element_id, element_id, to_spec(vl, pretty=FALSE),
element_id, element_id, element_id, element_id, element_id, element_id)
if (to_cb) clipr::write_clip(tmp)
tmp
}
#' @importFrom digest sha1
generate_id <- function() {
sprintf("vl%s", substr(digest::sha1(Sys.time()), 1, 8))
}
|
/scratch/gouwar.j/cran-all/cranData/vegalite/R/fromspec.r
|
#' #' @export
#' ggvega <- function(spec) {
#'
#' update_geom_defaults("point", list(shape=21, colour="steelblue", stroke=1))
#' update_geom_defaults("bar", list(width=0.5, fill="steelblue"))
#'
#' spec <- fromJSON(spec)
#'
#' # if the data isn't local
#'
#' if (!is.null(spec[["data"]][["url"]])) {
#' ext <- file_ext(spec$data$url)
#' if (ext == "json") {
#' spec$data$values <- fromJSON(spec$data$url)
#' } else if (ext == "csv") {
#' spec$data$values <- read.csv(spec$data$url, stringsAsFactors=FALSE)
#' } else if (ext == "tsv") {
#' spec$data$values <- read.csv(spec$data$url, sep="\t", stringsAsFactors=FALSE)
#' }
#' }
#'
#' # Handle case where there is no x or y var specified
#'
#' if (is.null(spec[["encoding"]][["x"]][["field"]])) {
#' spec$data$values$x <- 1
#' spec$encoding$x <- list(field="x", type="nominal")
#' }
#'
#' if (is.null(spec[["encoding"]][["y"]][["field"]])) {
#' spec$data$values$y <- 1
#' spec$encoding$y <- list(field="y", type="nominal")
#' }
#'
#' # start the ggplot
#'
#' gg <- ggplot(data=spec$data$values)
#'
#' # add the obvious
#'
#' gg <- gg + aes_string(x=spec$encoding$x$field,
#' y=spec$encoding$y$field)
#'
#' # handle size encoding
#'
#' if (!is.null(spec[["encoding"]][["size"]])) {
#'
#' gg <- gg + aes_string(size=spec$encoding$size$field)
#'
#' if (spec$encoding$size$type == "quantitative") {
#' gg <- gg + scale_size_continuous()
#' } else if (spec$encoding$size$type == "nominal") {
#' gg <- gg + scale_size_discrete()
#' }
#'
#' }
#'
#' # handle color encoding
#'
#' if (!is.null(spec[["encoding"]][["color"]])) {
#'
#' gg <- gg + aes_string(color=spec$encoding$color$field)
#'
#' if (spec$encoding$color$type == "quantitative") {
#' gg <- gg + scale_color_continuous()
#' } else if (spec$encoding$color$type == "nominal") {
#' gg <- gg + scale_color_discrete()
#' }
#'
#' }
#'
#' # handle shape encoding
#'
#' if (!is.null(spec[["encoding"]][["shape"]])) {
#'
#' gg <- gg + aes_string(shape=spec$encoding$shape$field)
#'
#' if (spec$encoding$shape$type == "quantitative") {
#' gg <- gg + scale_shape_continuous()
#' } else if (spec$encoding$shape$type == "nominal") {
#' gg <- gg + scale_shape_discrete()
#' }
#'
#' }
#'
#' # do the geom thing
#'
#' if (spec$mark %in% c("point", "circle", "square")) {
#' if (spec$mark == "circle") {
#' update_geom_defaults("point", list(shape=16))
#' } else if (spec$mark == "square") {
#' update_geom_defaults("point", list(shape=15))
#' }
#' gg <- gg + geom_point()
#' } else if (spec$mark == "bar") {
#' gg <- gg + geom_bar(fill=color)
#' } else if (spec$mark == "line") {
#'
#' }
#'
#' # scales
#'
#' if (spec$encoding$x$type == "nominal") {
#' gg <- gg + scale_x_discrete()
#' } else if (spec$encoding$x$type == "quantitative") {
#' gg <- gg + scale_x_continuous()
#' } else if (spec$encoding$x$type == "temporal") {
#' }
#'
#' if (spec$encoding$y$type == "nominal") {
#' gg <- gg + scale_y_discrete()
#' } else if (spec$encoding$y$type == "quantitative") {
#' gg <- gg + scale_y_continuous()
#' }
#'
#' # theme
#'
#' gg <- gg + theme_bw()
#'
#' # bye!
#'
#' gg
#'
#' }
|
/scratch/gouwar.j/cran-all/cranData/vegalite/R/ggvega.r
|
#' Legend settings (color)
#'
#' @param vl a Vega-Lite object
#' @param orient the orientation of the legend. One of "left" or "right". This
#' determines how the legend is positioned within the scene.
#' @param title the title for the legend.
#' @param format the formatting pattern for axis labels. This is D3’s number
#' format pattern for quantitative axis and D3’s time format pattern
#' for time axis.
#' @param short_time_labels whether month and day names should be abbreviated.
#' @param value explicitly set the visible legend values.
#' @param remove if \code{TRUE}, there will be no legend for this aesthetic.
#' @encoding UTF-8
#' @export
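#' @examples
#' # A minimal sketch (assuming the cars.json demo data used in the other
#' # examples in this package; legend_size()/legend_shape() work analogously):
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   encode_color("Origin", "nominal") %>%
#'   legend_color(orient="left", title="Country of Origin") %>%
#'   mark_point()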
legend_color <- function(vl, orient=NULL, title=NULL,
format=NULL, short_time_labels=NULL,
value=NULL, remove=FALSE) {
chnl <- "color"
if (remove) {
vl$x$encoding[[chnl]]$legend <- FALSE
} else {
if (!is.null(orient)) vl$x$encoding[[chnl]]$legend$orient <- orient
if (!is.null(title)) vl$x$encoding[[chnl]]$legend$title <- title
if (!is.null(format)) vl$x$encoding[[chnl]]$legend$format <- format
if (!is.null(short_time_labels)) vl$x$encoding[[chnl]]$legend$shortTimeLabels <-
short_time_labels
if (!is.null(value)) vl$x$encoding[[chnl]]$legend$value <- value
}
vl
}
#' Legend settings (size)
#'
#' @param vl a Vega-Lite object
#' @param orient the orientation of the legend. One of "left" or "right". This
#' determines how the legend is positioned within the scene.
#' @param title the title for the legend.
#' @param format the formatting pattern for axis labels. This is D3’s number
#' format pattern for quantitative axis and D3’s time format pattern
#' for time axis.
#' @param short_time_labels whether month and day names should be abbreviated.
#' @param value explicitly set the visible legend values.
#' @param remove if \code{TRUE}, there will be no legend for this aesthetic.
#' @encoding UTF-8
#' @export
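#' @examples
#' # A minimal sketch (assuming the cars.json demo data): suppress the size
#' # legend entirely with remove=TRUE.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   encode_size("Acceleration", "quantitative") %>%
#'   legend_size(remove=TRUE) %>%
#'   mark_point()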
legend_size <- function(vl, orient=NULL, title=NULL,
format=NULL, short_time_labels=NULL,
value=NULL, remove=FALSE) {
chnl <- "size"
if (remove) {
vl$x$encoding[[chnl]]$legend <- FALSE
} else {
if (!is.null(orient)) vl$x$encoding[[chnl]]$legend$orient <- orient
if (!is.null(title)) vl$x$encoding[[chnl]]$legend$title <- title
if (!is.null(format)) vl$x$encoding[[chnl]]$legend$format <- format
if (!is.null(short_time_labels)) vl$x$encoding[[chnl]]$legend$shortTimeLabels <-
short_time_labels
if (!is.null(value)) vl$x$encoding[[chnl]]$legend$value <- value
}
vl
}
#' Legend settings (shape)
#'
#' @param vl a Vega-Lite object
#' @param orient the orientation of the legend. One of "left" or "right". This
#' determines how the legend is positioned within the scene.
#' @param title the title for the legend.
#' @param format the formatting pattern for axis labels. This is D3’s number
#' format pattern for quantitative axis and D3’s time format pattern
#' for time axis.
#' @param short_time_labels whether month and day names should be abbreviated.
#' @param value explicitly set the visible legend values.
#' @param remove if \code{TRUE}, there will be no legend for this aesthetic.
#' @encoding UTF-8
#' @export
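#' @examples
#' # A minimal sketch (assuming the cars.json demo data):
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   encode_shape("Origin", "nominal") %>%
#'   legend_shape(orient="left", title="Origin") %>%
#'   mark_point()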
legend_shape <- function(vl, orient=NULL, title=NULL,
format=NULL, short_time_labels=NULL,
value=NULL, remove=FALSE) {
chnl <- "shape"
if (remove) {
vl$x$encoding[[chnl]]$legend <- FALSE
} else {
if (!is.null(orient)) vl$x$encoding[[chnl]]$legend$orient <- orient
if (!is.null(title)) vl$x$encoding[[chnl]]$legend$title <- title
if (!is.null(format)) vl$x$encoding[[chnl]]$legend$format <- format
if (!is.null(short_time_labels)) vl$x$encoding[[chnl]]$legend$shortTimeLabels <-
short_time_labels
if (!is.null(value)) vl$x$encoding[[chnl]]$legend$value <- value
}
vl
}
|
/scratch/gouwar.j/cran-all/cranData/vegalite/R/legend.r
|
#' Bar mark
#'
#' A bar mark represents each data point as a rectangle, where the length is
#' mapped to a quantitative scale.
#'
#' @param vl Vega-Lite object
#' @param orient the orientation of a non-stacked bar, area, and line charts.
#' The value is either "horizontal", or "vertical" (default). For bar and
#' tick, this determines whether the size of the bar and tick should be
#' applied to x or y dimension. For area, this property determines the
#' orient property of the Vega output. For line, this property determines
#' the path order of the points in the line if path channel is not specified.
#' For stacked charts, this is always determined by the orientation of the stack;
#' therefore explicitly specified value will be ignored.
#' @param stack stacking modes for bar and area marks. \code{zero} - stacking
#' with baseline offset at zero value of the scale (for creating typical stacked
#' bar and area chart). \code{normalize} - stacking with normalized domain (for
#' creating normalized stacked bar and area chart). \code{center} - stacking
#' with center baseline (for streamgraph). \code{none} - no stacking. This
#' produces layered bar and area charts.
#' @param size The pixel area of each point. For example, in the case of circles,
#' the radius is determined in part by the square root of the size value.
#' @param opacity \code{0.0}-\code{1.0}
#' @param filled whether the shape's color should be used as fill color instead of stroke color.
#' @param color color of the mark – either fill or stroke color based on the filled mark config.
#' @param fill fill color. This config will be overridden by color channel’s
#' specified or mapped values if filled is true.
#' @param stroke stroke color. This config will be overridden by color channel’s
#' specified or mapped values if filled is false.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/mark.html}{Vega-Lite Mark spec}
#' @export
#' @examples
#' dat <- jsonlite::fromJSON('[
#' {"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
#' {"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
#' {"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
#' ]')
#'
#' vegalite() %>%
#' add_data(dat) %>%
#' encode_x("a", "ordinal") %>%
#' encode_y("b", "quantitative") %>%
#' mark_bar()
mark_bar <- function(vl, orient=NULL, stack=NULL, size=NULL, opacity=NULL,
filled=NULL, color=NULL, fill=NULL, stroke=NULL) {
vl$x$mark <- "bar"
if (!is.null(stack)) vl$x$config$mark$stacked <- stack
if (!is.null(size)) vl$x$config$mark$barSize <- size
if (!is.null(orient)) vl$x$config$mark$orient <- orient
if (!is.null(opacity)) vl$x$config$mark$opacity <- opacity
if (!is.null(filled)) vl$x$config$mark$filled <- filled
if (!is.null(color)) vl$x$config$mark$color <- color
if (!is.null(fill)) vl$x$config$mark$fill <- fill
if (!is.null(stroke)) vl$x$config$mark$stroke <- stroke
return(vl)
}
#' Circle mark
#'
#' Circle and square marks are similar to point mark, except that (1) the shape
#' value is always set to circle or square (2) they are filled by default.
#'
#' @param vl a Vega-Lite object
#' @param size The pixel area of each point. For example, in the case of circles,
#' the radius is determined in part by the square root of the size value.
#' @param opacity \code{0.0}-\code{1.0}
#' @param filled whether the shape's color should be used as fill color instead of stroke color.
#' @param color color of the mark – either fill or stroke color based on the filled mark config.
#' @param fill fill color. This config will be overridden by color channel’s
#' specified or mapped values if filled is true.
#' @param stroke stroke color. This config will be overridden by color channel’s
#' specified or mapped values if filled is false.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/mark.html}{Vega-Lite Mark spec}
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", "quantitative") %>%
#' encode_y("Miles_per_Gallon", "quantitative") %>%
#' mark_circle()
mark_circle <- function(vl, size=NULL, opacity=NULL,
filled=NULL, color=NULL, fill=NULL, stroke=NULL) {
vl$x$mark <- "circle"
  if (!is.null(size)) vl$x$config$mark$size <- size
if (!is.null(opacity)) vl$x$config$mark$opacity <- opacity
if (!is.null(filled)) vl$x$config$mark$filled <- filled
if (!is.null(color)) vl$x$config$mark$color <- color
if (!is.null(fill)) vl$x$config$mark$fill <- fill
if (!is.null(stroke)) vl$x$config$mark$stroke <- stroke
return(vl)
}
#' Square mark
#'
#' Circle and square marks are similar to point mark, except that (1) the shape
#' value is always set to circle or square (2) they are filled by default.
#'
#' @param vl a Vega-Lite object
#' @param size The pixel area of each point. For example, in the case of circles,
#' the radius is determined in part by the square root of the size value.
#' @param opacity \code{0.0}-\code{1.0}
#' @param filled whether the shape's color should be used as fill color instead of stroke color.
#' @param color color of the mark – either fill or stroke color based on the filled mark config.
#' @param fill fill color. This config will be overridden by color channel’s
#' specified or mapped values if filled is true.
#' @param stroke stroke color. This config will be overridden by color channel’s
#' specified or mapped values if filled is false.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/mark.html}{Vega-Lite Mark spec}
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", "quantitative") %>%
#' encode_y("Miles_per_Gallon", "quantitative") %>%
#' mark_square()
mark_square <- function(vl, size=NULL, opacity=NULL,
filled=NULL, color=NULL, fill=NULL, stroke=NULL) {
vl$x$mark <- "square"
  if (!is.null(size)) vl$x$config$mark$size <- size
if (!is.null(opacity)) vl$x$config$mark$opacity <- opacity
if (!is.null(filled)) vl$x$config$mark$filled <- filled
if (!is.null(color)) vl$x$config$mark$color <- color
if (!is.null(fill)) vl$x$config$mark$fill <- fill
if (!is.null(stroke)) vl$x$config$mark$stroke <- stroke
return(vl)
}
#' Tick mark
#'
#' A tick mark represents each data point as a short line. This is a useful
#' mark for displaying the distribution of values in a field.
#'
#' @param vl Vega-Lite object
#' @param orient the orientation of a non-stacked bar, area, and line charts.
#' The value is either "horizontal", or "vertical" (default). For bar and
#' tick, this determines whether the size of the bar and tick should be
#' applied to x or y dimension. For area, this property determines the
#' orient property of the Vega output. For line, this property determines
#' the path order of the points in the line if path channel is not specified.
#' For stacked charts, this is always determined by the orientation of the stack;
#' therefore explicitly specified value will be ignored.
#' @param size The pixel area of each point. For example, in the case of circles,
#' the radius is determined in part by the square root of the size value.
#' @param thickness Thickness of the tick mark. Default value: 1
#' @param opacity \code{0.0}-\code{1.0}
#' @param color color of the mark – either fill or stroke color based on the filled mark config.
#' @param fill fill color. This config will be overridden by color channel’s
#' specified or mapped values if filled is true.
#' @param stroke stroke color. This config will be overridden by color channel’s
#' specified or mapped values if filled is false.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/mark.html}{Vega-Lite Mark spec}
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", "quantitative") %>%
#' encode_y("Cylinders", "ordinal") %>%
#' mark_tick()
mark_tick <- function(vl, orient=NULL, size=NULL, thickness=1, opacity=NULL,
color=NULL, fill=NULL, stroke=NULL) {
vl$x$mark <- "tick"
vl$x$config$mark$tickThickness <- thickness
if (!is.null(size)) vl$x$config$mark$tickSize <- size
if (!is.null(orient)) vl$x$config$mark$orient <- orient
if (!is.null(opacity)) vl$x$config$mark$opacity <- opacity
if (!is.null(color)) vl$x$config$mark$color <- color
if (!is.null(fill)) vl$x$config$mark$fill <- fill
if (!is.null(stroke)) vl$x$config$mark$stroke <- stroke
return(vl)
}
#' Line mark
#'
#' A line mark represents the data points stored in a field with a line
#' connecting all of these points. Unlike most other marks (area being the
#' exception), which represent one data element per mark, a single line mark
#' represents multiple data elements as one line.
#'
#' @param vl Vega-Lite object
#' @param orient the orientation of a non-stacked bar, area, and line charts.
#' The value is either "horizontal", or "vertical" (default). For bar and
#' tick, this determines whether the size of the bar and tick should be
#' applied to x or y dimension. For area, this property determines the
#' orient property of the Vega output. For line, this property determines
#' the path order of the points in the line if path channel is not specified.
#' For stacked charts, this is always determined by the orientation of the stack;
#' therefore explicitly specified value will be ignored.
#' @param interpolate The line interpolation method to use. One of \code{linear},
#' \code{step-before}, \code{step-after}, \code{basis}, \code{basis-open},
#' \code{basis-closed}, \code{bundle}, \code{cardinal}, \code{cardinal-open},
#' \code{cardinal-closed}, \code{monotone}. For more information about each
#' interpolation method, please see D3’s line interpolation.
#' @param tension Depending on the interpolation type, sets the tension parameter.
#' (See D3’s line interpolation.)
#' @param color color of the mark – either fill or stroke color based on the filled mark config.
#' @param fill fill color. This config will be overridden by color channel’s
#' specified or mapped values if filled is true.
#' @param stroke stroke color. This config will be overridden by color channel’s
#' specified or mapped values if filled is false.
#' @param opacity \code{0.0}-\code{1.0}
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/mark.html}{Vega-Lite Mark spec}
#' @export
#' @examples
#' vegalite() %>%
#' cell_size(300, 300) %>%
#' add_data("https://vega.github.io/vega-editor/app/data/driving.json") %>%
#' encode_x("miles", "quantitative") %>%
#' encode_y("gas", "quantitative") %>%
#' encode_path("year", "temporal") %>%
#' scale_x_linear(zero=FALSE) %>%
#' scale_y_linear(zero=FALSE) %>%
#' mark_line()
mark_line <- function(vl, orient=NULL, interpolate=NULL, tension=NULL, opacity=NULL,
color=NULL, fill=NULL, stroke=NULL) {
vl$x$mark <- "line"
if (!is.null(interpolate)) vl$x$config$mark$interpolate <- interpolate
if (!is.null(tension)) vl$x$config$mark$tension <- tension
if (!is.null(orient)) vl$x$config$mark$orient <- orient
if (!is.null(opacity)) vl$x$config$mark$opacity <- opacity
if (!is.null(color)) vl$x$config$mark$color <- color
if (!is.null(fill)) vl$x$config$mark$fill <- fill
if (!is.null(stroke)) vl$x$config$mark$stroke <- stroke
return(vl)
}
#' Area mark
#'
#' An area mark represents multiple data elements as a single area shape.
#'
#' @param vl Vega-Lite object
#' @param orient the orientation of a non-stacked bar, area, and line charts.
#' The value is either "horizontal", or "vertical" (default). For bar and
#' tick, this determines whether the size of the bar and tick should be
#' applied to x or y dimension. For area, this property determines the
#' orient property of the Vega output. For line, this property determines
#' the path order of the points in the line if path channel is not specified.
#' For stacked charts, this is always determined by the orientation of the stack;
#' therefore explicitly specified value will be ignored.
#' @param stack stacking modes for bar and area marks. \code{zero} - stacking
#' with baseline offset at zero value of the scale (for creating typical stacked
#' bar and area chart). \code{normalize} - stacking with normalized domain (for
#' creating normalized stacked bar and area chart). \code{center} - stacking
#' with center baseline (for streamgraph). \code{none} - no stacking. This
#' produces layered bar and area charts.
#' @param interpolate The line interpolation method to use. One of \code{linear},
#' \code{step-before}, \code{step-after}, \code{basis}, \code{basis-open},
#' \code{basis-closed}, \code{bundle}, \code{cardinal}, \code{cardinal-open},
#' \code{cardinal-closed}, \code{monotone}. For more information about each
#' interpolation method, please see D3’s line interpolation.
#' @param tension Depending on the interpolation type, sets the tension parameter.
#' (See D3’s line interpolation.)
#' @param opacity \code{0.0}-\code{1.0}
#' @param filled whether the shape's color should be used as fill color instead of stroke color.
#' @param color color of the mark – either fill or stroke color based on the filled mark config.
#' @param fill fill color. This config will be overridden by color channel’s
#' specified or mapped values if filled is true.
#' @param stroke stroke color. This config will be overridden by color channel’s
#' specified or mapped values if filled is false.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/mark.html}{Vega-Lite Mark spec}
#' @export
#' @examples
#' vegalite() %>%
#' cell_size(300, 200) %>%
#' add_data("https://vega.github.io/vega-editor/app/data/unemployment-across-industries.json") %>%
#' encode_x("date", "temporal") %>%
#' encode_y("count", "quantitative", aggregate="sum") %>%
#' encode_color("series", "nominal") %>%
#' scale_color_nominal(range="category20b") %>%
#' timeunit_x("yearmonth") %>%
#' scale_x_time(nice="month") %>%
#' axis_x(axisWidth=0, format="%Y", labelAngle=0) %>%
#' mark_area()
mark_area <- function(vl, orient=NULL, stack=NULL, interpolate=NULL, tension=NULL,
opacity=NULL, filled=NULL, color=NULL, fill=NULL, stroke=NULL) {
vl$x$mark <- "area"
if (!is.null(stack)) vl$x$config$mark$stacked <- stack
if (!is.null(interpolate)) vl$x$config$mark$interpolate <- interpolate
if (!is.null(tension)) vl$x$config$mark$tension <- tension
if (!is.null(orient)) vl$x$config$mark$orient <- orient
if (!is.null(opacity)) vl$x$config$mark$opacity <- opacity
if (!is.null(filled)) vl$x$config$mark$filled <- filled
if (!is.null(color)) vl$x$config$mark$color <- color
if (!is.null(fill)) vl$x$config$mark$fill <- fill
if (!is.null(stroke)) vl$x$config$mark$stroke <- stroke
return(vl)
}
#' Point mark
#'
#' A point mark represents each data point with a symbol.
#'
#' @param vl Vega-Lite object
#' @param shape The symbol shape to use. One of \code{circle}, \code{square},
#' \code{cross}, \code{diamond}, \code{triangle-up}, or \code{triangle-down}.
#' Default value: \code{circle}.
#' @param size The pixel area of each point. For example, in the case of circles,
#' the radius is determined in part by the square root of the size value.
#' @param opacity \code{0.0}-\code{1.0}
#' @param filled whether the shape's color should be used as fill color instead of stroke color.
#' @param color color of the mark – either fill or stroke color based on the filled mark config.
#' @param fill fill color. This config will be overridden by color channel’s
#' specified or mapped values if filled is true.
#' @param stroke stroke color. This config will be overridden by color channel’s
#' specified or mapped values if filled is false.
#' @references \href{http://vega.github.io/vega-lite/docs/mark.html}{Vega-Lite Mark spec}
#' @encoding UTF-8
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", "quantitative") %>%
#' encode_y("Miles_per_Gallon", "quantitative") %>%
#' mark_point()
mark_point <- function(vl, shape="circle", size=NULL, opacity=NULL, filled=NULL,
color=NULL, fill=NULL, stroke=NULL) {
vl$x$mark <- "point"
vl$x$config$mark$shape <- shape
  if (!is.null(size)) vl$x$config$mark$size <- size
if (!is.null(opacity)) vl$x$config$mark$opacity <- opacity
if (!is.null(filled)) vl$x$config$mark$filled <- filled
if (!is.null(color)) vl$x$config$mark$color <- color
if (!is.null(fill)) vl$x$config$mark$fill <- fill
if (!is.null(stroke)) vl$x$config$mark$stroke <- stroke
return(vl)
}
#' Text mark
#'
#' A text mark represents each data point with a text instead of a point.
#'
#' @param vl a Vega-Lite object
#' @param opacity \code{0.0}-\code{1.0}
#' @param color color of the mark – either fill or stroke color based on the filled mark config.
#' @param fill fill color. This config will be overridden by color channel’s
#' specified or mapped values if filled is true.
#' @param stroke stroke color. This config will be overridden by color channel’s
#' specified or mapped values if filled is false.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/mark.html}{Vega-Lite Mark spec}
#' @export
#' @examples
#' vegalite() %>%
#' cell_size(300, 200) %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", "quantitative") %>%
#' encode_y("Miles_per_Gallon", "quantitative") %>%
#' encode_color("Origin", "nominal") %>%
#' calculate("OriginInitial", "datum.Origin[0]") %>%
#' encode_text("OriginInitial", "nominal") %>%
#' mark_text()
mark_text <- function(vl, opacity=NULL, color=NULL, fill=NULL, stroke=NULL) {
vl$x$mark <- "text"
if (!is.null(opacity)) vl$x$config$mark$opacity <- opacity
if (!is.null(color)) vl$x$config$mark$color <- color
if (!is.null(fill)) vl$x$config$mark$fill <- fill
if (!is.null(stroke)) vl$x$config$mark$stroke <- stroke
return(vl)
}
|
/scratch/gouwar.j/cran-all/cranData/vegalite/R/mark.r
|
#' Capture a static (png) version of a widget (e.g. for use in a PDF knitr document)
#'
#' Widgets are generally interactive beasts rendered in an HTML DOM with
#' javascript. That makes them unusable in PDF documents. However, many widgets'
#' initial views would work well as static images. This function renders a widget
#' to a file and makes it usable in a number of contexts.
#'
#' What is returned depends on the value of \code{output}. By default (\code{"path"}),
#' the full disk path will be returned. If \code{markdown} is specified, a markdown
#' string will be returned with a \code{file:///...} URL. If \code{html} is
#' specified, an \code{<img src='file:///...'/>} tag will be returned and if
#' \code{inline} is specified, a base64 encoded \code{<img>} tag will be returned
#' (just like you'd see in a self-contained HTML file from \code{knitr}).
#'
#' @importFrom webshot webshot
#' @importFrom base64 img
#' @param wdgt htmlwidget to capture
#' @param output how to return the results of the capture (see Details section)
#' @param height,width it's important for many widgets to be responsive in HTML
#' documents. PDFs are static beasts and having a fixed image size works
#' better for them. \code{height} & \code{width} will be passed into the
#' rendering process, which means you should probably specify similar
#' values in your widget creation process so the captured \code{<div>}
#' size matches the size you specify here.
#' @param png_render_path by default, this will be a temporary file location but
#' a fully qualified filename (with extension) can be specified. It's up to
#' the caller to free the storage when finished with the resource.
#' @return See Details
#' @export
#' @examples \dontrun{
#' library(webshot)
#' library(vegalite)
#'
#' dat <- jsonlite::fromJSON('[
#' {"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
#' {"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
#' {"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
#' ]')
#'
#' vegalite(viewport_width=350, viewport_height=250) %>%
#' add_data(dat) %>%
#' encode_x("a", "ordinal") %>%
#' encode_y("b", "quantitative") %>%
#' mark_bar() -> vl
#'
#' capture_widget(vl, "inline", 250, 350)
#' }
capture_widget <- function(wdgt,
output=c("path", "markdown", "html", "inline"),
height, width,
png_render_path=tempfile(fileext=".png")) {
wdgt_html_tf <- tempfile(fileext=".html")
  htmlwidgets::saveWidget(wdgt, wdgt_html_tf)
webshot::webshot(url=sprintf("file://%s", wdgt_html_tf),
selector="#htmlwidget_container",
                   file=png_render_path,
vwidth=width, vheight=height)
# done with HTML
unlink(wdgt_html_tf)
switch(match.arg(output, c("path", "markdown", "html", "inline")),
`path`=png_render_path,
         `markdown`=sprintf("![](file://%s)", png_render_path),
`html`=sprintf("<img src='file://%s'/>", png_render_path),
         `inline`=base64::img(png_render_path))
}
|
/scratch/gouwar.j/cran-all/cranData/vegalite/R/phantom.r
|
#' Linear Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines appropriate range based on the scale’s
#' channel and type, but range property can be provided to customize range
#' values.
#' @param clamp if true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from scale
#' config (true by default) Supported Types: only linear, pow, sqrt, and log
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
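#' @examples
#' # A minimal sketch (assuming the driving.json demo data used in the
#' # encode_path() example): drop the zero baseline on both axes.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/driving.json") %>%
#'   encode_x("miles", "quantitative") %>%
#'   encode_y("gas", "quantitative") %>%
#'   scale_x_linear(zero=FALSE) %>%
#'   scale_y_linear(zero=FALSE) %>%
#'   mark_point()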
scale_x_linear <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
# vl$x$encoding$x$scale <- list(type="linear")
if (!is.null(domain)) vl$x$encoding$x$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$x$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$x$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$x$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$x$scale$zero <- zero
vl
}
#' Power Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines appropriate range based on the scale’s
#' channel and type, but range property can be provided to customize range
#' values.
#' @param clamp if true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from scale
#' config (true by default) Supported Types: only linear, pow, sqrt, and log
#' @param exp exponent
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
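#' @examples
#' # A rough sketch (assuming the cars.json demo data): exp=0.5 gives a
#' # square-root-like transform of the x positions.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   scale_x_pow(exp=0.5) %>%
#'   mark_point()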
scale_x_pow <- function(vl, domain=NULL, range=NULL, clamp=NULL, exp=NULL,
nice=NULL, zero=NULL) {
vl$x$encoding$x$scale <- list(type="pow")
if (!is.null(domain)) vl$x$encoding$x$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$x$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$x$scale$clamp <- clamp
  if (!is.null(exp)) vl$x$encoding$x$scale$exp <- exp
if (!is.null(nice)) vl$x$encoding$x$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$x$scale$zero <- zero
vl
}
#' Sqrt Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines appropriate range based on the scale’s
#' channel and type, but range property can be provided to customize range
#' values.
#' @param clamp if true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from scale
#' config (true by default) Supported Types: only linear, pow, sqrt, and log
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
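#' @examples
#' # A minimal sketch (assuming the cars.json demo data):
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   scale_x_sqrt() %>%
#'   mark_point()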
scale_x_sqrt <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
vl$x$encoding$x$scale <- list(type="sqrt")
if (!is.null(domain)) vl$x$encoding$x$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$x$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$x$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$x$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$x$scale$zero <- zero
vl
}
#' Log Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines appropriate range based on the scale’s
#' channel and type, but range property can be provided to customize range
#' values.
#' @param clamp if true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from scale
#' config (true by default) Supported Types: only linear, pow, sqrt, and log
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
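#' @examples
#' # A minimal sketch (assuming the cars.json demo data): log scales need
#' # strictly positive domains, so the zero baseline is disabled.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   scale_x_log(zero=FALSE) %>%
#'   mark_point()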
scale_x_log <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
vl$x$encoding$x$scale <- list(type="log")
if (!is.null(domain)) vl$x$encoding$x$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$x$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$x$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$x$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$x$scale$zero <- zero
vl
}
#' Quantize Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines appropriate range based on the scale’s
#' channel and type, but range property can be provided to customize range
#' values.
#' @param clamp if true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from scale
#' config (true by default) Supported Types: only linear, pow, sqrt, and log
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
scale_x_quantize <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
vl$x$encoding$x$scale <- list(type="quantize")
if (!is.null(domain)) vl$x$encoding$x$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$x$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$x$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$x$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$x$scale$zero <- zero
vl
}
#' Quantile Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines appropriate range based on the scale’s
#' channel and type, but range property can be provided to customize range
#' values.
#' @param clamp if true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from scale
#' config (true by default) Supported Types: only linear, pow, sqrt, and log
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
scale_x_quantile <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
vl$x$encoding$x$scale <- list(type="quantile")
if (!is.null(domain)) vl$x$encoding$x$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$x$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$x$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$x$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$x$scale$zero <- zero
vl
}
#' Ordinal Scale
#'
#' @param vl Vega-Lite object
#' @param band_size band size
#' @param padding padding
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
#' add_filter("datum.year == 2000") %>%
#' calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
#' encode_x("gender", "nominal") %>%
#' encode_y("people", "quantitative", aggregate="sum") %>%
#' encode_color("gender", "nominal") %>%
#' scale_x_ordinal(band_size=6) %>%
#' scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>%
#' facet_col("age", "ordinal", padding=4) %>%
#' axis_x(remove=TRUE) %>%
#' axis_y(title="population", grid=FALSE) %>%
#' axis_facet_col(orient="bottom", axisWidth=1, offset=-8) %>%
#' facet_cell(stroke_width=0) %>%
#' mark_bar()
scale_x_ordinal <- function(vl, band_size=NULL, padding=NULL) {
if (!is.null(band_size)) vl$x$encoding$x$scale$bandSize <- band_size
if (!is.null(padding)) vl$x$encoding$x$scale$padding <- padding
vl
}
#' Threshold Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines appropriate range based on the scale’s
#' channel and type, but range property can be provided to customize range
#' values.
#' @param clamp if true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from scale
#' config (true by default) Supported Types: only linear, pow, sqrt, and log
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
scale_x_threshold <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
vl$x$encoding$x$scale <- list(type="threshold")
if (!is.null(domain)) vl$x$encoding$x$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$x$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$x$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$x$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$x$scale$zero <- zero
vl
}
#' Temporal Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines an appropriate range based on the scale’s
#' channel and type, but the range property can be provided to customize range
#' values.
#' @param clamp If true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from the scale
#' config (true by default). Supported scale types: linear, pow, sqrt, and log only.
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
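#' @examples
#' # A minimal sketch (assumes the public Vega unemployment-across-industries
#' # dataset, which has a "date" field): ask for human-friendly breaks on a
#' # temporal x axis.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/unemployment-across-industries.json") %>%
#'   encode_x("date", "temporal") %>%
#'   encode_y("count", "quantitative", aggregate="sum") %>%
#'   scale_x_time(nice=TRUE) %>%
#'   mark_area()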
scale_x_time <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
  # "time" is already the default scale type for temporal fields, so no type is set:
  # vl$x$encoding$x$scale <- list(type="time")
if (!is.null(domain)) vl$x$encoding$x$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$x$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$x$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$x$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$x$scale$zero <- zero
vl
}
#' Linear Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines an appropriate range based on the scale’s
#' channel and type, but the range property can be provided to customize range
#' values.
#' @param clamp If true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from the scale
#' config (true by default). Supported scale types: linear, pow, sqrt, and log only.
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
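#' @examples
#' # A minimal sketch (assumes the public Vega cars.json dataset): drop the
#' # zero baseline so the linear y scale hugs the extent of the data.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   scale_y_linear(zero=FALSE) %>%
#'   mark_point()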
scale_y_linear <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
  # "linear" is already the default scale type for quantitative fields, so no type is set:
  # vl$x$encoding$y$scale <- list(type="linear")
if (!is.null(domain)) vl$x$encoding$y$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$y$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$y$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$y$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$y$scale$zero <- zero
vl
}
#' Power Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines an appropriate range based on the scale’s
#' channel and type, but the range property can be provided to customize range
#' values.
#' @param clamp If true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from the scale
#' config (true by default). Supported scale types: linear, pow, sqrt, and log only.
#' @param exp exponent
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
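#' @examples
#' # A minimal sketch (assumes the public Vega cars.json dataset): exp=0.5
#' # gives a square-root-like compression of large Displacement values.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Displacement", "quantitative") %>%
#'   scale_y_pow(exp=0.5) %>%
#'   mark_point()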
scale_y_pow <- function(vl, domain=NULL, range=NULL, clamp=NULL, exp=NULL,
nice=NULL, zero=NULL) {
vl$x$encoding$y$scale <- list(type="pow")
if (!is.null(domain)) vl$x$encoding$y$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$y$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$y$scale$clamp <- clamp
if (!is.null(exp)) vl$x$encoding$y$scale$exp <- exp
if (!is.null(nice)) vl$x$encoding$y$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$y$scale$zero <- zero
vl
}
#' Sqrt Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines an appropriate range based on the scale’s
#' channel and type, but the range property can be provided to customize range
#' values.
#' @param clamp If true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from the scale
#' config (true by default). Supported scale types: linear, pow, sqrt, and log only.
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
scale_y_sqrt <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
vl$x$encoding$y$scale <- list(type="sqrt")
if (!is.null(domain)) vl$x$encoding$y$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$y$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$y$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$y$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$y$scale$zero <- zero
vl
}
#' Log Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines an appropriate range based on the scale’s
#' channel and type, but the range property can be provided to customize range
#' values.
#' @param clamp If true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from the scale
#' config (true by default). Supported scale types: linear, pow, sqrt, and log only.
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
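#' @examples
#' # A minimal sketch (assumes the public Vega cars.json dataset): a log y
#' # scale for a strictly positive field; zero=FALSE because a log scale
#' # cannot include a zero baseline.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Weight_in_lbs", "quantitative") %>%
#'   scale_y_log(zero=FALSE) %>%
#'   mark_point()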
scale_y_log <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
vl$x$encoding$y$scale <- list(type="log")
if (!is.null(domain)) vl$x$encoding$y$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$y$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$y$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$y$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$y$scale$zero <- zero
vl
}
#' Quantize Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines an appropriate range based on the scale’s
#' channel and type, but the range property can be provided to customize range
#' values.
#' @param clamp If true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from the scale
#' config (true by default). Supported scale types: linear, pow, sqrt, and log only.
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
scale_y_quantize <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
vl$x$encoding$y$scale <- list(type="quantize")
if (!is.null(domain)) vl$x$encoding$y$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$y$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$y$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$y$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$y$scale$zero <- zero
vl
}
#' Quantile Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines an appropriate range based on the scale’s
#' channel and type, but the range property can be provided to customize range
#' values.
#' @param clamp If true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from the scale
#' config (true by default). Supported scale types: linear, pow, sqrt, and log only.
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
scale_y_quantile <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
vl$x$encoding$y$scale <- list(type="quantile")
if (!is.null(domain)) vl$x$encoding$y$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$y$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$y$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$y$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$y$scale$zero <- zero
vl
}
#' Ordinal Scale
#'
#' @param vl Vega-Lite object
#' @param band_size band size
#' @param padding padding
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
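#' @examples
#' # A minimal sketch (assumes the public Vega population.json dataset):
#' # widen the ordinal y bands and add padding between them.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
#'   add_filter("datum.year == 2000") %>%
#'   encode_x("people", "quantitative", aggregate="sum") %>%
#'   encode_y("age", "ordinal") %>%
#'   scale_y_ordinal(band_size=12, padding=0.1) %>%
#'   mark_bar()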
scale_y_ordinal <- function(vl, band_size=NULL, padding=NULL) {
  # "ordinal" is already the default scale type for ordinal fields, so no type is set:
  # vl$x$encoding$y$scale <- list(type="ordinal")
if (!is.null(band_size)) vl$x$encoding$y$scale$bandSize <- band_size
if (!is.null(padding)) vl$x$encoding$y$scale$padding <- padding
vl
}
#' Threshold Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For a threshold scale, these are the
#' cut points that divide the data domain into the output range segments.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines an appropriate range based on the scale’s
#' channel and type, but the range property can be provided to customize range
#' values.
#' @param clamp If true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from the scale
#' config (true by default). Supported scale types: linear, pow, sqrt, and log only.
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
scale_y_threshold <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
vl$x$encoding$y$scale <- list(type="threshold")
if (!is.null(domain)) vl$x$encoding$y$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$y$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$y$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$y$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$y$scale$zero <- zero
vl
}
#' Temporal Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines an appropriate range based on the scale’s
#' channel and type, but the range property can be provided to customize range
#' values.
#' @param clamp If true, values that exceed the data domain are clamped to either
#' the minimum or maximum range value. Default value: derived from the scale
#' config (true by default). Supported scale types: linear, pow, sqrt, and log only.
#' @param nice If true, modifies the scale domain to use a more human-friendly
#' number range (e.g., 7 instead of 6.96). Default value: true only for
#' quantitative x and y scales and false otherwise.
#' @param zero If true, ensures that a zero baseline value is included in the
#' scale domain. Default value: true if the quantitative field is not binned.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
scale_y_time <- function(vl, domain=NULL, range=NULL, clamp=NULL, nice=NULL, zero=NULL) {
  # "time" is already the default scale type for temporal fields, so no type is set:
  # vl$x$encoding$y$scale <- list(type="time")
if (!is.null(domain)) vl$x$encoding$y$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$y$scale$range <- range
if (!is.null(clamp)) vl$x$encoding$y$scale$clamp <- clamp
if (!is.null(nice)) vl$x$encoding$y$scale$nice <- nice
if (!is.null(zero)) vl$x$encoding$y$scale$zero <- zero
vl
}
#' Nominal Color Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines an appropriate range based on the scale’s
#' channel and type, but the range property can be provided to customize range
#' values.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
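#' @examples
#' # A minimal sketch (assumes the public Vega cars.json dataset): pin each
#' # Origin category to a specific color via a custom range.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   encode_color("Origin", "nominal") %>%
#'   scale_color_nominal(range=c("#e41a1c", "#377eb8", "#4daf4a")) %>%
#'   mark_point()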
scale_color_nominal <- function(vl, domain=NULL, range=NULL) {
if (!is.null(domain)) vl$x$encoding$color$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$color$scale$range <- range
vl
}
#' Sequential Color Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines an appropriate range based on the scale’s
#' channel and type, but the range property can be provided to customize range
#' values.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
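#' @examples
#' # A minimal sketch (assumes the public Vega cars.json dataset): a two-color
#' # gradient over a quantitative color field.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   encode_color("Acceleration", "quantitative") %>%
#'   scale_color_sequential(range=c("#132B43", "#56B1F7")) %>%
#'   mark_point()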
scale_color_sequential <- function(vl, domain=NULL, range=NULL) {
if (!is.null(domain)) vl$x$encoding$color$scale$domain <- domain
if (!is.null(range)) vl$x$encoding$color$scale$range <- range
vl
}
#' Shape Scale
#'
#' @param vl Vega-Lite object
#' @param domain Custom domain values. For quantitative data, this can take the
#' form of a two-element array with minimum and maximum values.
#' @param range The range of the scale represents the set of output visual values.
#' Vega-Lite automatically determines an appropriate range based on the scale’s
#' channel and type, but the range property can be provided to customize range
#' values.
#' @encoding UTF-8
#' @references \href{http://vega.github.io/vega-lite/docs/scale.html}{Vega-Lite Scales spec}
#' @export
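#' @examples
#' # A minimal sketch (assumes the public Vega cars.json dataset and this
#' # package's encode_shape() helper): custom point symbols per Origin.
#' vegalite() %>%
#'   add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'   encode_x("Horsepower", "quantitative") %>%
#'   encode_y("Miles_per_Gallon", "quantitative") %>%
#'   encode_shape("Origin", "nominal") %>%
#'   scale_shape(range=c("circle", "square", "triangle-up")) %>%
#'   mark_point()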
scale_shape <- function(vl, domain=NULL, range=NULL) {
  # write to the shape channel's scale (the original mistakenly targeted color)
  if (!is.null(domain)) vl$x$encoding$shape$scale$domain <- domain
  if (!is.null(range)) vl$x$encoding$shape$scale$range <- range
  vl
}
/scratch/gouwar.j/cran-all/cranData/vegalite/R/scales.r
#' Widget output function for use in Shiny
#'
#' @param outputId widget output id
#' @param width,height widget width/height; any valid CSS unit (the defaults
#'   are \code{"100\%"} and \code{"400px"})
#' @export
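#' @examples
#' # A minimal Shiny sketch (assumes the shiny package is installed and uses
#' # the public Vega cars.json dataset); not run because it starts an app.
#' \dontrun{
#' library(shiny)
#' shinyApp(
#'   ui = fluidPage(vegaliteOutput("chart")),
#'   server = function(input, output) {
#'     output$chart <- renderVegalite({
#'       vegalite() %>%
#'         add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#'         encode_x("Horsepower", "quantitative") %>%
#'         encode_y("Miles_per_Gallon", "quantitative") %>%
#'         mark_point()
#'     })
#'   }
#' )
#' }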
vegaliteOutput <- function(outputId, width = '100%', height = '400px'){
htmlwidgets::shinyWidgetOutput(outputId, 'vegalite', width, height, package = 'vegalite')
}
#' Widget render function for use in Shiny
#'
#' @param expr expression that generates a vegalite widget
#' @param env the environment in which to evaluate \code{expr}
#' @param quoted is \code{expr} a quoted expression (with \code{quote()})?
#' @export
renderVegalite <- function(expr, env = parent.frame(), quoted = FALSE) {
if (!quoted) { expr <- substitute(expr) } # force quoted
htmlwidgets::shinyRenderWidget(expr, vegaliteOutput, env, quoted = TRUE)
}
/scratch/gouwar.j/cran-all/cranData/vegalite/R/shiny.r
#' Create a sort definition object
#'
#' You can sort by the aggregated value of another field by creating a
#' sort field definition object. All three properties must be non-\code{NULL}.
#'
#' @param field the field name to aggregate over.
#' @param op a valid \href{http://vega.github.io/vega-lite/docs/sort.html#aggregate}{aggregation operator}.
#' @param order either \code{ascending} or \code{descending}
#' @encoding UTF-8
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", type="quantitative", aggregate="mean") %>%
#' encode_y("Origin", "ordinal", sort=sort_def("Horsepower", "mean")) %>%
#' mark_bar()
sort_def <- function(field, op=NULL, order=c("ascending", "descending")) {
  if (is.null(field) || is.null(op) || is.null(order)) {
    message("One or more sort definition components is NULL. ",
            "Reverting to 'ascending' basic sort.")
    return("ascending")
  } else {
    # match.arg() validates 'order' against the two supported directions
    return(list(field=field, op=op, order=match.arg(order)))
  }
}
/scratch/gouwar.j/cran-all/cranData/vegalite/R/sort.r