`summary.poolaccum` <-
    function(object, display, alpha = 0.05, ...)
{
    probs <- c(alpha/2, 1-alpha/2)
    if (inherits(object, "estaccumR"))
        dislabels <- c("S", "chao", "ace")
    else
        dislabels <- c("S", "chao", "jack1", "jack2", "boot")
    disnames <- colnames(object$means[,-1])
    names(disnames) <- dislabels
    if (missing(display))
        display <- dislabels
    else
        display <- match.arg(display, dislabels, several.ok = TRUE)
    out <- list()
    for (item in display) {
        out[[item]] <- cbind(`N` = object$N,
                             `Mean` = object$means[,disnames[item], drop=FALSE],
                             t(apply(object[[item]], 1, quantile, probs=probs)),
                             `Std.Dev` = apply(object[[item]], 1, sd))
    }
    class(out) <- "summary.poolaccum"
    out
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/summary.poolaccum.R
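A minimal usage sketch for the summary method above, assuming the BCI data shipped with vegan: poolaccum() accumulates extrapolated richness estimators over random orderings of sites, and the summary method returns a quantile table per estimator.

## sketch: summarise the accumulation of the Chao estimator
library(vegan)
data(BCI)
pool <- poolaccum(BCI, permutations = 50)   # fewer permutations for speed
head(summary(pool, display = "chao")$chao)  # N, mean, 2.5%/97.5% quantiles, SD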
`summary.prc` <-
    function (object, axis = 1, scaling = "symmetric", const, digits = 4,
              correlation = FALSE, ...)
{
    sc <- scores(object, scaling = scaling, display = c("sp", "reg"), const,
                 choices = axis, correlation = correlation, ...)
    b <- sc$regression
    prnk <- object$pCCA$rank
    lentreat <- length(object$terminfo$xlev[[2]])
    b <- matrix(b[-(1:prnk)], nrow = lentreat-1, byrow = TRUE)
    rownames(b) <- (object$terminfo$xlev[[2]])[-1]
    colnames(b) <- object$terminfo$xlev[[1]]
    out <- list(sp = drop(sc$species), coefficients = b,
                names = names(object$terminfo$xlev),
                corner = (object$terminfo$xlev[[2]])[1],
                call = object$call, digits = digits)
    class(out) <- "summary.prc"
    out
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/summary.prc.R
"summary.procrustes" <- function (object, digits = getOption("digits"), ...) { ans <- object[c("call", "ss")] n <- nrow(object$Yrot) k <- ncol(object$Yrot) ans$resid <- residuals(object) rmse <- sqrt(object$ss/n) ans$n <- n ans$k <- k ans$rmse <- rmse ans$rotation <- object$rotation ans$translation <- object$translation ans$scale <- object$scale ans$digits <- digits class(ans) <- "summary.procrustes" ans }
/scratch/gouwar.j/cran-all/cranData/vegan/R/summary.procrustes.R
"summary.radfit.frame" <- function (object, ...) { labels <- names(object) for (i in seq_along(labels)) { cat("\n***", labels[i], "***\n") print(object[[i]], ...) } invisible(object) }
/scratch/gouwar.j/cran-all/cranData/vegan/R/summary.radfit.frame.R
`summary.specaccum` <-
    function(object, ...)
{
    if (is.null(object$perm))
        stop("specific summary available only for method=\"random\"")
    else {
        tmp <- summary(t(object$perm), ...)
        colnames(tmp) <- paste(1:ncol(tmp), "sites")
        tmp
    }
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/summary.specaccum.R
`summary.taxondive` <-
    function (object, ...)
{
    z <- (object$Dplus - object$EDplus)/object$sd.Dplus
    pval <- 2*pnorm(-abs(z))
    out <- cbind(object$D, object$Dstar, object$Dplus, object$sd.Dplus,
                 z, pval)
    out <- rbind(out, "Expected" = c(object$ED, object$EDstar, object$EDplus,
                                     NA, NA, NA))
    colnames(out) <- c("Delta", "Delta*", "Delta+", "sd(Delta+)",
                       "z(Delta+)", "Pr(>|z|)")
    class(out) <- "summary.taxondive"
    out
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/summary.taxondive.R
### summary allocates shared components equally to explanatory sets `summary.varpart` <- function(object, ...) { nsets <- object$part$nsets nfract <- 2^nsets - 1 ## find fractions each component is made of contr <- matrix(0, nfract, nsets, dimnames = list(paste0("[", letters[seq_len(nfract)], "]"), paste0("X", seq_len(nsets)))) setfrac <- rownames(object$part$fract)[seq_len(nsets)] for (i in seq_len(nsets)) contr[,i] <- as.numeric(letters[seq_len(nfract)] %in% strsplit(setfrac[i], "")[[1]]) ## allocate shared fractions equally to all sets contr <- sweep(contr, 1, rowSums(contr), "/") uniqpart <- object$part$indfract[seq_len(nfract), 3] contr <- uniqpart * contr ## returned components contrpart <- colSums(contr) uniqpart <- uniqpart[seq_len(nsets)] names(uniqpart) <- names(contrpart) out <- list("uniqpart" = uniqpart, "contribpart" = contrpart, "contributions" = contr, "setlabels" = object$tables) class(out) <- "summary.varpart" out } `print.summary.varpart` <- function(x, digits = 3, zero.print = "", ...) { ## collect table df <- data.frame("Unique" = x$uniqpart, "Contributed" = x$contribpart, "Component" = x$setlabels) cat("\nUnique fractions and total with shared fractions equally allocated:\n\n") print(df, digits = digits) cat("\nContributions of fractions to sets:\n\n") print.table(x$contributions, digits = digits, zero.print = zero.print) invisible(x) }
/scratch/gouwar.j/cran-all/cranData/vegan/R/summary.varpart.R
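A hedged example of the summary method above, assuming the mite data sets shipped with vegan: fit a two-set variation partitioning and let summary() allocate the shared fractions equally to the explanatory sets.

## sketch: unique fractions plus equally allocated shared fractions
library(vegan)
data(mite, mite.env, mite.pcnm)
mod <- varpart(mite, mite.env[, c("SubsDens", "WatrCont")], mite.pcnm,
               transfo = "hel")
summary(mod)   # unique and contributed fractions per explanatory set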
### Swan (1970), Ecology 51, 89-102: iteratively replaces zero
### abundances with Beals smoothing values.
swan <- function (x, maxit = Inf, type = 0)
{
    zeros <- -Inf
    iter <- 0
    while(zeros != (zeros <- sum(x == 0)) && any(x == 0) && iter < maxit) {
        x[x > 0] <- x[x > 0] - min(x[x > 0]) + 1
        x[x == 0] <- beals(x, type = type)[x == 0]
        iter <- iter + 1
    }
    x
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/swan.R
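A small illustration of the iteration above, assuming swan() is exported alongside beals() and using the varespec data from vegan: zeros are repeatedly replaced by Beals smoothing values until none remain or maxit is reached.

## sketch: fill zero abundances with Beals smoothing values
library(vegan)
data(varespec)
filled <- swan(varespec, maxit = 5)
sum(varespec == 0); sum(filled == 0)   # zeros before and after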
### The function displays (ordered) heatmaps of community data. It ### copies vegemite() for handling 'use', 'sp.ind', 'site.ind' and ### 'select', but then switches to heatmap() to display the ### data. Unlike heatmap(), it does not insist on showing dendrograms, ### but only uses these for sites, and only if given as 'use'. `tabasco` <- function (x, use, sp.ind = NULL, site.ind = NULL, select, Rowv = TRUE, Colv = TRUE, labRow = NULL, labCol = NULL, scale, col = heat.colors(12), ...) { if (any(x < 0)) stop("function cannot be used with negative data values") pltree <- sptree <- NA if (missing(scale)) scale <- "none" else scale <- match.arg(scale, c("none", "column", "row", eval(formals(coverscale)$scale))) if (!missing(use)) { if (!is.list(use) && is.vector(use)) { if (is.null(site.ind)) site.ind <- order(use) if (is.null(sp.ind)) sp.ind <- order(wascores(use, x)) } else if (inherits(use, c("dendrogram", "hclust", "twins"))) { ## "twins" and "dendrogram" are treated as "dendrogram", ## but "hclust" is kept as "hclust": they differ in ## reorder() if (inherits(use, "twins")) { use <- as.dendrogram(use) } if (!is.null(site.ind)) stop("'site.ind' cannot be used with dendrogram") ## The tree/dendrogam and input data must be ordered ## identically. It could be regarded as a "user error" if ## they are not, but this could be really frustrating and ## give obscure errors, and therefore we take care of ## identical ordering here if (inherits(use, "hclust") && !is.null(use$labels)) x <- x[use$labels,] else # dendrogram x <- x[labels(use),] ## Reorder tree if Rowv specified if (isTRUE(Rowv)) { ## order by first CA axis -- decorana() is fastest tmp <- decorana(x, ira = 1) ## reorder() command is equal to all, but "dendrogram" ## will use unweighted mean and "hclust" weighted ## mean. 
use <- reorder(use, scores(tmp, dis="sites", choices = 1), agglo.FUN = "mean") } else if (length(Rowv) > 1) { ## Rowv is a vector if (length(Rowv) != nrow(x)) stop(gettextf("Rowv has length %d, but 'x' has %d rows", length(Rowv), nrow(x))) use <- reorder(use, Rowv, agglo.FUN = "mean") } if (inherits(use, "dendrogram")) { site.ind <- seq_len(nrow(x)) names(site.ind) <- rownames(x) site.ind <- site.ind[labels(use)] } else { site.ind <- use$order } if (is.null(sp.ind)) sp.ind <- order(wascores(order(site.ind), x)) pltree <- use ## heatmap needs a "dendrogram" if(!inherits(pltree, "dendrogram")) pltree <- as.dendrogram(pltree) } else if (is.list(use)) { tmp <- scores(use, choices = 1, display = "sites") if (is.null(site.ind)) site.ind <- order(tmp) if (is.null(sp.ind)) sp.ind <- try(order(scores(use, choices = 1, display = "species"))) if (inherits(sp.ind, "try-error")) sp.ind <- order(wascores(tmp, x)) } else if (is.matrix(use)) { tmp <- scores(use, choices = 1, display = "sites") if (is.null(site.ind)) site.ind <- order(tmp) if (is.null(sp.ind)) sp.ind <- order(wascores(tmp, x)) } } ## see if sp.ind is a dendrogram or hclust tree if (inherits(sp.ind, c("hclust", "dendrogram", "twins"))) { if (inherits(sp.ind, "twins")) { sp.ind <- as.dendrogram(sp.ind) } sptree <- sp.ind ## Reorder data to match order in the dendrogam (see 'use' above) if (inherits(sptree, "hclust")) x <- x[, sptree$labels] else # dendrogram x <- x[, labels(sptree)] ## Consider reordering species tree if (isTRUE(Colv) && !is.null(site.ind)) { sptree <- reorder(sptree, wascores(order(site.ind), x), agglo.FUN = "mean") } else if (length(Colv) > 1) { if (length(Colv) != ncol(x)) stop(gettextf("Colv has length %d, but 'x' has %d columns", length(Colv), ncol(x))) sptree <- reorder(sptree, Colv, agglo.FUN = "mean") } if (inherits(sptree, "dendrogram")) { sp.ind <- seq_len(ncol(x)) names(sp.ind) <- colnames(x) sp.ind <- sp.ind[labels(sptree)] } else { sp.ind <- sptree$order } if (!inherits(sptree, "dendrogram")) sptree <- as.dendrogram(sptree) ## reverse: origin in the upper left corner sptree <- rev(sptree) } if (!is.null(sp.ind) && is.logical(sp.ind)) sp.ind <- (1:ncol(x))[sp.ind] if (!is.null(site.ind) && is.logical(site.ind)) site.ind <- (1:nrow(x))[site.ind] if (is.null(sp.ind)) sp.ind <- 1:ncol(x) if (is.null(site.ind)) site.ind <- 1:nrow(x) if (!missing(select)) { if (!is.na(pltree)) stop("sites cannot be 'select'ed with dendrograms or hclust trees") if (!is.logical(select)) select <- sort(site.ind) %in% select stake <- colSums(x[select, , drop = FALSE]) > 0 site.ind <- site.ind[select[site.ind]] site.ind <- site.ind[!is.na(site.ind)] } else { stake <- colSums(x[site.ind, ]) > 0 } sp.ind <- sp.ind[stake[sp.ind]] ## heatmap will reorder items by dendrogram so that we need to ## give indices in the unsorted order if rows or columns have a ## dendrogram if (is.na(pltree[1])) rind <- site.ind else rind <- sort(site.ind) if (is.na(sptree[1])) ## reverse: origin in the upper left corner cind <- rev(sp.ind) else cind <- sort(sp.ind) ## we assume t() changes data.frame to a matrix x <- t(x[rind, cind]) ## labels must be ordered if there is no dendrogram if (!is.null(labRow)) labRow <- labRow[cind] if (!is.null(labCol)) labCol <- labCol[rind] x <- switch(scale, "none" = x, "column" = decostand(x, "max", 2), "row" = decostand(x, "max", 1), as.matrix(coverscale(x, scale, character = FALSE))) ## explicit scaling so that zeros and small abundances get ## different colours brk <- (max(x) - min(x[x>0])/2)/length(col) brk <- 0:length(col) * 
brk heatmap((max(x) - x), Rowv = sptree, Colv = pltree, scale = "none", labRow = labRow, labCol = labCol, col = col, breaks = brk, ...) out <- list(sites = site.ind, species = sp.ind) invisible(out) }
/scratch/gouwar.j/cran-all/cranData/vegan/R/tabasco.R
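A minimal sketch of calling the heatmap wrapper above on the varespec data from vegan, ordering sites and species by the first axis of a detrended correspondence analysis.

## sketch: shade-plot of a community table ordered by the first DCA axis
library(vegan)
data(varespec)
ord <- decorana(varespec)
tabasco(varespec, use = ord)   # sites and species ordered by axis 1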
`taxa2dist` <- function (x, varstep = FALSE, check = TRUE, labels) { rich <- apply(x, 2, function(taxa) length(unique(taxa))) S <- nrow(x) ## check drops redundant levels (constant or non-repeating) if (check) { keep <- rich < S & rich > 1 rich <- rich[keep] x <- x[, keep, drop=FALSE] } i <- rev(order(rich)) x <- x[, i, drop=FALSE] rich <- rich[i] if (varstep) { add <- -diff(c(nrow(x), rich, 1)) add <- add/c(S, rich) add <- add/sum(add) * 100 } else { add <- rep(100/(ncol(x) + check), ncol(x) + check) } if (!is.null(names(add))) names(add) <- c("Base", names(add)[-length(add)]) if (!check) add <- c(0, add) out <- matrix(add[1], nrow(x), nrow(x)) for (i in 1:ncol(x)) { out <- out + add[i + 1] * outer(x[, i], x[, i], "!=") } out <- as.dist(out) attr(out, "method") <- "taxa2dist" attr(out, "steps") <- add if (missing(labels)) { attr(out, "Labels") <- rownames(x) } else { if (length(labels) != nrow(x)) warning(gettextf("labels are wrong: needed %d, got %d", nrow(x), length(labels))) attr(out, "Labels") <- as.character(labels) } if (!check && any(out <= 0)) warning("you used 'check=FALSE' and some distances are zero: was this intended?") out }
/scratch/gouwar.j/cran-all/cranData/vegan/R/taxa2dist.R
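A short example, assuming the dune.taxon classification table shipped with vegan: turn a taxonomic table into pairwise taxonomic distances with variable step lengths between levels.

## sketch: taxonomic distances among dune meadow species
library(vegan)
data(dune.taxon)
taxdis <- taxa2dist(dune.taxon, varstep = TRUE)
range(taxdis)   # a 'dist' object with steps summing to 100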
`taxondive` <- function (comm, dis, match.force = FALSE) { binary <- FALSE comm <- as.matrix(comm) if (missing(dis)) { n <- ncol(comm) dis <- structure(rep(1, n * (n - 1)/2), Size = n, class = "dist") } dis <- as.dist(dis) if (match.force || attr(dis, "Size") != ncol(comm)) { if (match.force) message("forced matching 'dis' labels and 'comm' names") else message("dimensions do not match between 'comm' and 'dis'") if (all(colnames(comm) %in% labels(dis))) { dis <- as.matrix(dis) dis <- as.dist(dis[colnames(comm), colnames(comm)]) message("matched 'dis' labels by 'comm' names") } else { stop("could not match names in 'dis' and 'comm'") } if (length(unique(colnames(comm))) != ncol(comm)) stop("names not in unique in 'comm': match wrong") if (length(unique(labels(dis))) != attr(dis, "Size")) warning("labels not unique in 'dis': matching probably wrong") } del <- dstar <- dplus <- Ed <- Edstar <- NULL if (!binary) { del <- apply(comm, 1, function(x) sum(as.dist(outer(x, x)) * dis)) dstar <- apply(comm, 1, function(x) sum(dis * (xx <- as.dist(outer(x, x))))/sum(xx)) rs <- rowSums(comm) del <- del/rs/(rs - 1) * 2 cs <- colSums(comm) tmp <- sum(as.dist(outer(cs, cs)) * dis) Ed <- tmp/sum(cs)/sum(cs - 1) * 2 Edstar <- tmp/sum(cs)/(sum(cs) - 1) * 2 } comm <- ifelse(comm > 0, 1, 0) dplus <- apply(comm, 1, function(x) sum(as.dist(outer(x, x)) * dis)) Lambda <- apply(comm, 1, function(x) sum(as.dist(outer(x, x)) * dis^2)) m <- rowSums(comm) dplus <- dplus/m/(m - 1) * 2 Lambda <- Lambda/m/(m - 1) * 2 - dplus^2 S <- attr(dis, "Size") omebar <- sum(dis)/S/(S - 1) * 2 varome <- sum(dis^2)/S/(S - 1) * 2 - omebar^2 omei <- rowSums(as.matrix(dis))/(S - 1) varomebar <- sum(omei^2)/S - omebar^2 vardplus <- 2 * (S - m)/(m * (m - 1) * (S - 2) * (S - 3)) * ((S - m - 1) * varome + 2 * (S - 1) * (m - 2) * varomebar) out <- list(Species = m, D = del, Dstar = dstar, Lambda = Lambda, Dplus = dplus, sd.Dplus = sqrt(vardplus), SDplus = m * dplus, ED = Ed, EDstar = Edstar, EDplus = omebar) class(out) <- "taxondive" out }
/scratch/gouwar.j/cran-all/cranData/vegan/R/taxondive.R
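A usage sketch combining taxa2dist() above with taxondive(): compute taxonomic diversity and distinctness for the dune data and test Delta+ against its expectation with the summary method.

## sketch: taxonomic diversity of dune meadow sites
library(vegan)
data(dune, dune.taxon)
taxdis <- taxa2dist(dune.taxon, varstep = TRUE)
mod <- taxondive(dune, taxdis)
summary(mod)   # Delta, Delta*, Delta+ with z-test against the expected value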
`text.cca` <- function (x, display = "sites", labels, choices = c(1, 2), scaling = "species", arrow.mul, head.arrow = 0.05, select, const, axis.bp = FALSE, correlation = FALSE, hill = FALSE, ...) { if (length(display) > 1) stop("only one 'display' item can be added in one command") pts <- scores(x, choices = choices, display = display, scaling = scaling, const, correlation = correlation, hill = hill, tidy=FALSE) ## store rownames of pts for use later, otherwise if user supplies ## labels, the checks in "cn" branch fail and "bp" branch will ## be entered even if there should be no "bp" plotting cnam <- rownames(pts) if (missing(labels)) labels <- labels.cca(x, display) if (!missing(select)) { pts <- .checkSelect(select, pts) labels <- labels[select] } ## centroids ("cn") have special treatment: also plot biplot ## arrows ("bp") for continuous variables and ordered factors. if (display == "cn") { if (!is.null(nrow(pts))) { # has "cn" cnlabs <- seq_len(nrow(pts)) text(pts, labels = labels[cnlabs], ...) } else { cnlabs <- NULL } pts <- scores(x, choices = choices, display = "bp", scaling = scaling, const, correlation = correlation, hill = hill, tidy=FALSE) bnam <- rownames(pts) pts <- pts[!(bnam %in% cnam), , drop = FALSE] if (nrow(pts) == 0) return(invisible()) else { display <- "bp" if (!is.null(cnlabs)) labels <- labels[-cnlabs] } } ## draw arrows before adding labels if (display %in% c("bp", "reg", "re", "r")) { if (missing(arrow.mul)) { arrow.mul <- ordiArrowMul(pts) } pts <- pts * arrow.mul arrows(0, 0, pts[, 1], pts[, 2], length = head.arrow, ...) pts <- ordiArrowTextXY(pts, labels, rescale = FALSE, ...) if (axis.bp) { axis(side = 3, at = c(-arrow.mul, 0, arrow.mul), labels = rep("", 3)) axis(side = 4, at = c(-arrow.mul, 0, arrow.mul), labels = c(-1, 0, 1)) } } text(pts, labels = labels, ...) invisible() } ### utility function to extract labels used in CCA/RDA/dbRDA plots: ### you may need this if you want to set your own labels=. `labels.cca` <- function(object, display, ...) { if (is.null(object$CCA)) CCA <- "CA" else CCA <- "CCA" switch(display, "sp" =, "species" = rownames(object[[CCA]]$v), "wa" =, "sites" =, "lc" = rownames(object[[CCA]]$u), "reg" = colnames(object[[CCA]]$QR$qr), "bp" = rownames(object[[CCA]]$biplot), "cn" = {cn <- rownames(object[[CCA]]$centroids) bp <- rownames(object[[CCA]]$biplot) c(cn, bp[!(bp %in% cn)]) } ) }
/scratch/gouwar.j/cran-all/cranData/vegan/R/text.cca.R
"text.decorana" <- function (x, display = c("sites", "species"), labels, choices = 1:2, origin = TRUE, select, ...) { localText <- function(..., shrink, origin, scaling, triangular) text(...) display <- match.arg(display) x <- scores(x, display = display, choices = choices, origin = origin, ...) if (!missing(labels)) rownames(x) <- labels if (!missing(select)) x <- .checkSelect(select, x) localText(x, rownames(x), ...) invisible() }
/scratch/gouwar.j/cran-all/cranData/vegan/R/text.decorana.R
"text.metaMDS" <- function (x, display = c("sites", "species"), labels, choices = c(1, 2), shrink = FALSE, select, ...) { display <- match.arg(display) x <- scores(x, display = display, choices = choices, shrink = shrink) if (!missing(labels)) rownames(x) <- labels if (!missing(select)) x <- .checkSelect(select, x) text(x, labels = rownames(x), ...) invisible() }
/scratch/gouwar.j/cran-all/cranData/vegan/R/text.metaMDS.R
`text.ordiplot` <-
    function (x, what, labels, select, arrows = FALSE, length = 0.05, ...)
{
    sco <- scores(x, what)
    if (!missing(labels))
        rownames(sco) <- labels
    if (!missing(select))
        sco <- .checkSelect(select, sco)
    scoatt <- attr(sco, "score")
    if (!is.null(scoatt) && scoatt %in% c("biplot", "regression")) {
        arrows <- TRUE
        sco <- sco * ordiArrowMul(sco)
    }
    if (arrows) {
        arrows(0, 0, sco[,1], sco[,2], length = length, ...)
        sco <- ordiArrowTextXY(sco, rownames(sco), rescale = FALSE)
    }
    text(sco, labels = rownames(sco), ...)
    invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/text.ordiplot.R
`text.orditkplot` <-
    function(x, cex = x$args$tcex, col = x$args$tcol,
             font = attr(x$labels, "font"), ...)
{
    if (is.null(font)) {
        font <- par("font")
    }
    text(x$labels, labels = rownames(x$labels), cex = cex, col = col,
         font = font, ...)
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/text.orditkplot.R
`toCoda` <- function(x) UseMethod("toCoda") `toCoda.oecosimu` <- function(x) { ## mcmc only for sequential methods if (!x$oecosimu$isSeq) stop("'toCoda' is only available for sequential null models") ## named variables rownames(x$oecosimu$simulated) <- names(x$oecosimu$z) chains <- attr(x$oecosimu$simulated, "chains") ## chains: will make each chain as an mcmc object and combine ## these to an mcmc.list if (!is.null(chains) && chains > 1) { x <- x$oecosimu$simulated nsim <- dim(x)[2] niter <- nsim / chains ## iterate over chains x <- lapply(1:chains, function(i) { z <- x[, ((i-1) * niter + 1):(i * niter), drop = FALSE] attr(z, "mcpar") <- c(attr(x, "burnin") + attr(x, "thin"), attr(x, "burnin") + attr(x, "thin") * niter, attr(x, "thin")) attr(z, "class") <- c("mcmc", class(z)) t(z) }) ## combine list of mcmc objects to a coda mcmc.list #x <- as.mcmc.list(x) class(x) <- "mcmc.list" } else { # one chain: make to a single mcmc object x <- as.ts(x) mcpar <- attr(x, "tsp") mcpar[3] <- round(1/mcpar[3]) attr(x, "mcpar") <- mcpar class(x) <- c("mcmc", class(x)) } x } `toCoda.permat` <- toCoda.oecosimu
/scratch/gouwar.j/cran-all/cranData/vegan/R/toCoda.R
##' S3 generic for function to compute tolerances
##'
##' Brought this in here from analogue because of tolerance.cca
##'
##' @param x an R object
##' @param ... arguments passed to other methods
`tolerance` <- function(x, ...) UseMethod("tolerance")
/scratch/gouwar.j/cran-all/cranData/vegan/R/tolerance.R
##' Species tolerances and sample heterogeneities ##' ##' Function to compute species tolerances and site heterogeneity measures ##' from unimodal ordinations (CCA & CA). Implements Eq 6.47 and 6.48 from ##' the Canoco 4.5 Reference Manual (pages 178-179). ##' ##' @param x object of class \code{"cca"}. ##' @param choices numeric; which ordination axes to compute ##' tolerances and heterogeneities for. Defaults to axes 1 and 2. ##' @param which character; one of \code{"species"} or \code{"sites"}, ##' indicating whether species tolerances or sample heterogeneities ##' respectively are computed. ##' @param scaling numeric or character; the ordination scaling to use. ##' @param useN2 logical; should the bias in the tolerances / ##' heterogeneities be reduced via scaling by Hill's N2? ##' @param ... arguments passed to other methods ##' @return matrix of tolerances/heterogeneities with some additional ##' attributes: \code{which}, \code{scaling}, and \code{N2}, the latter of which will be \code{NA} if \code{useN2 = FALSE}. ##' @author Gavin L. Simpson ##' @examples ##' data(dune) ##' data(dune.env) ##' mod <- cca(dune ~ ., data = dune.env) ##' tolerance.cca(mod) ##' tolerance.cca <- function(x, choices = 1:2, which = c("species","sites"), scaling = "species", useN2 = TRUE, hill = FALSE, ...) { if(inherits(x, "rda")) { stop("tolerances only available for unimodal ordinations") } if(missing(which)) { which <- "species" } ## zapping epsilon to make approximate 1's into 1's ZAP <- sqrt(.Machine$double.eps) ## reconstruct species/response matrix Y - up to machine precision! Y <- (ordiYbar(x, "initial") * sqrt(x$rowsum %o% x$colsum) + x$rowsum %o% x$colsum) * x$grand.total which <- match.arg(which) siteScrTypes <- if (is.null(x$CCA)) { "sites" } else { "lc" } ## Sort out scaling; only for (C)CA so no correlation arg scaling <- scalingType(scaling, hill = hill) scrs <- scores(x, display = c(siteScrTypes, "species"), choices = choices, scaling = scaling, ...) 
## compute N2 if useN2 == TRUE & only if doN2 <- isTRUE(useN2) && ((which == "species" && abs(scaling) == 2) || (which == "sites" && abs(scaling) == 1)) siteScrs <- which(names(scrs) %in% c("sites","constraints")) if(isTRUE(all.equal(which, "sites"))) { res <- matrix(ncol = length(choices), nrow = nrow(scrs[[siteScrs]])) Ytot <- rowSums(Y) for (i in seq_len(NROW(res))) { XiUk <- apply(scrs[["species"]], 1L, `-`, scrs[[siteScrs]][i,]) ## with only one 'choice' drops dimensions if (!is.matrix(XiUk)) XiUk <- t(XiUk) # 1-row matrix YXiUk <- sweep(XiUk^2, 2L, Y[i,], "*") if(any(neg <- YXiUk < 0)) { YXiUk[neg] <- 0 } res[i, ] <- sqrt(rowSums(YXiUk) / Ytot[i]) } rownames(res) <- rownames(scrs[[siteScrs]]) colnames(res) <- colnames(scrs[[siteScrs]]) if(doN2) { y <- sweep(Y, 1, Ytot, "/")^2 N2 <- 1 / rowSums(y, na.rm = TRUE) ## 1/H ## avoid almost-1 for sites with only one spp N2[abs(N2-1) < ZAP] <- 1 ## avoid "negative zeros" form 1 - 1/N2 when N2 ~ 1 res <- sweep(res, 1, sqrt(pmax(1 - 1/N2, 0)), "/") } } else { res <- matrix(ncol = length(choices), nrow = ncol(Y)) Ytot <- colSums(Y) for (i in seq_len(NROW(res))) { XiUk <- apply(scrs[[siteScrs]], 1L, `-`, scrs[["species"]][i,]) ## may drop dimensions if (!is.matrix(XiUk)) XiUk <- t(XiUk) YXiUk <- sweep(XiUk^2, 2L, Y[,i], "*") if (any(neg <- YXiUk < 0)) { YXiUk[neg] <- 0 } res[i, ] <- sqrt(rowSums(YXiUk) / Ytot[i]) } rownames(res) <- colnames(Y) colnames(res) <- colnames(scrs[["species"]]) if(doN2) { y <- sweep(Y, 2, Ytot, "/")^2 N2 <- 1 / colSums(y, na.rm = TRUE) # 1/H ## avoid almost-1 for species present only once N2[abs(N2-1) < ZAP] <- 1 ## avoid "negative zeros" form 1 - 1/N2 when N2 ~ 1 res <- sweep(res, 1, sqrt(pmax(1 - 1/N2, 0)), "/") } } res[!is.finite(res)] <- 0 # some values can be Inf or NaN but are really 0 res[res < sqrt(.Machine$double.eps)] <- 0 # almost-zero tolerances should be zero class(res) <- c("tolerance.cca", "tolerance","matrix") attr(res, "which") <- which attr(res, "scaling") <- scaling attr(res, "N2") <- NA if(doN2) { attr(res, "N2") <- N2 } res # return } `print.tolerance.cca` <- function(x, ...) { cat("\n") msg <- ifelse(attr(x, "which") == "species", "Species Tolerance", "Sample Heterogeneity") writeLines(msg, sep = "\n\n") msg <- paste("Scaling:", attr(x, "scaling")) writeLines(strwrap(msg), sep = "\n\n") attr(x, "model") <- attr(x, "scaling") <- attr(x, "which") <- attr(x, "N2") <- NULL print(unclass(x), ...) cat("\n") }
/scratch/gouwar.j/cran-all/cranData/vegan/R/tolerance.cca.R
### tolerance method for decorana ### ### After rescaling, all values should be 1 `tolerance.decorana` <- function(x, data, choices = 1:4, which = c("sites", "species"), useN2 = TRUE, ...) { ## community data cannot be reconstructed from the result x. We ## use either the one supplied in 'data' or try to find data used ## in decorana, but then we check the plausibility. if (missing(data)) data <- eval(x$call$veg) which <- match.arg(which) ## Native decorana scaling (sites are WA of species) does not ## allow useN2 with species (but this can be done after scaling of ## results, and therefore the code below is ready for this). if (useN2 && which == "species") warning("useN2 is not implemented for species") EPS <- sqrt(.Machine$double.eps) ## transform data like decorana did if (!is.null(x$before)) data <- beforeafter(data, x$before, x$after) if (x$iweigh) data <- downweight(data, x$fraction) ## see if data are plausible given decorana solution if (nrow(data) != nrow(x$rproj)) stop("'data' have wrong row dimension") if (ncol(data) != nrow(x$cproj)) stop("'data' have wrong col dimension") ## check the first eigenvalue ev1 <- svd(initCA(data), nv=0, nu=0)$d[1]^2 ev0 <- if (x$ira) x$evals[1] else x$evals.decorana[1] if (abs(ev1 - ev0) > 100 * EPS) stop("'data' are not plausible given 'decorana' result") ## preliminaries over: start working res <- switch(which, "sites" = x$rproj, "species" = x$cproj) tot <- switch(which, "sites" = rowSums(data), "species" = colSums(data)) ## go over axes for(i in choices) { X <- data * outer(x$rproj[,i], x$cproj[,i], "-")^2 if (which == "species") X <- t(X) res[,i] <- sqrt(rowSums(X)/tot) } ## N2 depends only on rows. Here and we still handle species, ## since support can be added later, though skipped now. if (useN2 && which != "species") { y <- switch(which, "sites" = data, "species" = t(data)) y <- (y / rowSums(y))^2 N2 <- 1 / rowSums(y, na.rm = TRUE) # 1/H N2[abs(N2 - 1) < EPS] <- 1 res <- res / sqrt(pmax(1 - 1/N2, 0)) } res <- res[,choices, drop=FALSE] res[!is.finite(res) | res < EPS] <- 0 class(res) <- c("tolerance.decorana", "tolerance.cca", "tolerance", "matrix") attr(res, "which") <- which attr(res, "scaling") <- "decorana" attr(res, "N2") <- if (useN2 && which != "species") N2 else NA res }
/scratch/gouwar.j/cran-all/cranData/vegan/R/tolerance.decorana.R
`treedist` <-
    function(x, tree, relative = TRUE, match.force = TRUE, ...)
{
    ## we cannot reconstruct tree with reversals from cophenetic
    tree <- as.hclust(tree)
    if (any(diff(tree$height) < -sqrt(.Machine$double.eps)))
        stop("tree with reversals cannot be handled")
    x <- as.matrix(x)
    n <- nrow(x)
    ABJ <- matrix(0, n, n)
    dmat <- as.matrix(cophenetic(tree))
    ## match names
    if (ncol(x) != ncol(dmat) || match.force) {
        if(!match.force)
            warning("dimensions do not match between 'x' and 'tree': matching by names")
        nm <- colnames(x)
        dmat <- dmat[nm, nm]
    }
    for(j in 1:n) {
        for (k in j:n) {
            jk <- x[j,] > 0 | x[k,] > 0
            if (sum(jk) > 1)
                ABJ[k, j] <- treeheight(update(tree, d = as.dist(dmat[jk, jk])))
        }
    }
    A <- diag(ABJ)
    AB <- as.dist(outer(A, A, "+"))
    ABJ <- as.dist(ABJ)
    out <- (2 * ABJ - AB)
    if (relative)
        out <- out/ABJ
    out[ABJ == 0] <- 0
    attr(out, "method") <- if (relative) "treedist" else "raw treedist"
    attr(out, "call") <- match.call()
    attr(out, "Labels") <- row.names(x)
    out
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/treedist.R
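A minimal sketch of the tree-based dissimilarity above, assuming the dune and dune.taxon data from vegan: build a taxonomy dendrogram and compare sites by the heights of the trees spanned by their shared and combined species.

## sketch: pairwise tree distances between dune meadow sites
library(vegan)
data(dune, dune.taxon)
taxdis <- taxa2dist(dune.taxon, varstep = TRUE)
cl <- hclust(taxdis, "aver")
d <- treedist(dune, cl)
as.matrix(d)[1:4, 1:4]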
`treedive` <- function(comm, tree, match.force = TRUE, verbose = TRUE) { EPS <- sqrt(.Machine$double.eps) comm <- as.matrix(comm) if (!inherits(tree, c("hclust", "spantree"))) stop("'tree' must be an 'hclust' or 'spantree' result object") if (inherits(tree, "hclust") && any(diff(tree$height) < -EPS)) stop("tree with reversals cannot be handled") m <- as.matrix(cophenetic(tree)) ## Check tree/comm match by names if (match.force || ncol(comm) != ncol(m)) { if (match.force && verbose) message("forced matching of 'tree' labels and 'comm' names") else if (verbose) message("dimensions do not match between 'comm' and 'tree'") fnd <- colnames(comm) %in% tree$labels if (!all(fnd) && verbose) { warning("not all names of 'comm' found in 'tree'") comm <- comm[, fnd] } fnd <- tree$labels %in% colnames(comm) if (!all(fnd)) warning("not all names of 'tree' found in 'comm'") comm <- comm[, tree$labels[fnd]] m <- m[tree$labels[fnd], tree$labels[fnd]] if (length(unique(tree$labels)) != length(tree$labels)) stop("names not unique in 'tree': match wrong") if (length(unique(colnames(comm))) != ncol(comm)) stop("names not unique in 'comm': match wrong") } ## Repeat for sites div <- numeric(nrow(comm)) for (i in 1:nrow(comm)) { k <- comm[i,] > 0 nit <- sum(k) ## Trivial cases of zero or one species if (nit==0) div[i] <- NA else if (nit==1) div[i] <- 0 else { d <- as.dist(m[k,k]) cl <- update(tree, d = d) div[i] <- treeheight(cl) } } names(div) <- rownames(comm) div }
/scratch/gouwar.j/cran-all/cranData/vegan/R/treedive.R
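A usage sketch for the per-site tree diversity above, following the same construction with dune data: treedive() measures, for each site, the height of the taxonomy subtree spanned by the species present.

## sketch: tree-based diversity of each dune meadow site
library(vegan)
data(dune, dune.taxon)
cl <- hclust(taxa2dist(dune.taxon, varstep = TRUE), "aver")
treedive(dune, cl)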
`treeheight` <-
    function(tree)
{
    if (inherits(tree, "spantree"))
        return(sum(tree$dist))
    tree <- as.hclust(tree)
    ## nodes should start from 0 -- if there are negative heights,
    ## tree is too pathological to be measured.
    if (any(tree$height < 0))
        stop("negative heights: tree cannot be measured")
    ## can be done really fast if there are no reversals, but we need
    ## to traverse the tree with reversals
    if (is.unsorted(tree$height)) { # slow
        h <- tree$height
        m <- tree$merge
        height <- 0
        for (i in 1:nrow(m)) {
            for (j in 1:2) {
                if (m[i,j] < 0)
                    height <- height + h[i]
                else
                    height <- height + abs(h[i] - h[m[i,j]])
            }
        }
        height
    } else # fast
        sum(tree$height) + max(tree$height)
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/treeheight.R
`tsallis` <- function (x, scales = seq(0, 2, 0.2), norm=FALSE, hill=FALSE) { if (norm && hill) stop("'norm = TRUE' and 'hill = TRUE' should not be used at the same time") x <- as.matrix(x) if (!is.numeric(x)) stop("input data must be numeric") n <- nrow(x) p <- ncol(x) if (p == 1) { x <- t(x) n <- nrow(x) p <- ncol(x) } x <- decostand(x, "total", 1) m <- length(scales) result <- array(0, dim = c(n, m)) dimnames(result) <- list(sites = rownames(x), scale = scales) for (a in 1:m) { if (scales[a] != 1 && scales[a] != 0) { result[, a] <- (1-(rowSums(x^scales[a])))/(scales[a] - 1) } else { if (scales[a] == 1) result[, a] <- diversity(x, "shannon") if (scales[a] == 0) result[, a] <- rowSums(x > 0) - 1 } if (norm) { ST <- rowSums(x > 0) if (scales[a] == 1) result[, a] <- result[, a] / log(ST) else result[, a] <- result[, a] / ((ST^(1-scales[a]) - 1) / (1 - scales[a])) } if (hill) { result[, a] <- if (scales[a] == 1) { exp(result[, a]) } else { (1 - (scales[a] - 1) * result[, a])^(1/(1-scales[a])) } } } result <- as.data.frame(result) if (any(dim(result) == 1)) result <- unlist(result, use.names = TRUE) class(result) <- c("tsallis", "renyi", class(result)) result }
/scratch/gouwar.j/cran-all/cranData/vegan/R/tsallis.R
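A brief sketch of the Tsallis (HCDT) entropy profile above for a few BCI plots: scale q = 0 gives species richness minus one, q = 1 the Shannon entropy, and q = 2 relates to Simpson diversity.

## sketch: Tsallis entropy profiles for the first five BCI plots
library(vegan)
data(BCI)
prof <- tsallis(BCI[1:5, ], scales = seq(0, 2, 0.5))
prof   # one row per site, one column per scale q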
`tsallisaccum` <- function (x, scales = seq(0, 2, 0.2), permutations = 100, raw = FALSE, subset, ...) { if (!missing(subset)) x <- subset(x, subset) x <- as.matrix(x) if (!is.numeric(x)) stop("input data must be numeric") n <- nrow(x) p <- ncol(x) if (p == 1) { x <- t(x) n <- nrow(x) p <- ncol(x) } pmat <- getPermuteMatrix(permutations, n) permutations <- nrow(pmat) m <- length(scales) result <- array(dim = c(n, m, permutations)) dimnames(result) <- list(pooled.sites = c(1:n), scale = scales, permutation = c(1:permutations)) for (k in 1:permutations) { result[, , k] <- as.matrix(tsallis((apply(x[pmat[k,], ], 2, cumsum)), scales = scales, ...)) } if (raw) { if (m == 1) { result <- result[, 1, ] } } else { tmp <- array(dim = c(n, m, 6)) for (i in 1:n) { for (j in 1:m) { tmp[i, j, 1] <- mean(result[i, j, 1:permutations]) tmp[i, j, 2] <- sd(result[i, j, 1:permutations]) tmp[i, j, 3] <- min(result[i, j, 1:permutations]) tmp[i, j, 4] <- max(result[i, j, 1:permutations]) tmp[i, j, 5] <- quantile(result[i, j, 1:permutations], 0.025) tmp[i, j, 6] <- quantile(result[i, j, 1:permutations], 0.975) } } result <- tmp dimnames(result) <- list(pooled.sites = c(1:n), scale = scales, c("mean", "stdev", "min", "max", "Qnt 0.025", "Qnt 0.975")) } attr(result, "control") <- attr(pmat, "control") class(result) <- c("tsallisaccum", "renyiaccum", class(result)) result }
/scratch/gouwar.j/cran-all/cranData/vegan/R/tsallisaccum.R
update.nullmodel <- function(object, nsim=1, seed = NULL, ...) { if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (!is.null(seed)) { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } if (object$commsim$isSeq) { perm <- object$commsim$fun(x=object$state, n=1L, nr=object$nrow, nc=object$ncol, rs=object$rowSums, cs=object$colSums, rf=object$rowFreq, cf=object$colFreq, s=object$totalSum, fill=object$fill, thin=as.integer(nsim), ...) state <- perm[,,1L] storage.mode(state) <- object$commsim$mode iter <- as.integer(object$iter + nsim) # assign("state", state, envir=object) # assign("iter", iter, envir=object) # attr(state, "iter") <- iter out <- nullmodel(state, object$commsim) out$iter <- iter out$data <- object$data } else { # state <- NULL out <- object } # invisible(state) out }
/scratch/gouwar.j/cran-all/cranData/vegan/R/update.nullmodel.R
`varpart` <- function (Y, X, ..., data, chisquare = FALSE, transfo, scale = FALSE, add = FALSE, sqrt.dist = FALSE, permutations = 1000) { if (missing(data)) data <- parent.frame() X <- list(X, ...) if ((length(X) < 2 || length(X) > 4)) stop("needs two to four explanatory tables") ## see if distances were given in non-canonical form as a symmetric matrix if ((is.matrix(Y) || is.data.frame(Y)) && isSymmetric(unname(as.matrix(Y)))) Y <- as.dist(Y) ## transfo and scale can be used only with non-distance data if (inherits(Y, "dist")) { inert <- attr(Y, "method") if (is.null(inert)) inert <- "unknown user-supplied" inert <- paste(paste0(toupper(substring(inert, 1, 1)), substring(inert, 2)), "distance") ## sqrt of distances? if (sqrt.dist) Y <- sqrt(Y) else inert <- paste("squared", inert) ## additive constant to euclidify distances? if (is.logical(add) && isTRUE(add)) add <- "lingoes" if (is.character(add)) { add <- match.arg(add, c("lingoes", "cailliez")) if (add == "lingoes") { ac <- addLingoes(as.matrix(Y)) Y <- sqrt(Y^2 + 2 * ac) } else if (add == "cailliez") { ac <- addCailliez(as.matrix(Y)) Y <- Y + ac } if (ac > sqrt(.Machine$double.eps)) inert <- paste(paste0(toupper(substring(add, 1, 1)), substring(add, 2)), "adjusted", inert) } RDA <- "dbRDA" if(!missing(transfo) || !missing(scale)) message("arguments 'transfo' and 'scale' are ignored with distances") } else if (chisquare) { inert = "Chi-square" RDA = "CCA" permutations = getPermuteMatrix(permutations, nrow(Y)) } else { inert <- "variance" RDA <- "RDA" if (!missing(transfo)) { Y <- decostand(Y, transfo) transfo <- attr(Y, "decostand") } if (!missing(transfo) && (is.null(dim(Y)) || ncol(Y) == 1)) warning("transformations are probably meaningless with a single variable") if (scale && !missing(transfo)) warning("Y should not be both transformed and scaled (standardized)") Y <- scale(Y, center = TRUE, scale = scale) } Sets <- list() for (i in seq_along(X)) { if (is.data.frame(X[[i]]) || is.factor(X[[i]])) { ## factor variable or a data.frame (possibly with factors) mf <- as.data.frame(X[[i]]) mf <- model.matrix(~ ., mf) Sets[[i]] <- mf[,-1, drop = FALSE] # remove intercept } else if (inherits(X[[i]], "formula")) { ## Formula interface mf <- model.frame(X[[i]], data, na.action = na.fail, drop.unused.levels = TRUE) trms <- attr(mf, "terms") Sets[[i]] <- model.matrix(trms, mf) if (any(colnames(Sets[[i]]) == "(Intercept)")) { xint <- which(colnames(Sets[[i]]) == "(Intercept)") Sets[[i]] <- (Sets[[i]])[, -xint, drop = FALSE] } } else Sets[[i]] <- as.matrix(X[[i]]) Sets[[i]] <- scale(Sets[[i]], center = TRUE, scale = TRUE) } out <- list() out$part <- switch(length(Sets), NULL, varpart2(Y, Sets[[1]], Sets[[2]], chisquare, permutations), varpart3(Y, Sets[[1]], Sets[[2]], Sets[[3]], chisquare, permutations), varpart4(Y, Sets[[1]], Sets[[2]], Sets[[3]], Sets[[4]], chisquare, permutations)) if (inherits(Y, "dist")) { out$part$ordination <- "dbrda" } else { if (chisquare) out$part$ordination <- "cca" else { out$part$ordination <- "rda" } } if(RDA == "RDA") { out$scale <- scale if (!missing(transfo)) out$transfo <- transfo } else { if (scale || !missing(transfo)) message("arguments 'scale' and 'transfo' ignored: valid only in RDA") } out$inert <- inert out$RDA <- RDA out$call <- match.call() mx <- rep(" ", length(X)) for (i in seq_along(X)) mx[i] <- deparse(out$call[[i+2]], width.cutoff = 500) out$tables <- mx class(out) <- c("varpart", class(out)) out }
/scratch/gouwar.j/cran-all/cranData/vegan/R/varpart.R
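A hedged sketch of the main entry point above, using the formula interface and distance-based partitioning (dbRDA) on the mite data shipped with vegan.

## sketch: partition Bray-Curtis distances between two sets of predictors
library(vegan)
data(mite, mite.env, mite.pcnm)
mod <- varpart(vegdist(mite), ~ SubsDens + WatrCont, mite.pcnm[, 1:5],
               data = mite.env)
mod
plot(mod)   # Venn diagram of adjusted R-squared fractions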
`varpart2` <- function (Y, X1, X2, chisquare, permat) { collinwarn <- function(case, mm, m) warning(gettextf("collinearity detected in %s: mm = %d, m = %d", case, mm, m), call. = FALSE) if (inherits(Y, "dist")) { Y <- GowerDblcen(as.matrix(Y^2), na.rm = FALSE) Y <- -Y/2 SS.Y <- sum(diag(Y)) simpleRDA2 <- match.fun(simpleDBRDA) } else { Y <- as.matrix(Y) if (chisquare) { SS.Y <- sum(initCA(Y)^2) simpleRDA2 <- match.fun(simpleCCA) } else { Y <- scale(Y, center = TRUE, scale = FALSE) SS.Y <- sum(Y * Y) } } X1 <- as.matrix(X1) X2 <- as.matrix(X2) n <- nrow(Y) n1 <- nrow(X1) n2 <- nrow(X2) mm1 <- ncol(X1) mm2 <- ncol(X2) if (n1 != n) stop("Y and X1 do not have the same number of rows") if (n2 != n) stop("Y and X2 do not have the same number of rows") X1 <- scale(X1, center = TRUE, scale = FALSE) X2 <- scale(X2, center = TRUE, scale = FALSE) dummy <- simpleRDA2(Y, X1, SS.Y, permat) ab.ua <- dummy$Rsquare ab <- dummy$RsquareAdj m1 <- dummy$m if (m1 != mm1) collinwarn("X1", mm1, m1) dummy <- simpleRDA2(Y, X2, SS.Y, permat) bc.ua <- dummy$Rsquare bc <- dummy$RsquareAdj m2 <- dummy$m if (m2 != mm2) collinwarn("X2", mm2, m2) mm3 <- mm1 + mm2 dummy <- simpleRDA2(Y, cbind(X1, X2), SS.Y, permat) abc.ua <- dummy$Rsquare abc <- dummy$RsquareAdj m3 <- dummy$m if (m3 != mm3) collinwarn("cbind(X1,X2)", mm3, m3) if ((m1 + m2) > m3) bigwarning <- c("X1, X2") else bigwarning <- NULL Df <- c(m1, m2, m3) ## labels and variable names are inconsistent below: the var names ## are from the old model where independent fractions were [a] and ## [c] and shared fraction was [b], but this was made consistent ## with other models where independent fractions are [a], [b], ## [c], [d] and listed before shared fractions. fract <- data.frame(Df = Df, R.squared = c(ab.ua, bc.ua, abc.ua), Adj.R.squared = c(ab, bc, abc), Testable = rep(TRUE, 3) & Df) rownames(fract) <- c("[a+c] = X1", "[b+c] = X2", "[a+b+c] = X1+X2") b <- ab + bc - abc Df <- c(m3-m2, m3-m1, 0, NA) indfract <- data.frame(Df = Df, R.squared = rep(NA, 4), Adj.R.squared = c(ab - b, bc - b, b, 1 - abc), Testable = c(TRUE, TRUE, FALSE, FALSE) & Df) rownames(indfract) <- c("[a] = X1|X2", "[b] = X2|X1", "[c]", "[d] = Residuals") out <- list(SS.Y = SS.Y, fract = fract, indfract = indfract, nsets = 2, bigwarning = bigwarning, n = n1) class(out) <- "varpart234" out }
/scratch/gouwar.j/cran-all/cranData/vegan/R/varpart2.R
`varpart3` <- function (Y, X1, X2, X3, chisquare, permat) { collinwarn <- function(case, mm, m) warning(gettextf("collinearity detected in %s: mm = %d, m = %d", case, mm, m), call. = FALSE) if (inherits(Y, "dist")) { Y <- GowerDblcen(as.matrix(Y^2), na.rm = FALSE) Y <- -Y/2 SS.Y <- sum(diag(Y)) simpleRDA2 <- match.fun(simpleDBRDA) } else { Y <- as.matrix(Y) if (chisquare) { SS.Y <- sum(initCA(Y)^2) simpleRDA2 <- match.fun(simpleCCA) } else { Y <- scale(Y, center = TRUE, scale = FALSE) SS.Y <- sum(Y * Y) } } X1 <- as.matrix(X1) X2 <- as.matrix(X2) X3 <- as.matrix(X3) n <- nrow(Y) n1 <- nrow(X1) n2 <- nrow(X2) n3 <- nrow(X3) mm1 <- ncol(X1) mm2 <- ncol(X2) mm3 <- ncol(X3) if (n1 != n) stop("Y and X1 do not have the same number of rows") if (n2 != n) stop("Y and X2 do not have the same number of rows") if (n3 != n) stop("Y and X3 do not have the same number of rows") X1 <- scale(X1, center = TRUE, scale = FALSE) X2 <- scale(X2, center = TRUE, scale = FALSE) X3 <- scale(X3, center = TRUE, scale = FALSE) dummy <- simpleRDA2(Y, X1, SS.Y, permat) adfg.ua <- dummy$Rsquare adfg <- dummy$RsquareAdj m1 <- dummy$m if (m1 != mm1) collinwarn("X1", mm1, m1) dummy <- simpleRDA2(Y, X2, SS.Y, permat) bdeg.ua <- dummy$Rsquare bdeg <- dummy$RsquareAdj m2 <- dummy$m if (m2 != mm2) collinwarn("X2", mm2, m2) dummy <- simpleRDA2(Y, X3, SS.Y, permat) cefg.ua <- dummy$Rsquare cefg <- dummy$RsquareAdj m3 <- dummy$m if (m3 != mm3) collinwarn("X3", mm3, m3) mm4 = mm1 + mm2 dummy <- simpleRDA2(Y, cbind(X1, X2), SS.Y, permat) abdefg.ua <- dummy$Rsquare abdefg <- dummy$RsquareAdj m4 <- dummy$m if (m4 != mm4) collinwarn("cbind(X1,X2)", mm4, m4) mm5 = mm1 + mm3 dummy <- simpleRDA2(Y, cbind(X1, X3), SS.Y, permat) acdefg.ua <- dummy$Rsquare acdefg <- dummy$RsquareAdj m5 <- dummy$m if (m5 != mm5) collinwarn("cbind(X1,X3)", mm5, m5) mm6 = mm2 + mm3 dummy <- simpleRDA2(Y, cbind(X2, X3), SS.Y, permat) bcdefg.ua <- dummy$Rsquare bcdefg <- dummy$RsquareAdj m6 <- dummy$m if (m6 != mm6) collinwarn("cbind(X2,X3)", mm6, m6) mm7 = mm1 + mm2 + mm3 dummy <- simpleRDA2(Y, cbind(X1, X2, X3), SS.Y, permat) abcdefg.ua <- dummy$Rsquare abcdefg <- dummy$RsquareAdj m7 <- dummy$m if (m7 != mm7) collinwarn("cbind(X1,X2,X3)", mm7, m7) bigwarning <- NULL if ((m1 + m2) > m4) bigwarning <- c(bigwarning, c("X1, X2")) if ((m1 + m3) > m5) bigwarning <- c(bigwarning, c("X1, X3")) if ((m2 + m3) > m6) bigwarning <- c(bigwarning, c("X2, X3")) if ((m1 + m2 + m3) > m7) bigwarning <- c(bigwarning, c("X1, X2, X3")) Df <- c(m1, m2, m3, m4, m5, m6, m7) fract <- data.frame(Df = Df, R.square = c(adfg.ua, bdeg.ua, cefg.ua, abdefg.ua, acdefg.ua, bcdefg.ua, abcdefg.ua), Adj.R.square = c(adfg, bdeg, cefg, abdefg, acdefg, bcdefg, abcdefg), Testable = rep(TRUE, 7) & Df) rownames(fract) <- c("[a+d+f+g] = X1", "[b+d+e+g] = X2", "[c+e+f+g] = X3", "[a+b+d+e+f+g] = X1+X2", "[a+c+d+e+f+g] = X1+X3", "[b+c+d+e+f+g] = X2+X3", "[a+b+c+d+e+f+g] = All") a <- abcdefg - bcdefg b <- abcdefg - acdefg c <- abcdefg - abdefg d <- acdefg - cefg - a e <- abdefg - adfg - b f <- bcdefg - bdeg - c g <- adfg - a - d - f ma <- m7 - m6 mb <- m7 - m5 mc <- m7 - m4 mad <- m5 - m3 maf <- m4 - m2 mbd <- m6 - m3 mbe <- m4 - m1 mce <- m5 - m1 mcf <- m6 - m2 Df <- c(ma, mb, mc, rep(0, 4), NA) indfract <- data.frame(Df = Df, R.square = rep(NA, 8), Adj.R.square = c(a, b, c, d, e, f, g, 1 - abcdefg), Testable = c(rep(TRUE, 3), rep(FALSE, 5)) & Df) rownames(indfract) <- c("[a] = X1 | X2+X3", "[b] = X2 | X1+X3", "[c] = X3 | X1+X2", "[d]", "[e]", "[f]", "[g]", "[h] = Residuals") Df <- c(mad, maf, mbd, mbe, 
mce, mcf) contr1 <- data.frame(Df = Df, R.square = rep(NA, 6), Adj.R.square = c(a + d, a + f, b + d, b + e, c + e, c + f), Testable = rep(TRUE, 6) & Df) rownames(contr1) <- c("[a+d] = X1 | X3", "[a+f] = X1 | X2", "[b+d] = X2 | X3", "[b+e] = X2 | X1", "[c+e] = X3 | X1", "[c+f] = X3 | X2") out <- list(fract = fract, indfract = indfract, contr1 = contr1, SS.Y = SS.Y, nsets = 3, bigwarning = bigwarning, n = n1) class(out) <- "varpart234" out }
/scratch/gouwar.j/cran-all/cranData/vegan/R/varpart3.R
`varpart4` <- function (Y, X1, X2, X3, X4, chisquare, permat) { collinwarn <- function(case, mm, m) warning(gettextf("collinearity detected in %s: mm = %d, m = %d", case, mm, m), call. = FALSE) if (inherits(Y, "dist")) { Y <- GowerDblcen(as.matrix(Y^2), na.rm = FALSE) Y <- -Y/2 SS.Y <- sum(diag(Y)) simpleRDA2 <- match.fun(simpleDBRDA) } else { Y <- as.matrix(Y) if (chisquare) { SS.Y <- sum(initCA(Y^2)) simpleRDA2 <- match.fun(simpleCCA) } else { Y <- scale(Y, center = TRUE, scale = FALSE) SS.Y <- sum(Y * Y) } } X1 <- as.matrix(X1) X2 <- as.matrix(X2) X3 <- as.matrix(X3) X4 <- as.matrix(X4) n <- nrow(Y) n1 <- nrow(X1) n2 <- nrow(X2) n3 <- nrow(X3) n4 <- nrow(X4) mm1 <- ncol(X1) mm2 <- ncol(X2) mm3 <- ncol(X3) mm4 <- ncol(X4) if (n1 != n) stop("Y and X1 do not have the same number of rows") if (n2 != n) stop("Y and X2 do not have the same number of rows") if (n3 != n) stop("Y and X3 do not have the same number of rows") if (n4 != n) stop("Y and X4 do not have the same number of rows") X1 <- scale(X1, center = TRUE, scale = FALSE) X2 <- scale(X2, center = TRUE, scale = FALSE) X3 <- scale(X3, center = TRUE, scale = FALSE) X4 <- scale(X4, center = TRUE, scale = FALSE) dummy <- simpleRDA2(Y, X1, SS.Y, permat) aeghklno.ua <- dummy$Rsquare aeghklno <- dummy$RsquareAdj m1 <- dummy$m if (m1 != mm1) collinwarn("X1", mm1, m1) dummy <- simpleRDA2(Y, X2, SS.Y, permat) befiklmo.ua <- dummy$Rsquare befiklmo <- dummy$RsquareAdj m2 <- dummy$m if (m2 != mm2) collinwarn("X2", mm2, m2) dummy <- simpleRDA2(Y, X3, SS.Y, permat) cfgjlmno.ua <- dummy$Rsquare cfgjlmno <- dummy$RsquareAdj m3 <- dummy$m if (m3 != mm3) collinwarn("X3", mm3, m3) dummy <- simpleRDA2(Y, X4, SS.Y, permat) dhijkmno.ua <- dummy$Rsquare dhijkmno <- dummy$RsquareAdj m4 <- dummy$m if (m4 != mm4) collinwarn("X4", mm4, m4) mm5 = mm1 + mm2 dummy <- simpleRDA2(Y, cbind(X1, X2), SS.Y, permat) abefghiklmno.ua <- dummy$Rsquare abefghiklmno <- dummy$RsquareAdj m5 <- dummy$m if (m5 != mm5) collinwarn("cbind(X1,X2)", mm5, m5) mm6 = mm1 + mm3 dummy <- simpleRDA2(Y, cbind(X1, X3), SS.Y, permat) acefghjklmno.ua <- dummy$Rsquare acefghjklmno <- dummy$RsquareAdj m6 <- dummy$m if (m6 != mm6) collinwarn("cbind(X1,X3", mm6, m6) mm7 = mm1 + mm4 dummy <- simpleRDA2(Y, cbind(X1, X4), SS.Y, permat) adeghijklmno.ua <- dummy$Rsquare adeghijklmno <- dummy$RsquareAdj m7 <- dummy$m if (m7 != mm7) collinwarn("cbind(X1,X4)", mm7, m7) mm8 = mm2 + mm3 dummy <- simpleRDA2(Y, cbind(X2, X3), SS.Y, permat) bcefgijklmno.ua <- dummy$Rsquare bcefgijklmno <- dummy$RsquareAdj m8 <- dummy$m if (m8 != mm8) collinwarn("cbind(X2,X3)", mm8, m8) mm9 = mm2 + mm4 dummy <- simpleRDA2(Y, cbind(X2, X4), SS.Y, permat) bdefhijklmno.ua <- dummy$Rsquare bdefhijklmno <- dummy$RsquareAdj m9 <- dummy$m if (m9 != mm9) collinwarn("cbind(X2,X4)", mm9, m9) mm10 = mm3 + mm4 dummy <- simpleRDA2(Y, cbind(X3, X4), SS.Y, permat) cdfghijklmno.ua <- dummy$Rsquare cdfghijklmno <- dummy$RsquareAdj m10 <- dummy$m if (m10 != mm10) collinwarn("cbind(X3,X4)", mm10, m10) mm11 = mm1 + mm2 + mm3 dummy <- simpleRDA2(Y, cbind(X1, X2, X3), SS.Y, permat) abcefghijklmno.ua <- dummy$Rsquare abcefghijklmno <- dummy$RsquareAdj m11 <- dummy$m if (m11 != mm11) collinwarn("cbind(X1,X2,X3)", mm11, m11) mm12 = mm1 + mm2 + mm4 dummy <- simpleRDA2(Y, cbind(X1, X2, X4), SS.Y, permat) abdefghijklmno.ua <- dummy$Rsquare abdefghijklmno <- dummy$RsquareAdj m12 <- dummy$m if (m12 != mm12) collinwarn("c(X1,X2,X4)", mm12, m12) mm13 = mm1 + mm3 + mm4 dummy <- simpleRDA2(Y, cbind(X1, X3, X4), SS.Y, permat) acdefghijklmno.ua <- dummy$Rsquare 
acdefghijklmno <- dummy$RsquareAdj m13 <- dummy$m if (m13 != mm13) collinwarn("cbind(X1,X3,X4)", mm13, m13) mm14 = mm2 + mm3 + mm4 dummy <- simpleRDA2(Y, cbind(X2, X3, X4), SS.Y, permat) bcdefghijklmno.ua <- dummy$Rsquare bcdefghijklmno <- dummy$RsquareAdj m14 <- dummy$m if (m14 != mm14) collinwarn("cbind(X2,X3,X4)", mm14, m14) mm15 = mm1 + mm2 + mm3 + mm4 dummy <- simpleRDA2(Y, cbind(X1, X2, X3, X4), SS.Y, permat) abcdefghijklmno.ua <- dummy$Rsquare abcdefghijklmno <- dummy$RsquareAdj m15 <- dummy$m if (m15 != mm15) collinwarn("cbind(X1,X2,X3,X4)", mm15, m15) bigwarning <- NULL if ((m1 + m2) > m5) bigwarning <- c(bigwarning, c("X1, X2")) if ((m1 + m3) > m6) bigwarning <- c(bigwarning, c("X1, X3")) if ((m1 + m4) > m7) bigwarning <- c(bigwarning, c("X1, X4")) if ((m2 + m3) > m8) bigwarning <- c(bigwarning, c("X2, X3")) if ((m2 + m4) > m9) bigwarning <- c(bigwarning, c("X2, X4")) if ((m3 + m4) > m10) bigwarning <- c(bigwarning, c("X3, X4")) if ((m1 + m2 + m3) > m11) bigwarning <- c(bigwarning, c("X1, X2, X3")) if ((m1 + m2 + m4) > m12) bigwarning <- c(bigwarning, c("X1, X2, X4")) if ((m1 + m3 + m4) > m13) bigwarning <- c(bigwarning, c("X1, X3, X4")) if ((m2 + m3 + m4) > m14) bigwarning <- c(bigwarning, c("X2, X3, X4")) if ((m1 + m2 + m3 + m4) > m15) bigwarning <- c(bigwarning, c("X1, X2, X3, X4")) Df <- c(m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15) fract <- data.frame(Df = Df, R.square = c(aeghklno.ua, befiklmo.ua, cfgjlmno.ua, dhijkmno.ua, abefghiklmno.ua, acefghjklmno.ua, adeghijklmno.ua, bcefgijklmno.ua, bdefhijklmno.ua, cdfghijklmno.ua, abcefghijklmno.ua, abdefghijklmno.ua, acdefghijklmno.ua, bcdefghijklmno.ua, abcdefghijklmno.ua), Adj.R.square = c(aeghklno, befiklmo, cfgjlmno, dhijkmno, abefghiklmno, acefghjklmno, adeghijklmno, bcefgijklmno, bdefhijklmno, cdfghijklmno, abcefghijklmno, abdefghijklmno, acdefghijklmno, bcdefghijklmno, abcdefghijklmno), Testable = rep(TRUE, 15) & Df) rownames(fract) <- c("[aeghklno] = X1", "[befiklmo] = X2", "[cfgjlmno] = X3", "[dhijkmno] = X4", "[abefghiklmno] = X1+X2", "[acefghjklmno] = X1+X3", "[adeghijklmno] = X1+X4", "[bcefgijklmno] = X2+X3", "[bdefhijklmno] = X2+X4", "[cdfghijklmno] = X3+X4", "[abcefghijklmno] = X1+X2+X3", "[abdefghijklmno] = X1+X2+X4", "[acdefghijklmno] = X1+X3+X4", "[bcdefghijklmno] = X2+X3+X4", "[abcdefghijklmno] = All") ae = acdefghijklmno - cdfghijklmno ag = abdefghijklmno - bdefhijklmno ah = abcefghijklmno - bcefgijklmno be = bcdefghijklmno - cdfghijklmno bf = abdefghijklmno - adeghijklmno bi = abcefghijklmno - acefghjklmno cf = acdefghijklmno - adeghijklmno cg = bcdefghijklmno - bdefhijklmno cj = abcefghijklmno - abefghiklmno dh = bcdefghijklmno - bcefgijklmno di = acdefghijklmno - acefghjklmno dj = abdefghijklmno - abefghiklmno Df <- c(m13-m10, m12-m9, m11-m8, m14-m10, m12-m7, m11-m6, m13-m7, m14-m9, m11-m5, m14-m8, m13-m6, m12-m5) contr2 <- data.frame(Df = Df, R.square = rep(NA, 12), Adj.R.square = c(ae, ag, ah, be, bf, bi, cf, cg, cj, dh, di, dj), Testable = rep(TRUE, 12) & Df) rownames(contr2) <- c("[ae] = X1 | X3+X4", "[ag] = X1 | X2+X4", "[ah] = X1 | X2+X3", "[be] = X2 | X3+X4", "[bf] = X2 | X1+X4", "[bi] = X2 | X1+X3", "[cf] = X3 | X1+X4", "[cg] = X3 | X2+X4", "[cj] = X3 | X1+X2", "[dh] = X4 | X2+X3", "[di] = X4 | X1+X3", "[dj] = X4 | X1+X2") aghn = abefghiklmno - befiklmo aehk = acefghjklmno - cfgjlmno aegl = adeghijklmno - dhijkmno bfim = abefghiklmno - aeghklno beik = bcefgijklmno - cfgjlmno befl = bdefhijklmno - dhijkmno cfjm = acefghjklmno - aeghklno cgjn = bcefgijklmno - befiklmo cfgl = 
cdfghijklmno - dhijkmno dijm = adeghijklmno - aeghklno dhjn = bdefhijklmno - befiklmo dhik = cdfghijklmno - cfgjlmno Df <- c(m5-m2, m6-m3, m7-m4, m5-m1, m8-m3, m9-m4, m6-m1, m8-m2, m10-m4, m7-m1, m9-m2, m10-m3) contr1 <- data.frame(Df = Df, R.square = rep(NA, 12), Adj.R.square = c(aghn, aehk, aegl, bfim, beik, befl, cfjm, cgjn, cfgl, dijm, dhjn, dhik), Testable = rep(TRUE, 12) & Df) rownames(contr1) <- c("[aghn] = X1 | X2", "[aehk] = X1 | X3", "[aegl] = X1 | X4", "[bfim] = X2 | X1", "[beik] = X2 | X3", "[befl] = X2 | X4", "[cfjm] = X3 | X1", "[cgjn] = X3 | X2", "[cfgl] = X3 | X4", "[dijm] = X4 | X1 ", "[dhjn] = X4 | X2", "[dhik] = X4 | X3") a <- abcdefghijklmno - bcdefghijklmno b <- abcdefghijklmno - acdefghijklmno c <- abcdefghijklmno - abdefghijklmno d <- abcdefghijklmno - abcefghijklmno e <- ae - a f <- bf - b g <- ag - a h <- ah - a i <- bi - b j <- cj - c k <- aehk - ae - h l <- aegl - ae - g m <- bfim - bf - i n <- aghn - ag - h o <- aeghklno - aehk - g - l - n indfract <- data.frame(Df = c(m15-m14, m15-m13, m15-m12, m15-m11, rep(0, 12)), R.square = rep(NA, 16), Adj.R.square = c(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, 1 - abcdefghijklmno), Testable = c(rep(TRUE, 4), rep(FALSE, 12))) rownames(indfract) <- c("[a] = X1 | X2+X3+X4", "[b] = X2 | X1+X3+X4", "[c] = X3 | X1+X2+X4", "[d] = X4 | X1+X2+X3", "[e]", "[f]", "[g]", "[h]", "[i]", "[j]", "[k]", "[l]", "[m]", "[n]", "[o]", "[p] = Residuals") out <- list(fract = fract, indfract = indfract, contr1 = contr1, contr2 = contr2, SS.Y = SS.Y, nsets = 4, bigwarning = bigwarning, n = n1) class(out) <- "varpart234" out }
/scratch/gouwar.j/cran-all/cranData/vegan/R/varpart4.R
`vectorfit` <- function (X, P, permutations = 0, strata = NULL, w, ...) { EPS <- sqrt(.Machine$double.eps) if (missing(w) || is.null(w)) w <- 1 if (length(w) == 1) w <- rep(w, nrow(X)) P <- as.matrix(P) if (nrow(P) != nrow(X)) stop("input data have non-matching numbers of observations") Xw <- .Call(do_wcentre, X, w) Pw <- .Call(do_wcentre, P, w) colnames(Pw) <- colnames(P) nc <- ncol(X) Q <- qr(Xw) H <- qr.fitted(Q, Pw) heads <- qr.coef(Q, Pw) r <- diag(cor(H, Pw)^2) r[is.na(r)] <- 0 heads <- decostand(heads, "norm", 2) heads <- t(heads) if (is.null(colnames(X))) colnames(heads) <- paste("Dim", 1:nc, sep = "") else colnames(heads) <- colnames(X) ## make permutation matrix for all variables handled in the next loop nr <- nrow(X) permat <- getPermuteMatrix(permutations, nr, strata = strata) if (ncol(permat) != nr) stop(gettextf("'permutations' have %d columns, but data have %d rows", ncol(permat), nr)) permutations <- nrow(permat) if (permutations) { ptest <- function(indx, ...) { take <- P[indx, , drop = FALSE] take <- .Call(do_wcentre, take, w) Hperm <- qr.fitted(Q, take) diag(cor(Hperm, take))^2 } permstore <- sapply(1:permutations, function(indx, ...) ptest(permat[indx,], ...)) ## Single variable is dropped to a vector, and otherwise ## permutations are the matrix columns and variables are rows if (!is.matrix(permstore)) permstore <- matrix(permstore, ncol=permutations) permstore <- sweep(permstore, 1, r - EPS, ">=") validn <- rowSums(is.finite(permstore)) pvals <- (rowSums(permstore, na.rm = TRUE) + 1)/(validn + 1) } else pvals <- NULL sol <- list(arrows = heads, r = r, permutations = permutations, pvals = pvals) sol$control <- attr(permat, "control") class(sol) <- "vectorfit" sol }
/scratch/gouwar.j/cran-all/cranData/vegan/R/vectorfit.R
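A short sketch of fitting environmental vectors onto ordination scores, assuming the varespec and varechem data in vegan; in practice envfit() wraps this, but vectorfit() can also be called directly with a matrix of site scores.

## sketch: fit environmental arrows to the first two CA axes
library(vegan)
data(varespec, varechem)
ord <- cca(varespec)
sc <- scores(ord, display = "sites", choices = 1:2)
fit <- vectorfit(sc, varechem[, c("N", "P", "K")], permutations = 199)
fit   # arrow directions, squared correlations and permutation p-values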
## as.mlm: deprecated in 2.5-0, defunct in 2.6-0 `as.mlm` <- function(x) { .Defunct("see ?hatvalues.cca for new alternatives") if (is.null(x$CCA)) stop("'as.mlm' can be used only for constrained ordination") UseMethod("as.mlm") } `as.mlm.cca` <- function (x) { w <- x$rowsum WA <- x$CCA$wa X <- qr.X(x$CCA$QR) ## shall use weighted regression: deweight X X <- (1/sqrt(w)) * X X <- as.data.frame(X) lm(WA ~ ., data = X, weights = w) } `as.mlm.rda` <- function (x) { X <- as.data.frame(qr.X(x$CCA$QR)) WA <- x$CCA$wa lm(WA ~ . , data = X) } ### commsimulator was deprecated in 2.4-0, defunct in 2.6-0 "commsimulator" <- function (x, method, thin = 1) { .Defunct("simulate(nullmodel(x, method))", package="vegan") method <- match.arg(method, c("r0","r1","r2","r00","c0","swap", "tswap", "backtrack", "quasiswap")) ## r0_old is also removed from vegan 2.6-0, but needed for <2.2-0 ## compatibility ##if (method == "r0") ## method <- "r0_old" x <- as.matrix(x) out <- simulate(nullmodel(x, method), nsim = 1, thin = thin) out <- out[,,1] attributes(out) <- attributes(x) out } ### deprecated in 2.2-0, but forgotten and never exported from the NAMESPACE. Make finally defunct for 2.6-0. "permuted.index" <- function (n, strata) { .Defunct("permute package (shuffle or shuffleSet)") if (missing(strata) || is.null(strata)) out <- sample.int(n, n) else { out <- 1:n inds <- names(table(strata)) for (is in inds) { gr <- out[strata == is] if (length(gr) > 1) out[gr] <- sample(gr, length(gr)) } } out }
/scratch/gouwar.j/cran-all/cranData/vegan/R/vegan-defunct.R
## deprecated functions
/scratch/gouwar.j/cran-all/cranData/vegan/R/vegan-deprecated.R
`veganCovEllipse` <-
    function(cov, center = c(0,0), scale = 1, npoints = 100)
{
    ## Basically taken from the 'car' package: The Circle
    theta <- (0:npoints) * 2 * pi/npoints
    Circle <- cbind(cos(theta), sin(theta))
    ## scale, center and cov must be calculated separately
    Q <- chol(cov, pivot = TRUE)
    ## pivot takes care of cases when points are on a line
    o <- attr(Q, "pivot")
    t(center + scale * t(Circle %*% Q[,o]))
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/veganCovEllipse.R
### Internal function for Mahalanobis transformation of the matrix.
### Mahalanobis transformation of matrix X is M = X S^(-1/2) where S
### is the covariance matrix. The inverse square root of S is found
### via eigen decomposition S = G L G^T, where G is the matrix of
### eigenvectors, and L is the diagonal matrix of eigenvalues. Thus
### S^(-1/2) = G L^(-1/2) G^T. This is an internal function so that
### input must be correct: 'x' must be a centred matrix (not a
### data.frame, not raw data).
`veganMahatrans` <-
    function (x, s2, tol = sqrt(.Machine$double.eps))
{
    if (missing(s2))
        s2 <- cov(x)
    e <- eigen(s2, symmetric = TRUE)
    k <- e$values > max(tol, tol * e$values[1L])
    sisqr <- e$vectors[,k, drop=FALSE] %*%
        (sqrt(1/e$values[k]) * t(e$vectors[,k, drop = FALSE]))
    x %*% sisqr
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/veganMahatrans.R
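A tiny check of the transformation described in the comments above. veganMahatrans() is internal (not exported), so it is reached with ':::' here; after transforming a centred matrix its covariance should be (close to) the identity.

## sketch: verify that the Mahalanobis-transformed matrix has unit covariance
library(vegan)
data(varechem)
X <- scale(as.matrix(varechem[, 1:5]), center = TRUE, scale = FALSE)
M <- vegan:::veganMahatrans(X)
zapsmall(cov(M))   # approximately the 5 x 5 identity matrix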
`vegandocs` <- function (doc = c("NEWS", "ONEWS", "FAQ-vegan", "intro-vegan", "diversity-vegan", "decision-vegan", "partitioning", "permutations")) { doc <- match.arg(doc) if (doc == "NEWS") { .Defunct('news(package="vegan")') news(package = "vegan") } else if (doc %in% vignette(package="vegan")$results[, "Item"]) { .Defunct('browseVignettes("vegan")') vignette(doc, package = "vegan") } else if (doc == "permutations") { .Defunct('browseVignettes("permute")') vignette(doc, package = "permute") } else # last resort file.show(system.file(package="vegan", doc)) }
/scratch/gouwar.j/cran-all/cranData/vegan/R/vegandocs.R
`vegdist` <-
    function (x, method = "bray", binary = FALSE, diag = FALSE,
              upper = FALSE, na.rm = FALSE, ...)
{
    ZAP <- 1e-15
    if (!is.na(pmatch(method, "euclidian")))
        method <- "euclidean"
    ## the order of METHODS below *MUST* match the #define'd numbers
    ## in vegdist.c
    METHODS <- c("manhattan", "euclidean", "canberra", "bray", # 4
                 "kulczynski", "gower", "morisita", "horn", # 8
                 "mountford", "jaccard", "raup", "binomial", "chao", # 13
                 "altGower", "cao", "mahalanobis", "clark", "chisq", "chord", # 19
                 "hellinger", "aitchison", "robust.aitchison") # 22
    method <- pmatch(method, METHODS)
    inm <- METHODS[method]
    if (is.na(method))
        stop("invalid distance method")
    if (method == -1)
        stop("ambiguous distance method")
    ## most tests are faster for matrix than for data frame, and we
    ## need matrix in .Call() anyway
    x <- as.matrix(x)
    if (!na.rm && anyNA(x))
        stop("missing values are not allowed with argument 'na.rm = FALSE'")
    ## all vegdist indices need numeric data (Gower included).
    if (!(is.numeric(x) || is.logical(x)))
        stop("input data must be numeric")
    if (!method %in% c(1, 2, 6, 16, 18) &&
        any(rowSums(x, na.rm = TRUE) == 0))
        warning("you have empty rows: their dissimilarities may be meaningless in method ",
                dQuote(inm))
    ## 1 manhattan, 2 euclidean, 3 canberra, 6 gower, 16 mahalanobis, 19 chord
    if (!method %in% c(1, 2, 3, 6, 16, 19, 20) && any(x < 0, na.rm = TRUE))
        warning("results may be meaningless because data have negative entries in method ",
                dQuote(inm))
    if (method %in% c(11, 18) && any(colSums(x) == 0))
        warning("data have empty species which influence the results in method ",
                dQuote(inm))
    if (method == 6) # gower, but no altGower
        x <- decostand(x, "range", 2, na.rm = TRUE, ...)
    if (method == 16) # mahalanobis
        x <- veganMahatrans(scale(x, scale = FALSE))
    if (method == 18) # chisq
        x <- decostand(x, "chi.square")
    if (method == 21) # aitchison
        x <- decostand(x, "clr", ...) # dots to pass possible pseudocount
    if (method == 22) # robust.aitchison
        x <- decostand(x, "rclr") # No pseudocount for rclr
    if (binary)
        x <- decostand(x, "pa")
    N <- nrow(x)
    if (method %in% c(7, 13, 15) &&
        !identical(all.equal(x, round(x)), TRUE))
        warning("results may be meaningless with non-integer data in method ",
                dQuote(inm))
    d <- .Call(do_vegdist, x, as.integer(method))
    if (method == 10)
        d <- 2 * d/(1 + d)
    d[d < ZAP] <- 0
    if (any(is.na(d)))
        warning("missing values in results")
    ## add attribute maxdist: the maximum value of the distance function
    attr(d, "maxdist") <-
        if (method %in% c(3, 4, 5, 7, 8, 10, 11, 13, 17)) # index in 0..1
            1
        else if (method %in% c(19, 20)) # chord, hellinger
            sqrt(2)
        else if (method == 9) # Mountford
            log(2)
        else # no fixed upper limit
            NA
    attr(d, "Size") <- N
    attr(d, "Labels") <- dimnames(x)[[1]]
    attr(d, "Diag") <- diag
    attr(d, "Upper") <- upper
    attr(d, "method") <- paste(if (binary) "binary ", METHODS[method], sep = "")
    attr(d, "call") <- match.call()
    class(d) <- "dist"
    d
}
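## A minimal usage sketch for vegdist() (assumes vegan and its 'dune' data):
data(dune)
d.bray <- vegdist(dune)                                     # quantitative Bray-Curtis (default)
d.jacc <- vegdist(dune, method = "jaccard", binary = TRUE)  # presence/absence Jaccard
attr(d.bray, "maxdist")                                     # both indices are bounded at 1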
/scratch/gouwar.j/cran-all/cranData/vegan/R/vegdist.R
`vegemite` <- function (x, use, scale, sp.ind = NULL, site.ind = NULL, zero = ".", select, ...) { if (!missing(use)) { if (!is.list(use) && is.vector(use)) { if (is.null(site.ind)) site.ind <- order(use) if (is.null(sp.ind)) sp.ind <- order(wascores(use, x)) } else if (inherits(use, c("hclust", "twins"))) { if (inherits(use, "twins")) { use <- as.hclust(use) } if (is.null(site.ind)) site.ind <- use$order if (is.null(sp.ind)) sp.ind <- order(wascores(order(site.ind), x)) } else if (inherits(use, "dendrogram")) { if (is.null(site.ind)) { site.ind <- 1:nrow(x) names(site.ind) <- rownames(x) site.ind <- site.ind[labels(use)] } if (is.null(sp.ind)) sp.ind <- order(wascores(order(site.ind), x)) } else if (is.list(use)) { tmp <- scores(use, choices = 1, display = "sites") if (is.null(site.ind)) site.ind <- order(tmp) if (is.null(sp.ind)) sp.ind <- try(order(scores(use, choices = 1, display = "species"))) if (inherits(sp.ind, "try-error")) sp.ind <- order(wascores(tmp, x)) } else if (is.matrix(use)) { tmp <- scores(use, choices = 1, display = "sites") if (is.null(site.ind)) site.ind <- order(tmp) if (is.null(sp.ind)) sp.ind <- order(wascores(tmp, x)) } } if (!is.null(sp.ind) && is.logical(sp.ind)) sp.ind <- seq_len(ncol(x))[sp.ind] if (!is.null(site.ind) && is.logical(site.ind)) site.ind <- seq_len(nrow(x))[site.ind] if (is.null(sp.ind)) sp.ind <- seq_len(ncol(x)) if (is.null(site.ind)) site.ind <- seq_len(nrow(x)) if (!missing(select)) { if (!is.logical(select)) select <- sort(site.ind) %in% select stake <- colSums(x[select, , drop = FALSE]) > 0 site.ind <- site.ind[select[site.ind]] site.ind <- site.ind[!is.na(site.ind)] } else { stake <- colSums(x[site.ind, ]) > 0 } sp.ind <- sp.ind[stake[sp.ind]] x <- x[site.ind, sp.ind] if (!missing(scale)) x <- coverscale(x, scale, ...) usedscale <- attr(x, "scale") if (any(apply(x, 1, nchar) > 1)) stop("cowardly refusing to use longer than one-character symbols:\nUse scale") x <- as.matrix(x) x <- t(x) sp.nam <- rownames(x) sp.len <- max(nchar(sp.nam)) nst <- ncol(x) page.width <- getOption("width") per.page <- page.width - sp.len - 3 istart <- seq(1, nst, by = per.page) iend <- pmin(istart + per.page - 1, nst) for (st in seq_along(istart)) { tbl <- apply(x[, istart[st]:iend[st], drop = FALSE], 1, paste, sep = "", collapse = "") names(tbl) <- NULL tbl <- gsub("0", zero, tbl) tbl <- cbind(sp.nam, tbl) st.nam <- colnames(x)[istart[st]:iend[st]] nlen <- max(nchar(st.nam)) mathead <- matrix(" ", nrow = length(st.nam), ncol = nlen) for (i in seq_along(st.nam)) { tmp <- unlist(strsplit(st.nam[i], NULL)) start <- nlen - length(tmp) + 1 mathead[i, start:nlen] <- tmp } head <- cbind(apply(mathead, 2, paste, sep = "", collapse = "")) tbl <- rbind(cbind(matrix(" ", nrow = nrow(head), 1), head), tbl) d <- list() l <- 0 for (i in dim(tbl)) { d[[l <- l + 1]] <- rep("", i) } dimnames(tbl) <- d print(noquote(tbl)) } out <- list(sites = site.ind, species = sp.ind, table = tbl) cat(length(out$sites), "sites,", length(out$species), "species\n") if (!is.null(usedscale)) cat("scale: ", usedscale, "\n") invisible(out) }
/scratch/gouwar.j/cran-all/cranData/vegan/R/vegemite.R
"veiledspec" <- function(x, ...) { if (!inherits(x, "prestonfit")) x <- prestonfit(x) S.obs <- sum(x$freq) p <- x$coefficients S.tot <- p["S0"]*p["width"]*sqrt(2*pi) out <- c(S.tot, S.obs, S.tot - S.obs) names(out) <- c("Extrapolated","Observed","Veiled") out }
/scratch/gouwar.j/cran-all/cranData/vegan/R/veiledspec.R
`vif.cca` <-
    function(object)
{
    if (is.null(object$CCA))
        stop("can be used only with constrained ordination")
    Q <- object$CCA$QR
    out <- rep(NA, NCOL(Q$qr))
    names(out)[Q$pivot] <- colnames(Q$qr)
    rank <- Q$rank
    V <- chol2inv(Q$qr, size = rank)
    X <- qr.X(Q)[, Q$pivot[1:rank], drop = FALSE]
    Vi <- crossprod(X)
    v1 <- diag(V)
    v2 <- diag(Vi)
    out[Q$pivot[1:rank]] <- v1 * v2
    out
}
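## Usage sketch for vif.cca(): variance inflation factors of the constraints
## (assumes vegan's 'varespec' and 'varechem' data).
data(varespec)
data(varechem)
vif.cca(cca(varespec ~ Al + P + K, varechem))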
/scratch/gouwar.j/cran-all/cranData/vegan/R/vif.cca.R
`wascores` <-
    function (x, w, expand = FALSE)
{
    if (any(w < 0) || sum(w) == 0)
        stop("weights must be non-negative and not all zero")
    x <- as.matrix(x)
    w <- as.matrix(w)
    nc <- ncol(x)
    nr <- ncol(w)
    wa <- matrix(NA, nrow = nr, ncol = nc)
    colnames(wa) <- colnames(x)
    rownames(wa) <- colnames(w)
    for (i in 1:nr) {
        wa[i, ] <- apply(x, 2, weighted.mean, w = w[, i])
    }
    if (expand) {
        i <- complete.cases(wa)
        x.w <- rowSums(w)
        ewa.w <- colSums(w[, i, drop = FALSE])
        ewa <- wa[i, , drop = FALSE]
        x.cov <- cov.wt(x, x.w, method = "ML")
        wa.cov <- cov.wt(ewa, ewa.w, method = "ML")
        mul <- sqrt(diag(x.cov$cov)/diag(wa.cov$cov))
        ewa <- sweep(ewa, 2, wa.cov$center, "-")
        ewa <- sweep(ewa, 2, mul, "*")
        ewa <- sweep(ewa, 2, wa.cov$center, "+")
        wa[i, ] <- ewa
        attr(wa, "shrinkage") <- 1/mul^2
        attr(wa, "centre") <- wa.cov$center
    }
    wa
}
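## Usage sketch for wascores(): weighted-averages species scores along a
## measured gradient, with expansion to undo the shrinkage (assumes vegan's
## 'dune' and 'dune.env' data).
data(dune)
data(dune.env)
wascores(dune.env$A1, dune, expand = TRUE)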
/scratch/gouwar.j/cran-all/cranData/vegan/R/wascores.R
`wcmdscale` <-
    function(d, k, eig = FALSE, add = FALSE, x.ret = FALSE, w)
{
    ## Force eig=TRUE if add, x.ret or !missing(w)
    if (x.ret)
        eig <- TRUE
    ZERO <- sqrt(.Machine$double.eps)
    if (!inherits(d, "dist")) {
        op <- options(warn = 2)
        on.exit(options(op))
        d <- as.dist(d)
        options(op)
    }
    ## handle add constant to make d Euclidean
    if (is.logical(add) && isTRUE(add))
        add <- "lingoes"
    if (is.character(add)) {
        add <- match.arg(add, c("lingoes", "cailliez"))
        if (add == "lingoes") {
            ac <- addLingoes(as.matrix(d))
            d <- sqrt(d^2 + 2 * ac)
        } else if (add == "cailliez") {
            ac <- addCailliez(as.matrix(d))
            d <- d + ac
        }
    } else {
        ac <- NA
    }
    ## Gower centring
    m <- as.matrix(d^2)
    n <- nrow(m)
    if (missing(w))
        w <- rep(1, n)
    m <- .Call(do_wcentre, m, w)
    m <- t(.Call(do_wcentre, t(m), w))
    e <- eigen(-m/2, symmetric = TRUE)
    ## Remove zero eigenvalues, keep negative
    keep <- abs(e$values) > max(ZERO, ZERO * e$values[1L])
    e$values <- e$values[keep]
    e$vectors <- e$vectors[, keep, drop = FALSE]
    ## Deweight and scale axes -- also negative
    points <- sweep(e$vectors, 1, sqrt(w), "/")
    points <- sweep(points, 2, sqrt(abs(e$values)), "*")
    rownames(points) <- rownames(m)
    ## If 'k' not given, find it as the number of positive
    ## eigenvalues, and also save negative eigenvalues
    negaxes <- NULL
    if (missing(k) || k > sum(e$value > ZERO)) {
        k <- sum(e$values > ZERO)
        if (any(e$values < 0))
            negaxes <- points[, e$values < 0, drop = FALSE]
    }
    if (k) # there may be no positive eigenvalues
        points <- points[, 1:k, drop = FALSE]
    points[!is.finite(points)] <- NA
    ## Goodness of fit
    ev <- e$values[1:k]
    ev <- ev[ev > 0]
    ## GOF for real and all axes
    GOF <- c(sum(ev)/sum(abs(e$values)),
             sum(ev)/sum(e$values[e$values > 0]))
    if (eig || x.ret) {
        if (NCOL(points) > 0)
            colnames(points) <- paste("Dim", seq_len(NCOL(points)), sep = "")
        out <- list(points = points, eig = if (eig) e$values,
                    x = if (x.ret) m, ac = ac, add = add, GOF = GOF,
                    weights = w, negaxes = negaxes, call = match.call())
        class(out) <- "wcmdscale"
    } else out <- points
    out
}
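## Usage sketch for wcmdscale(): metric scaling of a Bray-Curtis matrix,
## keeping eigenvalues and goodness-of-fit (assumes vegan's 'dune' data).
data(dune)
ord <- wcmdscale(vegdist(dune), eig = TRUE)
ord$GOF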
/scratch/gouwar.j/cran-all/cranData/vegan/R/wcmdscale.R
"weights.cca" <- function (object, display = "sites", ...) { display <- match.arg(display, c("sites", "species", "lc", "wa")) if (display %in% c("sites", "lc", "wa")) { if (!is.null(object$na.action) && inherits(object$na.action, "exclude")) { object$rowsum <- napredict(object$na.action, object$rowsum) object$rowsum[object$na.action] <- object$rowsum.excluded } object$rowsum } else object$colsum }
/scratch/gouwar.j/cran-all/cranData/vegan/R/weights.cca.R
"weights.decorana" <- function(object, display="sites", ...) { display <- match.arg(display, c("sites","species")) if (display == "sites") object$aidot else object$adotj }
/scratch/gouwar.j/cran-all/cranData/vegan/R/weights.decorana.R
`weights.rda` <-
    function (object, display = "sites", ...)
{
    display <- match.arg(display, c("sites", "species", "lc", "wa"))
    if (display %in% c("sites", "lc", "wa")) {
        n <- nobs(object)
        if (!is.null(object$na.action) &&
            inherits(object$na.action, "exclude"))
            n <- n + length(object$na.action)
    }
    else n <- length(object$colsum)
    rep(1, n)
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/weights.rda.R
`wisconsin` <-
    function(x)
{
    x <- decostand(x, "max", 2)
    mx <- attr(x, "parameters")$max
    x <- decostand(x, "total", 1)
    attr(x, "parameters")$max <- mx
    attr(x, "decostand") <- "wisconsin"
    x
}
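## Usage sketch for wisconsin(): double standardization before computing
## dissimilarities (assumes vegan's 'varespec' data).
data(varespec)
d <- vegdist(wisconsin(varespec))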
/scratch/gouwar.j/cran-all/cranData/vegan/R/wisconsin.R
.onAttach <- function(lib, pkg)
{
    packageStartupMessage("This is vegan ",
                          utils::packageDescription("vegan", fields = "Version"),
                          appendLF = TRUE)
}
/scratch/gouwar.j/cran-all/cranData/vegan/R/zzz.R
## ----eval=FALSE---------------------------------------------------------
# sol <- cca(varespec)
# ef <- envfit(sol ~ ., varechem)
# plot(sol)
# ordiArrowMul(scores(ef, display="vectors"))
/scratch/gouwar.j/cran-all/cranData/vegan/inst/doc/FAQ-vegan.R
<!-- %\VignetteIndexEntry{vegan FAQ} %\VignetteEngine{knitr::knitr} %\VignetteEncoding{UTF-8} --> **vegan** FAQ ============= This document contains answers to some of the most frequently asked questions about R package **vegan**. > This work is licensed under the Creative Commons Attribution 3.0 > License. To view a copy of this license, visit > <https://creativecommons.org/licenses/by/3.0/> or send a letter to > Creative Commons, 543 Howard Street, 5th Floor, San Francisco, > California, 94105, USA. > > Copyright © 2008-2016 vegan development team ------------------------------------------------------------------------ Introduction ------------ ------------------------------------------------------------------------ ### What is **vegan**? **Vegan** is an R package for community ecologists. It contains the most popular methods of multivariate analysis needed in analysing ecological communities, and tools for diversity analysis, and other potentially useful functions. **Vegan** is not self-contained but it must be run under R statistical environment, and it also depends on many other R packages. **Vegan** is [free software](https://www.gnu.org/philosophy/free-sw.html) and distributed under [GPL2 license](https://www.gnu.org/licenses/gpl.html). ------------------------------------------------------------------------ ### What is R? R is a system for statistical computation and graphics. It consists of a language plus a run-time environment with graphics, a debugger, access to certain system functions, and the ability to run programs stored in script files. R has a home page at <https://www.R-project.org/>. It is [free software](https://www.gnu.org/philosophy/free-sw.html) distributed under a GNU-style [copyleft](https://www.gnu.org/copyleft/copyleft.html), and an official part of the [GNU](https://www.gnu.org/) project (“GNU S”). ------------------------------------------------------------------------ ### How to obtain **vegan** and R? Both R and latest release version of **vegan** can be obtained through [CRAN](https://cran.r-project.org). Unstable development version of **vegan** can be obtained through [GitHub](https://github.com/vegandevs/vegan). Formerly **vegan** was developed in [R-Forge](https://r-forge.r-project.org/projects/vegan/), but after moving to [GitHub](https://github.com/vegandevs/vegan) the R-Forge repository is out of date. ------------------------------------------------------------------------ ### What R packages **vegan** depends on? **Vegan** depends on the **permute** package which will provide advanced and flexible permutation routines for **vegan**. The **permute** package is developed together with **vegan** in [GitHub](https://github.com/gavinsimpson/permute). Some individual **vegan** functions depend on packages **MASS**, **mgcv**, **parallel**, **cluster**, **lattice** and **tcltk**. These all are base or recommended R packages that should be available in every R installation. **Vegan** declares these as suggested or imported packages, and you can install **vegan** and use most of its functions without these packages. **Vegan** is accompanied with a supporting package **vegan3d** for three-dimensional and dynamic plotting. The **vegan3d** package needs non-standard packages **rgl** and **scatterplot3d**. ------------------------------------------------------------------------ ### What other packages are available for ecologists? 
CRAN [Task Views](https://cran.r-project.org/web/views/) include entries like `Environmetrics`, `Multivariate` and `Spatial` that describe several useful packages and functions. If you install R package **ctv**, you can inspect Task Views from your R session, and automatically install sets of most important packages. ------------------------------------------------------------------------ ### What other documentation is available for **vegan**? **Vegan** is a fully documented R package with standard help pages. These are the most authoritative sources of documentation (and as a last resource you can use the force and the read the source, as **vegan** is open source). **Vegan** package ships with other documents which can be read with `browseVignettes("vegan")` command. The documents included in the **vegan** package are - **Vegan** `NEWS` - This document (`FAQ-vegan`). - Short introduction to basic ordination methods in **vegan** (`intro-vegan`). - Introduction to diversity methods in **vegan** (`diversity-vegan`). - Discussion on design decisions in **vegan** (`decision-vegan`). - Description of variance partition procedures in function `varpart` (`partitioning`). Web documents outside the package include: - <https://github.com/vegandevs/vegan>: **vegan** homepage. ------------------------------------------------------------------------ ### Is there a Graphical User Interface (GUI) for **vegan**? Roeland Kindt has made package **BiodiversityR** which provides a GUI for **vegan**. The package is available at [CRAN](https://cran.r-project.org/package=BiodiversityR). It is not a mere GUI for **vegan**, but adds some new functions and complements **vegan** functions in order to provide a workbench for biodiversity analysis. You can install **BiodiversityR** using `install.packages("BiodiversityR")` or graphical package management menu in R. The GUI works on Windows, MacOS X and Linux. ------------------------------------------------------------------------ ### How to cite **vegan**? Use command `citation("vegan")` in R to see the recommended citation to be used in publications. ------------------------------------------------------------------------ ### How to build **vegan** from sources? In general, you do not need to build **vegan** from sources, but binary builds of release versions are available through [CRAN](https://cran.r-project.org/) for Windows and MacOS X. If you use some other operating systems, you may have to use source packages. **Vegan** is a standard R package, and can be built like instructed in R documentation. **Vegan** contains source files in C and FORTRAN, and you need appropriate compilers (which may need more work in Windows and MacOS X). ------------------------------------------------------------------------ ### Are there binaries for devel versions? Not currently. You need tools to build C and Fortran programs to install **vegan**. If you have those, you can use `devtools::install_github("vegan")` to install the most recent devel version. ------------------------------------------------------------------------ ### How to report a bug in **vegan**? If you think you have found a bug in **vegan**, you should report it to **vegan** maintainers or developers. The preferred forum to report bugs is [GitHub](https://github.com/vegandevs/vegan/issues). The bug report should be so detailed that the bug can be replicated and corrected. Preferably, you should send an example that causes a bug. 
If it needs a data set that is not available in R, you should send a minimal data set as well. You also should paste the output or error message in your message. You also should specify which version of **vegan** you used. Bug reports are welcome: they are the only way to make **vegan** non-buggy. Please note that you shall not send bug reports to R mailing lists, since **vegan** is not a standard R package. ------------------------------------------------------------------------ ### Is it a bug or a feature? It is not necessarily a bug if some function gives different results than you expect: That may be a deliberate design decision. It may be useful to check the documentation of the function to see what was the intended behaviour. It may also happen that function has an argument to switch the behaviour to match your expectation. For instance, function `vegdist` always calculates quantitative indices (when this is possible). If you expect it to calculate a binary index, you should use argument `binary = TRUE`. ------------------------------------------------------------------------ ### Can I contribute to **vegan**? **Vegan** is dependent on user contribution. All feedback is welcome. If you have problems with **vegan**, it may be as simple as incomplete documentation, and we shall do our best to improve the documents. Feature requests also are welcome, but they are not necessarily fulfilled. A new feature will be added if it is easy to do and it looks useful, or if you submit code. If you can write code yourself, the best forum to contribute to vegan is [GitHub](https://github.com/vegandevs/vegan). ------------------------------------------------------------------------ Ordination ---------- ------------------------------------------------------------------------ ### I have only numeric and positive data but **vegan** still complains You are wrong! Computers are painfully pedantic, and if they find non-numeric or negative data entries, you really have them. Check your data. Most common reasons for non-numeric data are that row names were read as a non-numeric variable instead of being used as row names (check argument `row.names` in reading the data), or that the column names were interpreted as data (check argument `header = TRUE` in reading the data). Another common reason is that you had empty cells in your input data, and these were interpreted as missing values. ------------------------------------------------------------------------ ### Can I analyse binary or cover class data? Yes. Most **vegan** methods can handle binary data or cover abundance data. Most statistical tests are based on permutation, and do not make distributional assumptions. There are some methods (mainly in diversity analysis) that need count data. These methods check that input data are integers, but they may be fooled by cover class data. ------------------------------------------------------------------------ ### Why dissimilarities in **vegan** differ from other sources? Most commonly the reason is that other software use presence–absence data whereas **vegan** used quantitative data. Usually **vegan** indices are quantitative, but you can use argument `binary = TRUE` to make them presence–absence. However, the index name is the same in both cases, although different names usually occur in literature. For instance, Jaccard index actually refers to the binary index, but **vegan** uses name `"jaccard"` for the quantitative index, too. 
Another reason may be that indices indeed are defined differently, because
people use the same names for different indices.

------------------------------------------------------------------------

### Why NMDS stress is sometimes 0.1 and sometimes 10?

Stress is a proportional measure of badness of fit. The proportions can be
expressed either as parts of one or as percents. Function `isoMDS` (**MASS**
package) uses percents, and function `monoMDS` (**vegan** package) uses
proportions, and therefore the same stress is 100 times higher in `isoMDS`.
The results of the `goodness` function also depend on the definition of
stress, and the same `goodness` is 100 times higher in `isoMDS` than in
`monoMDS`. Both of these conventions are equally correct.

------------------------------------------------------------------------

### I get zero stress but no repeated solutions in `metaMDS`

The first (try 0) run of `metaMDS` starts from the metric scaling solution
and is usually good, and most software only returns that solution. However,
`metaMDS` tries to see if that standard solution can be repeated, or
improved and the improved solution still repeated. In all cases, it will
return the best solution found, and there is no burning need to do anything
if you get the message that the solution could not be repeated. If you are
keen to know that the solution really is the global optimum, you may follow
the instructions in the `metaMDS` help section "Results Could Not Be
Repeated" and try more.

The most common reason is that you have too few observations for your NMDS.
For `n` observations (points) and `k` dimensions you need to estimate `n*k`
parameters (ordination scores) using `n*(n-1)/2` dissimilarities. For `k`
dimensions you must have `n > 2*k + 1`, or for two dimensions at least six
points. In some degenerate situations you may need an even larger number of
points. If you have a lower number of points, you can find an undefined
number of perfect (stress is zero) but different solutions. Conventional
wisdom due to Kruskal is that you should have `n > 4*k + 1` points for `k`
dimensions. A typical symptom of insufficient data is that you have (nearly)
zero stress but no two convergent solutions. In those cases you should
reduce the number of dimensions (`k`), and with very small data sets you
should not use NMDS, but rely on metric methods.

It seems that local and hybrid scaling with `monoMDS` have similar lower
limits in practice (although theoretically they could differ). However, a
higher number of dimensions can be used in metric scaling, both with
`monoMDS` and in principal coordinates analysis (`cmdscale` in **stats**,
`wcmdscale` in **vegan**).

------------------------------------------------------------------------

### Zero dissimilarities in isoMDS

Function `metaMDS` uses function `monoMDS` as its default method for NMDS,
and this function can handle zero dissimilarities. Alternative function
`isoMDS` cannot handle zero dissimilarities. If you want to use `isoMDS`,
you can use argument `zerodist = "add"` in `metaMDS` to handle zero
dissimilarities. With this argument, zero dissimilarities are replaced with
a small positive value, and they can be handled in `isoMDS`. This is a
kluge, and some people do not like it. A more principled solution is to
remove duplicate sites using the R command `unique`. However, after some
standardizations or with some dissimilarity indices, originally non-unique
sites can have zero dissimilarity, and you have to resort to the kluge (or
work harder with your data).
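A minimal sketch of the kluge described above, assuming the `dune` data and
the `isoMDS` engine of `metaMDS`:

```{r eval=FALSE}
data(dune)
## replace zero dissimilarities with a tiny positive value so isoMDS can run
ord <- metaMDS(dune, engine = "isoMDS", zerodist = "add")
```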
Usually it is better to use `monoMDS`.

------------------------------------------------------------------------

### I have heard that you cannot fit environmental vectors or surfaces to NMDS results which only have rank-order scores

Claims like this have indeed been at large on the Internet, but they are
based on a grave misunderstanding and are plainly wrong. NMDS ordination
results are strictly metric, and in **vegan** `metaMDS` and `monoMDS` they
are even strictly Euclidean. The method is called “non-metric” because the
Euclidean distances in ordination space have a non-metric rank-order
relationship to community dissimilarities. You can inspect this non-linear
step curve using function `stressplot` in **vegan**. Because the ordination
scores are strictly Euclidean, it is correct to use **vegan** functions
`envfit` and `ordisurf` with NMDS results.

------------------------------------------------------------------------

### Where can I find numerical scores of ordination axes?

Normally you can use function `scores` to extract ordination scores for any
ordination method. The `scores` function can also find ordination scores for
many non-**vegan** functions such as `prcomp` and `princomp` and for some
**ade4** functions. In some cases the ordination result object stores raw
scores, and the axes are scaled appropriately when you access them with
`scores`. For instance, in `cca` and `rda` the ordination object has only
so-called normalized scores, and they are scaled for ordination plots or for
other use when they are accessed with `scores`.

------------------------------------------------------------------------

### How are the RDA results scaled?

The scaling of RDA results indeed differs from most other software packages.
The scaling of RDA is such a complicated issue that it cannot be explained
in this FAQ, but it is explained in a separate pdf document on “Design
decisions and implementation details in vegan” that you can read with the
command `browseVignettes("vegan")`.

------------------------------------------------------------------------

### I cannot print and plot RDA results properly

If the RDA ordination results have a weird format or you cannot plot them
properly, you probably have a name clash with the **klaR** package, which
also has a function `rda`, and the **klaR** `print`, `plot` or `predict`
functions are used for **vegan** RDA results. You can choose between the
`rda` functions using `vegan::rda()` or `klaR::rda()`: you will get obscure
error messages if you use the wrong function. In general, **vegan** should
be able to work normally if **vegan** was loaded after **klaR**, but if
**klaR** was loaded later, its functions will take precedence over
**vegan**. Sometimes the **vegan** namespace is loaded automatically when
restoring a previously stored workspace at start-up, and then **klaR**
methods will always take precedence over **vegan**. You should check your
loaded packages. **klaR** may also be loaded indirectly via other packages
(in the reported cases it was most often loaded via the **agricolae**
package). **Vegan** and **klaR** both have the same function name (`rda`),
and it may not be possible to use these packages simultaneously; the safest
choice is to unload one of the packages if at all possible. See the
discussion in
[vegan github issues](https://github.com/vegandevs/vegan/issues/277).

If you have a very old version of **ade4** (prior to 1.7-8), you may have
similar name clashes with `cca`. The solution is to upgrade **ade4**.
------------------------------------------------------------------------ ### Ordination fails with “Error in La.svd” Constrained ordination (`cca`, `rda`, `capscale`) will sometimes fail with error message `Error in La.svd(x, nu, nv): error code 1 from Lapack routine 'dgesdd'.` It seems that the basic problem is in the `svd` function of `LAPACK` that is used for numerical analysis in R. `LAPACK` is an external library that is beyond the control of package developers and R core team so that these problems may be unsolvable. It seems that the problems with the `LAPACK` code are so common that even the help page of `svd` warns about them Reducing the range of constraints (environmental variables) helps sometimes. For instance, multiplying constraints by a constant \< 1. This rescaling does not influence the numerical results of constrained ordination, but it can complicate further analyses when values of constraints are needed, because the same scaling must be applied there. We can only hope that this problem is fixed in the future versions of R and `LAPACK`. ------------------------------------------------------------------------ ### Variance explained by ordination axes. In general, **vegan** does not directly give any statistics on the “variance explained” by ordination axes or by the constrained axes. This is a design decision: I think this information is normally useless and often misleading. In community ordination, the goal typically is not to explain the variance, but to find the “gradients” or main trends in the data. The “total variation” often is meaningless, and all proportions of meaningless values also are meaningless. Often a better solution explains a smaller part of “total variation”. For instance, in unstandardized principal components analysis most of the variance is generated by a small number of most abundant species, and they are easy to “explain” because data really are not very multivariate. If you standardize your data, all species are equally important. The first axes explains much less of the “total variation”, but now they explain all species equally, and results typically are much more useful for the whole community. Correspondence analysis uses another measure of variation (which is not variance), and again it typically explains a “smaller proportion” than principal components but with a better result. Detrended correspondence analysis and nonmetric multidimensional scaling even do not try to “explain” the variation, but use other criteria. All methods are incommensurable, and it is impossible to compare methods using “explanation of variation”. If you still want to get “explanation of variation” (or a deranged editor requests that from you), it is possible to get this information for some methods: - Eigenvector methods: Functions `rda`, `cca` and `capscale` give the variation of conditional (partialled), constrained (canonical) and residual components, but you must calculate the proportions by hand. Function `eigenvals` extracts the eigenvalues, and `summary(eigenvals(ord))` reports the proportions explained in the result object `ord`. Function `RsquareAdj` gives the R-squared and adjusted R-squared (if available) for constrained components. Function `goodness` gives the same statistics for individual species or sites (species are unavailable with `capscale`). In addition, there is a special function `varpart` for unbiased partitioning of variance between up to four separate components in redundancy analysis. 
- Detrended correspondence analysis (function `decorana`). The total
  amount of variation is undefined in detrended correspondence analysis,
  and therefore proportions of the total are unknown and undefined. DCA is
  not a method for decomposition of variation, and therefore these
  proportions would not make sense either.
- Nonmetric multidimensional scaling. NMDS is a method for nonlinear
  mapping, and the concept of variation explained does not make sense.
  However, 1 - stress\^2 transforms nonlinear stress into a quantity
  analogous to a squared correlation coefficient. Function `stressplot`
  displays the nonlinear fit and gives this statistic.

------------------------------------------------------------------------

### Can I have random effects in constrained ordination or in `adonis`?

No. Strictly speaking, this is impossible. However, you can define models
that respond to similar goals as random effects models, although strictly
speaking they use only fixed effects.

Constrained ordination functions `cca`, `rda` and `capscale` can have
`Condition()` terms in their formula. The `Condition()` terms define partial
terms that are fitted before other constraints and can be used to remove the
effects of background variables, and their contribution to decomposing
inertia (variance) is reported separately. These partial terms are often
regarded as similar to random effects, but they are still fitted in the same
way as other terms and strictly speaking they are fixed terms.

Function `adonis2` can evaluate terms sequentially. In a model with
right-hand-side `~ A + B` the effects of `A` are evaluated first, and the
effects of `B` after removing the effects of `A`. Sequential tests are also
available in the `anova` function for constrained ordination results by
setting argument `by = "term"`. In this way, the first terms can serve in a
similar role as random effects, although they are fitted in the same way as
all other terms, and strictly speaking they are fixed terms.

All permutation tests in **vegan** are based on the **permute** package that
allows constructing various restricted permutation schemes. For instance,
you can set levels of `plots` or `blocks` for a factor regarded as a random
term.

A major reason why real random effects models are impossible in most
**vegan** functions is that their tests are based on the permutation of the
data. The data are given, that is fixed, and therefore permutation tests are
basically tests of fixed terms on fixed data. Random effect terms would
require permutations of data with a random component instead of the given,
fixed data, and such tests are not available in **vegan**.

------------------------------------------------------------------------

### Is it possible to have passive points in ordination?

**Vegan** does not have a concept of passive points, or points that should
have only little influence on the ordination results. However, you can add
points to eigenvector methods using `predict` functions with `newdata`. You
can first perform an ordination without some species or sites, and then
find scores for all points using your complete data as `newdata`. The
`predict` functions are available for basic eigenvector methods in **vegan**
(`cca`, `rda`, `decorana`; for an up-to-date list, use command
`methods("predict")`). You can also simulate passive points in R by giving
low weights to rows and columns (this is the method used in software with
passive points). For instance, the following command makes row 3 “passive”:
`dune[3,] <- 0.001*dune[3,]`.
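A minimal sketch of the `predict`/`newdata` approach described above,
assuming the `dune` data (see `?predict.cca` for the available score types):

```{r eval=FALSE}
data(dune)
ord <- cca(dune[-3, ])                                        # fit without site 3
predict(ord, newdata = dune[3, , drop = FALSE], type = "wa")  # passive score for site 3
```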
------------------------------------------------------------------------

### Class variables and dummies

You should define a class variable as an R `factor`, and **vegan** will
automatically handle it with the formula interface. You can also define
constrained ordination without the formula interface, but then you must code
your class variables by hand.

R (and **vegan**) knows both unordered and ordered factors. Unordered
factors are internally coded as dummy variables, but one redundant level is
removed or aliased. With default contrasts, the removed level is the first
one. Ordered factors are expressed as polynomial contrasts. Both of these
contrasts are explained in standard R documentation.

------------------------------------------------------------------------

### How are environmental arrows scaled?

The printed output of `envfit` gives the direction cosines which are the
coordinates of unit length arrows. For plotting, these are scaled by their
correlation (square roots of column `r2`). You can see the scaled lengths of
`envfit` arrows using command `scores`.

The scaled environmental vectors from `envfit` and the arrows for continuous
environmental variables in constrained ordination (`cca`, `rda`, `capscale`)
are adjusted to fill the current graph. The lengths of arrows do not have a
fixed meaning with respect to the points (species, sites); they can only be
compared against each other, and therefore only their relative lengths are
important.

If you want to change the scaling of the arrows, you can use the `text`
(plotting arrows and text) or `points` (plotting only arrows) functions for
constrained ordination. These functions have argument `arrow.mul` which sets
the multiplier. The `plot` function for `envfit` also has the `arrow.mul`
argument to set the arrow multiplier. If you save the invisible result of
the constrained ordination `plot` command, you can see the value of the
currently used `arrow.mul`, which is saved as an attribute of `biplot`
scores.

Function `ordiArrowMul` is used to find the scaling for the current plot.
You can use this function to see how arrows would be scaled:

```{r eval=FALSE}
sol <- cca(varespec)
ef <- envfit(sol ~ ., varechem)
plot(sol)
ordiArrowMul(scores(ef, display="vectors"))
```

------------------------------------------------------------------------

### I want to use Helmert or sum contrasts

`vegan` uses standard R utilities for defining contrasts. The default in
standard installations is to use treatment contrasts, but you can change the
behaviour globally by setting `options` or locally by using the keyword
`contrasts`. Please check the R help pages and user manuals for details.

------------------------------------------------------------------------

### What are aliased variables and how to see them?

An aliased variable has no information because it can be expressed with the
help of other variables. Such variables are automatically removed in
constrained ordination in **vegan**. The aliased variables can be redundant
levels of factors or whole variables. **Vegan** function `alias` gives the
defining equations for aliased variables. If you only want to see the names
of aliased variables or levels in solution `sol`, use
`alias(sol, names.only=TRUE)`.

------------------------------------------------------------------------

### Plotting aliased variables

You can fit vectors or class centroids for aliased variables using the
`envfit` function. The `envfit` function uses weighted fitting, and the
fitted vectors are identical to the vectors in correspondence analysis.
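A hedged sketch of the `envfit` approach above; the particular variables
(`Manure` together with `Management` in the `dune.env` data) are only an
illustration of a model where some factor levels become aliased:

```{r eval=FALSE}
mod <- cca(dune ~ Management + Manure, dune.env)   # some Manure levels may be aliased
plot(mod, display = "sites")
plot(envfit(mod ~ Manure, dune.env), add = TRUE)   # weighted fit works for aliased terms
```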
------------------------------------------------------------------------ ### Restricted permutations in **vegan** **Vegan** uses **permute** package in all its permutation tests. The **permute** package will allow restricted permutation designs for time series, line transects, spatial grids and blocking factors. The construction of restricted permutation schemes is explained in the manual page `permutations` in **vegan** and in the documentation of the **permute** package. ------------------------------------------------------------------------ ### How to use different plotting symbols in ordination graphics? The default ordination `plot` function is intended for fast plotting and it is not very configurable. To use different plotting symbols, you should first create and empty ordination plot with `plot(..., type="n")`, and then add `points` or `text` to the created empty frame (here `...` means other arguments you want to give to your `plot` command). The `points` and `text` commands are fully configurable, and allow different plotting symbols and characters. ------------------------------------------------------------------------ ### How to avoid cluttered ordination graphs? If there is a really high number of species or sites, the graphs often are congested and many labels are overwritten. It may be impossible to have complete readable graphics with some data sets. Below we give a brief overview of tricks you can use. Gavin Simpson’s blog [From the bottom of the heap](https://fromthebottomoftheheap.net) has a series of articles on “decluttering ordination plots” with more detailed discussion and examples. - Use only points, possibly with different types if you do not need to see the labels. You may need to first create an empty plot using `plot(..., type="n")`, if you are not satisfied with the default graph. (Here and below `...` means other arguments you want to give to your `plot` command.) - Use points and add labels to desired points using interactive `identify` command if you do not need to see all labels. - Add labels using function `ordilabel` which uses non-transparent background to the text. The labels still shadow each other, but the uppermost labels are readable. Argument `priority` will help in displaying the most interesting labels (see [Decluttering blog, part 1](https://fromthebottomoftheheap.net/2013/01/12/decluttering-ordination-plots-in-vegan-part-1-ordilabel/)). - Use `orditorp` function that uses labels only if these can be added to a graph without overwriting other labels, and points otherwise, if you do not need to see all labels. You must first create an empty plot using `plot(..., type="n")`, and then add labels or points with `orditorp` (see [Decluttering blog](https://fromthebottomoftheheap.net/2013/01/13/decluttering-ordination-plots-in-vegan-part-2-orditorp/)). - Use `ordipointlabel` which uses points and text labels to the points, and tries to optimize the location of the text to minimize the overlap (see [Decluttering blog](https://fromthebottomoftheheap.net/2013/06/27/decluttering-ordination-plots-in-vegan-part-3-ordipointlabel/)). - Ordination `text` and `points` functions have argument `select` that can be used for full control of selecting items plotted as text or points. - Use interactive `orditkplot` function that lets you drag labels of points to better positions if you need to see all labels. 
Only one set of points can be used (see [Decluttering blog](https://fromthebottomoftheheap.net/2013/12/31/decluttering-ordination-in-vegan-part-4-orditkplot/)). - Most `plot` functions allow you to zoom to a part of the graph using `xlim` and `ylim` arguments to reduce clutter in congested areas. ------------------------------------------------------------------------ ### Can I flip an axis in ordination diagram? Use `xlim` or `ylim` with flipped limits. If you have model `mod <- cca(dune)` you can flip the first axis with `plot(mod, xlim = c(3, -2))`. ------------------------------------------------------------------------ ### Can I zoom into an ordination plot? You can use `xlim` and `ylim` arguments in `plot` or `ordiplot` to zoom into ordination diagrams. Normally you must set both `xlim` and `ylim` because ordination plots will keep the equal aspect ratio of axes, and they will fill the graph so that the longer axis will fit. Dynamic zooming can be done with function `orditkplot`. You can directly save the edited `orditkplot` graph in various graphic formats, or you can export the graph object back to R and use `plot` to display the results. ------------------------------------------------------------------------ Other analysis methods ---------------------- ------------------------------------------------------------------------ ### Is there TWINSPAN? No. It may be possible to port TWINSPAN to **vegan**, but it is not among the **vegan** top priorities. If anybody wants to try porting, I will be happy to help. TWINSPAN has a very permissive license, and it would be completely legal to port the function into R. ------------------------------------------------------------------------ ### Why restricted permutation does not influence adonis results? The permutation scheme influences the permutation distribution of the statistics and probably the significance levels, but does not influence the calculation of the statistics. ------------------------------------------------------------------------ ### How is deviance calculated? Some **vegan** functions, such as `radfit` use base R facility of `family` in maximum likelihood estimation. This allows use of several alternative error distributions, among them `"poisson"` and `"gaussian"`. The R `family` also defines the deviance. You can see the equations for deviance with commands like `poisson()$dev` or `gaussian()$dev`. In general, deviance is 2 times log.likelihood shifted so that models with exact fit have zero deviance. ------------------------------------------------------------------------
/scratch/gouwar.j/cran-all/cranData/vegan/inst/doc/FAQ-vegan.Rmd
### R code from vignette source 'decision-vegan.Rnw' ################################################### ### code chunk number 1: decision-vegan.Rnw:21-26 ################################################### figset <- function() par(mar=c(4,4,1,1)+.1) options(SweaveHooks = list(fig = figset)) options("prompt" = "> ", "continue" = " ") options(width = 55) require(vegan) ################################################### ### code chunk number 2: decision-vegan.Rnw:84-85 (eval = FALSE) ################################################### ## options(mc.cores = 2) ################################################### ### code chunk number 3: decision-vegan.Rnw:125-136 (eval = FALSE) ################################################### ## ## start up and define meandist() ## library(vegan) ## data(sipoo) ## meandist <- ## function(x) mean(vegdist(x, "bray")) ## library(parallel) ## clus <- makeCluster(4) ## clusterEvalQ(clus, library(vegan)) ## mbc1 <- oecosimu(dune, meandist, "r2dtable", ## parallel = clus) ## stopCluster(clus) ################################################### ### code chunk number 4: decision-vegan.Rnw:240-251 ################################################### getOption("SweaveHooks")[["fig"]]() data(sipoo) mod <- nestedtemp(sipoo) plot(mod, "i") x <- mod$c["Falcsubb"] y <- 1 - mod$r["Svartholm"] points(x,y, pch=16, cex=1.5) abline(x+y, -1, lty=2) f <- function(x, p) (1-(1-x)^p)^(1/p) cross <- function(x, a, p) f(x,p) - a + x r <- uniroot(cross, c(0,1), a = x+y, p = mod$p)$root arrows(x,y, r, f(r, mod$p), lwd=4) ################################################### ### code chunk number 5: decision-vegan.Rnw:549-553 ################################################### library(vegan) data(varespec) data(varechem) orig <- cca(varespec ~ Al + K, varechem) ################################################### ### code chunk number 6: a ################################################### plot(orig, dis=c("lc","bp")) ################################################### ### code chunk number 7: decision-vegan.Rnw:562-563 ################################################### getOption("SweaveHooks")[["fig"]]() plot(orig, dis=c("lc","bp")) ################################################### ### code chunk number 8: decision-vegan.Rnw:572-574 ################################################### i <- sample(nrow(varespec)) shuff <- cca(varespec[i,] ~ Al + K, varechem) ################################################### ### code chunk number 9: decision-vegan.Rnw:577-578 ################################################### getOption("SweaveHooks")[["fig"]]() plot(shuff, dis=c("lc","bp")) ################################################### ### code chunk number 10: a ################################################### plot(procrustes(scores(orig, dis="lc"), scores(shuff, dis="lc"))) ################################################### ### code chunk number 11: decision-vegan.Rnw:591-592 ################################################### getOption("SweaveHooks")[["fig"]]() plot(procrustes(scores(orig, dis="lc"), scores(shuff, dis="lc"))) ################################################### ### code chunk number 12: decision-vegan.Rnw:600-603 ################################################### tmp1 <- rda(varespec ~ Al + K, varechem) i <- sample(nrow(varespec)) # Different shuffling tmp2 <- rda(varespec[i,] ~ Al + K, varechem) ################################################### ### code chunk number 13: decision-vegan.Rnw:606-608 ################################################### 
getOption("SweaveHooks")[["fig"]]() plot(procrustes(scores(tmp1, dis="lc"), scores(tmp2, dis="lc"))) ################################################### ### code chunk number 14: decision-vegan.Rnw:625-627 ################################################### orig shuff ################################################### ### code chunk number 15: decision-vegan.Rnw:632-633 ################################################### getOption("SweaveHooks")[["fig"]]() plot(procrustes(orig, shuff)) ################################################### ### code chunk number 16: decision-vegan.Rnw:646-651 ################################################### tmp1 <- rda(varespec ~ ., varechem) tmp2 <- rda(varespec[i,] ~ ., varechem) proc <- procrustes(scores(tmp1, dis="lc", choi=1:14), scores(tmp2, dis="lc", choi=1:14)) max(residuals(proc)) ################################################### ### code chunk number 17: decision-vegan.Rnw:663-666 ################################################### data(dune) data(dune.env) orig <- cca(dune ~ Moisture, dune.env) ################################################### ### code chunk number 18: decision-vegan.Rnw:671-672 ################################################### getOption("SweaveHooks")[["fig"]]() plot(orig, dis="lc") ################################################### ### code chunk number 19: a ################################################### plot(orig, display="wa", type="points") ordispider(orig, col="red") text(orig, dis="cn", col="blue") ################################################### ### code chunk number 20: decision-vegan.Rnw:696-697 ################################################### getOption("SweaveHooks")[["fig"]]() plot(orig, display="wa", type="points") ordispider(orig, col="red") text(orig, dis="cn", col="blue")
/scratch/gouwar.j/cran-all/cranData/vegan/inst/doc/decision-vegan.R
### R code from vignette source 'diversity-vegan.Rnw' ################################################### ### code chunk number 1: diversity-vegan.Rnw:21-26 ################################################### par(mfrow=c(1,1)) options(width=55) figset <- function() par(mar=c(4,4,1,1)+.1) options(SweaveHooks = list(fig = figset)) options("prompt" = "> ", "continue" = " ") ################################################### ### code chunk number 2: diversity-vegan.Rnw:58-60 ################################################### library(vegan) data(BCI) ################################################### ### code chunk number 3: diversity-vegan.Rnw:78-79 ################################################### H <- diversity(BCI) ################################################### ### code chunk number 4: diversity-vegan.Rnw:86-87 ################################################### J <- H/log(specnumber(BCI)) ################################################### ### code chunk number 5: diversity-vegan.Rnw:113-115 ################################################### k <- sample(nrow(BCI), 6) R <- renyi(BCI[k,]) ################################################### ### code chunk number 6: diversity-vegan.Rnw:122-123 ################################################### getOption("SweaveHooks")[["fig"]]() print(plot(R)) ################################################### ### code chunk number 7: diversity-vegan.Rnw:134-135 ################################################### alpha <- fisher.alpha(BCI) ################################################### ### code chunk number 8: diversity-vegan.Rnw:171-172 ################################################### quantile(rowSums(BCI)) ################################################### ### code chunk number 9: diversity-vegan.Rnw:175-176 ################################################### Srar <- rarefy(BCI, min(rowSums(BCI))) ################################################### ### code chunk number 10: diversity-vegan.Rnw:184-185 ################################################### S2 <- rarefy(BCI, 2) ################################################### ### code chunk number 11: diversity-vegan.Rnw:189-190 ################################################### all(rank(Srar) == rank(S2)) ################################################### ### code chunk number 12: diversity-vegan.Rnw:196-197 ################################################### range(diversity(BCI, "simp") - (S2 -1)) ################################################### ### code chunk number 13: diversity-vegan.Rnw:260-264 ################################################### data(dune) data(dune.taxon) taxdis <- taxa2dist(dune.taxon, varstep=TRUE) mod <- taxondive(dune, taxdis) ################################################### ### code chunk number 14: diversity-vegan.Rnw:267-268 ################################################### getOption("SweaveHooks")[["fig"]]() plot(mod) ################################################### ### code chunk number 15: diversity-vegan.Rnw:294-296 ################################################### tr <- hclust(taxdis, "aver") mod <- treedive(dune, tr) ################################################### ### code chunk number 16: diversity-vegan.Rnw:318-321 ################################################### k <- sample(nrow(BCI), 1) fish <- fisherfit(BCI[k,]) fish ################################################### ### code chunk number 17: diversity-vegan.Rnw:324-325 ################################################### getOption("SweaveHooks")[["fig"]]() plot(fish) 
################################################### ### code chunk number 18: diversity-vegan.Rnw:353-354 ################################################### prestondistr(BCI[k,]) ################################################### ### code chunk number 19: diversity-vegan.Rnw:385-387 ################################################### rad <- radfit(BCI[k,]) rad ################################################### ### code chunk number 20: diversity-vegan.Rnw:390-391 ################################################### getOption("SweaveHooks")[["fig"]]() print(radlattice(rad)) ################################################### ### code chunk number 21: a ################################################### sac <- specaccum(BCI) plot(sac, ci.type="polygon", ci.col="yellow") ################################################### ### code chunk number 22: diversity-vegan.Rnw:461-462 ################################################### getOption("SweaveHooks")[["fig"]]() sac <- specaccum(BCI) plot(sac, ci.type="polygon", ci.col="yellow") ################################################### ### code chunk number 23: diversity-vegan.Rnw:490-491 ################################################### ncol(BCI)/mean(specnumber(BCI)) - 1 ################################################### ### code chunk number 24: diversity-vegan.Rnw:508-510 ################################################### beta <- vegdist(BCI, binary=TRUE) mean(beta) ################################################### ### code chunk number 25: diversity-vegan.Rnw:517-518 ################################################### betadiver(help=TRUE) ################################################### ### code chunk number 26: diversity-vegan.Rnw:536-538 ################################################### z <- betadiver(BCI, "z") quantile(z) ################################################### ### code chunk number 27: diversity-vegan.Rnw:548-553 ################################################### data(dune) data(dune.env) z <- betadiver(dune, "z") mod <- with(dune.env, betadisper(z, Management)) mod ################################################### ### code chunk number 28: diversity-vegan.Rnw:556-557 ################################################### getOption("SweaveHooks")[["fig"]]() boxplot(mod) ################################################### ### code chunk number 29: diversity-vegan.Rnw:668-669 ################################################### specpool(BCI) ################################################### ### code chunk number 30: diversity-vegan.Rnw:674-676 ################################################### s <- sample(nrow(BCI), 25) specpool(BCI[s,]) ################################################### ### code chunk number 31: diversity-vegan.Rnw:687-688 ################################################### estimateR(BCI[k,]) ################################################### ### code chunk number 32: diversity-vegan.Rnw:757-759 ################################################### veiledspec(prestondistr(BCI[k,])) veiledspec(BCI[k,]) ################################################### ### code chunk number 33: diversity-vegan.Rnw:773-774 ################################################### smo <- beals(BCI) ################################################### ### code chunk number 34: a ################################################### j <- which(colnames(BCI) == "Ceiba.pentandra") plot(beals(BCI, species=j, include=FALSE), BCI[,j], ylab="Occurrence", main="Ceiba pentandra", xlab="Probability of occurrence") 
################################################### ### code chunk number 35: diversity-vegan.Rnw:787-788 ################################################### getOption("SweaveHooks")[["fig"]]() j <- which(colnames(BCI) == "Ceiba.pentandra") plot(beals(BCI, species=j, include=FALSE), BCI[,j], ylab="Occurrence", main="Ceiba pentandra", xlab="Probability of occurrence")
/scratch/gouwar.j/cran-all/cranData/vegan/inst/doc/diversity-vegan.R
### R code from vignette source 'intro-vegan.Rnw' ################################################### ### code chunk number 1: intro-vegan.Rnw:18-23 ################################################### par(mfrow=c(1,1)) options(width=72) figset <- function() par(mar=c(4,4,1,1)+.1) options(SweaveHooks = list(fig = figset)) options("prompt" = "> ", "continue" = " ") ################################################### ### code chunk number 2: intro-vegan.Rnw:73-76 ################################################### library(vegan) data(dune) ord <- decorana(dune) ################################################### ### code chunk number 3: intro-vegan.Rnw:79-80 ################################################### ord ################################################### ### code chunk number 4: intro-vegan.Rnw:103-105 ################################################### ord <- metaMDS(dune) ord ################################################### ### code chunk number 5: a ################################################### plot(ord) ################################################### ### code chunk number 6: intro-vegan.Rnw:120-121 ################################################### getOption("SweaveHooks")[["fig"]]() plot(ord) ################################################### ### code chunk number 7: a ################################################### plot(ord, type = "n") points(ord, display = "sites", cex = 0.8, pch=21, col="red", bg="yellow") text(ord, display = "spec", cex=0.7, col="blue") ################################################### ### code chunk number 8: intro-vegan.Rnw:142-143 ################################################### getOption("SweaveHooks")[["fig"]]() plot(ord, type = "n") points(ord, display = "sites", cex = 0.8, pch=21, col="red", bg="yellow") text(ord, display = "spec", cex=0.7, col="blue") ################################################### ### code chunk number 9: intro-vegan.Rnw:208-210 ################################################### data(dune.env) attach(dune.env) ################################################### ### code chunk number 10: a ################################################### plot(ord, disp="sites", type="n") ordihull(ord, Management, col=1:4, lwd=3) ordiellipse(ord, Management, col=1:4, kind = "ehull", lwd=3) ordiellipse(ord, Management, col=1:4, draw="polygon") ordispider(ord, Management, col=1:4, label = TRUE) points(ord, disp="sites", pch=21, col="red", bg="yellow", cex=1.3) ################################################### ### code chunk number 11: intro-vegan.Rnw:221-222 ################################################### getOption("SweaveHooks")[["fig"]]() plot(ord, disp="sites", type="n") ordihull(ord, Management, col=1:4, lwd=3) ordiellipse(ord, Management, col=1:4, kind = "ehull", lwd=3) ordiellipse(ord, Management, col=1:4, draw="polygon") ordispider(ord, Management, col=1:4, label = TRUE) points(ord, disp="sites", pch=21, col="red", bg="yellow", cex=1.3) ################################################### ### code chunk number 12: intro-vegan.Rnw:252-254 ################################################### ord.fit <- envfit(ord ~ A1 + Management, data=dune.env, perm=999) ord.fit ################################################### ### code chunk number 13: a ################################################### plot(ord, dis="site") plot(ord.fit) ################################################### ### code chunk number 14: b ################################################### ordisurf(ord, A1, add=TRUE) 
################################################### ### code chunk number 15: intro-vegan.Rnw:270-272 ################################################### getOption("SweaveHooks")[["fig"]]() plot(ord, dis="site") plot(ord.fit) ordisurf(ord, A1, add=TRUE) ################################################### ### code chunk number 16: intro-vegan.Rnw:292-294 ################################################### ord <- cca(dune ~ A1 + Management, data=dune.env) ord ################################################### ### code chunk number 17: a ################################################### plot(ord) ################################################### ### code chunk number 18: intro-vegan.Rnw:301-302 ################################################### getOption("SweaveHooks")[["fig"]]() plot(ord) ################################################### ### code chunk number 19: intro-vegan.Rnw:319-320 ################################################### cca(dune ~ ., data=dune.env) ################################################### ### code chunk number 20: intro-vegan.Rnw:329-330 ################################################### anova(ord) ################################################### ### code chunk number 21: intro-vegan.Rnw:338-339 ################################################### anova(ord, by="term", permutations=199) ################################################### ### code chunk number 22: intro-vegan.Rnw:344-345 ################################################### anova(ord, by="mar", permutations=199) ################################################### ### code chunk number 23: a ################################################### anova(ord, by="axis", permutations=499) ################################################### ### code chunk number 24: intro-vegan.Rnw:357-359 ################################################### ord <- cca(dune ~ A1 + Management + Condition(Moisture), data=dune.env) ord ################################################### ### code chunk number 25: intro-vegan.Rnw:364-365 ################################################### anova(ord, by="term", permutations=499) ################################################### ### code chunk number 26: intro-vegan.Rnw:373-375 ################################################### how <- how(nperm=499, plots = Plots(strata=dune.env$Moisture)) anova(ord, by="term", permutations = how) ################################################### ### code chunk number 27: intro-vegan.Rnw:379-380 ################################################### detach(dune.env)
/scratch/gouwar.j/cran-all/cranData/vegan/inst/doc/intro-vegan.R
### R code from vignette source 'partitioning.Rnw'

###################################################
### code chunk number 1: partitioning.Rnw:20-26
###################################################
par(mfrow=c(1,1))
figset <- function() par(mar=c(4,4,1,1)+.1)
options(SweaveHooks = list(fig = figset))
library(vegan)
labs <- paste("Table", 1:4)
cls <- c("hotpink", "skyblue", "orange", "limegreen")


###################################################
### code chunk number 2: partitioning.Rnw:39-40
###################################################
getOption("SweaveHooks")[["fig"]]()
showvarparts(2, bg = cls, Xnames=labs)


###################################################
### code chunk number 3: partitioning.Rnw:51-52
###################################################
getOption("SweaveHooks")[["fig"]]()
showvarparts(3, bg = cls, Xnames=labs)


###################################################
### code chunk number 4: partitioning.Rnw:64-65
###################################################
getOption("SweaveHooks")[["fig"]]()
showvarparts(4, bg = cls, Xnames=labs)
/scratch/gouwar.j/cran-all/cranData/vegan/inst/doc/partitioning.R
<!--
%\VignetteIndexEntry{vegan FAQ}
%\VignetteEngine{knitr::knitr}
%\VignetteEncoding{UTF-8}
-->

**vegan** FAQ
=============

This document contains answers to some of the most frequently asked questions about the R package **vegan**.

> This work is licensed under the Creative Commons Attribution 3.0
> License. To view a copy of this license, visit
> <https://creativecommons.org/licenses/by/3.0/> or send a letter to
> Creative Commons, 543 Howard Street, 5th Floor, San Francisco,
> California, 94105, USA.
>
> Copyright © 2008-2016 vegan development team

------------------------------------------------------------------------

Introduction
------------

------------------------------------------------------------------------

### What is **vegan**?

**Vegan** is an R package for community ecologists. It contains the most popular methods of multivariate analysis needed in analysing ecological communities, together with tools for diversity analysis and other potentially useful functions. **Vegan** is not self-contained: it must be run under the R statistical environment, and it also depends on many other R packages. **Vegan** is [free software](https://www.gnu.org/philosophy/free-sw.html) and distributed under the [GPL2 license](https://www.gnu.org/licenses/gpl.html).

------------------------------------------------------------------------

### What is R?

R is a system for statistical computation and graphics. It consists of a language plus a run-time environment with graphics, a debugger, access to certain system functions, and the ability to run programs stored in script files. R has a home page at <https://www.R-project.org/>. It is [free software](https://www.gnu.org/philosophy/free-sw.html) distributed under a GNU-style [copyleft](https://www.gnu.org/copyleft/copyleft.html), and an official part of the [GNU](https://www.gnu.org/) project (“GNU S”).

------------------------------------------------------------------------

### How to obtain **vegan** and R?

Both R and the latest release version of **vegan** can be obtained through [CRAN](https://cran.r-project.org). The unstable development version of **vegan** can be obtained through [GitHub](https://github.com/vegandevs/vegan). Formerly **vegan** was developed on [R-Forge](https://r-forge.r-project.org/projects/vegan/), but after the move to [GitHub](https://github.com/vegandevs/vegan) the R-Forge repository is out of date.

------------------------------------------------------------------------

### Which R packages does **vegan** depend on?

**Vegan** depends on the **permute** package, which provides advanced and flexible permutation routines for **vegan**. The **permute** package is developed together with **vegan** on [GitHub](https://github.com/gavinsimpson/permute).

Some individual **vegan** functions depend on packages **MASS**, **mgcv**, **parallel**, **cluster**, **lattice** and **tcltk**. These are all base or recommended R packages that should be available in every R installation. **Vegan** declares these as suggested or imported packages, and you can install **vegan** and use most of its functions without them.

**Vegan** is accompanied by a supporting package **vegan3d** for three-dimensional and dynamic plotting. The **vegan3d** package needs the non-standard packages **rgl** and **scatterplot3d**.

------------------------------------------------------------------------

### What other packages are available for ecologists?
CRAN [Task Views](https://cran.r-project.org/web/views/) include entries like `Environmetrics`, `Multivariate` and `Spatial` that describe several useful packages and functions. If you install the R package **ctv**, you can inspect Task Views from your R session and automatically install sets of the most important packages.

------------------------------------------------------------------------

### What other documentation is available for **vegan**?

**Vegan** is a fully documented R package with standard help pages. These are the most authoritative sources of documentation (and as a last resort you can use the force and read the source, as **vegan** is open source). The **vegan** package ships with other documents which can be read with the `browseVignettes("vegan")` command. The documents included in the **vegan** package are

- **Vegan** `NEWS`
- This document (`FAQ-vegan`).
- Short introduction to basic ordination methods in **vegan** (`intro-vegan`).
- Introduction to diversity methods in **vegan** (`diversity-vegan`).
- Discussion on design decisions in **vegan** (`decision-vegan`).
- Description of variance partition procedures in function `varpart` (`partitioning`).

Web documents outside the package include:

- <https://github.com/vegandevs/vegan>: **vegan** homepage.

------------------------------------------------------------------------

### Is there a Graphical User Interface (GUI) for **vegan**?

Roeland Kindt has made the package **BiodiversityR** which provides a GUI for **vegan**. The package is available at [CRAN](https://cran.r-project.org/package=BiodiversityR). It is not a mere GUI for **vegan**, but adds some new functions and complements **vegan** functions in order to provide a workbench for biodiversity analysis. You can install **BiodiversityR** using `install.packages("BiodiversityR")` or the graphical package management menu in R. The GUI works on Windows, MacOS X and Linux.

------------------------------------------------------------------------

### How to cite **vegan**?

Use the command `citation("vegan")` in R to see the recommended citation to be used in publications.

------------------------------------------------------------------------

### How to build **vegan** from sources?

In general, you do not need to build **vegan** from sources: binary builds of release versions are available through [CRAN](https://cran.r-project.org/) for Windows and MacOS X. If you use some other operating system, you may have to use source packages. **Vegan** is a standard R package, and can be built as instructed in the R documentation. **Vegan** contains source files in C and FORTRAN, and you need appropriate compilers (which may need more work in Windows and MacOS X).

------------------------------------------------------------------------

### Are there binaries for devel versions?

Not currently. You need tools to build C and Fortran programs to install **vegan**. If you have those, you can use `devtools::install_github("vegandevs/vegan")` to install the most recent devel version.

------------------------------------------------------------------------

### How to report a bug in **vegan**?

If you think you have found a bug in **vegan**, you should report it to the **vegan** maintainers or developers. The preferred forum for bug reports is [GitHub](https://github.com/vegandevs/vegan/issues). The bug report should be so detailed that the bug can be replicated and corrected. Preferably, you should send an example that causes the bug.
If it needs a data set that is not available in R, you should send a minimal data set as well. You should also paste the output or error message in your message, and specify which version of **vegan** you used.

Bug reports are welcome: they are the only way to make **vegan** non-buggy. Please note that you should not send bug reports to the R mailing lists, since **vegan** is not a standard R package.

------------------------------------------------------------------------

### Is it a bug or a feature?

It is not necessarily a bug if some function gives different results than you expect: that may be a deliberate design decision. It may be useful to check the documentation of the function to see what the intended behaviour was. It may also happen that the function has an argument to switch the behaviour to match your expectation. For instance, the function `vegdist` always calculates quantitative indices (when this is possible). If you expect it to calculate a binary index, you should use the argument `binary = TRUE`.

------------------------------------------------------------------------

### Can I contribute to **vegan**?

**Vegan** is dependent on user contribution. All feedback is welcome. If you have problems with **vegan**, it may be as simple as incomplete documentation, and we shall do our best to improve the documents. Feature requests also are welcome, but they are not necessarily fulfilled. A new feature will be added if it is easy to do and it looks useful, or if you submit code. If you can write code yourself, the best forum to contribute to **vegan** is [GitHub](https://github.com/vegandevs/vegan).

------------------------------------------------------------------------

Ordination
----------

------------------------------------------------------------------------

### I have only numeric and positive data but **vegan** still complains

You are wrong! Computers are painfully pedantic, and if they find non-numeric or negative data entries, you really have them. Check your data. The most common reasons for non-numeric data are that row names were read as a non-numeric variable instead of being used as row names (check the argument `row.names` when reading the data), or that the column names were interpreted as data (check the argument `header = TRUE` when reading the data). Another common reason is that you had empty cells in your input data, and these were interpreted as missing values.

------------------------------------------------------------------------

### Can I analyse binary or cover class data?

Yes. Most **vegan** methods can handle binary data or cover abundance data. Most statistical tests are based on permutation, and do not make distributional assumptions. There are some methods (mainly in diversity analysis) that need count data. These methods check that the input data are integers, but they may be fooled by cover class data.

------------------------------------------------------------------------

### Why do dissimilarities in **vegan** differ from other sources?

Most commonly the reason is that other software uses presence–absence data whereas **vegan** uses quantitative data. Usually **vegan** indices are quantitative, but you can use the argument `binary = TRUE` to make them presence–absence. However, the index name is the same in both cases, although different names usually occur in the literature. For instance, the Jaccard index actually refers to the binary index, but **vegan** uses the name `"jaccard"` for the quantitative index, too.
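
For example, with the `dune` data (a minimal sketch; any community matrix would do) you can compare the two forms of the index and see that they differ:

```{r eval=FALSE}
library(vegan)
data(dune)
d.quant <- vegdist(dune, method = "jaccard")                 # quantitative form
d.bin   <- vegdist(dune, method = "jaccard", binary = TRUE)  # classical presence-absence form
range(d.quant - d.bin)
```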
Another reason may be that indices indeed are defined differently, because people use the same names for different indices.

------------------------------------------------------------------------

### Why is NMDS stress sometimes 0.1 and sometimes 10?

Stress is a proportional measure of badness of fit. The proportions can be expressed either as parts of one or as percents. The function `isoMDS` (**MASS** package) uses percents, and the function `monoMDS` (**vegan** package) uses proportions, and therefore the same stress is 100 times higher in `isoMDS`. The results of the `goodness` function also depend on the definition of stress, and the same `goodness` is 100 times higher in `isoMDS` than in `monoMDS`. Both of these conventions are equally correct.

------------------------------------------------------------------------

### I get zero stress but no repeated solutions in `metaMDS`

The first (try 0) run of `metaMDS` starts from the metric scaling solution and is usually good, and most software only returns that solution. However, `metaMDS` tries to see if that standard solution can be repeated, or improved and the improved solution still repeated. In all cases, it will return the best solution found, and there is no burning need to do anything if you get the message that the solution could not be repeated. If you are keen to know that the solution really is the global optimum, you may follow the instructions in the `metaMDS` help section "Results Could Not Be Repeated" and try more.

The most common reason is that you have too few observations for your NMDS. For `n` observations (points) and `k` dimensions you need to estimate `n*k` parameters (ordination scores) using `n*(n-1)/2` dissimilarities. For `k` dimensions you must have `n > 2*k + 1`, or for two dimensions at least six points. In some degenerate situations you may need an even larger number of points. If you have a lower number of points, you can find an undefined number of perfect (stress is zero) but different solutions. Conventional wisdom due to Kruskal is that you should have `n > 4*k + 1` points for `k` dimensions. A typical symptom of insufficient data is that you have (nearly) zero stress but no two convergent solutions. In those cases you should reduce the number of dimensions (`k`), and with very small data sets you should not use NMDS but rely on metric methods.

It seems that local and hybrid scaling with `monoMDS` have similar lower limits in practice (although theoretically they could differ). However, a higher number of dimensions can be used in metric scaling, both with `monoMDS` and in principal coordinates analysis (`cmdscale` in **stats**, `wcmdscale` in **vegan**).

------------------------------------------------------------------------

### Zero dissimilarities in isoMDS

The function `metaMDS` uses `monoMDS` as its default method for NMDS, and this function can handle zero dissimilarities. The alternative function `isoMDS` cannot handle zero dissimilarities. If you want to use `isoMDS`, you can use the argument `zerodist = "add"` in `metaMDS`: zero dissimilarities are then replaced with a small positive value, and they can be handled in `isoMDS`. This is a kluge, and some people do not like it. A more principled solution is to remove duplicate sites using the R command `unique`. However, after some standardizations or with some dissimilarity indices, originally non-unique sites can have zero dissimilarity, and you have to resort to the kluge (or work harder with your data).
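
A minimal sketch of the two work-arounds, assuming duplicate rows in the `dune` data are the source of the zero dissimilarities (see the `metaMDS` help page for the exact arguments):

```{r eval=FALSE}
dune.uniq <- unique(dune)                 # drop duplicate sites before the analysis
## or keep all sites and add a small value to zero dissimilarities
ord <- metaMDS(dune, engine = "isoMDS", zerodist = "add")
```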
Usually it is better to use `monoMDS`.

------------------------------------------------------------------------

### I have heard that you cannot fit environmental vectors or surfaces to NMDS results which only have rank-order scores

Claims like this have indeed been at large on the Internet, but they are based on a grave misunderstanding and are plainly wrong. NMDS ordination results are strictly metric, and in **vegan** `metaMDS` and `monoMDS` they are even strictly Euclidean. The method is called “non-metric” because the Euclidean distances in ordination space have a non-metric rank-order relationship to community dissimilarities. You can inspect this non-linear step curve using the function `stressplot` in **vegan**. Because the ordination scores are strictly Euclidean, it is correct to use the **vegan** functions `envfit` and `ordisurf` with NMDS results.

------------------------------------------------------------------------

### Where can I find numerical scores of ordination axes?

Normally you can use the function `scores` to extract ordination scores for any ordination method. The `scores` function can also find ordination scores for many non-**vegan** functions such as `prcomp` and `princomp` and for some **ade4** functions. In some cases the ordination result object stores raw scores, and the axes are scaled appropriately only when you access them with `scores`. For instance, in `cca` and `rda` the ordination object has only so-called normalized scores, and they are scaled for ordination plots or for other use when they are accessed with `scores`.

------------------------------------------------------------------------

### How are the RDA results scaled?

The scaling of RDA results indeed differs from most other software packages. The scaling of RDA is such a complicated issue that it cannot be explained in this FAQ, but it is explained in a separate pdf document on “Design decisions and implementation details in vegan” that you can read with the command `browseVignettes("vegan")`.

------------------------------------------------------------------------

### I cannot print and plot RDA results properly

If the RDA ordination results have a weird format or you cannot plot them properly, you probably have a name clash with the **klaR** package, which also has a function `rda`, and the **klaR** `print`, `plot` or `predict` functions are used for **vegan** RDA results. You can choose between the `rda` functions using `vegan::rda()` or `klaR::rda()`: you will get obscure error messages if you use the wrong function. In general, **vegan** should work normally if it was loaded after **klaR**, but if **klaR** was loaded later, its functions will take precedence over **vegan**. Sometimes the **vegan** namespace is loaded automatically when restoring a previously stored workspace at start-up, and then **klaR** methods will always take precedence over **vegan**. You should check your loaded packages. **klaR** may also be loaded indirectly via other packages (in the reported cases it was most often loaded via the **agricolae** package). **Vegan** and **klaR** both have the same function name (`rda`), and it may not be possible to use these packages simultaneously; the safest choice is to unload one of the packages if at all possible. See the discussion in [vegan github issues](https://github.com/vegandevs/vegan/issues/277).

If you have a very old version of **ade4** (prior to 1.7-8), you may have similar name clashes with `cca`. The solution is to upgrade **ade4**.
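
If you are unsure which `rda` (or `cca`) your session picks up, a quick check is to list the packages that define the name and then call the **vegan** version explicitly (a small sketch; adapt the model to your own data):

```{r eval=FALSE}
find("rda")                                           # which attached packages define 'rda'?
ord <- vegan::rda(dune ~ A1 + Management, dune.env)   # use the vegan function explicitly
```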

------------------------------------------------------------------------

### Ordination fails with “Error in La.svd”

Constrained ordination (`cca`, `rda`, `capscale`) will sometimes fail with the error message `Error in La.svd(x, nu, nv): error code 1 from Lapack routine 'dgesdd'.` It seems that the basic problem is in the `svd` function of `LAPACK` that is used for numerical analysis in R. `LAPACK` is an external library that is beyond the control of the package developers and the R core team, so these problems may be unsolvable. It seems that the problems with the `LAPACK` code are so common that even the help page of `svd` warns about them.

Reducing the range of the constraints (environmental variables) sometimes helps; for instance, you can multiply the constraints by a constant \< 1. This rescaling does not influence the numerical results of constrained ordination, but it can complicate further analyses when the values of the constraints are needed, because the same scaling must be applied there. We can only hope that this problem is fixed in future versions of R and `LAPACK`.

------------------------------------------------------------------------

### Variance explained by ordination axes.

In general, **vegan** does not directly give any statistics on the “variance explained” by ordination axes or by the constrained axes. This is a design decision: I think this information is normally useless and often misleading. In community ordination, the goal typically is not to explain the variance, but to find the “gradients” or main trends in the data. The “total variation” often is meaningless, and all proportions of meaningless values also are meaningless. Often a better solution explains a smaller part of the “total variation”. For instance, in unstandardized principal components analysis most of the variance is generated by a small number of the most abundant species, and they are easy to “explain” because the data really are not very multivariate. If you standardize your data, all species are equally important. The first axes then explain much less of the “total variation”, but they treat all species equally, and the results typically are much more useful for the whole community. Correspondence analysis uses another measure of variation (which is not variance), and again it typically explains a “smaller proportion” than principal components but with a better result. Detrended correspondence analysis and nonmetric multidimensional scaling do not even try to “explain” the variation, but use other criteria. All methods are incommensurable, and it is impossible to compare methods using “explanation of variation”.

If you still want to get the “explanation of variation” (or a deranged editor requests that from you), it is possible to get this information for some methods:

- Eigenvector methods: Functions `rda`, `cca` and `capscale` give the variation of conditional (partialled), constrained (canonical) and residual components, but you must calculate the proportions by hand. Function `eigenvals` extracts the eigenvalues, and `summary(eigenvals(ord))` reports the proportions explained in the result object `ord`. Function `RsquareAdj` gives the R-squared and adjusted R-squared (if available) for constrained components. Function `goodness` gives the same statistics for individual species or sites (species are unavailable with `capscale`). In addition, there is a special function `varpart` for unbiased partitioning of variance between up to four separate components in redundancy analysis.
- Detrended correspondence analysis (function `decorana`). The total amount of variation is undefined in detrended correspondence analysis, and therefore proportions of the total are unknown and undefined. DCA is not a method for decomposition of variation, and therefore these proportions would not make sense either.
- Nonmetric multidimensional scaling. NMDS is a method for nonlinear mapping, and the concept of variation explained does not make sense. However, 1 - stress\^2 transforms nonlinear stress into a quantity analogous to a squared correlation coefficient. The function `stressplot` displays the nonlinear fit and gives this statistic.

------------------------------------------------------------------------

### Can I have random effects in constrained ordination or in `adonis`?

No. Strictly speaking, this is impossible. However, you can define models that serve similar goals as random effects models, although they strictly speaking use only fixed effects.

The constrained ordination functions `cca`, `rda` and `capscale` can have `Condition()` terms in their formula. The `Condition()` terms define partial terms that are fitted before the other constraints and can be used to remove the effects of background variables, and their contribution to decomposing inertia (variance) is reported separately. These partial terms are often regarded as similar to random effects, but they are still fitted in the same way as other terms and strictly speaking they are fixed terms.

Function `adonis2` can evaluate terms sequentially. In a model with right-hand side `~ A + B` the effects of `A` are evaluated first, and the effects of `B` after removing the effects of `A`. Sequential tests are also available in the `anova` function for constrained ordination results by setting the argument `by = "term"`. In this way, the first terms can serve in a similar role as random effects, although they are fitted in the same way as all other terms, and strictly speaking they are fixed terms.

All permutation tests in **vegan** are based on the **permute** package that allows constructing various restricted permutation schemes. For instance, you can set levels of `plots` or `blocks` for a factor regarded as a random term.

A major reason why real random effects models are impossible in most **vegan** functions is that their tests are based on the permutation of the data. The data are given, that is fixed, and therefore permutation tests are basically tests of fixed terms on fixed data. Random effect terms would require permutations of data with a random component instead of the given, fixed data, and such tests are not available in **vegan**.

------------------------------------------------------------------------

### Is it possible to have passive points in ordination?

**Vegan** does not have a concept of passive points, or of a point that should have only little influence on the ordination results. However, you can add points to eigenvector methods using the `predict` functions with `newdata`. You can first perform an ordination without some species or sites, and then find scores for all points using your complete data as `newdata`. The `predict` functions are available for the basic eigenvector methods in **vegan** (`cca`, `rda`, `decorana`; for an up-to-date list, use the command `methods("predict")`). You can also simulate passive points in R by giving low weights to rows and columns (this is the method used in software with passive points). For instance, the following command makes row 3 “passive”: `dune[3,] <- 0.001*dune[3,]`.
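
A minimal sketch of the `predict` approach, assuming you want site 3 of the `dune` data to be passive:

```{r eval=FALSE}
ord <- rda(dune[-3, ])                          # ordination without site 3
predict(ord, newdata = dune[3, , drop = FALSE],
        type = "wa")                            # passive score for the left-out site
```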

------------------------------------------------------------------------

### Class variables and dummies

You should define a class variable as an R `factor`, and **vegan** will automatically handle it with the formula interface. You can also define constrained ordination without the formula interface, but then you must code your class variables by hand.

R (and **vegan**) knows both unordered and ordered factors. Unordered factors are internally coded as dummy variables, but one redundant level is removed or aliased. With default contrasts, the removed level is the first one. Ordered factors are expressed as polynomial contrasts. Both of these contrasts are explained in the standard R documentation.

------------------------------------------------------------------------

### How are environmental arrows scaled?

The printed output of `envfit` gives the direction cosines which are the coordinates of unit length arrows. For plotting, these are scaled by their correlation (square roots of column `r2`). You can see the scaled lengths of `envfit` arrows using the command `scores`.

The scaled environmental vectors from `envfit` and the arrows for continuous environmental variables in constrained ordination (`cca`, `rda`, `capscale`) are adjusted to fill the current graph. The lengths of arrows do not have a fixed meaning with respect to the points (species, sites); they can only be compared against each other, and therefore only their relative lengths are important.

If you want to change the scaling of the arrows, you can use the `text` (plotting arrows and text) or `points` (plotting only arrows) functions for constrained ordination. These functions have the argument `arrow.mul` which sets the multiplier. The `plot` function for `envfit` also has the `arrow.mul` argument to set the arrow multiplier. If you save the invisible result of the constrained ordination `plot` command, you can see the value of the currently used `arrow.mul` which is saved as an attribute of the `biplot` scores.

The function `ordiArrowMul` is used to find the scaling for the current plot. You can use this function to see how arrows would be scaled:

```{r eval=FALSE}
sol <- cca(varespec)
ef <- envfit(sol ~ ., varechem)
plot(sol)
ordiArrowMul(scores(ef, display="vectors"))
```

------------------------------------------------------------------------

### I want to use Helmert or sum contrasts

`vegan` uses the standard R utilities for defining contrasts. The default in standard installations is to use treatment contrasts, but you can change the behaviour globally by setting `options`, or locally by using the keyword `contrasts`. Please check the R help pages and user manuals for details.

------------------------------------------------------------------------

### What are aliased variables and how to see them?

An aliased variable has no information because it can be expressed with the help of other variables. Such variables are automatically removed in constrained ordination in **vegan**. The aliased variables can be redundant levels of factors or whole variables. The **vegan** function `alias` gives the defining equations for aliased variables. If you only want to see the names of aliased variables or levels in the solution `sol`, use `alias(sol, names.only=TRUE)`.

------------------------------------------------------------------------

### Plotting aliased variables

You can fit vectors or class centroids for aliased variables using the `envfit` function. The `envfit` function uses weighted fitting, and the fitted vectors are identical to the vectors in correspondence analysis.
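
For instance, a small sketch (assuming `sol` is a constrained ordination of the `dune` data in which some terms or levels were aliased, and `dune.env` holds the explanatory variables):

```{r eval=FALSE}
alias(sol, names.only = TRUE)                    # which terms or levels were aliased?
fit <- envfit(sol ~ Management, data = dune.env)
plot(sol)
plot(fit)                                        # adds centroids also for aliased levels
```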

------------------------------------------------------------------------

### Restricted permutations in **vegan**

**Vegan** uses the **permute** package in all its permutation tests. The **permute** package allows restricted permutation designs for time series, line transects, spatial grids and blocking factors. The construction of restricted permutation schemes is explained in the manual page `permutations` in **vegan** and in the documentation of the **permute** package.

------------------------------------------------------------------------

### How to use different plotting symbols in ordination graphics?

The default ordination `plot` function is intended for fast plotting and it is not very configurable. To use different plotting symbols, you should first create an empty ordination plot with `plot(..., type="n")`, and then add `points` or `text` to the created empty frame (here `...` means other arguments you want to give to your `plot` command). The `points` and `text` commands are fully configurable, and allow different plotting symbols and characters.

------------------------------------------------------------------------

### How to avoid cluttered ordination graphs?

If there is a really high number of species or sites, the graphs often are congested and many labels are overwritten. It may be impossible to have completely readable graphics with some data sets. Below we give a brief overview of tricks you can use. Gavin Simpson’s blog [From the bottom of the heap](https://fromthebottomoftheheap.net) has a series of articles on “decluttering ordination plots” with more detailed discussion and examples.

- Use only points, possibly with different types, if you do not need to see the labels. You may need to first create an empty plot using `plot(..., type="n")`, if you are not satisfied with the default graph. (Here and below `...` means other arguments you want to give to your `plot` command.)
- Use points and add labels to desired points using the interactive `identify` command if you do not need to see all labels.
- Add labels using the function `ordilabel` which draws a non-transparent background behind the text. The labels still shadow each other, but the uppermost labels are readable. The argument `priority` will help in displaying the most interesting labels (see [Decluttering blog, part 1](https://fromthebottomoftheheap.net/2013/01/12/decluttering-ordination-plots-in-vegan-part-1-ordilabel/)).
- Use the `orditorp` function, which uses labels only if they can be added to a graph without overwriting other labels, and points otherwise, if you do not need to see all labels. You must first create an empty plot using `plot(..., type="n")`, and then add labels or points with `orditorp` (see [Decluttering blog](https://fromthebottomoftheheap.net/2013/01/13/decluttering-ordination-plots-in-vegan-part-2-orditorp/)).
- Use `ordipointlabel`, which plots both points and text labels and tries to optimize the location of the text to minimize the overlap (see [Decluttering blog](https://fromthebottomoftheheap.net/2013/06/27/decluttering-ordination-plots-in-vegan-part-3-ordipointlabel/)).
- The ordination `text` and `points` functions have the argument `select` that can be used for full control of selecting items plotted as text or points.
- Use the interactive `orditkplot` function that lets you drag labels of points to better positions if you need to see all labels.
  Only one set of points can be used (see [Decluttering blog](https://fromthebottomoftheheap.net/2013/12/31/decluttering-ordination-in-vegan-part-4-orditkplot/)).
- Most `plot` functions allow you to zoom to a part of the graph using the `xlim` and `ylim` arguments to reduce clutter in congested areas.

------------------------------------------------------------------------

### Can I flip an axis in an ordination diagram?

Use `xlim` or `ylim` with flipped limits. If you have the model `mod <- cca(dune)` you can flip the first axis with `plot(mod, xlim = c(3, -2))`.

------------------------------------------------------------------------

### Can I zoom into an ordination plot?

You can use the `xlim` and `ylim` arguments in `plot` or `ordiplot` to zoom into ordination diagrams. Normally you must set both `xlim` and `ylim`, because ordination plots keep an equal aspect ratio of axes, and they will fill the graph so that the longer axis will fit.

Dynamic zooming can be done with the function `orditkplot`. You can directly save the edited `orditkplot` graph in various graphic formats, or you can export the graph object back to R and use `plot` to display the results.

------------------------------------------------------------------------

Other analysis methods
----------------------

------------------------------------------------------------------------

### Is there TWINSPAN?

No. It may be possible to port TWINSPAN to **vegan**, but it is not among the top **vegan** priorities. If anybody wants to try porting, I will be happy to help. TWINSPAN has a very permissive license, and it would be completely legal to port the function into R.

------------------------------------------------------------------------

### Why does restricted permutation not influence adonis results?

The permutation scheme influences the permutation distribution of the statistics and probably the significance levels, but it does not influence the calculation of the statistics.

------------------------------------------------------------------------

### How is deviance calculated?

Some **vegan** functions, such as `radfit`, use the base R `family` facility in maximum likelihood estimation. This allows the use of several alternative error distributions, among them `"poisson"` and `"gaussian"`. The R `family` also defines the deviance. You can see the equations for the deviance residuals with commands like `poisson()$dev.resids` or `gaussian()$dev.resids`. In general, the deviance is minus 2 times the log-likelihood, shifted so that a model with an exact fit has zero deviance.

------------------------------------------------------------------------
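
Continuing the deviance question above, a minimal sketch of inspecting these definitions in R (the single-plot `radfit` call follows the usage in the diversity vignette):

```{r eval=FALSE}
poisson()$dev.resids          # the function defining Poisson deviance residuals
rad <- radfit(BCI[1, ])       # rank-abundance models for one BCI plot
rad                           # the printed table reports the deviance of each model
```

The deviance of a fitted model is the sum of these deviance residuals over the data points.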
/scratch/gouwar.j/cran-all/cranData/vegan/vignettes/FAQ-vegan.Rmd
`ordiplot3d` <- function (object, display = "sites", choices = 1:3, col = "black", ax.col = "red", arr.len = 0.1, arr.col = "blue", envfit, xlab, ylab, zlab, ...) { ordiArgAbsorber <- function(..., shrink, origin, scaling, triangular, display, choices, const, truemean, FUN) { match.fun(FUN)(...) } x <- scores(object, display = display, choices = choices, ...) if (missing(xlab)) xlab <- colnames(x)[1] if (missing(ylab)) ylab <- colnames(x)[2] if (missing(zlab)) zlab <- colnames(x)[3] if (is.factor(col)) col = as.numeric(col) col <- rep(col, length = nrow(x)) ## need scatterplot3d (>= 0.3-39) to set aspect ration; earlier we ## had a kluge here pl <- ordiArgAbsorber(x[, 1], x[, 2], x[, 3], color = col, xlab = xlab, ylab = ylab, zlab = zlab, asp = 1, FUN = "scatterplot3d", ...) pl$points3d(range(x[, 1]), c(0, 0), c(0, 0), type = "l", col = ax.col) pl$points3d(c(0, 0), range(x[, 2]), c(0, 0), type = "l", col = ax.col) pl$points3d(c(0, 0), c(0, 0), range(x[, 3]), type = "l", col = ax.col) if (!missing(envfit) || (inherits(object, "cca") && !is.null(object$CCA) && object$CCA$rank > 0)) { if (!missing(envfit)) object <- envfit bp <- scores(object, dis = "bp", choices = choices, ...) cn <- scores(object, dis = "cn", choices = choices, ...) if (!is.null(cn) && !any(is.na(cn))) { bp <- bp[!(rownames(bp) %in% rownames(cn)), , drop = FALSE] cn.xyz <- pl$xyz.convert(cn) points(cn.xyz, pch = "+", cex = 2, col = arr.col) } if (!is.null(bp) && nrow(bp) > 0) { tmp <- pl$xyz.convert(bp) mul <- ordiArrowMul(cbind(tmp$x, tmp$y), fill=1) bp.xyz <- pl$xyz.convert(bp * mul) orig <- pl$xyz.convert(0, 0, 0) arrows(orig$x, orig$y, bp.xyz$x, bp.xyz$y, length = arr.len, col = arr.col) } } ## save the location of the origin pl$origin <- matrix(unlist(pl$xyz.convert(0, 0, 0)), nrow=1) ## Add function that flattens 3d envfit object so that it can be ## projected on the created 3d graph xyz2xy <- pl$xyz.convert envfit.convert <- function(object) { if (!is.null(object$vectors)) { rn <- rownames(object$vectors$arrows) arr <- object$vectors$arrows[, choices, drop = FALSE] arr <- sapply(xyz2xy(arr), cbind) if (!is.matrix(arr)) arr <- matrix(arr, ncol = 2) arr <- sweep(arr, 2, pl$origin) rownames(arr) <- rn object$vectors$arrows <- arr } if (!is.null(object$factors)) { rn <- rownames(object$factors$centroids) object$factors$centroids <- object$factors$centroids[ ,choices, drop = FALSE] object$factors$centroids <- sapply(xyz2xy(object$factors$centroids), cbind) if (!is.matrix(object$factors$centroids)) object$factors$centroids <- matrix(object$factors$centroids, ncol = 2) rownames(object$factors$centroids) <- rn } object } pl$envfit.convert <- envfit.convert ## save projected coordinates of points tmp <- pl$xyz.convert(x) pl$points <- cbind(tmp$x, tmp$y) rownames(pl$points) <- rownames(x) if (exists("bp.xyz")) { pl$arrows <- cbind(bp.xyz$x, bp.xyz$y) rownames(pl$arrows) <- rownames(bp) } if (exists("cn.xyz")) { pl$centroids <- cbind(cn.xyz$x, cn.xyz$y) rownames(pl$centroids) <- rownames(cn) } class(pl) <- c("ordiplot3d", "ordiplot") invisible(pl) }
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/ordiplot3d.R
`ordirgl` <- function (object, display = "sites", choices = 1:3, type = "p", col = "black", ax.col = "red", arr.col = "yellow", radius, text, envfit, ...) { x <- scores(object, display = display, choices = choices, ...) if (ncol(x) < 3) stop("3D display needs three dimensions...") ## clear window and set isometric aspect ratio clear3d() op <- aspect3d("iso") if (!all(op$scale == 1)) warning("set isometric aspect ratio, previously was ", paste(round(op$scale, 3), collapse=", ")) ## colors to a vector, factors to numeric if (is.factor(col)) col <- as.numeric(col) col <- rep(col, length = nrow(x)) ## on.exit(aspect3d(op)) # Fails on.exit: rgl plot is still open if (type == "p") { ## default radius if (missing(radius)) radius <- max(apply(x, 2, function(z) diff(range(z))))/100 ## users may expect cex to work (I would) cex <- eval(match.call(expand.dots = FALSE)$...$cex) if (!is.null(cex)) radius <- cex * radius spheres3d(x, radius = radius, col = col, ...) } else if (type == "t") { if (missing(text)) text <- rownames(x) text3d(x[, 1], x[, 2], x[, 3], text, adj = 0.5, col = col, ...) } segments3d(range(x[, 1]), c(0, 0), c(0, 0), col = ax.col) segments3d(c(0, 0), range(x[, 2]), c(0, 0), col = ax.col) segments3d(c(0, 0), c(0, 0), range(x[, 3]), col = ax.col) text3d(1.1 * max(x[, 1]), 0, 0, colnames(x)[1], col = ax.col, adj = 0.5) text3d(0, 1.1 * max(x[, 2]), 0, colnames(x)[2], col = ax.col, adj = 0.5) text3d(0, 0, 1.1 * max(x[, 3]), colnames(x)[3], col = ax.col, adj = 0.5) if (!missing(envfit) || (is.list(object) && !is.null(object$CCA) && object$CCA$rank > 0)) { if (!missing(envfit)) object <- envfit if (!missing(envfit) && is.na(envfit)) return(invisible()) bp <- scores(object, dis = "bp", choices = choices) cn <- scores(object, dis = "cn", choices = choices) if (!is.null(cn) && !any(is.na(cn))) { bp <- bp[!(rownames(bp) %in% rownames(cn)), , drop = FALSE] text3d(cn[, 1], cn[, 2], cn[, 3], rownames(cn), col = arr.col, adj = 0.5) points3d(cn[, 1], cn[, 2], cn[, 3], size = 5, col = arr.col) } if (!is.null(bp) && nrow(bp) > 0) { mul <- c(range(x[, 1]), range(x[, 2]), range(x[, 3]))/c(range(bp[, 1]), range(bp[, 2]), range(bp[, 3])) mul <- mul[is.finite(mul) & mul > 0] mul <- min(mul) bp <- bp * mul for (i in 1:nrow(bp)) { segments3d(c(0, bp[i, 1]), c(0, bp[i, 2]), c(0, bp[i, 3]), col = arr.col) text3d(1.1 * bp[i, 1], 1.1 * bp[i, 2], 1.1 * bp[i, 3], rownames(bp)[i], col = arr.col, adj = 0.5) } } } invisible() }
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/ordirgl.R
### This function was a part of vegan package from 2008 (release ### 1.11-0) to 2023 (release 2.6-4) when it was moved vegan3d due to ### its exotic dependencies (Tcl/Tk). Development was mainly done in ### 2008 to 2015 and mostly in 2008. Some obvious changes to be made ### are: ### * Enable several sets ("layers") of scores. Currently the only ### practical way is to base editing on ordipointlabel results (see ### Examples in doc). ### * Enable reading of new sets of scores ("layers") from the Tcl/Tk ### window, even launching an empty window and reading all scores from ### the GUI. ### * Handle arrows, such as CCA and envfit. ### * Enable setting graphical par() per set of scores ("layers"). ### ### Editable Tcl/Tk plot for ordination ### `orditkplot` <- function(x, display = "species", choices = 1:2, width, xlim, ylim, tcex=0.8, tcol, pch = 1, pcol, pbg, pcex = 0.7, labels, ...) { if (!capabilities("tcltk")) stop("your R has no capability for Tcl/Tk") requireNamespace("tcltk") || stop("requires package tcltk") ############################ ### Check and sanitize input ########################### ## Graphical parameters and constants, and save some for later plotting p <- par() sparnam <- c("bg","cex", "cex.axis","cex.lab","col", "col.axis", "col.lab", "family", "fg", "font", "font.axis", "font.lab", "lheight", "lwd", "mar", "mex", "mgp", "ps", "tcl", "las") ## Get par given in the command line and put them to p if (inherits(x, "orditkplot")) { dots <- x$par for (arg in names(x$args)) assign(arg, unlist(x$args[arg])) } else { dots <- match.call(expand.dots = FALSE)$... } if (length(dots) > 0) { dots <- dots[names(dots) %in% sparnam] ## eval() or mar=c(4,4,1,1) will be a call, not numeric dots <- lapply(dots, function(x) if (is.call(x)) eval(x) else x) p <- check.options(new = dots, name.opt = "p", envir = environment()) } savepar <- p[sparnam] PPI <- 72 # Points per Inch p2p <- as.numeric(tcltk::tclvalue(tcltk::tcl("tk", "scaling"))) # Pixel per point DIAM <- 2.7 # diam of plotting symbol ## Plotting symbol diam diam <- round(pcex * DIAM * p2p, 1) ## Sanitize colours sanecol <- function(x) { if (is.numeric(x)) x <- palette()[x] x <- gsub("transparent", "", x) x[is.na(x)] <- "" x } p$bg <- sanecol(p$bg) p$fg <- sanecol(p$fg) p$col <- sanecol(p$col) p$col.axis <- sanecol(p$col.axis) p$col.lab <- sanecol(p$col.lab) ## Point and label colours if (missing(pcol)) pcol <- p$col if (missing(pbg)) pbg <- "transparent" if (missing(tcol)) tcol <- p$col pcol <- sanecol(pcol) pbg <- sanecol(pbg) tcol <- sanecol(tcol) ## Define fonts idx <- match(p$family, c("","serif","sans","mono")) if (!is.na(idx)) p$family <- c("Helvetica", "Times", "Helvetica", "Courier")[idx] saneslant <- function(x) { list("roman", "bold", "italic", c("bold", "italic"))[[x]] } ## fnt must be done later, since family, font and size can be ## vectors and slant can be of length 1 or 2 ## fnt <- c(p$family, round(p$ps*p$cex*tcex), saneslant(p$font)) labfam <- p$family labsize <- round(p$ps * p$cex * tcex) fnt.axis <- c(p$family, round(p$ps*p$cex.axis), saneslant(p$font.axis)) fnt.lab <- c(p$family, round(p$ps*p$cex.lab), saneslant(p$font.lab)) ## Imitate R plotting symbols pch SQ <- sqrt(2) # Scaling factor for plot Point <- function(x, y, pch, col, fill, diam) { x <- round(x) y <- round(y) switch(as.character(pch), "0" = Point(x, y, 22, col, fill = "", diam), "1" = Point(x, y, 21, col, fill = "", diam), "2" = Point(x, y, 24, col, fill = "", diam), "3" = {tcltk::tkcreate(can, "line", x, y+SQ*diam, x, y-SQ*diam, fill=col) 
tcltk::tkcreate(can, "line", x+SQ*diam, y, x-SQ*diam, y, fill=col)}, "4" = {tcltk::tkcreate(can, "line", x-diam, y-diam, x+diam, y+diam, fill=col) tcltk::tkcreate(can, "line", x-diam, y+diam, x+diam, y-diam, fill=col)}, "5" = Point(x, y, 23, col, fill = "", diam), "6" = Point(x, y, 25, col, fill = "", diam), "7" = {Point(x, y, 4, col, fill, diam) Point(x, y, 0, col, fill, diam)}, "8" = {Point(x, y, 3, col, fill, diam) Point(x, y, 4, col, fill, diam)}, "9" = {Point(x, y, 3, col, fill, diam) Point(x, y, 5, col, fill, diam)}, "10" = {Point(x, y, 3, col, fill, diam/SQ) Point(x, y, 1, col, fill, diam)}, "11" = {Point(x, y, 2, col, fill, diam) Point(x, y, 6, col, fill, diam)}, "12" = {Point(x, y, 3, col, fill, diam/SQ) Point(x, y, 0, col, fill, diam)}, "13" = {Point(x, y, 4, col, fill, diam) Point(x, y, 1, col, fill, diam)}, "14" = {tcltk::tkcreate(can, "line", x-diam, y-diam, x, y+diam, fill = col) tcltk::tkcreate(can, "line", x+diam, y-diam, x, y+diam, fill = col) Point(x, y, 0, col, fill, diam)}, "15" = Point(x, y, 22, col = col, fill = col, diam), "16" = Point(x, y, 21, col = col, fill = col, diam), "17" = Point(x, y, 24, col = col, fill = col, diam), "18" = Point(x, y, 23, col = col, fill = col, diam/SQ), "19" = Point(x, y, 21, col = col, fill = col, diam), "20" = Point(x, y, 21, col = col, fill = col, diam/2), "21" = tcltk::tkcreate(can, "oval", x-diam, y-diam, x+diam, y+diam, outline = col, fill = fill), "22" = tcltk::tkcreate(can, "rectangle", x-diam, y-diam, x+diam, y+diam, outline = col, fill = fill), "23" = tcltk::tkcreate(can, "polygon", x, y+SQ*diam, x+SQ*diam, y, x, y-SQ*diam, x-SQ*diam, y, outline = col, fill = fill), "24" = tcltk::tkcreate(can, "polygon", x, y-SQ*diam, x+sqrt(6)/2*diam, y+SQ/2*diam, x-sqrt(6)/2*diam, y+SQ/2*diam, outline = col, fill = fill), "25" = tcltk::tkcreate(can, "polygon", x, y+SQ*diam, x+sqrt(6)/2*diam, y-SQ/2*diam, x-sqrt(6)/2*diam, y-SQ/2*diam, outline = col, fill = fill), "o" = Point(x, y, 1, col, fill, diam), ## default: text with dummy location of the label {tcltk::tkcreate(can, "text", x, y, text = as.character(pch), fill = col) Point(x, y, 21, col="", fill="", diam)} ) } ############################ ### Initialize Tcl/Tk Window ############################ ## toplevel w <- tcltk::tktoplevel() tcltk::tktitle(w) <- deparse(match.call()) ## Max dim of windows (depends on screen) YSCR <- as.numeric(tcltk::tkwinfo("screenheight", w)) - 150 XSCR <- as.numeric(tcltk::tkwinfo("screenwidth", w)) ################################ ### Buttons and button functions ################################ ## Buttons buts <- tcltk::tkframe(w) ## Copy current canvas to EPS using the standard Tcl/Tk utility cp2eps <- tcltk::tkbutton(buts, text="Copy to EPS", command=function() tcltk::tkpostscript(can, x=0, y=0, height=height, width=width, file=tcltk::tkgetSaveFile( filetypes="{{EPS file} {.eps}}", defaultextension=".eps"))) dismiss <- tcltk::tkbutton(buts, text="Close", command=function() tcltk::tkdestroy(w)) ## Dump current plot to an "orditkplot" object (internally) ordDump <- function() { xy <- matrix(0, nrow=nrow(sco), ncol=2) rownames(xy) <- rownames(sco) colnames(xy) <- colnames(sco) for(nm in names(pola)) { xy[as.numeric(tcltk::tclvalue(id[[nm]])),] <- xy2usr(nm) } curdim <- round(c(width, height) /PPI/p2p, 2) ## Sanitize colours for R plot pbg[pbg == ""] <- "transparent" pcol[pcol == ""] <- "transparent" ## Reduce vector args if all entries are constant argcollapse <- function(x) if (length(unique(x)) == 1) x[1] else x pch <- argcollapse(pch) pcol <- 
argcollapse(pcol) pbg <- argcollapse(pbg) tcol <- argcollapse(tcol) ## Save args <- list(tcex = tcex, tcol = tcol, pch = pch, pcol = pcol, pbg = pbg, pcex = pcex, xlim = xlim, ylim = ylim) xy <- list(labels = xy, points = sco, par = savepar, args = args, dim = curdim) class(xy) <- "orditkplot" xy } ## Button to dump "orditkplot" object to the R session pDump <- function() { xy <- ordDump() dumpVar <- tcltk::tclVar("") tt <- tcltk::tktoplevel() tcltk::tktitle(tt) <- "R Dump" entryDump <- tcltk::tkentry(tt, width=20, textvariable=dumpVar) tcltk::tkgrid(tcltk::tklabel(tt, text="Enter name for an R object")) tcltk::tkgrid(entryDump, pady="5m") isDone <- function() { dumpName <- tcltk::tclvalue(dumpVar) if (exists(dumpName, envir = parent.frame())) { ok <- tcltk::tkmessageBox(message=paste(sQuote(dumpName), "exists.\nOK to overwrite?"), icon="warning", type="okcancel", default="ok") if(tcltk::tclvalue(ok) == "ok") { assign(dumpName, xy, envir = parent.frame()) tcltk::tkdestroy(tt) } } else { assign(dumpName, xy, envir = parent.frame()) tcltk::tkdestroy(tt) } } tcltk::tkbind(entryDump, "<Return>", isDone) tcltk::tkfocus(tt) } dump <- tcltk::tkbutton(buts, text="Save to R", command=pDump) ## Button to write current "orditkplot" object to a graphical device devDump <- function() { xy <- ordDump() ftypes <- c("eps" = "{EPS File} {.eps}", "pdf" = "{PDF File} {.pdf}", "svg" = "{SVG File} {.svg}", "png" = "{PNG File} {.png}", "jpg" = "{JPEG File} {.jpg .jpeg}", "bmp" = "{BMP File} {.bmp}", "tiff"= "{TIFF File} {.tif .tiff}", "fig" = "{XFig File} {.fig}") falt <- rep(TRUE, length(ftypes)) names(falt) <- names(ftypes) if (!capabilities("png")) falt["png"] <- FALSE if (!capabilities("jpeg")) falt["jpg"] <- FALSE if (!capabilities("cairo")) falt["svg"] <- FALSE ## Should work also in R < 2.8.0 with no capabilities("tiff") if (!isTRUE(unname(capabilities("tiff")))) falt["tiff"] <- FALSE ftypes <- ftypes[falt] ## External Tcl/Tk in Windows seems to buggy with type ## extensions of the file name: the extension is not ## automatically appended, but defaultextension is interpreted ## wrongly so that its value is not used as extension but ## correct appending is done if defaultextension has any ## value. The following kluge is against Tcl/Tk documentation, ## and should be corrected if Tcl/Tk is fixed. if (.Platform$OS.type == "windows") fname <- tcltk::tkgetSaveFile(filetypes=ftypes, defaultextension = TRUE) else fname <- tcltk::tkgetSaveFile(filetypes=ftypes) if(tcltk::tclvalue(fname) == "") return(NULL) fname <- tcltk::tclvalue(fname) ftype <- unlist(strsplit(fname, "\\.")) ftype <- ftype[length(ftype)] if (ftype == "jpeg") ftype <- "jpg" if (ftype == "tif") ftype <- "tiff" mess <- "is not a supported type: file not produced. 
Supported types are" if (!(ftype %in% names(ftypes))) { tcltk::tkmessageBox(message=paste(sQuote(ftype), mess, paste(names(ftypes), collapse=", ")), icon="warning") return(NULL) } pixdim <- round(xy$dim*PPI*p2p) switch(ftype, eps = postscript(file=fname, width=xy$dim[1], height=xy$dim[2], paper="special", horizontal = FALSE), pdf = pdf(file=fname, width=xy$dim[1], height=xy$dim[2]), svg = svg(filename=fname, width=xy$dim[1], height=xy$dim[2]), png = png(filename=fname, width=pixdim[1], height=pixdim[2]), jpg = jpeg(filename=fname, width=pixdim[1], height=pixdim[2], quality = 100), tiff = tiff(filename=fname, width=pixdim[1], height=pixdim[2]), bmp = bmp(filename=fname, width=pixdim[1], height=pixdim[2]), fig = xfig(file=fname, width=xy$dim[1], height=xy$dim[2])) plot.orditkplot(xy) dev.off() } export <- tcltk::tkbutton(buts, text="Export plot", command=devDump) ########## ### Canvas ########## ## Make canvas sco <- try(scores(x, display=display, choices = choices, ...), silent = TRUE) if (inherits(sco, "try-error")) { tcltk::tkmessageBox(message=paste("No ordination scores were found in", sQuote(deparse(substitute(x)))), icon="error") tcltk::tkdestroy(w) stop("argument x did not contain ordination scores") } if (!missing(labels)) rownames(sco) <- labels ## Recycle graphical parameters in plots nr <- nrow(sco) pcol <- rep(pcol, length=nr) pbg <- rep(pbg, length=nr) pch <- rep(pch, length=nr) tcol <- rep(tcol, length=nr) diam <- rep(diam, length=nr) labfam <- rep(labfam, length=nr) labsize <- rep(labsize, length=nr) if (inherits(x, "ordipointlabel")) labfnt <- attr(x$labels, "font") else labfnt <- rep(p$font, length=nr) ## Select only items within xlim, ylim take <- rep(TRUE, nr) if (!missing(xlim)) take <- take & sco[,1] >= xlim[1] & sco[,1] <= xlim[2] if (!missing(ylim)) take <- take & sco[,2] >= ylim[1] & sco[,2] <= ylim[2] sco <- sco[take,, drop=FALSE] labs <- rownames(sco) pcol <- pcol[take] pbg <- pbg[take] tcol <- tcol[take] pch <- pch[take] diam <- diam[take] labfam <- labfam[take] labsize <- labsize[take] labfnt <- labfnt[take] ## Ranges and pretty values for axes if (missing(xlim)) xlim <- range(sco[,1], na.rm = TRUE) if (missing(ylim)) ylim <- range(sco[,2], na.rm = TRUE) xpretty <- pretty(xlim) ypretty <- pretty(ylim) ## Extend ranges by 4% xrange <- c(-0.04, 0.04) * diff(xlim) + xlim xpretty <- xpretty[xpretty >= xrange[1] & xpretty <= xrange[2]] yrange <- c(-0.04, 0.04) * diff(ylim) + ylim ypretty <- ypretty[ypretty >= yrange[1] & ypretty <= yrange[2]] ## Canvas like they were in the default devices when I last checked if (missing(width)) width <- p$din[1] width <- width * PPI * p2p ## Margin row width also varies with platform and devices ## rpix <- (p$mai/p$mar * PPI * p2p)[1] rpix <- p$cra[2] mar <- round(p$mar * rpix) xusr <- width - mar[2] - mar[4] xincr <- xusr/diff(xrange) yincr <- xincr xy0 <- c(xrange[1], yrange[2]) # upper left corner ## Functions to translate scores to canvas coordinates and back usr2xy <- function(row) { x <- (row[1] - xy0[1]) * xincr + mar[2] y <- (xy0[2] - row[2]) * yincr + mar[3] c(x,y) } ## User coordinates of an item xy2usr <- function(item) { xy <- as.numeric(tcltk::tkcoords(can, item)) x <- xy[1] y <- xy[2] x <- xrange[1] + (x - mar[2])/xincr y <- yrange[2] - (y - mar[3])/yincr c(x,y) } ## Canvas x or y to user coordinates x2usr <- function(xcan) { xrange[1] + (xcan - mar[2])/xincr } y2usr <- function(ycan) { yrange[2] - (ycan - mar[3])/yincr } ## Equal aspect ratio height <- round((diff(yrange)/diff(xrange)) * xusr) height <- height + mar[1] 
+ mar[3] ## Canvas, finally can <- tcltk::tkcanvas(w, relief="sunken", width=width, height=min(height,YSCR), scrollregion=c(0,0,width,height)) if (p$bg != "") tcltk::tkconfigure(can, bg=p$bg) yscr <- tcltk::tkscrollbar(w, command = function(...) tcltk::tkyview(can, ...)) tcltk::tkconfigure(can, yscrollcommand = function(...) tcltk::tkset(yscr, ...)) ## Pack it up tcltk::tkpack(buts, side="bottom", fill="x", pady="2m") tcltk::tkpack(can, side="left", fill="x") tcltk::tkpack(yscr, side="right", fill="y") tcltk::tkgrid(cp2eps, export, dump, dismiss, sticky="s") ## Box x0 <- usr2xy(c(xrange[1], yrange[1])) x1 <- usr2xy(c(xrange[2], yrange[2])) tcltk::tkcreate(can, "rectangle", x0[1], x0[2], x1[1], x1[2], outline = p$fg, width = p$lwd) ## Axes and ticks tl <- -p$tcl * rpix # -p$tcl * p$ps * p2p axoff <- p$mgp[3] * rpix tmp <- xpretty for (i in seq_along(tmp)) { x0 <- usr2xy(c(xpretty[1], yrange[1])) x1 <- usr2xy(c(xpretty[length(xpretty)], yrange[1])) tcltk::tkcreate(can, "line", x0[1], x0[2]+axoff, x1[1], x1[2]+axoff, fill=p$fg) xx <- usr2xy(c(tmp[i], yrange[1])) tcltk::tkcreate(can, "line", xx[1], xx[2] + axoff, xx[1], xx[2]+tl+axoff, fill=p$fg) tcltk::tkcreate(can, "text", xx[1], xx[2] + rpix * p$mgp[2], anchor="n", text=as.character(tmp[i]), fill=p$col.axis, font=fnt.axis) } xx <- usr2xy(c(mean(xrange), yrange[1])) tcltk::tkcreate(can, "text", xx[1], xx[2] + rpix * p$mgp[1], text=colnames(sco)[1], fill=p$col.lab, anchor="n", font=fnt.lab) tmp <- ypretty for (i in seq_along(tmp)) { x0 <- usr2xy(c(xrange[1], tmp[1])) x1 <- usr2xy(c(xrange[1], tmp[length(tmp)])) tcltk::tkcreate(can, "line", x0[1]-axoff, x0[2], x1[1]-axoff, x1[2]) yy <- usr2xy(c(xrange[1], tmp[i])) tcltk::tkcreate(can, "line", yy[1]-axoff, yy[2], yy[1]-tl-axoff, yy[2], fill=p$fg ) tcltk::tkcreate(can, "text", yy[1] - rpix * p$mgp[2] , yy[2], anchor="e", text=as.character(tmp[i]), fill = p$col.axis, font=fnt.axis) } ## Points and labels ## The following 'inherits' works with ordipointlabel, but not ## with zooming if (inherits(x, "orditkplot")) { lsco <- scores(x, "labels") laboff <- rep(0, nrow(lsco)) lsco <- lsco[rownames(sco),] } else { lsco <- sco laboff <- round(p2p * p$ps/2 + diam + 1) } pola <- tcltk::tclArray() # points labtext <- tcltk::tclArray() # text id <- tcltk::tclArray() # index for (i in 1:nrow(sco)) { xy <- usr2xy(sco[i,]) item <- Point(xy[1], xy[2], pch = pch[i], col = pcol[i], fill = pbg[i], diam = diam[i]) xy <- usr2xy(lsco[i,]) fnt <- c(labfam[i], labsize[i], saneslant(labfnt[i])) lab <- tcltk::tkcreate(can, "text", xy[1], xy[2]-laboff[i], text=labs[i], fill = tcol[i], font=fnt) tcltk::tkaddtag(can, "point", "withtag", item) tcltk::tkaddtag(can, "label", "withtag", lab) pola[[lab]] <- item labtext[[lab]] <- labs[i] id[[lab]] <- i } ############################## ### Mouse operations on canvas ############################## ## Plotting and Moving ## Mouse enters a label pEnter <- function() { tcltk::tkdelete(can, "box") hbox <- tcltk::tkcreate(can, "rectangle", tcltk::tkbbox(can, "current"), outline = "red", fill = "yellow") tcltk::tkaddtag(can, "box", "withtag", hbox) tcltk::tkitemraise(can, "current") } ## Mouse leaves a label pLeave <- function() { tcltk::tkdelete(can, "box") } ## Select label pDown <- function(x, y) { x <- as.numeric(x) y <- as.numeric(y) tcltk::tkdtag(can, "selected") tcltk::tkaddtag(can, "selected", "withtag", "current") tcltk::tkitemraise(can, "current") p <- as.numeric(tcltk::tkcoords(can, pola[[tcltk::tkfind(can, "withtag", "current")]])) .pX <<- (p[1]+p[3])/2 .pY <<- (p[2]+p[4])/2 
.lastX <<- x .lastY <<- y } ## Move label pMove <- function(x, y) { x <- as.numeric(x) y <- as.numeric(y) tcltk::tkmove(can, "selected", x - .lastX, y - .lastY) tcltk::tkdelete(can, "ptr") tcltk::tkdelete(can, "box") .lastX <<- x .lastY <<- y ## xadj,yadj: adjust for canvas scrolling xadj <- as.numeric(tcltk::tkcanvasx(can, 0)) yadj <- as.numeric(tcltk::tkcanvasy(can, 0)) hbox <- tcltk::tkcreate(can, "rectangle", tcltk::tkbbox(can, "selected"), outline = "red") tcltk::tkaddtag(can, "box", "withtag", hbox) conn <- tcltk::tkcreate(can, "line", .lastX + xadj, .lastY+yadj, .pX, .pY, fill="red") tcltk::tkaddtag(can, "ptr", "withtag", conn) } ## Edit label pEdit <- function() { tcltk::tkdtag(can, "selected") tcltk::tkaddtag(can, "selected", "withtag", "current") tcltk::tkitemraise(can, "current") click <- tcltk::tkfind(can, "withtag", "current") txt <- tcltk::tclVar(labtext[[click]]) i <- as.numeric(id[[click]]) tt <- tcltk::tktoplevel() labEd <- tcltk::tkentry(tt, width=20, textvariable=txt) tcltk::tkgrid(tcltk::tklabel(tt, text = "Edit label")) tcltk::tkgrid(labEd, pady="5m", padx="5m") isDone <- function() { txt <- tcltk::tclvalue(txt) tcltk::tkitemconfigure(can, click, text = txt) rownames(sco)[i] <<- txt tcltk::tkdestroy(tt) } tcltk::tkbind(labEd, "<Return>", isDone) } ## Zooming: draw rectangle and take its user coordinates ## Rectangle: first corner pRect0 <- function(x, y) { x <- as.numeric(x) y <- as.numeric(y) ## yadj here and below adjusts for canvas scrolling yadj <- as.numeric(tcltk::tkcanvasy(can, 0)) .pX <<- x .pY <<- y + yadj } ## Grow rectangle pRect <- function(x, y) { x <- as.numeric(x) y <- as.numeric(y) tcltk::tkdelete(can, "box") yadj <- as.numeric(tcltk::tkcanvasy(can, 0)) .lastX <<- x .lastY <<- y + yadj rect <- tcltk::tkcreate(can, "rectangle", .pX, .pY, .lastX, .lastY, outline="blue") tcltk::tkaddtag(can, "box", "withtag", rect) } ## Redraw ordiktplot with new xlim and ylim pZoom <- function() { nxlim <- sort(c(x2usr(.pX), x2usr(.lastX))) nylim <- sort(c(y2usr(.pY), y2usr(.lastY))) xy <- ordDump() ## Move labels closer to points in zoom ## FIXME: Doesn't do a perfect job mul <- abs(diff(nxlim)/diff(xlim)) xy$labels <- xy$points + (xy$labels - xy$points)*mul xy$args$xlim <- nxlim xy$args$ylim <- nylim orditkplot(xy) } ## Dummy location of the mouse .lastX <- 0 .lastY <- 0 .pX <- 0 .pY <- 0 ## Mouse bindings: ## Moving a label tcltk::tkitembind(can, "label", "<Any-Enter>", pEnter) tcltk::tkitembind(can, "label", "<Any-Leave>", pLeave) tcltk::tkitembind(can, "label", "<1>", pDown) tcltk::tkitembind(can, "label", "<ButtonRelease-1>", function() {tcltk::tkdtag(can, "selected") tcltk::tkdelete(can, "ptr")}) tcltk::tkitembind(can, "label", "<B1-Motion>", pMove) ## Edit labels tcltk::tkitembind(can, "label", "<Double-Button-1>", pEdit) ## Zoom (with one-button mouse) tcltk::tkbind(can, "<Shift-Button-1>", pRect0) tcltk::tkbind(can, "<Shift-B1-Motion>", pRect) tcltk::tkbind(can, "<Shift-ButtonRelease>", pZoom) ## Zoom (with right button) tcltk::tkbind(can, "<Button-3>", pRect0) tcltk::tkbind(can, "<B3-Motion>", pRect) tcltk::tkbind(can, "<ButtonRelease-3>", pZoom) }
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/orditkplot.R
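## A minimal usage sketch for orditkplot(), assuming vegan and its example
## data set 'varespec' are available; the object names here are illustrative.
## The Tcl/Tk window lets you drag labels interactively; "Save to R" returns
## an "orditkplot" object that plot.orditkplot() can redraw on a normal device.
if (interactive() && requireNamespace("vegan", quietly = TRUE)) {
    data(varespec, package = "vegan")
    ord <- vegan::metaMDS(varespec, trace = FALSE)
    orditkplot(ord, display = "species", tcex = 0.8)
}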
`orditree3d` <- function(ord, cluster, prune = 0, display = "sites", choices = c(1,2), col = "blue", text, type = "p", ...) { ## ordination scores in 2d: leaves ord <- scores(ord, choices = choices, display = display, ...) ## pad z-axis to zeros if (ncol(ord) != 2) stop(gettextf("needs plane in 2d, got %d", ncol(ord))) ord <- cbind(ord, 0) if (!inherits(cluster, "hclust")) # works only with hclust cluster <- as.hclust(cluster) # or object that can be converted ## get coordinates of internal nodes with vegan:::reorder.hclust x <- reorder(cluster, ord[,1], agglo.FUN = "mean")$value y <- reorder(cluster, ord[,2], agglo.FUN = "mean")$value xyz <- cbind(x, y, "height" = cluster$height) ## make line colour the mean of point colours if (is.factor(col)) col <- as.numeric(col) col <- rep(col, length = nrow(ord)) lcol <- col2rgb(col)/255 r <- reorder(cluster, lcol[1,], agglo.FUN = "mean")$value g <- reorder(cluster, lcol[2,], agglo.FUN = "mean")$value b <- reorder(cluster, lcol[3,], agglo.FUN = "mean")$value lcol <- rgb(r, g, b) ## set up frame pl <- scatterplot3d(rbind(ord, xyz), type = "n") if (type == "p") pl$points3d(ord, col = col, ...) else if (type == "t") { if (missing(text)) text <- rownames(ord) text(pl$xyz.convert(ord), labels = text, col = col, ...) } ## project leaves and nodes to 2d leaf <- pl$xyz.convert(ord) node <- pl$xyz.convert(xyz) ## two lines from each node down, either to a leaf or to an ## internal node merge <- cluster$merge for (i in seq_len(nrow(merge) - prune)) for (j in 1:2) if (merge[i,j] < 0) segments(node$x[i], node$y[i], leaf$x[-merge[i,j]], leaf$y[-merge[i,j]], col = col[-merge[i,j]], ...) else segments(node$x[i], node$y[i], node$x[merge[i,j]], node$y[merge[i,j]], col = lcol[merge[i,j]], ...) pl$internal <- do.call(cbind, node) pl$points <- do.call(cbind, leaf) pl$col.internal <- as.matrix(lcol) pl$col.points <- as.matrix(col) class(pl) <- c("orditree3d", "ordiplot3d") invisible(pl) } `ordirgltree` <- function(ord, cluster, prune = 0, display = "sites", choices = c(1, 2), col = "blue", text, type = "p", ...) { p <- cbind(scores(ord, choices = choices, display = display, ...), 0) if (ncol(p) != 3) stop(gettextf("needs 2D ordination plane, but got %d", ncol(p)-1)) if (!inherits(cluster, "hclust")) cluster <- as.hclust(cluster) x <- reorder(cluster, p[,1], agglo.FUN = "mean")$value y <- reorder(cluster, p[,2], agglo.FUN = "mean")$value z <- cluster$height merge <- cluster$merge ## adjust height z <- mean(c(diff(range(x)), diff(range(y))))/diff(range(z)) * z ## make line colour the mean of point colours if (is.factor(col)) col <- as.numeric(col) col <- rep(col, length = nrow(p)) lcol <- col2rgb(col)/255 r <- reorder(cluster, lcol[1,], agglo.FUN = "mean")$value g <- reorder(cluster, lcol[2,], agglo.FUN = "mean")$value b <- reorder(cluster, lcol[3,], agglo.FUN = "mean")$value lcol <- rgb(r, g, b) ## plot clear3d() if (type == "p") points3d(p, col = col, ...) else if (type == "t") { if (missing(text)) text <- rownames(p) text3d(p, texts = text, col = col, ...) } for (i in seq_len(nrow(merge) - prune)) for(j in 1:2) if (merge[i,j] < 0) segments3d(c(x[i], p[-merge[i,j],1]), c(y[i], p[-merge[i,j],2]), c(z[i], 0), col = col[-merge[i,j]], ...) else segments3d(c(x[i], x[merge[i,j]]), c(y[i], y[merge[i,j]]), c(z[i], z[merge[i,j]]), col = lcol[merge[i,j]], ...) ## add a short nipple so that you see the root (if you draw the root) if (prune <= 0) { n <- nrow(merge) segments3d(c(x[n],x[n]), c(y[n],y[n]), c(z[n],1.05*z[n]), col = lcol[n], ...) } }
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/orditree3d.R
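## A small sketch of orditree3d(), combining a 2-d ordination plane with a
## cluster dendrogram drawn on the third axis. The data set and the distance
## used here are illustrative assumptions; any ordination with 2-d scores and
## any object coercible to "hclust" should work.
if (requireNamespace("vegan", quietly = TRUE)) {
    data(dune, package = "vegan")
    d <- vegan::vegdist(dune)          # Bray-Curtis dissimilarities
    ord <- cmdscale(d)                 # metric ordination, 2 axes
    cl <- hclust(d, method = "average")
    orditree3d(ord, cl, type = "t", col = "blue")
}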
`orglellipse` <-
    function(object, groups, display = "sites",
             w = weights(object, display), kind = c("sd", "se", "ehull"),
             conf, choices = 1:3, alpha = 0.3, col = "red", ...)
{
    weights.default <- function(object, ...) NULL
    kind <- match.arg(kind)
    x <- scores(object, display = display, choices = choices, ...)
    groups <- as.factor(groups)
    ## evaluate weights
    w <- eval(w)
    if (is.null(w) || length(w) == 1)
        w <- rep(1, nrow(x))
    ## covariance and centres as lists
    Cov <- list()
    for (g in levels(groups))
        if (kind == "ehull") {
            Cov[[g]] <- ellipsoidhull(x[groups == g,, drop = FALSE])
            Cov[[g]]$n.obs <- sum(groups == g)
        } else
            Cov[[g]] <- cov.wt(x[groups == g,, drop = FALSE],
                               wt = w[groups == g])
    if (kind == "se")
        for(i in seq_len(length(Cov)))
            Cov[[i]]$cov <- Cov[[i]]$cov * sum(Cov[[i]]$wt^2)
    ## recycle colours
    if (is.factor(col))
        col <- as.numeric(col)
    col <- rep(col, length = length(Cov))
    ## rgl::ellipse3d defaults to confidence envelopes, but we want to
    ## default to sd/se and only use confidence ellipses if conf is
    ## given
    if (missing(conf))
        t <- 1
    else
        t <- sqrt(qchisq(conf, 3))
    ## graph
    for(i in seq_len(length(Cov)))
        if (Cov[[i]]$n.obs > 3)
            if (kind == "ehull")
                plot3d(ellipse3d(Cov[[i]]$cov, centre = Cov[[i]]$loc,
                                 t = sqrt(Cov[[i]]$d2)),
                       add = TRUE, alpha = alpha, col = col[i], ...)
            else
                plot3d(ellipse3d(Cov[[i]]$cov, centre = Cov[[i]]$center, t = t),
                       add = TRUE, alpha = alpha, col = col[i], ...)
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/orglellipse.R
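## A sketch of orglellipse(), assuming an rgl ordination plot has been opened
## with ordirgl() (defined elsewhere in this package) and a grouping factor is
## available; the data set and factor used here are illustrative assumptions.
if (interactive() && requireNamespace("vegan", quietly = TRUE)) {
    data(dune, dune.env, package = "vegan")
    ord <- vegan::rda(dune)
    ordirgl(ord)
    orglellipse(ord, dune.env$Management, kind = "se",
                col = dune.env$Management, alpha = 0.3)
}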
`orglpoints` <-
    function (object, display = "sites", choices = 1:3, radius,
              col = "black", ...)
{
    x <- scores(object, display = display, choices = choices, ...)
    ## default radius
    if (missing(radius))
        radius <- max(apply(x, 2, function(z) diff(range(z))))/100
    ## honor cex
    cex <- match.call(expand.dots = FALSE)$...$cex
    if (!is.null(cex))
        radius <- cex * radius
    ## make a color vector, handle factors
    if (is.factor(col))
        col <- as.numeric(col)
    col <- rep(col, length = nrow(x))
    spheres3d(x, radius = radius, col = col, ...)
    invisible()
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/orglpoints.R
`orglsegments` <-
    function (object, groups, order.by, display = "sites", choices = 1:3,
              col = "black", ...)
{
    pts <- scores(object, display = display, choices = choices, ...)
    ## order points along segments
    if (!missing(order.by)) {
        if (length(order.by) != nrow(pts))
            stop(gettextf("the length of order.by (%d) does not match the number of points (%d)",
                          length(order.by), nrow(pts)))
        ord <- order(order.by)
        pts <- pts[ord,]
        groups <- groups[ord]
    }
    inds <- names(table(groups))
    if (is.factor(col))
        col <- as.numeric(col)
    col <- rep(col, length = length(inds))
    names(col) <- inds
    for (is in inds) {
        X <- pts[groups == is, , drop = FALSE]
        if (nrow(X) > 1) {
            for (i in 2:nrow(X)) {
                segments3d(c(X[i-1,1], X[i,1]),
                           c(X[i-1,2], X[i,2]),
                           c(X[i-1,3], X[i,3]),
                           col = col[is], ...)
            }
        }
    }
    invisible()
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/orglsegments.R
### Add lines from vegan::spantree object to an ordirgl plot
`orglspantree` <-
    function(object, spantree, display = "sites", choices = 1:3,
             col = "black", ...)
{
    if (!inherits(spantree, "spantree"))
        stop("'spantree' must be a result of vegan::spantree() function")
    x <- scores(object, display = display, choices = choices, ...)
    ## get kids
    k <- spantree$kid
    ## change colors to rgb in 0..1 and recycle if needed
    col <- rep(col, length = nrow(x))
    if (is.factor(col))
        col <- as.numeric(col)
    col <- col2rgb(col)/255
    ## lines
    for (i in 1:length(k)) {
        if (is.na(k[i])) # skip NA links: disconnected spantree
            next
        one <- x[i+1,]
        two <- x[k[i],]
        lcol <- rgb(t(col[, i+1] + col[,k[i]])/2)
        segments3d(rbind(one, two), col = lcol, ...)
    }
}

### Add lines from an hclust object to an ordirgl plot
`orglcluster` <-
    function(object, cluster, prune = 0, display = "sites", choices = 1:3,
             col = "black", ...)
{
    if (!inherits(cluster, "hclust"))
        cluster <- as.hclust(cluster)
    x <- scores(object, display = display, choices = choices, ...)
    ## recycle colours if needed
    if (is.factor(col))
        col <- as.numeric(col)
    col <- rep(col, length = nrow(x))
    ## (Ab)use vegan:::reorder.hclust to get the coordinates and
    ## colours of internal nodes
    node <- apply(x, 2, function(val) reorder(cluster, val)$value)
    nodecol <- apply(col2rgb(col)/255, 1,
                     function(val) reorder(cluster, val)$value)
    nodecol <- rgb(nodecol)
    ## go through merge matrix
    merge <- cluster$merge
    for(i in seq_len(nrow(merge) - prune)) {
        if(merge[i,1] < 0)
            one <- x[-merge[i,1],]
        else
            one <- node[merge[i,1],]
        if (merge[i,2] < 0)
            two <- x[-merge[i,2],]
        else
            two <- node[merge[i,2],]
        segments3d(rbind(one, two), col = nodecol[i], ...)
    }
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/orglspantree.R
`orglspider` <-
    function (object, groups, display = "sites",
              w = weights(object, display), choices = 1:3, col = "black", ...)
{
    weights.default <- function(object, ...) NULL
    if (inherits(object, "cca") && missing(groups)) {
        lc <- scores(object, display = "lc", choices = choices, ...)
        wa <- scores(object, display = "wa", choices = choices, ...)
        if (length(lc) == 0 || length(wa) == 0)
            stop("needs constrained ordination with WA and LC scores when 'groups' is missing")
        for (i in 1:nrow(lc))
            segments3d(c(lc[i, 1], wa[i, 1]),
                       c(lc[i, 2], wa[i, 2]),
                       c(lc[i, 3], wa[i, 3]), color = col, ...)
    } else {
        pts <- scores(object, display = display, choices = choices, ...)
        out <- seq(along = groups)
        w <- eval(w)
        if (length(w) == 1)
            w <- rep(1, nrow(pts))
        if (is.null(w))
            w <- rep(1, nrow(pts))
        inds <- names(table(groups))
        if (is.factor(col))
            col <- as.numeric(col)
        col <- rep(col, length = length(inds))
        names(col) <- inds
        for (is in inds) {
            gr <- out[groups == is]
            if (length(gr) > 1) {
                X <- pts[gr, ]
                W <- w[gr]
                ave <- apply(X, 2, weighted.mean, w = W)
                for (i in 1:length(gr))
                    segments3d(c(ave[1], X[i, 1]),
                               c(ave[2], X[i, 2]),
                               c(ave[3], X[i, 3]), col = col[is], ...)
            }
        }
    }
    invisible()
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/orglspider.R
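## A sketch of orglspider(): with a constrained ordination and no 'groups' it
## connects LC to WA scores, otherwise it connects points to their (weighted)
## group centroids. An ordirgl() plot is assumed to be open, and the data set
## and grouping factor below are illustrative assumptions.
if (interactive() && requireNamespace("vegan", quietly = TRUE)) {
    data(dune, dune.env, package = "vegan")
    ord <- vegan::rda(dune)
    ordirgl(ord)
    orglspider(ord, dune.env$Management, col = dune.env$Management)
}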
`orgltext` <-
    function (object, text, display = "sites", choices = 1:3,
              adj = 0.5, col = "black", ...)
{
    x <- scores(object, display = display, choices = choices, ...)
    if (missing(text))
        text <- rownames(x)
    ## colors
    if (is.factor(col))
        col <- as.numeric(col)
    col <- rep(col, length = nrow(x))
    text3d(x[, 1], x[, 2], x[, 3], text, adj = adj, col = col, ...)
    invisible()
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/orgltext.R
`plot.orditkplot` <-
    function(x, ...)
{
    op <- par(x$par)
    on.exit(par(op))
    plot(x$points, pch = x$args$pch, cex = x$args$pcex, col = x$args$pcol,
         bg = x$args$pbg, xlim = x$args$xlim, ylim = x$args$ylim, asp = 1)
    font <- attr(x$labels, "font")
    if (is.null(font))
        font <- par("font")
    text(x$labels, rownames(x$labels), cex = x$args$tcex, col = x$args$tcol,
         font = font)
    invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/plot.orditkplot.R
`points.orditkplot` <-
    function(x, pch = x$args$pch, cex = x$args$pcex, col = x$args$pcol,
             bg = x$args$pbg, ...)
{
    points(x$points, pch = pch, cex = cex, col = col, bg = bg, ...)
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/points.orditkplot.R
`rgl.isomap` <-
    function(x, web = "white", ...)
{
    if (!inherits(x, "isomap"))
        stop("'x' must be an 'isomap' result object")
    ordirgl(x, ...)
    z <- scores(x, ...)
    net <- x$net
    ## skip if web = NA
    if (any(!is.na(web))) {
        ## web can be a vector for points (or not): recycle
        web <- rep(web, length = nrow(z))
        if (is.factor(web))
            web <- as.numeric(web)
        web <- col2rgb(web)/255
        for (i in 1:nrow(net)) {
            lcol <- rgb(t(rowMeans(web[, net[i,]])))
            segments3d(z[net[i,],1], z[net[i,],2], z[net[i,],3], color = lcol)
        }
    }
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/rgl.isomap.R
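## A sketch of rgl.isomap(), assuming vegan's isomap() and the BCI data set;
## both inputs are illustrative. The web segments get colours averaged between
## the connected points, as implemented in the function above.
if (interactive() && requireNamespace("vegan", quietly = TRUE)) {
    data(BCI, package = "vegan")
    dis <- vegan::vegdist(BCI)
    im <- vegan::isomap(dis, ndim = 3, k = 5)
    rgl.isomap(im, web = "gray")
}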
`rgl.renyiaccum` <-
    function(x, rgl.height = 0.2, ...)
{
    if (!inherits(x, "renyiaccum"))
        stop("'x' must be a 'renyiaccum' result object")
    y <- x[,,1] * rgl.height
    rgl.min = 0
    rgl.max = max(y)
    xp <- seq(0, 1, len = nrow(y))
    z <- seq(0, 1, len = ncol(y))
    ylim <- 1000 * range(y)
    ylen <- ylim[2] - ylim[1] + 1
    colorlut <- rainbow(ylen)
    col <- colorlut[1000*y - ylim[1] + 1]
    clear3d()
    ## bg3d(color = "white")
    surface3d(xp, y, z, color = col)
    y <- x[,,5] * rgl.height
    ##surface3d(xp,y,z,color="grey", alpha=0.3)
    surface3d(xp, y, z, color = "black", front = "lines", back = "lines")
    y <- x[,,6] * rgl.height
    ##surface3d(xp,y,z,color="grey",alpha=0.3)
    surface3d(xp, y, z, color = "black", front = "lines", back = "lines")
    y <- x[,,6]*0 + rgl.min
    surface3d(xp, y, z, alpha = 0)
    y <- x[,,6]*0 + rgl.max
    surface3d(xp, y, z, alpha = 0)
    labs <- pretty(c(rgl.min, range(x)))
    bbox3d(color = "#333377", emission = "#333377", specular = "#3333FF",
           shininess = 5, alpha = 0.8, zlen = 0, xlen = 0,
           yat = rgl.height*labs, ylab = labs)
    text3d(0, rgl.min, 0.5, "Scale", col = "darkblue")
    text3d(0.5, rgl.min, 0, "Sites", col = "darkblue")
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/rgl.renyiaccum.R
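## A sketch of rgl.renyiaccum(), assuming vegan's renyiaccum(); the data set
## and the number of permutations are illustrative assumptions.
if (interactive() && requireNamespace("vegan", quietly = TRUE)) {
    data(BCI, package = "vegan")
    racc <- vegan::renyiaccum(BCI[1:20, ], permutations = 20)
    rgl.renyiaccum(racc)
}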
## The only guaranteed scores-like item is the projected coordinates
## of 'points'. Usually we get only that, but there may be other
## scores-like things and I anticipate these items may change in the
## future, and we prepare to get any scores-like object.
`scores.ordiplot3d` <-
    function(x, display, ...)
{
    if (missing(display))
        return(x$points)
    ## not the default of points: see what it could be and return
    scoreslike <- names(x)[sapply(x, is.matrix)]
    display <- match.arg(display, c(scoreslike, "sites"))
    ## vegan standards say that scores(x, "sites") should work with
    ## all scores() methods: return points.
    if (display == "sites")
        display <- "points"
    x[[display]]
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/scores.ordiplot3d.R
`scores.orditkplot` <-
    function(x, display, ...)
{
    if (!missing(display) && !is.na(pmatch(display, "labels")))
        x$labels
    else
        x$points
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/scores.orditkplot.R
`text.orditkplot` <-
    function(x, cex = x$args$tcex, col = x$args$tcol,
             font = attr(x$labels, "font"), ...)
{
    if (is.null(font)) {
        font <- par("font")
    }
    text(x$labels, labels = rownames(x$labels), cex = cex, col = col,
         font = font, ...)
}
/scratch/gouwar.j/cran-all/cranData/vegan3d/R/text.orditkplot.R
#' Autosize vegaspec #' #' The arguments `width` and `height` are used to override the width and height #' of the provided `spec`, if the `spec` does not have multiple views. #' The dimensions you provide describe the overall width and height of the #' rendered chart, including axes, labels, legends, etc. #' #' In a Vega or Vega-Lite specification, the default interpretation #' of width and height is to describe the dimensions of the #' **data rectangle**, not including the space used by the axes, labels, #' legends, etc. When `width` and `height` are specified using #' [autosize](https://vega.github.io/vega-lite/docs/size.html#autosize), #' the meanings of `width` and `height` change to describe the dimensions #' of the **entire chart**, including axes, labels, legends, etc. #' #' There is an important limitation: specifying `width` and `height` is #' [effective only for single-view and layered specifications]( #' https://vega.github.io/vega-lite/docs/size.html#limitations). #' It will not work for specifications with multiple views #' (e.g. `hconcat`, `vconcat`, `facet`, `repeat`); this will issue a #' warning that there will be no effect on the specification when rendered. #' #' @inheritParams as_vegaspec #' @param width `integer`, if specified, the total rendered width (in pixels) #' of the chart - valid only for single-view charts and layered charts; #' the default is to use the width in the chart specification #' @param height `integer`, if specified, the total rendered height (in pixels) #' of the chart - valid only for single-view charts and layered charts; #' the default is to use the height in the chart specification #' #' @return S3 object with class `vegaspec` #' @examples #' vw_autosize(spec_mtcars, width = 350, height = 350) #' @seealso [Article on vegaspec (sizing)](https://vegawidget.github.io/vegawidget/articles/articles/vegaspec.html#sizing), #' [Vega documentation on sizing](https://vega.github.io/vega-lite/docs/size.html#autosize) #' @export #' vw_autosize <- function(spec, width = NULL, height = NULL) { # validate and assign class spec <- as_vegaspec(spec) spec <- .autosize(spec, width, height) spec } .autosize <- function(spec, ...) { UseMethod(".autosize") } .autosize.default <- function(spec, ...) { stop( ".autosize(): no method for class ", paste(class(spec), collapse = " "), call. = FALSE) } .autosize.vegaspec_hconcat <- function(spec, width = NULL, height = NULL, ...) { # the message that used to be here, and in the other autosize methods, # seemed too chatty NextMethod() } .autosize.vegaspec_vconcat <- function(spec, width = NULL, height = NULL, ...) { NextMethod() } .autosize.vegaspec_concat <- function(spec, width = NULL, height = NULL, ...) { NextMethod() } .autosize.vegaspec_facet <- function(spec, width = NULL, height = NULL, ...) { NextMethod() } .autosize.vegaspec_repeat <- function(spec, width = NULL, height = NULL, ...) { NextMethod() } .autosize.vegaspec_vega_lite <- function(spec, width = NULL, height = NULL, ...) 
{ if (is.null(c(width, height))) { # nothing to do here return(spec) } # using this notation: spec$config <- spec$config %||% list() # # to create a new list only if needed, so as not to # wipe out any parameters in an existing list spec$width <- as.integer(width %||% spec$width) spec$height <- as.integer(height %||% spec$height) spec$config <- spec$config %||% list() spec$config$autosize <- spec$config$autosize %||% list() spec$config$autosize$type <- "fit" spec$config$autosize$contains <- "padding" spec$config$view <- spec$config$view %||% list() spec$config$view$width <- spec$width spec$config$view$height <- spec$height spec } .autosize.vegaspec_vega <- function(spec, width = NULL, height = NULL, ...) { if (is.null(c(width, height))) { # nothing to do here return(spec) } spec$width <- as.integer(width %||% spec$width) spec$height <- as.integer(height %||% spec$height) spec$autosize <- list(type = "fit", contains = "padding") spec$config <- spec$config %||% list() spec$config$autosize <- spec$autosize spec$config$style <- list(cell = list(width = spec$width, height = spec$height)) spec }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/autosize.R
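## A short sketch of how vw_autosize() is typically combined with rendering;
## 'spec_mtcars' ships with the package and the pixel sizes are arbitrary.
if (interactive()) {
  vegawidget(vw_autosize(spec_mtcars, width = 600, height = 300))
}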
#' Callback helpers
#'
#' These are used by V8 so that R can access the file-system and the network
#' so that external data can be accessed.
#'
#' Assumes the encoding is UTF-8.
#'
#' @param url `character`
#' @param options named `list`, not yet implemented
#' @param filename `character`
#' @param encoding `character`, text encoding; defaults to UTF-8
#'
#' @return `character` contents of the file or URL
#' @keywords internal
#' @export
#'
vw_fetch <- function(url, options = NULL, encoding = "UTF-8") {

  # in theory, the vega loader figures out if `url` is a local file or not
  # in practice, it thinks local files are remote, so it comes here.
  # hence, we have to handle this ourselves - so we do.

  if (fs::file_exists(url)) {
    # local file
    tmpfile <- url
  } else {
    # remote file
    tmpfile <- withr::local_tempfile()
    utils::download.file(url, destfile = tmpfile, quiet = TRUE)
  }

  vw_load(tmpfile, encoding = encoding)
}

#' @rdname vw_fetch
#' @export
#'
vw_load <- function(filename, encoding = "UTF-8") {
  lines <- readLines(filename, warn = FALSE, encoding = "UTF-8")
  paste(lines, collapse = "\n")
}
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/callback-helpers.R
#' Example vegaspec: mtcars scatterplot #' #' A Vega-Lite specification to create a scatterplot for `mtcars`. #' #' @format S3 object of class `vegaspec` #' @seealso [as_vegaspec()] #' "spec_mtcars" #' Example dataset: Seattle daily weather #' #' This dataset contains daily weather-observations from Seattle for the #' years 2012-2015, inclusive. #' #' @format A data frame with 1461 observations of six variables #' \describe{ #' \item{date}{`Date`, date of the observation} #' \item{precipitation}{`double`, amount of precipitation (mm)} #' \item{temp_max}{`double`, maximum temperature (°C)} #' \item{temp_min}{`double`, minimum temperature (°C)} #' \item{wind}{`double`, average wind-speed (m/s)} #' \item{weather}{`character`, description of weather} #' } #' @source \url{https://vega.github.io/vega-datasets/data/seattle-weather.csv} #' "data_seattle_daily" #' Example dataset: Seattle hourly temperatures #' #' This dataset contains hourly temperature observations from Seattle for the #' year 2010. #' #' @format A data frame with 8759 observations of two variables #' \describe{ #' \item{date}{`POSIXct`, instant of the observation, uses `"America/Los_Angeles"`} #' \item{temp}{`double`, temperature (°C)} #' } #' @source \url{https://vega.github.io/vega-datasets/data/seattle-weather-hourly-normals.csv} #' "data_seattle_hourly" #' Example dataset: Categorical data #' #' This is a toy dataset; the numbers are generated randomly. #' #' @format A data frame with ten observations of two variables #' \describe{ #' \item{category}{`character`, representative of a nominal variable} #' \item{number}{`double`, representative of a quantitative variable} #' } #' "data_category"
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/data.R
#' Examine vegaspec
#'
#' This is a thin wrapper to [listviewer::jsonedit()],
#' used to interactively examine a Vega or Vega-Lite specification.
#'
#' @inheritParams listviewer::jsonedit
#' @inheritParams as_vegaspec
#' @param mode `string` for the initial view from modes.
#'   `'view'` is the default.
#' @param modes string `c('view','code', 'form', 'text', 'tree')`
#'   will be the default, since these are all the modes
#'   currently supported by **`jsoneditor`**.
#'
#' @return S3 object of class `jsonedit` and `htmlwidget`
#' @examples
#' vw_examine(spec_mtcars)
#'
#' spec_mtcars_autosize <-
#'   spec_mtcars %>%
#'   vw_autosize(width = 300, height = 300)
#'
#' vw_examine(spec_mtcars_autosize)
#' @export
#'
vw_examine <- function(spec, mode = "view",
                       modes = c("view", "code", "form", "text", "tree"),
                       ..., width = NULL, height = NULL, elementId = NULL) {

  assert_packages("listviewer")

  # I would prefer that this function print the htmlwidget and
  # invisibly return the spec, so as to support piping.
  #
  # However, doing this prevents the widget from being printed
  # when being knit. See https://github.com/vegawidget/vegawidget/issues/30
  #
  # To me, it is more important that this work well with knitr, than
  # that this be pipeable.
  #
  listviewer::jsonedit(
    listdata = vw_as_json(spec),
    mode = mode,
    modes = modes,
    ...,
    width = width,
    height = height,
    elementId = elementId
  )
}
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/examine.R
#' Construct a vegawidget handler #' #' You will likely call one of the specific handler-constructors: #' [vw_handler_signal()], [vw_handler_event()], in conjunction with #' [vw_handler_add_effect()]. #' #' The handler has three parts: #' #' - `args` arguments to the handler #' - `body_value` the body of a function of the `args`, returns a value #' - `body_effect` the body of a function of the value, `x`, performs a #' side-effect #' #' @param args `character`, vector of names for the arguments for the handler #' @param body_value `character`, body of the *value* part of the handler #' @param body_effect `character`, body of the *effect* part of the handler #' #' @return object with S3 class `vw_handler` #' @seealso [vw_handler_signal()], [vw_handler_event()], #' [vw_handler_add_effect()] #' @keywords internal #' @export #' vw_handler <- function(args, body_value, body_effect) { structure( list( args = args, body_value = body_value, body_effect = body_effect ), class = "vw_handler" ) } #' @keywords internal #' @export #' print.vw_handler <- function(x, ...) { # write out cat(compose_list(x$args, "arguments"), "\n") cat("\n") cat("body_value:\n") cat(x$body_value$text %>% glue::glue_collapse(sep = "\n") %>% indent(2L)) if (!is.null(x$body_effect)) { cat("\nbody_effect:\n") cat(x$body_effect %>% glue::glue_collapse(sep = "\n") %>% indent(2L)) } invisible(x) } #' Constructor for internal S3 class #' #' This S3 class is used to define handler-functions. #' #' @param args `character`, vector of names of arguments for the #' handler-function #' @param bodies `.vw_handler_body`, list of possible bodies for the #' handler-function #' #' @return S3 object with class `.vw_handler_def` #' @keywords internal #' @seealso .vw_handler_body #' @export #' .vw_handler_def <- function(args, bodies) { structure( list(args = args, bodies = bodies), class = ".vw_handler_def" ) } print..vw_handler_def <- function(x, ...) { cat(compose_list(x$args, "arguments"), "\n") if (identical(x$args, "x")) { body_name <- " body_effect" } else { body_name <- " body_value" } mapply( function(text, name) { cat("\n") cat(compose_list(name, body_name), "\n") print(text, n_indent = 2) }, x$bodies, names(x$bodies) ) invisible(x) } #' Constructor for internal S3 class #' #' This S3 class is used to define handler-function bodies. #' #' @param text `character`, text of the function body #' @param params `character`, vector of names of required parameters #' #' @return S3 object with class `.vw_handler_body` #' @keywords internal #' @seealso .vw_handler_def #' @export #' .vw_handler_body <- function(text, params = NULL) { structure( list( text = text, params = params ), class = ".vw_handler_body" ) } print_list <- function(x) { # make a list print like the code that created it x <- jsonlite::toJSON(x, auto_unbox = TRUE, null = "null") x <- gsub(",", ", ", x) # put space after comma x <- gsub("\\{(.*)\\}", "\\1", x) # remove {} x <- gsub("\"([^\"]+)\":", "\\1 = ", x) # remove quotes x <- gsub("null", "NULL", x) # capitalize NULL x } print..vw_handler_body <- function(x, n_indent = 0L, ...) 
{ # if there are parameters, print them if (length(x$params) > 0L) { text <- x$params %>% print_list() %>% compose_list("params") %>% indent(n_indent + 2L) cat(text, "\n") } # print out the text of the body text <- indent(x$text, n = n_indent + 2L) cat("\n") cat(text, sep = "\n") invisible(x) } compose_list <- function(x, title) { x <- glue::glue_collapse(x, sep = ", ") x <- glue::glue("{title}: {x}") } vw_handler_body <- function(handler_body, type) { text <- handler_body params <- list() # is this a name of a handler in the library? bodies <- .vw_handler_library[[type]][["bodies"]] if (handler_body %in% names(bodies)) { # use *that* handler_body text <- bodies[[handler_body]]$text params <- bodies[[handler_body]]$params } # collapse into a single string text <- glue::glue_collapse(text, sep = "\n") # if this is has no whitespace, parentheses, or semicolons, issue a warning js_pattern <- "(\\s|\\(|\\)|;)" if (!grepl(js_pattern, text)) { warning( "handler_body: '", text, "' does not appear to contain valid JavaScript code, ", "and it is not a known ", type, " handler." ) } list(text = text, params = params) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/js-handler-internal.R
#' Construct a JavaScript handler #' #' A Vega listener needs a JavaScript handler-function to call #' when the object-being-listened-to changes. For instance, [shiny-getters] and #' [add-listeners] functions each have an argument called #' `body_value`, which these functions help you build. #' #' There are two types of handlers defined in this package's handler-library. #' To see the handlers that are defined for each, call the function #' without any arguments: #' #' - `vw_handler_signal()` #' - `vw_handler_data()` #' - `vw_handler_event()` #' #' With a JavaScript handler, you are trying to do two types of things: #' #' - calculate a value based on the handler's arguments #' - produce a side-effect based on that calculated value #' #' Let's look at a concrete example. #' A [*signal* handler](https://vega.github.io/vega/docs/api/view/) #' will take arguments `name` and `value`. Let's say that we want to #' return the value. We could do this two ways: #' #' - `vw_handler_signal("value")`: use this package's handler library #' - `vw_handler_signal("return value;")`: supply the body of the #' handler-function yourself #' #' In the list above, the two calls do exactly the same thing, they build a #' JavaScript function that returns the `value` provided by whatever is calling #' the signal-handler. This will be a valid signal-handler, however, we will #' likely want a signal-handler to *do* something with that value, which is #' why we may wish to add a side-effect. #' #' Let's say we want the handler to print the value to the JavaScript console. #' We would create the signal-handler, then add an effect to print the result #' to the console. #' #' `vw_handler_signal("value") %>% vw_handler_add_effect("console")` #' #' We can add as many effects as we like; for more information, #' please see the documentation for [vw_handler_add_effect()]. #' #' Please be aware that these functions do *not* check for the correctness #' of JavaScript code you supply - any errors you make will not be apparent #' until your visualization is rendered in a browser. #' #' One last note, if `body_value` is already a `vw_handler`, these functions #' are no-ops; they will return the `body_value` unchanged. 
#' #' @param body_value `character`, the name of a defined handler-body, #' or the text of the body of a handler-function #' #' @return object with S3 class `vw_handler` #' @seealso [vw_handler_add_effect()], #' [vega-view](https://vega.github.io/vega/docs/api/view/) #' @examples #' # list all the available signal-handlers #' vw_handler_signal() #' #' # list all the available data-handlers #' vw_handler_data() #' #' # list all the available event-handlers #' vw_handler_event() #' #' # use a defined signal-handler #' vw_handler_signal("value") #' #' # define your own signal-handler #' vw_handler_signal("return value;") #' @export #' vw_handler_signal <- function(body_value) { handler_type <- .vw_handler_library[["signal"]] # if handler_body is missing, print out available handlers if (missing(body_value)) { print(handler_type) return(invisible(NULL)) } # if handler_body is a handler, return the handler if (inherits(body_value, "vw_handler")) { return(body_value) } # get the handler_body body_value <- vw_handler_body(body_value, "signal") # create the handler args <- handler_type$args vw_handler(args, body_value, NULL) } #' @rdname vw_handler_signal #' @export #' vw_handler_data <- function(body_value) { handler_type <- .vw_handler_library[["data"]] # if handler_body is missing, print out available handlers if (missing(body_value)) { print(handler_type) return(invisible(NULL)) } # if handler_body is a handler, return the handler if (inherits(body_value, "vw_handler")) { return(body_value) } # get the handler_body body_value <- vw_handler_body(body_value, "signal") # create the handler args <- handler_type$args vw_handler(args, body_value, NULL) } #' @rdname vw_handler_signal #' @export #' vw_handler_event <- function(body_value) { handler_type <- .vw_handler_library[["event"]] # if handler_body is missing, print out available handlers if (missing(body_value)) { print(handler_type) return(invisible(NULL)) } # if handler_body is a handler, return the handler if (inherits(body_value, "vw_handler")) { return(body_value) } # get the handler_body body_value <- vw_handler_body(body_value, "event") # create the handler args <- handler_type$args vw_handler(args, body_value, NULL) } #' Add a side-effect to a JavaScript handler #' #' With a JavaScript handler, once you have calculated a value #' based on the handler's arguments (e.g. `name`, `value`) you will #' likely want to produce a side-effect based on that calculated value. #' This function helps you do that. #' #' The calculation of a value is meant to be separate from the #' production of a side-effect. This way, the code for a side-effect #' can be used for any type of handler. #' #' You are supplying the `body_effect` to an effect-handler. This #' takes a single argument, `x`, representing the #' calculated value. Doing this allows us to chain side-effects together; #' be careful not to modify `x` in any of the code you provide. #' #' To see what side-effects are available in this package's handler-library, #' call `vw_handler_add_effect()` without any arguments. You may notice that #' some of the effects, like `"element_text"`, require additional parameters, #' in this case, `selector`. #' #' Those parameters with a default value of `NULL` require you to supply #' a value; those with sensible defaults are optional. #' #' To provide the parameters, call #' `vw_handler_add_effect()` with *named* arguments corresponding to the #' names of the parameters. See the examples for details. 
#' #' @param vw_handler `vw_handler` created using [vw_handler_signal()] or #' [vw_handler_event()] #' @param body_effect `character`, the name of a defined handler-body, #' or the text of the body of a handler-function #' @param ... additional *named* parameters to be interpolated into the #' text of the handler_body #' #' @return modified copy of `vw_handler` #' @seealso [vw_handler_signal()] #' @examples #' # list all the available effect-handlers #' vw_handler_add_effect() #' #' # build a signal handler that prints some text, #' # then the value, to the console #' vw_handler_signal("value") %>% #' vw_handler_add_effect("console", label = "signal value:") #' #' @export #' vw_handler_add_effect <- function(vw_handler, body_effect, ...) { handler_type <- .vw_handler_library[["effect"]] # if vw_handler is missing, print out available handlers if (missing(vw_handler)) { print(handler_type) return(invisible(NULL)) } # get the handler_body body_effect <- vw_handler_body(body_effect, "effect") # handler body needs to return text and a list of parameters # blend the parameters params_new <- list(...) params_default <- body_effect$params names_common <- names(params_new)[names(params_new) %in% names(params_default)] params <- params_default params[names_common] <- params_new[names_common] # if any of the params are null, warn index_null <- vapply(params, is.null, logical(1L)) names_null <- names(params[index_null]) if (length(names_null) > 0L) { warning( "params not set: ", glue::glue_collapse(names_null, sep = ", ") ) } # mix in the parameters handler_text <- glue::glue_data(.x = params, body_effect$text, .open = "${", .sep = "\n") # append the new effect vw_handler$body_effect <- c(vw_handler$body_effect, handler_text) vw_handler } #' Compose a JavaScript handler #' #' These functions are used to compose a `vw_handler` object into #' text that will be interpreted either as a complete JavaScript #' function, or a function-body. #' #' @inheritParams vw_handler_add_effect #' @param n_indent `integer`, number of spaces to indent the text of the body #' #' @return object with S3 class `JS_EVAL`, text for the function or #' function-body #' #' @keywords internal #' @export #' vw_handler_body_compose <- function(vw_handler, n_indent = 2L) { body_value <- glue::glue_collapse(vw_handler$body_value$text, sep = "\n") # does the handler-body have no effects? if (is.null(vw_handler$body_effect)) { # no effects, only value body <- body_value } else { # we do have effects, combine with value body_value <- indent(body_value, 4L) body_effect <- glue::glue_collapse(vw_handler$body_effect, sep = "\n") %>% indent(2L) body <- glue_js( "(function (x) {", "${body_effect}", "})(", " (function () {", "${body_value}", " })()", ")" ) } body <- body %>% indent(n_indent) %>% JS() body } #' @rdname vw_handler_body_compose #' @keywords internal #' @export #' vw_handler_compose <- function(vw_handler) { args <- glue::glue_collapse(vw_handler$args, sep = ", ") body <- vw_handler_body_compose(vw_handler, n_indent = 2L) fn <- glue_js( "function (${args}) {", "${body}", "}" ) JS(fn) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/js-handler.R
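## A sketch of composing a signal handler with chained side-effects. The
## handler-body name "value" and the effect "console" (with its 'label'
## parameter) come from the handler library documented above; "element_text"
## and its 'selector' parameter are mentioned above but the CSS selector used
## here is an illustrative assumption. Call vw_handler_add_effect() with no
## arguments to list the effects actually available.
h <-
  vw_handler_signal("value") %>%
  vw_handler_add_effect("console", label = "selected value:") %>%
  vw_handler_add_effect("element_text", selector = "#my-output")

# inspect the JavaScript that will be generated
vw_handler_compose(h)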
#' Add JavaScript listeners #' #' Listeners are how we get information out of a Vega chart and into the #' JavaScript environment. To do this, we specify handler-functions to #' run whenever a certain signal changes or an event fires. #' #' The `handler_body` can be the text of the *body* of a JavaScript function; #' the arguments to this function will vary according to the type of listener #' you are adding: #' #' - signal-handler and data-handler arguments: `name`, `value` #' - event-handler arguments: `event`, `item` #' #' This package offers some functions to make it easier to build JavaScript #' handler functions from R: [vw_handler_signal()], [vw_handler_data()], #' and [vw_handler_event()]. You can pipe one of these functions to #' [vw_handler_add_effect()] to perform side-effects on the result. #' #' @name add-listeners #' #' @param x vegawidget object to be monitored #' @param name `character`, name of the signal or dataset to be monitored #' @param handler_body `character` or `JS_EVAL`, text of the body of #' the JavaScript handler-function to be called when the signal or dataset #' changes, or the event fires #' #' @return modified copy of vegawidget object `x` #' @seealso [vw_handler_signal()], [vw_handler_data()], [vw_handler_event()], #' [vw_handler_add_effect()], #' [vega-view](https://vega.github.io/vega/docs/api/view/) #' @export #' vw_add_signal_listener <- function(x, name, handler_body) { # make this into a vw_handler, compose handler_body <- handler_body %>% vw_handler_signal() %>% vw_handler_body_compose(n_indent = 6L) js_call <- glue_js( "function(el, x) {", " this.viewPromise.then(function(view) {", " view.addSignalListener('${name}', function(name, value) {", "${handler_body}", " });", " });", "}" ) htmlwidgets::onRender(x, js_call) } #' @rdname add-listeners #' @export #' vw_add_data_listener <- function(x, name, handler_body) { # make this into a vw_handler, compose handler_body <- handler_body %>% vw_handler_signal() %>% vw_handler_body_compose(n_indent = 6L) js_call <- glue_js( "function(el, x) {", " this.viewPromise.then(function(view) {", " view.addDataListener('${name}', function(name, value) {", "${handler_body}", " });", " });", "}" ) htmlwidgets::onRender(x, js_call) } #' @rdname add-listeners #' #' @param event `character`, name of the type of event to be monitored, #' e.g. `"click"` #' #' @export #' vw_add_event_listener <- function(x, event, handler_body) { # make this into a vw_handler, compose handler_body <- handler_body %>% vw_handler_event() %>% vw_handler_body_compose(n_indent = 6L) js_call <- glue_js( "function(el, x) {", " this.viewPromise.then(function(view) {", " view.addEventListener('${event}', function(event, item) {", "${handler_body}", " });", " });", "}" ) htmlwidgets::onRender(x, js_call) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/js-listener.R
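## A sketch of attaching an event listener to a vegawidget: on "click" the
## handler returns the clicked item, and a chained effect logs it to the
## JavaScript console. 'spec_mtcars' ships with the package; the handler body
## here is plain JavaScript, as described in the roxygen above.
if (interactive()) {
  vegawidget(spec_mtcars) %>%
    vw_add_event_listener(
      "click",
      vw_handler_event("return item;") %>%
        vw_handler_add_effect("console", label = "clicked item:")
    )
}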
#' @export print.vegaspec <- function(x, ...) { print(vegawidget(x, ...)) invisible(x) } #' @export format.vegaspec <- function(x, ...) { vw_as_json(x) } #' Knit-print method #' #' If you are knitting to an HTML-based format, the only supported options are #' `vega.width`, `vega.height` (as pixels) and `vega.embed` (as a list). #' If you are knitting to a non-HTML-based format, you additionally #' have the options `dev`, `out.width` and `out.height` available. #' #' The biggest thing to keep in mind about a Vega visualization is that #' very often, the chart tells you how much space it needs, rather #' than than you tell it how much space it has available. In the future, it #' may reveal itself how to manage better this "conversation". #' #' @section HTML-based: #' When knitting to an HTML-based format, the `spec` is rendered as normal, #' it calls [vegawidget()] using the options `vega.width`, `vega.height` #' and `vega.embed`: #' #' - `vega.width` and `vega.height` are passed to [vegawidget()] #' as `width` and `height`, respectively. These values are coerced to numeric, #' so it is ineffective to specify a percentage. They are passed to #' [vw_autosize()] to resize the chart, if #' [possible](https://vega.github.io/vega-lite/docs/size.html#limitations). #' #' - `vega.embed` is passed to [vegawidget()] as `embed`. The function #' [vega_embed()] can be useful to set `vega.embed`. #' #' @section Non-HTML-based: #' When knitting to an non-HTML-based format, e.g. `github_document` or #' `pdf_document`, this function will convert the chart to an image, then knitr #' will incorporate the image into your document. You have the additional #' knitr options `dev`, `out.width`, and `out.height`: #' #' - The supported values of `dev` are `"png"`, `"svg"`, and `"pdf"`. If you #' are knitting to a LaTeX format (e.g. `pdf_document`) and you specify `dev` #' as `"svg"`, it will be implemented as `"pdf"`. #' #' - To scale the image within your document, you can use `out.width` or #' `out.height`. Because the image will already have an aspect ratio, #' it is recommended to specify no more than one of these. #' #' @inheritParams as_vegaspec #' @param ... other arguments #' @param options `list`, knitr options #' #' @seealso [vw_autosize()], [vega_embed()] #' @export #' knit_print.vegaspec <- function(spec, ..., options = NULL){ # it is ineffective to set out.width or out.height as a percentage to_int <- function(x) { if (is.null(x)) { return(NULL) } suppressWarnings({ x_int <- as.integer(x) }) if (is.na(x_int)) { return(NULL) } x_int } embed <- options$vega.embed width <- to_int(options$vega.width) height <- to_int(options$vega.height) # if this goes to HTML, print and be done! if (knitr::is_html_output(excludes = c("markdown", "epub", "gfm"))) { return( knitr::knit_print( vegawidget(spec, embed = embed, width = width, height = height) ) ) } # this does not go to html; we have more thinking to do... # knitr's default screenshoting won't work due to incompatibility with # es6 JS code and webshot. Thus interecept here and do the conversion to # static image ourselves... # determine the graphics-device dev <- options$dev %||% knitr::opts_chunk$get("dev") # if specifying svg and using LaTeX, use pdf if (identical(dev, "svg") && knitr::is_latex_output()) { dev <- "pdf" } # choose writing-function fn_write <- switch( dev, png = vw_write_png, svg = vw_write_svg, pdf = { assert_packages("rsvg") function(spec, path, ...) { svg <- vw_to_svg(spec, ...) 
rsvg::rsvg_pdf(charToRaw(svg), path) } }, # default, if dev not recognized function(spec, path, ...) { stop("unknown device type: `", dev, "`.") } ) tryCatch({ f <- tempfile() on.exit({unlink(f)}) fn_write(spec, path = f, width = width, height = height) res <- readBin(f, "raw", file.info(f)[, "size"]) structure( list(image = res, extension = paste0(".", dev)), class = "html_screenshot" ) }, error = function(e) { err_msg <- c( "Error printing vegawidget in non-HTML format:", conditionMessage(e) ) knitr::knit_print(err_msg) } ) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/print.R
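## A sketch of how the knit-print options described above are used: they are
## set as knitr chunk options in an R Markdown document (reproduced here as a
## comment because this file is plain R). The chunk label and pixel sizes are
## illustrative.
#
# ```{r mtcars-chart, vega.width=400, vega.height=300}
# spec_mtcars
# ```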
#' Rename datasets in a vegaspec
#'
#' If a vegaspec has named datasets, it may be useful to rename them.
#' This function will return a vegaspec with datasets named `data_001`,
#' `data_002`, and so on. It will go through the spec and replace the
#' references to the names. A future version of this function may give you
#' more control over the names used.
#'
#' @inheritParams as_vegaspec
#'
#' @return S3 object of class `vegaspec`
#' @export
#'
vw_rename_datasets <- function(spec) {

  # coerce to vegaspec
  spec <- as_vegaspec(spec)

  # if datasets not present, return spec
  if (!("datasets" %in% names(spec))) {
    return(spec)
  }

  # determine dataset names
  dataset_names <- names(spec$datasets)

  # create new dataset names
  dataset_names_new <- as.list(sprintf("data_%03d", seq_along(dataset_names)))
  names(dataset_names_new) <- dataset_names

  # create function to replace names
  fn_replace <- function(x) {
    if (rlang::has_name(dataset_names_new, x)) {
      x <- dataset_names_new[[x]]
    }
    x
  }

  # replace dataset names
  names(spec$datasets) <- dataset_names_new

  # function to crawl recursively through the spec
  fn_crawl <- function(x, fn_rep) {

    # x is data.frame OR x is not list, return x
    if (is.data.frame(x) || !is.list(x)) {
      return(x)
    }

    # if x has element "data", data has element "name"
    if (rlang::has_name(x, "data") && rlang::has_name(x$data, "name")) {
      # replace name
      x$data$name <- fn_rep(x$data$name)
    }

    # call for each element of list x
    x <- lapply(x, fn_crawl, fn_rep)

    x
  }

  spec <- fn_crawl(spec, fn_replace)

  spec <- as_vegaspec(spec)

  spec
}
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/rename-datasets.R
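## A sketch of vw_rename_datasets() on a hypothetical minimal Vega-Lite spec
## with one named dataset, written inline to show that both the 'datasets'
## entry and the reference under 'data' are renamed. The vega_schema() helper
## is assumed to be exported by this package; the dataset values are made up.
spec_named <- as_vegaspec(
  list(
    `$schema` = vega_schema(),   # assumed helper for the Vega-Lite schema URL
    datasets = list(cars = list(list(x = 1, y = 2))),
    data = list(name = "cars"),
    mark = "point",
    encoding = list(
      x = list(field = "x", type = "quantitative"),
      y = list(field = "y", type = "quantitative")
    )
  )
)

vw_rename_datasets(spec_named)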
#' Serialize data-frame time-columns #' #' **Please think of this as an experimental function** #' #' In Vega, for now, there are only two time-zones available: the local #' time-zone of the browser where the spec is rendered, and UTC. This differs #' from R, where a time-zone attribute is available to `POSIXct` vectors. #' Accordingly, when designing a vegaspec that uses time, you have to make some #' some compromises. This function helps you to implement your compromise in #' a principled way, as explained in the opinions below. #' #' Let's assume that your `POSIXct` data has a time-zone attached. #' There are three different scenarios for rendering this data: #' #' - using the time-zone of the browser #' - using UTC #' - using the time-zone of the data #' #' If you intend to display the data using the **time-zone of the browser**, #' or using **UTC**, you should serialize datetimes using ISO-8601, i.e. #' `iso_dttm = TRUE`. In the rest of your vegaspec, you should choose #' local or UTC time-scales accordingly. However, in either case, you should #' use local time-units. No compromise is necessary. #' #' If you intend to display the data using the **time-zone of the browser**, #' this is where you will have to compromise. In this case, you should #' serialize using `iso_dttm = FALSE`. By doing this, your datetimes will be #' serialized using a non-ISO-8601 format, and notably, **using the time-zone** #' of the datetime. When you design your vegaspec, you should treat this as #' if it were a UTC time. You should direct Vega to parse this data as UTC, #' i.e. `{"foo": "utc:'%Y-%m-%d %H:%M:%S'"}`. In other words, Vega should #' interpret your local timestamp as if it were a UTC timestamp. #' As in the first UTC case, you should use UTC time-scales and local #' time-units. #' #' The compromise you are making is this: the internal representation of #' the instants in time will be different in Vega than it will be in R. #' You are losing information because you are converting from a `POSIXct` #' object with a time-zone to a timestamp without a time-zone. It is also #' worth noting that the time information in your Vega plot should not #' be used anywhere else - this should be the last place this serialized #' data should be used because it is no longer trustworthy. For this, #' you will gain the ability to show the data in the context of its #' time-zone. #' #' Dates can be different creatures than datetimes. I think that can be #' "common currency" for dates. I think this is because it is more common to #' compare across different locations using dates as a common index. For #' example, you might compare daily stock-market data from NYSE, CAC-40, and #' Hang Seng. To maintain a common time-index, you might choose UTC to #' represent the dates in all three locations, despite the time-zone #' differences. #' #' This is why the default for `iso_date` is `TRUE`. In this scenario, #' you need not specify to Vega how to parse the date; because of its #' ISO-8601 format, it will parse to UTC. As with the other UTC cases, #' you should use UTC time-scales and local time-units. 
#' #' @param data `data.frame`, data to be serialized #' @param iso_dttm `logical`, indicates if datetimes (`POSIXct`) are to be #' formatted using ISO-8601 #' @param iso_date `logical`, indicates if dates (`Date`) are to be #' formatted using ISO-8601 #' #' @return object with the same type as `data` #' @seealso [Vega-Lite Time Unit (UTC)](https://vega.github.io/vega-lite/docs/timeunit.html#utc) #' @examples #' # datetimes #' data_seattle_hourly %>% head() #' data_seattle_hourly %>% head() %>% vw_serialize_data(iso_dttm = TRUE) #' data_seattle_hourly %>% head() %>% vw_serialize_data(iso_dttm = FALSE) #' #' # dates #' data_seattle_daily %>% head() #' data_seattle_daily %>% head() %>% vw_serialize_data(iso_date = TRUE) #' data_seattle_daily %>% head() %>% vw_serialize_data(iso_date = FALSE) #' #' @export #' vw_serialize_data <- function(data, iso_dttm = FALSE, iso_date = TRUE) { dttm_format <- function(x, iso) { # if not a datetime, return unchanged if (!inherits(x, "POSIXt")) { return(x) } # is a datetime, format according to iso if (iso) { x <- format(x, format = "%Y-%m-%dT%H:%M:%OS3Z", tz = "UTC") } else { x <- format(x, format = "%Y-%m-%d %H:%M:%OS3") } x } date_format <- function(x, iso) { # if not a Date, return unchanged if (!inherits(x, "Date")) { return(x) } # is a Date, format according to iso if (iso) { x <- format(x, format = "%Y-%m-%d") } else { x <- format(x, format = "%Y/%m/%d") } x } cols <- colnames(data) data[cols] <- lapply(data[cols], dttm_format, iso = iso_dttm) data[cols] <- lapply(data[cols], date_format, iso = iso_date) data }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/serialize_data.R
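# Illustrative sketch (not part of the package source): what the two
# datetime-serialization modes of vw_serialize_data() produce. The data frame
# below is a hypothetical example.
df <- data.frame(
  instant = as.POSIXct("2020-01-01 12:00:00", tz = "America/Chicago"),
  day = as.Date("2020-01-01")
)

vw_serialize_data(df, iso_dttm = TRUE)   # instant: "2020-01-01T18:00:00.000Z" (UTC, ISO-8601)
vw_serialize_data(df, iso_dttm = FALSE)  # instant: "2020-01-01 12:00:00.000"  (local clock time, no zone)
vw_serialize_data(df, iso_date = FALSE)  # day: "2020/01/01" instead of "2020-01-01"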
#' Set base URL #' #' @description #' This is useful for specs where data is specified using a URL. #' Using this function to set the base URL, you can specify the data URL #' in specs using the relative path from the base. #' #' For example, this #' [Vega-Lite example](https://vega.github.io/vega-lite/examples/point_2d.html) #' uses the base URL `https://cdn.jsdelivr.net/npm/vega-datasets@2`. In a spec, #' instead of specifying: #' #' ``` #' data = "https://cdn.jsdelivr.net/npm/vega-datasets@2/data/cars.json" #' ``` #' #' You can call: #' #' ``` #' vw_set_base_url("https://cdn.jsdelivr.net/npm/vega-datasets@2") #' ``` #' #' Then specify: #' #' ``` #' data = "data/cars.json" #' ``` #' #' This function sets the value of `getOption("vega.embed")$loader$baseURL`. #' You need to set it only once in a session or RMarkdown file. #' #' @param url `character` URL to use as the base URL. #' #' @return `character`, called for side effects; it returns the previous value #' invisibly. #' #' @examples #' # this is the URL used for Vega datasets #' previous <- vw_set_base_url("https://cdn.jsdelivr.net/npm/vega-datasets@2") #' #' # reset to previous value #' vw_set_base_url(previous) #' @export #' vw_set_base_url <- function(url) { # validate that it's a single string assertthat::assert_that( assertthat::is.string(url) || is.null(url) ) vega_embed_local <- getOption("vega.embed", default = list()) url_old <- vega_embed_local[["loader"]][["baseURL"]] vega_embed_local[["loader"]][["baseURL"]] <- url options(vega.embed = vega_embed_local) invisible(url_old) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/set-base-url.R
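# Illustrative sketch (not part of the package source): pairing
# vw_set_base_url() with a spec that uses a relative data URL. The URL points
# at the public vega-datasets CDN, so network access is needed to render.
vw_set_base_url("https://cdn.jsdelivr.net/npm/vega-datasets@2")

spec_cars <- as_vegaspec(list(
  `$schema` = vega_schema(),
  data = list(url = "data/cars.json"),
  mark = "point",
  encoding = list(
    x = list(field = "Horsepower", type = "quantitative"),
    y = list(field = "Miles_per_Gallon", type = "quantitative")
  )
))

# vegawidget() picks the base URL up from getOption("vega.embed")$loader$baseURL
vegawidget(spec_cars)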
#' Run Shiny demonstration-apps #' #' @param example `character`, name of the example to run; if NULL (default), #' prints out a list of available examples #' @param ... additional arguments passed to [shiny::runApp()] #' #' @return invisible NULL, called for side-effects #' @examples #' vw_shiny_demo() # returns available examples #' #' # Run only in interactive R sessions #' if (interactive()) { #' vw_shiny_demo("data-set-get") #' } #' @export #' vw_shiny_demo <- function(example = NULL, ...) { assert_packages("shiny", "fs") # get the entire path examples_long <- fs::dir_ls( system.file("shiny-demo", package = "vegawidget"), type = "directory" ) # trim so it's just the directory-names examples <- basename(examples_long) # if a "good" example is not provided, message the available examples if (is.null(example) || !(example %in% examples)) { examples_collapse <- glue::glue_collapse(glue::double_quote(examples), sep = ", ") message( glue::glue("Available examples: {examples_collapse}") ) return(invisible(NULL)) } # run the app shiny::runApp( system.file("shiny-demo", example, package = "vegawidget"), ... ) invisible(NULL) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/shiny-demo.R
#' Get information from a Vega chart into Shiny #' #' There are three types of information you can get from a Vega chart, #' a *signal*, *data* (i.e. a dataset), and information associated with #' an *event*. A dataset or a signal must first be defined and **named** #' in the vegaspec. #' #' These getter-functions are called from within #' a Shiny `server()` function, where they act like #' [shiny::reactive()], returning a reactive expression. #' #' To see these functions in action, you can run a shiny-demo: #' #' - `vw_shiny_get_signal()`: call `vw_shiny_demo("signal-set-get")` #' - `vw_shiny_get_data()`: call `vw_shiny_demo("data-set-get")` #' - `vw_shiny_get_event()`: call `vw_shiny_demo("event-get")` #' #' In addition to the chart `outputId`, you will need to provide: #' #' - `vw_shiny_get_signal()`: the `name` of the signal, as defined in the Vega #' specification #' - `vw_shiny_get_data()`: the `name` of the dataset, as defined in the Vega #' specification #' - `vw_shiny_get_event()`: the `event` type, as defined in the #' [Vega Event-Stream reference](https://vega.github.io/vega/docs/event-streams/) #' #' When the signal or data changes, or when the event fires, Vega needs to #' know which information you want returned to Shiny. To do this, #' you provide a JavaScript handler-function: #' #' - `vw_shiny_get_signal()`: the default handler, #' `vw_handler_signal("value")`, #' specifies that the value of the signal be returned. #' #' - `vw_shiny_get_data()`: the default handler, #' `vw_handler_data("value")`, #' specifies that the entire dataset be returned. #' #' - `vw_shiny_get_event()`: the default handler, #' `vw_handler_event("datum")`, #' specifies that the single row of data associated with graphical mark #' be returned. For example, if you are monitoring a `"click"` event, #' Vega would return the row of data that backs any mark #' (like a point) that you click. #' #' If you need to specify a different behavior for the handler, there are a #' couple of options. This package provides #' a library of handler-functions; call [vw_handler_signal()], #' [vw_handler_data()], or [vw_handler_event()] without arguments to #' list the available handlers. #' #' If the library does not contain the handler you need, the `body_value` #' argument will also accept a character string which will be used as #' the **body** of the handler function. #' #' For example, these calls are equivalent: #' #' - `vw_shiny_get_signal(..., body_value = "value")` #' - `vw_shiny_get_signal(..., body_value = vw_handler_signal("value"))` #' - `vw_shiny_get_signal(..., body_value = "return value;")` #' #' If you use a custom-handler that you think may be useful for the #' handler-function library, please #' [file an issue](https://github.com/vegawidget/vegawidget/issues). 
#' #' @inheritParams shiny-setters #' @param name `character`, name of the signal (defined in Vega specification) #' being monitored #' @param body_value `character` or `JS_EVAL`, the **body** of a JavaScript #' function that Vega will use to handle the signal or event; this function #' must return a value #' #' @return [shiny::reactive()] function that returns the value returned by #' `body_value` #' @name shiny-getters #' @seealso [vw_handler_signal()], [vw_handler_event()], #' vega-view: #' [addSignalListener()](https://github.com/vega/vega/tree/master/packages/vega-view#view_addSignalListener), #' [addEventListener()](https://github.com/vega/vega/tree/master/packages/vega-view#view_addEventListener) #' @export #' vw_shiny_get_signal <- function(outputId, name, body_value = "value") { assert_packages("shiny") session <- shiny::getDefaultReactiveDomain() inputId <- "" # set up an observer to run *once* to add the listener shiny::observe({ shiny::isolate({ # create unique inputId (set in enclosing environment) inputId_proposed <- glue::glue("{outputId}_signal_{name}") inputId <<- get_unique_inputId(inputId_proposed, names(session$input)) # compose_handler_body handler_body <- vw_handler_signal(body_value) %>% vw_handler_add_effect("shiny_input", inputId = session$ns(inputId)) %>% vw_handler_body_compose(n_indent = 0L) # add listener vw_shiny_msg_addSignalListener( outputId, name = name, handlerBody = handler_body ) }) }) # return a reactive that listens to our "private" input shiny::reactive({ session$input[[inputId]] }) } #' @name shiny-getters #' @export #' vw_shiny_get_data <- function(outputId, name, body_value = "value") { assert_packages("shiny") session <- shiny::getDefaultReactiveDomain() inputId <- "" # set up an observer to run *once* to add the listener shiny::observe({ shiny::isolate({ # create unique inputId (set in enclosing environment) inputId_proposed <- glue::glue("{outputId}_data_{name}") inputId <<- get_unique_inputId(inputId_proposed, names(session$input)) # compose_handler_body handler_body <- vw_handler_data(body_value) %>% vw_handler_add_effect("shiny_input", inputId = session$ns(inputId)) %>% vw_handler_body_compose(n_indent = 0L) # add listener vw_shiny_msg_addDataListener( outputId, name = name, handlerBody = handler_body ) }) }) # return a reactive that listens to our "private" input shiny::reactive({ x <- session$input[[inputId]] # coerce this to a data.frame, if need be if (!is.data.frame(x)) { x <- data.frame(as.list(x), stringsAsFactors = FALSE) } x }) } #' @name shiny-getters #' @param event `character`, type of the event being monitored, e.g. 
`"click"`, #' for list of supported events, please see #' [Vega Event-Stream reference](https://vega.github.io/vega/docs/event-streams/) #' @export #' vw_shiny_get_event <- function(outputId, event, body_value = "datum") { assert_packages("shiny") session <- shiny::getDefaultReactiveDomain() inputId <- "" # set up an observer to run *once* to add the listener shiny::observe({ shiny::isolate({ # create unique inputId (set in enclosing environment) inputId_proposed <- glue::glue("{outputId}_event_{event}") inputId <<- get_unique_inputId(inputId_proposed, names(session$input)) # compose handler_body handler_body <- vw_handler_event(body_value) %>% vw_handler_add_effect("shiny_input", inputId = session$ns(inputId)) %>% vw_handler_body_compose(n_indent = 0L) # add listener vw_shiny_msg_addEventListener( outputId, event = event, handlerBody = handler_body ) }) }) # return a reactive that listens to our "private" input shiny::reactive({ session$input[[inputId]] }) } get_unique_inputId <- function(inputId, names_input) { # compile proposed inputId with names of existing inputs input_names <- c(names_input, inputId) # make input_names unique input_names_new <- make.unique(input_names, sep = "_") # return last element, corresponds to `inputId` utils::tail(input_names_new, 1) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/shiny-getters.R
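# Illustrative sketch (not part of the package source): using a getter inside
# a Shiny server function. The outputId "chart" and the signal name "brush"
# are hypothetical; the signal would have to be defined in the rendered spec.
server <- function(input, output, session) {

  output$chart <- renderVegawidget(vegawidget(spec_mtcars))

  # reactive holding the current value of the "brush" signal
  rct_brush <- vw_shiny_get_signal("chart", name = "brush")

  output$brush_text <- shiny::renderPrint(rct_brush())
}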
#' Shiny-message functions #' #' Use these functions to send messages from Shiny to JavaScript, #' using the vegawidget JavaScript API. #' #' These functions must be called from within a reactive environment. Because #' their purpose is to cause a side-effect (changing the view of a chart), they #' should be called from within [shiny::observe()] functions, or equivalent. #' #' \describe{ #' \item{`vw_shiny_msg_callView`}{this is a multipurpose call} #' \item{`vw_shiny_msg_changeData`}{} #' \item{`vw_shiny_msg_addSignalListener`}{} #' \item{`vw_shiny_msg_addDataListener`}{} #' \item{`vw_shiny_msg_addEventListener`}{} #' } #' #' #' #' @inheritParams shiny-setters #' @param fn `character`, name of vega-view function to call #' @param params `list`, list of parameters with which to call `fn` #' @param run `logical`, indicates if the view should be run immediately, #' default is TRUE #' #' @return `invisible(NULL)`, called for side-effects #' @keywords internal #' @name shiny-message #' @noRd #' vw_shiny_msg_callView <- function(outputId, fn, params, run) { type <- "callView" message <- as.list(environment()) vw_shiny_message(type, message) } #' @rdname shiny-message #' @param name `character` name of the signal or dataset, as defined #' in the vegaspec #' @param data_insert `data.frame`, data to be inserted into #' the named dataset #' @param data_remove `data.frame`, `character`, or `logical`, #' data to be removed - if `logical`, `TRUE` indicates to remove #' all the previous data, `FALSE` indicates to remove no previous #' data - if `character` this will be the body of a JavaScript function #' with a single argument, `data.remove`; this will be a predicate #' function, returning a boolean. #' #' @keywords internal #' @noRd #' vw_shiny_msg_changeData <- function(outputId, name, data_insert, data_remove, run) { type <- "changeData" message <- as.list(environment()) vw_shiny_message(type, message) } #' @rdname shiny-message #' #' @param handlerBody `character` or `JS_EVAL`, the body of a handler function #' for the given listener #' #' @keywords internal #' @noRd #' vw_shiny_msg_addSignalListener <- function(outputId, name, handlerBody) { type <- "addSignalListener" message <- as.list(environment()) vw_shiny_message(type, message) } #' @rdname shiny-message #' #' @param handlerBody `character` or `JS_EVAL`, the body of a handler function #' for the given listener #' #' @keywords internal #' @noRd #' vw_shiny_msg_addDataListener <- function(outputId, name, handlerBody) { type <- "addDataListener" message <- as.list(environment()) vw_shiny_message(type, message) } #' @rdname shiny-message #' #' @param event `character`, name of the event to monitor, e.g. `"click"` #' #' @keywords internal #' @noRd #' vw_shiny_msg_addEventListener <- function(outputId, event, handlerBody) { type <- "addEventListener" message <- as.list(environment()) vw_shiny_message(type, message) } #' @rdname shiny-message #' #' @keywords internal #' @noRd #' vw_shiny_msg_run <- function(outputId) { type <- "run" message <- as.list(environment()) vw_shiny_message(type, message) } # internal function to wrap session$sendCustomMessage vw_shiny_message <- function(type, message) { assert_packages("shiny") session <- shiny::getDefaultReactiveDomain() session$sendCustomMessage(type, message) invisible(NULL) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/shiny-message.R
#' Set information in a Vega chart from Shiny #' #' There are two ways to change a Vega chart: by setting #' a *signal* or by setting a *dataset*; you can also #' direct a Vega chart to re-run itself. Any signal or #' dataset you set must first be defined and **named** in the vegaspec. #' These functions are called from within #' a Shiny `server()` function, where they act like #' [shiny::observe()] or [shiny::observeEvent()]. #' #' To see these functions in action, you can run a shiny-demo: #' #' - `vw_shiny_set_signal()`: call `vw_shiny_demo("signal-set-get")` #' - `vw_shiny_set_data()`: call `vw_shiny_demo("data-set-get")` #' - `vw_shiny_run()`: call `vw_shiny_demo("data-set-swap-run")` #' #' For the signal and data setters, in addition to the chart `outputId`, #' you will need to provide: #' #' - the `name` of the signal or dataset you wish to keep updated #' - the `value` to which you want to set the signal or dataset; #' this should be a reactive expression like `input$slider` or `rct_dataset()` #' - whether or not you want to `run` the Vega view again immediately #' after setting this value #' #' If you do not set `run = TRUE` in the setter-function, #' you can use the `vw_shiny_run()` function to control when #' the chart re-runs. One possibility is to set its `value` to a reactive #' expression that refers to, for example, a [shiny::actionButton()]. #' #' @param outputId `character`, shiny `outputId` for the vegawidget #' @param name `character`, name of the signal or dataset being set, #' as defined in the vegaspec #' @param value reactive expression, e.g. `input$slider` or `dataset()`, #' that returns the value to which to set the signal or dataset # @param use_cache `logical`, for setting data, indicates to # to send Vega only the *changes* in the dataset, rather # than making a hard reset of the dataset #' @param run `logical` indicates if the chart is to be run immediately #' @param ... other arguments passed on to [shiny::observeEvent()] #' #' @return [shiny::observeEvent()] function that responds to changes in the #' reactive-expression `value` #' @name shiny-setters #' @export #' vw_shiny_set_signal <- function(outputId, name, value, run = TRUE, ...) { assert_packages("shiny") # captures (but does not evaluate) the reactive expression value <- rlang::enquo(value) shiny::observeEvent( eventExpr = rlang::eval_tidy(value), handlerExpr = { # evaluate the (reactive) expression value <- rlang::eval_tidy(value) # call the view API to set the signal value, then (possibly) run vw_shiny_msg_callView( outputId, fn = "signal", params = list(name, value), run = run ) }, ... ) } #' @rdname shiny-setters #' @export #' vw_shiny_set_data <- function(outputId, name, value, run = TRUE, ...) 
{ assert_packages("shiny") # until we sort things out with Vega, caching will not work use_cache <- FALSE # if we are caching the data, we need dplyr if (use_cache) { assert_packages("dplyr") } # captures (but does not evaluate) the reactive expression value <- rlang::enquo(value) data_old <- data.frame() shiny::observeEvent( eventExpr = rlang::eval_tidy(value), handlerExpr = { # evaluate the (reactive) expression data <- rlang::eval_tidy(value) names_data <- names(data) # create the change-set only if we are caching and the names are the same use_changeset <- FALSE create_changeset <- use_cache && identical(names_data, names(data_old)) if (create_changeset) { # create change-set data_insert <- dplyr::anti_join(data, data_old, by = names_data) data_remove <- dplyr::anti_join(data_old, data, by = names_data) # use the change-set if it is more-efficient than a reset use_changeset <- (nrow(data_insert) + nrow(data_remove) < nrow(data)) } # if we are not using the changeset, make a reset if (!use_changeset) { data_insert <- data data_remove <- NULL } if (use_cache) { # keep a copy of the data in the enclosing environment data_old <<- data } # print(data_insert) # print(data_remove) # call the view API to invoke the changeset, then (possibly) run vw_shiny_msg_changeData(outputId, name, data_insert, data_remove, run) }, ... ) } #' @rdname shiny-setters #' @export #' vw_shiny_run <- function(outputId, value, ...) { assert_packages("shiny") # captures (but does not evaluate) the reactive expression value <- rlang::enquo(value) shiny::observeEvent( eventExpr = rlang::eval_tidy(value), handlerExpr = { # call the view API to run vw_shiny_msg_run(outputId) }, ... ) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/shiny-setters.R
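# Illustrative sketch (not part of the package source): using a setter inside
# a Shiny server function. The outputId "chart", the signal name "cutoff",
# and the spec `spec_signal` are hypothetical; the signal would have to be
# defined in the vegaspec.
server <- function(input, output, session) {

  output$chart <- renderVegawidget(vegawidget(spec_signal))

  # whenever the slider moves, push its value into the Vega signal and re-run
  vw_shiny_set_signal("chart", name = "cutoff", value = input$slider, run = TRUE)
}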
#' Determine vegaspec version #' #' Use this function to determine the `library` and `version` of a `vegaspec`. #' #' Returns a list with two elements: #' #' \describe{ #' \item{`library`}{`character`, either `"vega"` or `"vega_lite"`} #' \item{`version`}{`character`, version tag} #' } #' #' @inheritParams as_vegaspec #' #' @return `list` with elements `library`, `version` #' @examples #' vw_spec_version(spec_mtcars) #' # vw_to_vega() requires the V8 package #' vw_spec_version(vw_to_vega(spec_mtcars)) #' @export #' vw_spec_version <- function(spec) { spec <- as_vegaspec(spec) version <- parse_schema(spec[["$schema"]]) version } parse_schema <- function(schema) { result <- list(library = "", version = "") regex <- ".*/schema/(vega|vega-lite)/v(.*)\\.json$" if (is.null(schema)) { stop("cannot determine schema type, input string is NULL", call. = FALSE) } has_schema <- grepl(regex, schema) if (!has_schema) { warning( "cannot determine schema type from input string: ", schema, call. = FALSE ) return(result) } result$library <- gsub("-", "_", gsub(regex, "\\1", schema)) result$version <- gsub(regex, "\\2", schema) result } #' Create string for schema-URL #' #' Useful if you are creating a vegaspec manually. #' #' @param library `character`, either `"vega"` or `"vega_lite"` #' @param version `character`, version of library, e.g. `"5.2.0"`; #' if `version` is provided, `major` defaults to `FALSE`. #' @inheritParams vega_version #' #' @return `character` URL for schema #' @examples #' vega_schema() #' vega_schema("vega", major = FALSE) #' vega_schema("vega_lite", version = "5.2.0") #' #' # creating a spec by hand #' spec <- #' list( #' `$schema` = vega_schema(), #' width = 300, #' height = 300 #' # and so on #' ) %>% #' as_vegaspec() #' #' @export #' vega_schema <- function(library = c("vega_lite", "vega"), version = NULL, major = is.null(version)) { library <- match.arg(library) version <- version %||% vega_version(major = major)[[library]] # change "vega_lite" to "vega-lite" library <- gsub("_", "-", library) schema <- glue::glue("https://vega.github.io/schema/{library}/v{version}.json") schema <- as.character(schema) schema } # internal function to help test different schema versions with_schema <- function(value, spec) { schema <- spec[["$schema"]] schema_new <- sub("v(\\d+)\\.json$", glue::glue("v{value}.json"), schema) spec_new <- spec spec_new[["$schema"]] <- schema_new spec_new }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/spec-version.R
#' Create or write image #' #' If you have **[V8](https://CRAN.R-project.org/package=V8)**, #' **[withr](https://withr.r-lib.org/)**, and **[fs](https://fs.r-lib.org/)** #' installed, you can use these functions to create #' or write images as PNG or SVG, using a `vegaspec` or `vegawidget`. #' To convert to a bitmap, or write a PNG file, you will additionally need #' the **[rsvg](https://CRAN.R-project.org/package=rsvg)** and #' **[png](https://CRAN.R-project.org/package=png)** packages. #' #' These functions can be called using (an object that can be coerced to) #' a `vegaspec`. #' #' The scripts used are adapted from the Vega #' [command line utilities](https://vega.github.io/vega/usage/#cli). #' #' @name image #' @inheritParams vw_autosize #' @param path `character`, local path to which to write the file #' @param scale `numeric`, useful for specifying larger images supporting the #' increased-resolution of retina displays #' @param base_url `character`, the base URL for a data file, useful for #' specifying a local directory; defaults to an empty string #' @param seed `integer`, the random seed for a Vega specification, #' defaults to a "random" integer #' @param ... additional arguments passed to `vw_to_svg()` #' #' @return \describe{ #' \item{`vw_to_svg()`}{`character`, SVG string} #' \item{`vw_to_bitmap()`}{`array`, bitmap array} #' \item{`vw_write_svg()`}{invisible `vegaspec` or `vegawidget`, called for side-effects} #' \item{`vw_write_png()`}{invisible `vegaspec` or `vegawidget`, called for side-effects} #' } #' #' @examples #' # call any of these functions using either a vegaspec or a vegawidget #' svg <- vw_to_svg(vegawidget(spec_mtcars)) #' bmp <- vw_to_bitmap(spec_mtcars) #' vw_write_png(spec_mtcars, file.path(tempdir(), "temp.png")) #' vw_write_svg(spec_mtcars, file.path(tempdir(), "temp.svg")) #' #' # To specify the path to a local file, use base_url #' spec_precip <- #' list( #' `$schema` = vega_schema(), #' data = list(url = "seattle-weather.csv"), #' mark = "tick", #' encoding = list( #' x = list(field = "precipitation", type = "quantitative") #' ) #' ) %>% #' as_vegaspec() #' #' data_dir <- system.file("example-data/", package = "vegawidget") #' vw_write_png( #' spec_precip, #' file.path(tempdir(), "temp-local.png"), #' base_url = data_dir #' ) #' #' @seealso [vega-view library](https://github.com/vega/vega-view#image-export) #' #' @rdname image #' @export #' vw_to_svg <- function(spec, width = NULL, height = NULL, base_url = NULL, seed = NULL) { assert_packages(c("V8", "fs", "withr")) # set defaults base_url <- base_url %||% getOption("vega.embed")[["loader"]][["baseURL"]] %||% "" seed <- seed %||% sample(1e8, size = 1) # convert to vega spec as a string spec <- vw_autosize(spec, width = width, height = height) vega_spec <- vw_to_vega(spec) str_spec <- vw_as_json(vega_spec, pretty = FALSE) # determine versions of vega, vega-lite version_all <- vega_version_all() spec_version <- vw_spec_version(spec) widget <- get_widget_string( spec_version[["library"]], spec_version[["version"]], version_all ) version_widget <- version_all[version_all[["widget"]] == widget, ] version_vega <- version_widget[["vega"]] version_vega_lite <- version_widget[["vega_lite"]] # fire up V8 ct <- V8::v8() ct$source(widgetlib_file("vega", glue::glue("vega@{version_vega}.min.js"))) ct$source(bin_file("vega_to_svg_v8.js")) # send arguments ct$assign("spec", V8::JS(str_spec)) # send as JSON text to avoid jsonlite defaults ct$assign("seed", seed) ct$assign("baseURL", base_url) # evaluate 
render-function lines_returned <- ct$eval("vwRender(spec, seed, baseURL)", await = TRUE) paste(lines_returned, collapse = "\n") } #' @rdname image #' @export #' vw_to_bitmap <- function(spec, scale = 1, width = NULL, height = NULL, ...) { assert_packages("rsvg") # create the svg svg_res <- vw_to_svg(spec, width = width, height = height, ...) # determine the dimensions of the image using `scale` dim_svg <- svg_dim(svg_res) width_img <- dim_svg$width * scale height_img <- dim_svg$height * scale bm <- rsvg::rsvg(charToRaw(svg_res), width = width_img, height = height_img) bm } # internal function to scrape the text of an SVG string # to return a list of `width` and `height` # svg_dim <- function(svg) { # grab the contents of the viewBox string s <- gsub(".*viewBox=\"([^\"]+)\".*", "\\1", svg) # split string using spaces s <- strsplit(s, " ") num <- as.numeric(s[[1]]) # extract the width and height into a list dim <- list(width = num[[3]], height = num[[4]]) dim }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/to-image.R
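# Illustrative sketch (not part of the package source): how the internal
# svg_dim() helper scrapes width and height from an SVG viewBox. The SVG
# string here is a minimal hand-written example.
svg_sample <- '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 240"></svg>'
svg_dim(svg_sample)  # list(width = 320, height = 240)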
#' Convert to Vega specification #' #' If you have **[V8](https://CRAN.R-project.org/package=V8)** installed, #' you can use this function to compile a Vega-Lite specification #' into a Vega specification. #' #' @inheritParams as_vegaspec #' #' @return S3 object of class `vegaspec_vega` and `vegaspec` #' @examples #' vw_spec_version(spec_mtcars) #' vw_spec_version(vw_to_vega(spec_mtcars)) #' @export #' vw_to_vega <- function(spec) { .vw_to_vega(as_vegaspec(spec)) } # use internal S3 generic .vw_to_vega <- function(spec, ...) { UseMethod(".vw_to_vega") } .vw_to_vega.default <- function(spec, ...) { stop(".vw_to_vega(): no method for class ", class(spec), call. = FALSE) } .vw_to_vega.vegaspec_vega_lite <- function(spec, ...) { assert_packages("V8") # determine versions of vega, vega-lite version_all <- vega_version_all() spec_version <- vw_spec_version(spec) widget <- get_widget_string( spec_version[["library"]], spec_version[["version"]], version_all ) version_widget <- version_all[version_all[["widget"]] == widget, ] version_vega <- version_widget[["vega"]] version_vega_lite <- version_widget[["vega_lite"]] # fire up v8 ct <- V8::v8() # polyfill structuredClone, ref: https://stackoverflow.com/questions/73607410 # I think that because Vega(-Lite) specs are designed to be JSON, the # "stringify/parse" method will be sufficient. # # TODO: remove this block of code when {v8} supports structuredClone # ct$source(bin_file("polyfill-structuredClone.js")) ct$source(widgetlib_file("vega", glue::glue("vega@{version_vega}.min.js"))) ct$source( widgetlib_file("vega-lite", glue::glue("vega-lite@{version_vega_lite}.min.js")) ) # convert to vega ct$eval(glue::glue("var vs = vegaLite.compile({vw_as_json(spec)})")) # don't let V8 convert to JSON; send as string ct$eval("var strSpec = JSON.stringify(vs.spec)") str_spec <- ct$get("strSpec") as_vegaspec(str_spec) } .vw_to_vega.vegaspec_vega <- function(spec, ...) { # do nothing, already a Vega spec spec }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/to-vega.R
#' Add vegawidget functions to your package #' #' These functions are offered to help you import and re-export vegawidget #' functions in your package. For more detail, please see #' [this article](https://vegawidget.github.io/vegawidget/articles/articles/import.html). #' #' **`use_vegawidget()`**: #' #' Adds vegawidget functions: #' - [as_vegaspec()], [vw_as_json()] #' - `format()`, `print()`, `knit_print()` #' - [vegawidget()], [vega_embed()], [vw_set_base_url()] #' - [vw_to_svg()] and other image functions #' - [vegawidgetOutput()], [renderVegawidget()] #' #' In practical terms: #' - adds **vegawidget** to `Imports` in your package's DESCRIPTION file. #' - adds **V8**, **withr**, **fs**, **rsvg**, and **png** to `Suggests` #' in your package's DESCRIPTION file. #' - creates `R/utils-vegawidget.R` #' - you can delete references to functions you do not want #' to re-export. #' #' If you have your own S3 class for a spec, specify the `s3_class_name` #' argument. You will have to edit `R/utils-vegawidget-<s3_class_name>.R`: #' - add the code within your class's method for [as_vegaspec()] #' to coerce your object to a `vegaspec`. #' #' To permit knit-printing of your custom class, you will have to add some code #' to your package's `.onLoad()` function. #' #' **`use_vegawidget_interactive()`**: #' #' If you want to add the JavaScript and Shiny functions, #' use this after running `use_vegawidget()`. It adds: #' - [vw_add_data_listener()] and other listener-functions. #' - [vw_handler_data()] and other handler functions. #' - [vw_shiny_get_data()] and other Shiny getters. #' - [vw_shiny_set_data()] and other Shiny setters. #' #' In practical terms: #' - adds **shiny** and **dplyr** to `Suggests`. #' - creates `R/utils-vegawidget-interactive.R`. #' - at your discretion, delete references to functions you do not want #' to re-export. #' #' @param s3_class_name `character`, name of an S3 class for object to #' be coerced to a `vegaspec`; default (NULL) implies no additional class #' #' @return invisible `NULL`, called for side effects #' @export #' use_vegawidget <- function(s3_class_name = NULL) { assert_packages("usethis") usethis::use_package("vegawidget", type = "Imports") suggests <- c("V8", "withr", "fs", "rsvg", "png") usethis::ui_todo( "To render images, {usethis::ui_value('vegawidget')} \\ uses the packages {usethis::ui_value(suggests)}. \\ You may wish to add them to this package's \"Suggests\"." 
) filename <- glue::glue("R/utils-vegawidget.R") usethis::ui_todo( "Remove unwanted functions from {usethis::ui_value(filename)}" ) usethis::use_template( "utils-vegawidget.R", save_as = filename, open = TRUE, package = "vegawidget" ) # if we have an S3 class if (!is.null(s3_class_name)) { data <- list(s3_class_name = s3_class_name) filename <- glue::glue("R/utils-vegawidget-{s3_class_name}.R") usethis::ui_todo( "Remove unwanted functions from {usethis::ui_value(filename)}" ) usethis::use_template( "utils-vegawidget-class.R", save_as = filename, data = data, open = TRUE, package = "vegawidget" ) val_fnname <- usethis::ui_value(glue::glue("as_vegaspec.{s3_class_name}()")) usethis::ui_todo("Adapt function {val_fnname}") code <- usethis::ui_code( glue::glue( "vegawidget::s3_register(\"knitr::knit_print\", \"{s3_class_name}\")" ) ) url <- usethis::ui_field('https://vctrs.r-lib.org/reference/s3_register.html') usethis::ui_todo( "To let knit-printing work, make sure you have this functionality \\ in {usethis::ui_code('.onLoad()')} \\ (usually kept in {usethis::ui_value('zzz.R')}): {code} The function {usethis::ui_value('s3_register()')} \\ is copied from the vctrs package, see {url}") } usethis::ui_todo("Document and rebuild package") invisible(NULL) } #' @rdname use_vegawidget #' @export #' use_vegawidget_interactive <- function() { # Assumes you already have run use_vegawidget assert_packages("usethis") usethis::use_package("shiny", type = "Suggests") # shiny usethis::use_package("dplyr", type = "Suggests") filename <- glue::glue("R/utils-vegawidget-interactive.R") usethis::ui_todo( "Remove unwanted functions from {usethis::ui_value(filename)}" ) usethis::use_template( "utils-vegawidget-interactive.R", save_as = filename, open = TRUE, package = "vegawidget" ) usethis::ui_todo("Document and rebuild package") invisible(NULL) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/use-vegawidget.R
#' Mark character strings as literal JavaScript code #' #' See \code{htmlwidgets::\link[htmlwidgets]{JS}} for details. #' #' @name JS #' @importFrom htmlwidgets JS #' @keywords internal #' @export #' NULL #' @keywords internal #' @export #' print.JS_EVAL <- function(x, ...) { cat(x) invisible(x) } #' Interpolate into a JavaScript string #' #' Uses JavaScript notation to interpolate R variables into a string #' intended to be interpreted as JS. #' #' This is a wrapper to [glue::glue()], but it uses the notation used by #' [JavaScript's template-literals](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Template_literals), #' `${}`. #' #' @inheritParams htmlwidgets::JS #' @param .open `character`, opening delimiter used by [glue::glue()] #' @param .envir `environment`, tells [glue::glue()] where to find #' the variables to be interpolated #' #' @return `glue::glue()` object #' @examples #' x <- 123 #' glue_js("function(){return(${x});}") %>% print() #' @export #' glue_js <- function(..., .open = "${", .envir = parent.frame()) { glue::glue(..., .open = .open, .envir = .envir, .sep = "\n") } # indent a text string indent <- function(x, n = 0L) { # generate the spaces indent <- paste0(rep(" ", n), collapse = "") # insert spaces at beginning of string x <- paste0(indent, x) # insert spaces after every newline x <- gsub("\n", paste0("\n", indent), x) # trim at the end if need be x } # helpers to find JavaScript files for v8 widgetlib_file <- function(...) { system.file("htmlwidgets", "lib", ..., package = "vegawidget") } bin_file <- function(...) { system.file("bin", ..., package = "vegawidget") }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/utils-javascript.R
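# Illustrative sketch (not part of the package source): behaviour of the
# internal indent() helper and of glue_js() interpolation. The variable
# `threshold` is a hypothetical example.
cat(indent("line one\nline two", n = 2L))
#   line one
#   line two

threshold <- 42
glue_js("function(datum) {return datum.value > ${threshold};}")
# function(datum) {return datum.value > 42;}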
#' @importFrom rlang `%||%` NULL
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/utils-null.R
#' Assert that packages are loaded #' #' This function can be useful in writing package functions that use #' functions from packages that you "suggest". It asserts that these packages #' are available, and throws an informative error for those packages #' that are not. #' #' @param packages `character` vector of package names to check #' @param ... `character` package names to check #' #' @return `logical` indicating success #' @examples #' # put packages in a character vector #' assert_packages(c("base", "utils")) #' #' # or by themselves #' assert_packages("base", "utils") #' #' \dontrun{ #' # intentionally invokes error-behavior #' assert_packages("utils2") #' } #' #' @seealso [R Packages book](http://r-pkgs.had.co.nz/description.html#dependencies) #' @keywords internal #' @noRd #' assert_packages <- function(packages, ...) { packages <- c(packages, ...) is_missing <- vapply(packages, function(x) {!requireNamespace(x, quietly = TRUE)}, TRUE) missing_pkgs <- packages[is_missing] quote_missing_pkgs <- vapply(missing_pkgs, function(x) {paste0('"', x, '"')}, "") assertthat::assert_that( identical(length(missing_pkgs), 0L), msg = paste( "Package(s):", paste(quote_missing_pkgs, collapse = ", "), "needed for this function to work. Please install.", sep = " " ) ) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/utils-package.R
#' Pipe operator #' #' See \code{magrittr::\link[magrittr]{pipe}} for details. #' #' @name %>% #' @rdname pipe #' @keywords internal #' @export #' @importFrom magrittr %>% #' @usage lhs \%>\% rhs NULL
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/utils-pipe.R
# these are the internal functions used to support vegaspec operations .as_vegaspec <- function(x, ...) { UseMethod(".as_vegaspec") } .as_vegaspec.default <- function(x, ...) { stop("as_vegaspec(): no method for class ", class(x), call. = FALSE) } .as_vegaspec.list <- function(x, ...) { # if no `$schema` element, add one if (!("$schema" %in% names(x))) { warning( "Spec has no `$schema` element, ", "adding `$schema` element for Vega-Lite major-version" ) x <- c(`$schema` = vega_schema(), x) } # determine if this is a vega or vega_lite spec library <- parse_schema(x[["$schema"]])$library class_library <- paste0("vegaspec_", library) subclass <- NULL if (identical(class_library, "vegaspec_vega_lite")) { subclass <- .get_subclass(x) } class_new <- unique(c(subclass, class_library, "vegaspec", class(x))) spec <- structure(x, class = class_new) spec } .as_list <- function(x, ...) { UseMethod(".as_list") } .as_list.default <- function(x, ...) { stop(".as_list(): no method for class ", class(x), call. = FALSE) } .as_list.vegaspec <- function(x, ...) { # revert to list unclass(x) } .as_list.json <- function(x, ...) { # convert from JSON to list x <- jsonlite::fromJSON(x, simplifyVector = FALSE, simplifyDataFrame = FALSE) x } .as_json <- function(x, pretty, ...) { UseMethod(".as_json") } .as_json.default <- function(x, pretty, ...) { stop(".as_json(): no method for class ", class(x), call. = FALSE) } .as_json.list <- function(x, pretty = TRUE, ...) { # convert from list to JSON jsonlite::toJSON( x, auto_unbox = TRUE, null = "null", na = "null", pretty = pretty, digits = NA ) } .as_json.character <- function(x, pretty = TRUE, ...) { # validate that this is JSON success <- jsonlite::validate(x) assertthat::assert_that( success, msg = attr(success, "err") ) # add json class to character class(x) <- unique(c("json", class(x))) x } # Method for vegaspec .find_urls <- function(spec){ unlisted <- unlist(.as_list(spec), recursive = TRUE, use.names = TRUE) url_ix <- grep("^(.*[[:punct:]])*url$", names(unlisted)) urls <- unname(unlisted[url_ix]) urls } # spec has to be a (nascent) vegaspec .get_subclass <- function(spec) { # subclasses: vegaspec_unit, vegaspec_layer, vegaspec_facet, vegaspec_repeat, # vegaspec_concat, vegaspec_hconcat, vegaspec_vconcat names <- names(spec) if ("concat" %in% names) { return("vegaspec_concat") } if ("hconcat" %in% names) { return("vegaspec_hconcat") } if ("vconcat" %in% names) { return("vegaspec_vconcat") } if ("repeat" %in% names) { return("vegaspec_repeat") } if ("facet" %in% names) { return("vegaspec_facet") } if ("layer" %in% names) { return("vegaspec_layer") } return("vegaspec_unit") }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/utils-vegaspec.R
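# Illustrative sketch (not part of the package source): how as_vegaspec()
# assigns a Vega-Lite subclass via the internal .get_subclass(). The minimal
# specs here are hypothetical and kept just detailed enough to classify.
spec_unit  <- as_vegaspec(list(`$schema` = vega_schema(), mark = "point"))
spec_layer <- as_vegaspec(list(`$schema` = vega_schema(), layer = list()))

class(spec_unit)[1]   # "vegaspec_unit"
class(spec_layer)[1]  # "vegaspec_layer"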
#' Vega embed options #' #' Helper-function to specify the `embed` argument to `vegawidget()`. #' These arguments reflect the options to the #' [vega-embed](https://github.com/vega/vega-embed/#options) #' library, which ultimately renders the chart specification as HTML. #' #' The most important arguments are `renderer`, `actions`, and `defaultStyle`: #' #' - The default `renderer` is `"canvas"`. #' #' - The default for `actions` is `NULL`, which means that the `export`, #' `source`, and `editor` links are shown, but the `compiled` link is #' not shown. #' - To suppress all action links, call with `actions = FALSE`. #' - To change from the default for a given action link, call with a list: #' `actions = list(editor = FALSE)`. #' #' - The default for `defaultStyle` is `TRUE`, which means that action-links #' are rendered in a widget at the upper-right corner of the rendered chart. #' #' The [vega-embed](https://github.com/vega/vega-embed/#options) library has a lot #' more options; you can supply these as named arguments using `...`. #' #' For example, it is ineffective to set the `width` and `height` parameters #' here when embedding a Vega-Lite specification, as they will be overridden #' by the value in the chart specification. #' #' @param renderer `character` the renderer to use for the view. #' One of `"canvas"` (default) or `"svg"`. #' See [Vega docs](https://vega.github.io/vega/docs/api/view/#view_renderer) #' for details. #' @param actions `logical` or named vector of logicals, determines if action links #' ("Export as PNG/SVG", "View Source", "Open in Vega Editor") #' are included with the embedded view. #' If the value is `TRUE` (default), all action links will be shown #' and none if the value is `FALSE`. This property can be a named vector of #' logicals that maps #' keys (`export`, `source`, `compiled`, `editor`) to logical values for determining #' if each action link should be shown. By default, `export`, `source`, #' and `editor` are `TRUE` and `compiled` is `FALSE`, but these defaults #' can be overridden. For example, if `actions` is #' `list(export = FALSE, source = TRUE)`, the embedded visualization will #' have two links – "View Source" and "Open in Vega Editor". #' @param defaultStyle `logical` or `character` #' default stylesheet for embed actions. If set to `TRUE` (default), #' the embed actions are shown in a menu. Set to `FALSE` to use simple links. #' Provide a `character` string to set the style sheet. #' @param config `character` or `list`, a URL string from which to load #' a Vega/Vega-Lite or Vega-Lite configuration file, or a `list` of #' Vega/Vega-Lite configurations to override the default configuration #' options. If `config` is a URL, it will be subject to standard browser #' security restrictions. Typically this URL will point to a file on the same #' host and port number as the web page itself. #' @param patch `JS` function, `list` or `character`, a function to modify the #' Vega specification before it is parsed. Alternatively, a `list` that, #' when compiled to JSON, will meet #' [JSON-Patch RFC6902](https://www.rfc-editor.org/rfc/rfc6902). #' If you use Vega-Lite, the compiled Vega will be patched. #' Alternatively to the function or the `list`, a URL string from which to #' load the patch can be provided. This URL will be subject to standard #' browser security restrictions. Typically this URL will point to a file #' on the same host and port number as the web page itself. #' @param bind `character` #' @param ... 
other named items, outlined in #' [vega-embed](https://github.com/vega/vega-embed) options. #' #' @seealso [vega-embed library](https://github.com/vega/vega-embed), #' [vegawidget()] #' #' @examples #' vega_embed(renderer = "svg") #' #' @return `list` to be used with vega-embed JavaScript library #' @export #' vega_embed <- function(renderer = c("canvas", "svg"), actions = NULL, defaultStyle = TRUE, config = NULL, patch = NULL, bind = NULL, ...) { renderer <- match.arg(renderer) actions <- validate_actions(actions) options <- list( renderer = renderer, actions = actions, defaultStyle = defaultStyle, config = config, patch = patch, bind = bind, ... ) embed_options <- list_remove_null(options) embed_options } list_remove_null <- function(x) { # determine which elements are NULL is_null <- vapply(x, is.null, logical(1)) # remove them by setting them to NULL (!?!) x[is_null] <- NULL x } validate_actions <- function(actions) { is_null_or_logical <- function(x) { rlang::is_null(x) || rlang::is_scalar_logical(x) } assert_null_or_logical <- function(x, name) { assertthat::assert_that( is_null_or_logical(x), msg = glue::glue( "vega-embed actions: value of `{name}` not NULL or scalar logical." ) ) } assert_named <- function(x) { assertthat::assert_that( !is.null(names(x)), msg = glue::glue("vega-embed actions: lists must be named.") ) } assert_name_legal <- function(name, legal_names) { assertthat::assert_that( name %in% legal_names, msg = glue::glue("vega-embed actions: `{name}` is not a legal name.") ) } # if NULL or scalar logical, all is well - return if (is_null_or_logical(actions)) { return(actions) } # coerce to list and test actions <- as.list(actions) # check names names_not_export <- c("source", "compiled", "editor") names_actions_legal <- c("export", names_not_export) assert_named(actions) purrr::walk(names(actions), assert_name_legal, names_actions_legal) # check source, compiled, editor actions_not_export <- actions[names_not_export] purrr::iwalk(actions_not_export, assert_null_or_logical) # check actions$export (needs further validation only when it is a list) if (!is_null_or_logical(actions$export)) { names_export_legal <- c("svg", "png") assert_named(actions$export) purrr::walk(names(actions$export), assert_name_legal, names_export_legal) purrr::iwalk(actions$export, assert_null_or_logical) } actions }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/vega-embed.R
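# Illustrative sketch (not part of the package source): customizing the
# action links in the embed options, then passing them to vegawidget().
embed <- vega_embed(
  renderer = "svg",
  actions = list(source = FALSE, editor = FALSE)  # export links remain; compiled is off by default
)

vegawidget(spec_mtcars, embed = embed)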
#' Get versions of Vega JS libraries #' #' This is an internal function for updating this package. #' #' @param vega_lite_version `character` version of Vega-Lite, e.g. `"2.5.0"` #' #' @return `list` with elements `vega_lite`, `vega`, `vega_embed` #' #' @examples #' \dontrun{ #' # requires network-access #' get_vega_version(vega_lite_version = "2.5.0") #' } #' @keywords internal #' @noRd #' get_vega_version <- function(vega_lite_version) { url <- glue::glue("https://cdn.jsdelivr.net/npm/vega-lite@{vega_lite_version}/package.json") package <- jsonlite::fromJSON(url) # get versions vega_version <- sub("\\^", "", package$peerDependencies$vega) vega_embed_version <- sub("\\^", "", package$devDependencies$`vega-embed`) vega_version <- list( vega_lite = vega_lite_version, vega = vega_version, vega_embed = vega_embed_version ) vega_version } #' Get Vega JavaScript versions #' #' Use these functions to get which versions of Vega JavaScript libraries #' are available. `vega_version_all()` returns a data frame showing all #' versions included in this package, `vega_version_available()` returns #' all versions available - subject to locking, and #' `vega_version()` shows the default version. #' #' This package offers multiple widgets, each corresponding to a major version #' of Vega-Lite. Only one of these widgets can be used for a given loading of #' this package. When `vegawidget()` is first called, the widget is "locked" #' according to the `$schema` in the `vegaspec` used, or the default - the #' most-recent version. #' #' \describe{ #' \item{`is_locked`}{indicates if `vegawidget()` has locked the version.} #' \item{`widget`}{indicates which version of the widget would be used.} #' } #' #' @param major `logical` return major version-tags rather than the #' tags for the specific versions supported by this package #' #' @return \describe{ #' \item{vega_version()}{`list` with elements: `is_locked`, `widget`, #' `vega_lite`, `vega`, `vega_embed`.} #' \item{vega_version_all()}{`data.frame` with elements: `widget`, #' `vega_lite`, `vega`, `vega_embed`.} #' \item{vega_version_available()}{`data.frame` with elements: `widget`, #' `vega_lite`, `vega`, `vega_embed`.} #' } #' #' @examples #' vega_version() #' vega_version(major = TRUE) #' vega_version_all() #' vega_version_available() #' @export #' vega_version <- function(major = FALSE) { x <- vega_version_all(major = major) x <- x[.vega_version_all[["widget"]] == vw_env[["widget"]], ] x <- as.list(x) x[["is_locked"]] <- vw_env[["is_locked"]] x } #' @rdname vega_version #' @export #' vega_version_all <- function(major = FALSE) { x <- .vega_version_all if (major) { x <- lapply(x, get_major) x <- as.data.frame(x) } x } #' @rdname vega_version #' @export #' vega_version_available <- function(major = FALSE) { x <- vega_version(major = major) x_all <- vega_version_all(major = major) if (x$is_locked) { return(x_all[x_all[["widget"]] == x[["widget"]], ]) } x_all } # function to return the major component get_major <- function(x) { regexp <- "^\\d+" # predicate - if not string or does not begin with digit, return if (!is.character(x) || !all(grepl(regexp, x))) { return(x) } regmatches(x, regexpr(regexp, x)) } #' Get the index of the candidate that matches the version #' #' @param version `character` #' @param candidates `character` vector #' #' @return `list` with elements: #' - `index`: `integer` index of candidate that best matches version #' - `message`: `character`, if candidate not suitable (`NULL` if OK) #' @examples #' get_candidate("5", c("5.2.0", "4.1.7")) # 1L #' 
get_candidate("4", c("5.2.0", "4.1.7")) # 2L #' get_candidate("6", c("5.2.0", "4.1.7")) # 1L, with warning #' get_candidate("3", c("5.2.0", "4.1.7")) # 2L, with warning #' get_candidate("5.21.0", c("5.21.0", "5.17.0")) # 1L #' get_candidate("5.01.0", c("5.21.0", "5.17.0")) # 1L #' get_candidate("5.22.0", c("5.21.0", "5.17.0")) # 1L, with warning #' #' @noRd #' get_candidate <- function(version, candidates) { # package_version needs to be vectorized package_major <- function(x) { package_version(x)$major } major <- function(x) { vapply(x, package_major, numeric(1), USE.NAMES = FALSE) } # need to save original for message v_orig <- version # if version has no ".", append a ".0" - so that numeric_version() will work if (!grepl("\\.", version)) { version <- glue::glue("{version}.0") } # if version smaller than smallest candidate, use smallest candidate if (major(version) < min(major(candidates))) { min_can <- min(candidates) return( list( index = match(min_can, candidates), message = glue::glue( "version {v_orig} smaller than minimum major-version available: {min_can}" ) ) ) } # if version larger than largest candidate, use largest candidate if (major(version) > max(major(candidates))) { max_can <- max(candidates) return( list( index = match(max_can, candidates), message = glue::glue( "version {v_orig} larger than maximum major-version available: {max_can}" ) ) ) } # get latest version with same major version can_at_major = candidates[major(version) == major(candidates)] list( index = match(max(can_at_major), candidates), message = NULL ) } get_widget_string <- function(library, version, available) { library_with_underscore <- gsub("-", "_", library) candidates <- available[[library_with_underscore]] result <- get_candidate(version, candidates) widget <- available[["widget"]][result[["index"]]] if (!is.null(result[["message"]])) { warning(glue::glue("{library} {result[['message']]}"), call. = FALSE) } widget } vw_lock_set <- function(value) { vw_env[["is_locked"]] <- as.logical(value[[1]]) } vw_widget_set <- function(value) { vw_env[["widget"]] <- as.character(value[[1]]) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/vega-version.R
#' Coerce to vegaspec #' #' Vega and Vega-Lite use JSON as their specification-format. Within R, #' it seems natural to work with these specifications as lists. Accordingly, #' a `vegaspec` is also a list. This family of functions is used to coerce lists, #' JSON, and character strings to `vegaspec`. #' #' The `character` method for this function will take: #' \itemize{ #' \item{JSON string.} #' \item{A path to a local JSON file.} #' \item{A URL that returns a JSON file.} #' } #' #' For Vega and Vega-Lite, the translation between lists and JSON is a little #' bit particular. This function, [as_vegaspec()], can be used to translate #' from JSON; [vw_as_json()] can be used to translate to JSON. #' #' You can use the function [vw_spec_version()] to determine if a `vegaspec` is built for #' Vega-Lite or Vega. You can use [vw_to_vega()] to translate a Vega-Lite spec to Vega. #' #' @param spec An object to be coerced to `vegaspec`, a Vega/Vega-Lite specification #' @param encoding `character`, if spec is a file or a URL, specifies the encoding. #' @param ... Other arguments (attempt to future-proof) #' #' @return An object with S3 class `vegaspec` #' @examples #' spec <- list( #' `$schema` = vega_schema(), #' data = list(values = mtcars), #' mark = "point", #' encoding = list( #' x = list(field = "wt", type = "quantitative"), #' y = list(field = "mpg", type = "quantitative"), #' color = list(field = "cyl", type = "nominal") #' ) #' ) #' #' as_vegaspec(spec) #' #' \dontrun{ #' # requires network-access #' as_vegaspec("https://vega.github.io/vega-lite/examples/specs/bar.vl.json") #' } #' @seealso [Vega](https://vega.github.io/vega/), #' [Vega-Lite](https://vega.github.io/vega-lite/), #' [vw_as_json()], [vw_spec_version()], [vw_to_vega()] #' @export #' as_vegaspec <- function(spec, ...) { UseMethod("as_vegaspec") } #' @rdname as_vegaspec #' @export #' as_vegaspec.default <- function(spec, ...) { stop("as_vegaspec(): no method for class ", class(spec), call. = FALSE) } #' @rdname as_vegaspec #' @export #' as_vegaspec.vegaspec <- function(spec, ...) { spec <- .as_list(spec) spec <- .as_vegaspec(spec) spec } #' @rdname as_vegaspec #' @export #' as_vegaspec.list <- function(spec, ...) { spec <- .as_vegaspec(spec) spec } #' @rdname as_vegaspec #' @export #' as_vegaspec.json <- function(spec, ...) { spec <- .as_list(spec) spec <- .as_vegaspec(spec) spec } #' @rdname as_vegaspec #' @export #' as_vegaspec.character <- function(spec, encoding = "UTF-8", ...) { is_url <- rlang::is_string(spec) && grepl("^http(s?)://", spec) is_con <- rlang::is_string(spec) && file.exists(spec) # remote file or local file if (is_url || is_con) { spec <- vw_fetch(spec) } spec <- .as_json(spec) spec <- .as_list(spec) spec <- .as_vegaspec(spec) spec } #' @rdname as_vegaspec #' @export #' as_vegaspec.vegawidget <- function(spec, ...) { # Pull out the spec from a widget object spec <- .as_list(spec$x)$chart_spec .as_vegaspec(spec) } #' Coerce vegaspec to JSON #' #' For Vega and Vega-Lite, the translation between lists and JSON is a little #' bit particular. This function, [vw_as_json()], can be used to translate #' to JSON; [as_vegaspec()] can be used to translate from JSON. #' #' @inheritParams as_vegaspec #' @param pretty `logical` indicates to use pretty (vs. minified) formatting #' #' @return `jsonlite::json` object #' @examples #' vw_as_json(spec_mtcars) #' #' @seealso [as_vegaspec()] #' @export #' vw_as_json <- function(spec, pretty = TRUE) { spec <- as_vegaspec(spec) spec <- .as_json(spec, pretty = pretty) spec }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/vegaspec.R
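# Illustrative sketch (not part of the package source): the character method
# of as_vegaspec() accepts a JSON string directly (as well as a path or URL).
# The spec below is a hypothetical example.
spec_json <- '
{
  "$schema": "https://vega.github.io/schema/vega-lite/v5.json",
  "data": {"values": [{"a": 1, "b": 10}, {"a": 2, "b": 20}]},
  "mark": "bar",
  "encoding": {
    "x": {"field": "a", "type": "ordinal"},
    "y": {"field": "b", "type": "quantitative"}
  }
}'

spec <- as_vegaspec(spec_json)
class(spec)  # "vegaspec_unit" "vegaspec_vega_lite" "vegaspec" "list"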
#' Create a Vega/Vega-Lite htmlwidget #' #' The main use of this package is to render a `vegawidget`, #' which is also an `htmlwidget`. This function builds a `vegawidget` #' using a `vegaspec`. #' #' If `embed` is `NULL`, `vegawidget()` uses: #' #' - `getOption("vega.embed")`, if that is NULL: #' - an empty call to [vega_embed()] #' #' The most-important arguments to [vega_embed()] are: #' #' - `renderer`, to specify `"canvas"` (default) or `"svg"` #' - `actions`, to specify action-links #' for `export`, `source`, `compiled`, and `editor` #' #' If either `width` or `height` is specified, the `autosize()` function #' is used to override the width and height of the `spec`. There are some #' important provisions: #' #' - Specifying `width` and `height` is #' [effective only for single-view charts and layered charts]( #' https://vega.github.io/vega-lite/docs/size.html#limitations). #' It will not work for concatenated, faceted, or repeated charts. #' #' - In the `spec`, the default interpretation of width and height #' is to describe the dimensions of the #' **plotting rectangle**, not including the space used by the axes, labels, #' etc. Here, `width` and `height` describe the dimensions #' of the **entire** rendered chart, including axes, labels, etc. #' #' Please note that if you are using a remote URL to refer to a dataset in #' your vegaspec, it may not render properly in the RStudio IDE, #' due to a security policy set by RStudio. If you open the chart in a #' browser, it should render properly. #' #' @inheritParams as_vegaspec #' @inheritParams vw_autosize #' @param embed `list` to specify #' [vega-embed](https://github.com/vega/vega-embed#options) options, #' see **Details** on how this is set if `NULL`. #' @param elementId `character`, explicit element ID for the vegawidget, #' useful if you have other JavaScript that needs to explicitly #' discover and interact with a specific vegawidget #' @param base_url `character`, the base URL to prepend to data-URL elements #' in the vegaspec. This could be the path #' to a local directory that contains a local file referenced in the spec. #' It could be the base for a remote URL. Please note that by specifying #' the `base_url` here, you will override any `loader` that you specify #' using `vega_embed()`. Please note that this does not work with #' `knitr`. See examples. #' @param ... 
other arguments passed to [htmlwidgets::createWidget()] #' #' @return S3 object of class `vegawidget` and `htmlwidget` #' @seealso [vega-embed options](https://github.com/vega/vega-embed#options), #' [vega_embed()], [vw_autosize()] #' @examples #' vegawidget(spec_mtcars, width = 350, height = 350) #' #' # vegaspec with a data URL #' spec_precip <- #' list( #' `$schema` = vega_schema(), #' data = list(url = "seattle-weather.csv"), #' mark = "tick", #' encoding = list( #' x = list(field = "precipitation", type = "quantitative") #' ) #' ) %>% #' as_vegaspec() #' #' # define local path to file #' path_local <- system.file("example-data", package = "vegawidget") #' #' # render using local path (does not work with knitr) #' vegawidget(spec_precip, base_url = path_local) #' #'\dontrun{ #' # requires network-access #' #' # define remote path to file #' url_remote <- "https://vega.github.io/vega-datasets/data" #' #' # render using remote path #' # note: does not render in RStudio IDE; open using browser #' vegawidget(spec_precip, base_url = url_remote) #'} #' @export #' vegawidget <- function(spec, embed = NULL, width = NULL, height = NULL, elementId = NULL, base_url = NULL, ...) { # if `embed` is NULL, check for option embed <- embed %||% getOption("vega.embed") # if `embed` is still NULL, set using empty call to vega_embed() embed <- embed %||% vega_embed() # set width, height if available from an option width <- width %||% getOption("vega.width") height <- height %||% getOption("vega.height") # autosize (if needed) spec <- vw_autosize(spec, width = width, height = height) ## base_url # # if `base_url` is specified here, it overrides the loader specified # in `embed` # if specified, set base_url in embed-loader if (!is.null(base_url)) { embed[["loader"]] <- embed[["loader"]] %||% list() embed[["loader"]][["baseURL"]] <- base_url } # check for `baseURL` in `embed[["loader"]]` baseURL <- embed[["loader"]][["baseURL"]] # if base_url is a local directory need to create a dependency if (!is.null(baseURL) && dir.exists(baseURL)) { # warn if knitr is active if (isTRUE(getOption('knitr.in.progress'))) { warning("attaching local data files does not work with knitr") } # make sure that all the URL's in the spec will be sensible urls <- .find_urls(spec) full_urls <- file.path(normalizePath(baseURL), urls) if (any(!file.exists(full_urls))) { stop( "Local file suggested by base_url and urls in spec does not exist:", full_urls[which(!file.exists(full_urls))] ) } # set data-dependency for this chart get_md5 <- function(file) { digest::digest(algo = "md5", file = file) } # get list, key: filename, value: md5 of file files_md5 <- lapply(full_urls, get_md5) # get md5 of list data_md5 <- digest::digest(files_md5, algo = "md5") # get "unique" suffix for data suffix <- elementId %||% data_md5 data_dependency <- htmltools::htmlDependency( name = glue::glue("data-{suffix}"), version = "0.0.0", src = c(file = normalizePath(baseURL)), attachment = basename(full_urls), all_files = FALSE ) # set loader to refer to new location embed[["loader"]][["baseURL"]] <- glue::glue("lib/data-{suffix}-0.0.0/") } else { data_dependency <- NULL } # use internal methods here because spec has already been validated x <- list( chart_spec = .as_list(spec), embed_options = embed ) x <- .as_json(x) # determine widget from spec spec_version <- vw_spec_version(spec) widget <- get_widget_string( spec_version[["library"]], spec_version[["version"]], vega_version_available() ) # lock the widget vw_widget_set(widget) vw_lock_set(TRUE) vegawidget <- 
htmlwidgets::createWidget( glue::glue("vegawidget-{widget}"), x, width = width, height = height, package = "vegawidget", sizingPolicy = htmlwidgets::sizingPolicy( defaultWidth = "auto", defaultHeight = "auto", viewer.fill = FALSE, knitr.figure = FALSE ), elementId = elementId, # Note -- this blocks the user from being able to specify additional # dependencies themselves through ... but there likely wouldn't be # reason to do so... dependencies = data_dependency, ... ) # insert a generic class for the benefit of as_vegaspec() cls <- class(vegawidget) class(vegawidget) <- c(cls[1], "vegawidget", utils::tail(cls, -1)) vegawidget } #' Shiny-output for vegawidget #' #' Use this function in the UI part of your Shiny app. #' #' @param outputId output variable to read from #' @param width,height Must be a valid CSS unit (like \code{"100\%"}, #' \code{"400px"}, \code{"auto"}) or a number, which will be coerced to a #' string and have \code{"px"} appended. For vegawidgets, `"auto"` is useful #' because, as of now, the spec determines the size of the widget, then the #' widget determines the size of the container. #' @param widget `character`, indicating which version of libraries to use, #' e.g. `"vl5"`. Normally, you should not need to set this. #' See `vega_version_all()` for more information. #' #' @export #' vegawidgetOutput <- function(outputId, width = "auto", height = "auto", widget = NULL) { assert_packages("shiny") widget <- widget %||% vega_version()[["widget"]] widget_avail <- vega_version_all()[["widget"]] assertthat::assert_that( widget %in% widget_avail, msg = glue::glue( "widget value `{widget}` not among legal values: ", "{glue::glue_collapse(widget_avail, sep = ' ')}" ) ) htmlwidgets::shinyWidgetOutput( outputId, glue::glue("vegawidget-{widget}"), width, height, package = "vegawidget" ) } #' Render shiny-output for vegawidget #' #' Use this function in the server part of your Shiny app. #' #' @param expr expression that generates a vegawidget. This can be #' a `vegawidget` or a `vegaspec`. #' @param env The environment in which to evaluate \code{expr}. #' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This #' is useful if you want to save an expression in a variable. #' #' @export #' renderVegawidget <- function(expr, env = parent.frame(), quoted = FALSE) { assert_packages("shiny") if (!quoted) { expr <- substitute(expr) } # see https://github.com/vegawidget/vegawidget/pull/190/files#r787265907 f <- function(x) { # if sent a vegaspec, convert to a vegawidget if (inherits(x, "vegaspec")) { x <- vegawidget(x) } x } htmlwidgets::shinyRenderWidget( substitute(f(expr)), vegawidgetOutput, env, quoted = TRUE ) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/vegawidget.R
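# Illustrative sketch (not part of the package source): a minimal Shiny app
# wiring vegawidgetOutput() and renderVegawidget() together.
library(shiny)

ui <- fluidPage(
  vegawidgetOutput("chart")
)

server <- function(input, output, session) {
  # renderVegawidget() accepts a vegaspec or a vegawidget
  output$chart <- renderVegawidget(spec_mtcars)
}

if (interactive()) {
  shinyApp(ui, server)
}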
#' @rdname image #' @export #' vw_write_svg <- function(spec, path, width = NULL, height = NULL, ...) { assert_packages("fs") svg <- vw_to_svg(spec, width = width, height = height, ...) writeLines(svg, fs::path_expand(path)) invisible(spec) } #' @rdname image #' @export #' vw_write_png <- function(spec, path, scale = 1, width = NULL, height = NULL, ...) { assert_packages("fs", "png") bm <- vw_to_bitmap(spec, scale = scale, width = width, height = height, ...) png::writePNG(bm, fs::path_expand(path)) invisible(spec) }
/scratch/gouwar.j/cran-all/cranData/vegawidget/R/write-image.R