#' How to encode x-axis time values
#'
#' @param vl Vega-Lite object
#' @param unit property of a channel definition that sets the level of specificity
#' for a temporal field. Currently supported values are 'year', 'yearmonth',
#' 'yearmonthday', 'yearmonthdate', 'yearday', 'yeardate', 'yearmonthdayhours'
#' and 'yearmonthdayhoursminutes' for non-periodic time units & 'month',
#' 'day', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'hoursminutes',
#' 'hoursminutesseconds', 'minutesseconds' and 'secondsmilliseconds' for
#' periodic time units.
#' @references \href{http://vega.github.io/vega-lite/docs/timeunit.html}{Vega-Lite Time Unit}
#' @export
#' @examples
#' vegalite() %>%
#' cell_size(300, 300) %>%
#' add_data("https://vega.github.io/vega-editor/app/data/unemployment-across-industries.json") %>%
#' encode_x("date", "temporal") %>%
#' encode_y("count", "quantitative", aggregate="sum") %>%
#' encode_color("series", "nominal") %>%
#' scale_x_time(nice="month") %>%
#' scale_color_nominal(range="category20b") %>%
#' axis_x(axisWidth=0, format="%Y", labelAngle=0) %>%
#' axis_y(remove=TRUE) %>%
#' timeunit_x("yearmonth") %>%
#' mark_area(stack="normalize")
timeunit_x <- function(vl, unit) {
vl$x$encoding$x$timeUnit <- unit
vl
}
#' How to encode y-axis time values
#'
#' @param vl Vega-Lite object
#' @param unit property of a channel definition that sets the level of specificity
#' for a temporal field. Currently supported values are 'year', 'yearmonth',
#' 'yearmonthday', 'yearmonthdate', 'yearday', 'yeardate', 'yearmonthdayhours'
#' and 'yearmonthdayhoursminutes' for non-periodic time units & 'month',
#' 'day', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'hoursminutes',
#' 'hoursminutesseconds', 'minutesseconds' and 'secondsmilliseconds' for
#' periodic time units.
#' @references \href{http://vega.github.io/vega-lite/docs/timeunit.html}{Vega-Lite Time Unit}
#' @export
#' @examples
#' # see timeunit_x()
timeunit_y <- function(vl, unit) {
vl$x$encoding$y$timeUnit <- unit
vl
}
/scratch/gouwar.j/cran-all/cranData/vegalite/R/time.r
#' Filter 'null' values
#'
#' Whether to filter null values from the data.
#'
#' @param vl Vega-Lite object created by \code{\link{vegalite}}
#' @param setting if \code{NULL} only quantitative and temporal fields are
#' filtered. If \code{TRUE}, all data items with 'null' values are
#' filtered. If \code{FALSE}, all data items are included.
#' @export
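#' @examples
#' # a minimal sketch: drop all data items with 'null' values before plotting
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", "quantitative") %>%
#' encode_y("Miles_per_Gallon", "quantitative") %>%
#' filter_null(TRUE) %>%
#' mark_point()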
filter_null <- function(vl, setting=NULL) {
if (!is.null(setting)) { vl$x$transform$filterNull <- setting }
vl
}
#' Derive new fields
#'
#' @param vl Vega-Lite object created by \code{\link{vegalite}}
#' @param field the field name in which to store the computed value.
#' @param expr a string containing an expression for the formula. Use the variable
#' \code{"datum"} to refer to the current data object.
#' @export
#' @examples
#' vegalite() %>%
#' add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
#' add_filter("datum.year == 2000") %>%
#' calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
#' encode_x("gender", "nominal") %>%
#' encode_y("people", "quantitative", aggregate="sum") %>%
#' encode_color("gender", "nominal") %>%
#' scale_x_ordinal(band_size=6) %>%
#' scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>%
#' facet_col("age", "ordinal", padding=4) %>%
#' axis_x(remove=TRUE) %>%
#' axis_y(title="population", grid=FALSE) %>%
#' axis_facet_col(orient="bottom", axisWidth=1, offset=-8) %>%
#' facet_cell(stroke_width=0) %>%
#' mark_bar()
calculate <- function(vl, field, expr) {
tmp <- data.frame(field=field, expr=expr, stringsAsFactors=FALSE)
if (length(vl$x$transform$calculate) == 0) {
vl$x$transform$calculate <- tmp
} else {
vl$x$transform$calculate <- rbind.data.frame(vl$x$transform$calculate, tmp)
}
vl
}
#' Add a filter
#'
#' @param vl Vega-Lite object created by \code{\link{vegalite}}
#' @param expr Vega expression for filtering data items (or rows). Each datum
#' object can be referred to using the bound variable \code{datum}. For example,
#' setting \code{expr} to \code{"datum.b2 > 60"} would make the output data
#' include only items that have values in the field \code{b2} over 60.
#' @export
#' @examples
#' vegalite(viewport_height=200, viewport_width=200) %>%
#' cell_size(200, 200) %>%
#' add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
#' add_filter("datum.year == 2000") %>%
#' calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
#' encode_x("gender", "nominal") %>%
#' encode_y("people", "quantitative", aggregate="sum") %>%
#' encode_color("gender", "nominal") %>%
#' scale_x_ordinal(band_size=6) %>%
#' scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>%
#' facet_col("age", "ordinal", padding=4) %>%
#' axis_x(remove=TRUE) %>%
#' axis_y(title="population", grid=FALSE) %>%
#' axis_facet_col(orient="bottom", axisWidth=1, offset=-8) %>%
#' facet_cell(stroke_width=0) %>%
#' mark_bar()
add_filter <- function(vl, expr) {
vl$x$transform$filter <- expr
vl
}
/scratch/gouwar.j/cran-all/cranData/vegalite/R/transform.r
#' Create Vega-Lite specs using htmlwidget idioms
#'
#' Creation of Vega-Lite spec charts is virtually 100\% feature complete.
#' Some of the parameters to functions are only documented in the TypeScript
#' source code, which will take a bit of time to
#' wade through. All the visualizations you find in the
#' \href{http://vega.github.io/vega-lite/gallery.html}{Vega-Lite Gallery} work.
#' \cr
#' Functions also exist which enable creation of widgets from a JSON spec and
#' turning a \code{vegalite} package created object into a JSON spec.
#'
#' You start by calling \code{vegalite()} which allows you to setup core
#' configuration options, including whether you want to display links to
#' show the source and export the visualization. You can also set the background
#' here and the \code{viewport_width} and \code{viewport_height}. Those are
#' very important as they control the height and width of the widget and also
#' the overall area for the chart. This does \emph{not} set the height/width
#' of the actual chart. That is done with \code{cell_size()}.
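#'
#' For example, here is a minimal sketch that reserves a 400x300 widget
#' viewport but draws the chart itself in a 300x200 cell:
#'
#' \preformatted{
#' vegalite(viewport_width=400, viewport_height=300) %>%
#' cell_size(300, 200) %>%
#' add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
#' encode_x("Horsepower", "quantitative") %>%
#' encode_y("Miles_per_Gallon", "quantitative") %>%
#' mark_point()
#' }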
#'
#' Once you instantiate the widget, you need to \code{add_data()} which can
#' be a \code{data.frame}, local CSV, TSV or JSON file (that will be converted
#' to a \code{data.frame}) or a non-relative URL (which will not be read and
#' converted but will remain a URL in the Vega-Lite spec).
#'
#' You then need to \code{encode_x()} & \code{encode_y()} variables that
#' map to columns in the data spec and choose one \code{mark_...()} to
#' represent the encoding.
#'
#' Here's a sample, basic Vega-Lite widget:
#'
#' \preformatted{
#' dat <- jsonlite::fromJSON('[
#' {"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
#' {"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
#' {"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
#' ]')
#'
#' vegalite() %>%
#' add_data(dat) %>%
#' encode_x("a", "ordinal") %>%
#' encode_y("b", "quantitative") %>%
#' mark_bar() -> vl
#'
#' vl
#' }
#'
#' That is the minimum set of requirements for a basic Vega-Lite spec and
#' will create a basic widget.
#'
#' You can also convert that R widget object \code{to_spec()} which will return
#' the JSON for the Vega-Lite spec (allowing you to use it outside of R).
#'
#' \preformatted{
#'
#' to_spec(vl)
#'
#' {
#' "description": "",
#' "data": {
#' "values": [
#' { "a": "A", "b": 28 }, { "a": "B", "b": 55 }, { "a": "C", "b": 43 },
#' { "a": "D", "b": 91 }, { "a": "E", "b": 81 }, { "a": "F", "b": 53 },
#' { "a": "G", "b": 19 }, { "a": "H", "b": 87 }, { "a": "I", "b": 52 }
#' ]
#' },
#' "mark": "bar",
#' "encoding": {
#' "x": {
#' "field": "a",
#'        "type": "ordinal"
#' },
#' "y": {
#' "field": "b",
#' "type": "quantitative"
#' }
#' },
#' "config": [],
#' "embed": {
#' "renderer": "svg",
#' "actions": {
#' "export": false,
#' "source": false,
#' "editor": false
#' }
#' }
#' }
#'
#' }
#'
#' If you already have a Vega-Lite JSON spec that has embedded data or a
#' non-relative URL, you can create a widget from it via \code{from_spec()}
#' by passing in the full JSON spec or a URL to a full JSON spec.
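#'
#' For example, a round-trip sketch using the spec built above:
#'
#' \preformatted{
#' from_spec(to_spec(vl))
#' }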
#'
#' If you're good with HTML (etc.) and want a more lightweight embedding option, you
#' can also use \code{\link{embed_spec}} which will scaffold a minimum \code{div} +
#' \code{script} source and embed a spec from a \code{vegalite} object.
#'
#' If you like the way Vega-Lite renders charts, you can also use them as static
#' images in PDF knitted documents with the new \code{capture_widget} function.
#' (NOTE that as of this writing, you can just use the development version of
#' \code{knitr} instead of this function.)
#'
#' @name vegalite-package
#' @docType package
#' @author Bob Rudis (@@hrbrmstr)
NULL
#' @importFrom magrittr %>%
#' @export
magrittr::`%>%`
#' @name JS
#' @rdname JS
#' @title Mark character strings as literal JavaScript code
#' @description Mark character strings as literal JavaScript code
#' @export
NULL
#' @name saveWidget
#' @rdname saveWidget
#' @title Save a widget to an HTML file
#' @description Save a widget to an HTML file
#' @export
NULL
/scratch/gouwar.j/cran-all/cranData/vegalite/R/vegalite-package.R
|
#' Create and (optionally) visualize a Vega-Lite spec
#'
#' @param description a single element character vector that provides a description of
#' the plot/spec.
#' @param renderer the renderer to use for the view. One of \code{canvas} or
#' \code{svg} (the default)
#' @param export if \code{TRUE} the \emph{"Export as..."} link will
#' be displayed with the chart. (Default: \code{FALSE}.)
#' @param source if \code{TRUE} the \emph{"View Source"} link will be displayed
#' with the chart. (Default: \code{FALSE}.)
#' @param editor if \code{TRUE} the \emph{"Open in editor"} link will be
#' displayed with the chart. (Default: \code{FALSE}.)
#' @param viewport_width,viewport_height height and width of the overall
#' visualization viewport. This is the overall area reserved for the
#' plot. You can leave these \code{NULL} and use \code{\link{cell_size}}
#' instead but you will want to configure both when making faceted plots.
#' @param background plot background color. If \code{NULL} the background will be transparent.
#' @param time_format the default time format pattern for text and labels of
#' axes and legends (in the form of \href{https://github.com/mbostock/d3/wiki/Time-Formatting}{D3 time format pattern}).
#' Default: \code{\%Y-\%m-\%d}
#' @param number_format the default number format pattern for text and labels of
#' axes and legends (in the form of
#' \href{https://github.com/mbostock/d3/wiki/Formatting}{D3 number format pattern}).
#' Default: \code{s}
#' @references \href{http://vega.github.io/vega-lite/docs/config.html#top-level-config}{Vega-Lite top-level config}
#' @importFrom jsonlite fromJSON toJSON unbox
#' @import htmlwidgets stats
#' @importFrom htmltools tag div span
#' @importFrom utils read.csv
#' @name vegalite
#' @rdname vegalite
#' @export
#' @examples
#' dat <- jsonlite::fromJSON('[
#' {"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
#' {"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
#' {"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
#' ]')
#'
#' vegalite() %>%
#' add_data(dat) %>%
#' encode_x("a", "ordinal") %>%
#' encode_y("b", "quantitative") %>%
#' mark_bar()
vegalite <- function(description="", renderer=c("svg", "canvas"),
export=FALSE, source=FALSE, editor=FALSE,
viewport_width=NULL, viewport_height=NULL,
background=NULL, time_format=NULL, number_format=NULL) {
# forward options using x
params <- list(
description = description,
data = list(),
mark = list(),
encoding = list(),
config = list(),
embed = list(renderer=renderer[1],
actions=list(export=export,
source=source,
editor=editor))
)
if (!is.null(viewport_width) && !is.null(viewport_height)) {
params$config$viewport <- c(viewport_width, viewport_height)
}
if (!is.null(background)) { params$config$background <- background }
if (!is.null(time_format)) { params$config$timeFormat <- time_format }
if (!is.null(number_format)) { params$config$numberFormat <- number_format }
# create widget
htmlwidgets::createWidget(
name = 'vegalite',
x = params,
width = viewport_width,
height = viewport_height,
package = 'vegalite'
)
}
/scratch/gouwar.j/cran-all/cranData/vegalite/R/vegalite.r
|
# .onAttach <- function(...) {
#
# if (!interactive()) return()
#
# packageStartupMessage(paste0("vegalite is under *active* development. ",
# "See https://github.com/hrbrmstr/vegalite for changes"))
#
# }
/scratch/gouwar.j/cran-all/cranData/vegalite/R/zzz.r
|
## ------------------------------------------------------------------------
library(vegalite)
## ------------------------------------------------------------------------
dat <- jsonlite::fromJSON('[
{"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
{"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
{"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
]')
vegalite(viewport_height=250) %>%
cell_size(400, 200) %>%
add_data(dat) %>%
encode_x("a", "ordinal") %>%
encode_y("b", "quantitative") %>%
mark_bar()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
cell_size(400, 400) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Miles_per_Gallon", "quantitative") %>%
mark_point()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
cell_size(400, 400) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Miles_per_Gallon", "quantitative") %>%
mark_circle()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
cell_size(400, 400) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Miles_per_Gallon", "quantitative") %>%
encode_color("Origin", "nominal") %>%
encode_shape("Origin", "nominal") %>%
mark_point()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
cell_size(400, 400) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Miles_per_Gallon", "quantitative") %>%
encode_size("Acceleration", "quantitative") %>%
mark_point()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=450) %>%
cell_size(400, 450) %>%
add_data("https://vega.github.io/vega-editor/app/data/stocks.csv") %>%
add_filter("datum.symbol==='GOOG'") %>%
encode_x("date", "temporal") %>%
encode_y("price", "quantitative") %>%
mark_line()
## ------------------------------------------------------------------------
vegalite(viewport_height=200) %>%
cell_size(400, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Cylinders", "ordinal") %>%
mark_tick()
## ------------------------------------------------------------------------
vegalite(viewport_height=500) %>%
cell_size(400, 400) %>%
add_data("https://vega.github.io/vega-editor/app/data/stocks.csv") %>%
encode_x("date", "temporal") %>%
encode_y("price", "quantitative") %>%
encode_color("symbol", "nominal") %>%
mark_line()
## ------------------------------------------------------------------------
vegalite(viewport_height=350) %>%
add_data("https://vega.github.io/vega-editor/app/data/movies.json") %>%
encode_x("Worldwide_Gross", "quantitative") %>%
encode_y("US_DVD_Sales", "quantitative") %>%
facet_col("MPAA_Rating", "ordinal") %>%
mark_point()
## ------------------------------------------------------------------------
vegalite(viewport_height=1400) %>%
add_data("https://vega.github.io/vega-editor/app/data/movies.json") %>%
encode_x("Worldwide_Gross", "quantitative") %>%
encode_y("US_DVD_Sales", "quantitative") %>%
facet_row("MPAA_Rating", "ordinal") %>%
mark_point()
## ------------------------------------------------------------------------
vegalite(viewport_height=2900) %>%
add_data("https://vega.github.io/vega-editor/app/data/movies.json") %>%
encode_x("Worldwide_Gross", "quantitative") %>%
encode_y("US_DVD_Sales", "quantitative") %>%
facet_col("MPAA_Rating", "ordinal") %>%
facet_row("Major_Genre", "ordinal") %>%
mark_point()
## ------------------------------------------------------------------------
dat <- jsonlite::fromJSON('[
{"x": 0, "y": 1}, {"x": 1, "y": 10},
{"x": 2, "y": 100}, {"x": 3, "y": 1000},
{"x": 4, "y": 10000}, {"x": 5, "y": 100000},
{"x": 6, "y": 1000000}, {"x": 7, "y": 10000000}
]')
vegalite(viewport_height=300) %>%
add_data(dat) %>%
encode_x("x", "quantitative") %>%
encode_y("y", "quantitative") %>%
mark_point() %>%
scale_y_log()
## ------------------------------------------------------------------------
vegalite(viewport_width=500, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
encode_x("people", "quantitative", aggregate="sum") %>%
encode_y("age", "ordinal") %>%
scale_y_ordinal(band_size=17) %>%
add_filter("datum.year == 2000") %>%
mark_bar()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/movies.json") %>%
encode_x("IMDB_Rating", "quantitative") %>%
encode_y("Rotten_Tomatoes_Rating", "quantitative") %>%
encode_size("*", "quantitative", aggregate="count") %>%
bin_x(maxbins=10) %>%
bin_y(maxbins=10) %>%
mark_point()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=600) %>%
add_data("https://vega.github.io/vega-editor/app/data/barley.json") %>%
encode_x("year", "ordinal") %>%
encode_y("yield", "quantitative", aggregate="median") %>%
encode_color("site", "nominal") %>%
scale_x_ordinal(band_size=50, padding=0.5) %>%
mark_line()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/movies.json") %>%
encode_x("IMDB_Rating", "quantitative") %>%
encode_y("*", "quantitative", aggregate="count") %>%
bin_x(maxbins=10) %>%
mark_bar()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/seattle-weather.csv") %>%
encode_x("date", "temporal") %>%
encode_y("*", "quantitative", aggregate="count") %>%
encode_color("weather", "nominal") %>%
scale_color_nominal(domain=c("sun","fog","drizzle","rain","snow"),
range=c("#e7ba52","#c7c7c7","#aec7e8","#1f77b4","#9467bd")) %>%
timeunit_x("month") %>%
mark_bar()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/barley.json") %>%
encode_x("yield", "quantitative", aggregate="sum") %>%
encode_y("variety", "nominal") %>%
encode_color("site", "nominal") %>%
mark_bar()
## ------------------------------------------------------------------------
vegalite() %>%
cell_size(300, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/unemployment-across-industries.json") %>%
encode_x("date", "temporal") %>%
encode_y("count", "quantitative", aggregate="sum") %>%
encode_color("series", "nominal") %>%
scale_color_nominal(range="category20b") %>%
timeunit_x("yearmonth") %>%
scale_x_time(nice="month") %>%
axis_x(axisWidth=0, format="%Y", labelAngle=0) %>%
mark_area()
## ------------------------------------------------------------------------
vegalite() %>%
cell_size(300, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/unemployment-across-industries.json") %>%
encode_x("date", "temporal") %>%
encode_y("count", "quantitative", aggregate="sum") %>%
encode_color("series", "nominal") %>%
scale_color_nominal(range="category20b") %>%
timeunit_x("yearmonth") %>%
scale_x_time(nice="month") %>%
axis_x(axisWidth=0, format="%Y", labelAngle=0) %>%
mark_area(interpolate="basis", stack="center")
## ------------------------------------------------------------------------
vegalite() %>%
cell_size(300, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Miles_per_Gallon", "quantitative") %>%
encode_color("Origin", "nominal") %>%
calculate("OriginInitial", "datum.Origin[0]") %>%
encode_text("OriginInitial", "nominal") %>%
mark_text()
## ------------------------------------------------------------------------
vegalite() %>%
cell_size(300, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/unemployment-across-industries.json") %>%
encode_x("date", "temporal") %>%
timeunit_x("yearmonth") %>%
axis_x(axisWidth=0, format="%Y", labelAngle=0) %>%
encode_y("count", "quantitative", aggregate="sum") %>%
mark_area()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
add_filter("datum.year == 2000") %>%
calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
encode_x("gender", "nominal") %>%
encode_y("people", "quantitative", aggregate="sum") %>%
encode_color("gender", "nominal") %>%
scale_x_ordinal(band_size=6) %>%
scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>%
facet_col("age", "ordinal", padding=4) %>%
axis_x(remove=TRUE) %>%
axis_y(title="population", grid=FALSE) %>%
axis_facet_col(orient="bottom", axisWidth=1, offset=-8) %>%
facet_cell(stroke_width=0) %>%
mark_bar()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
encode_x("age", "ordinal") %>%
encode_y("people", "quantitative", aggregate="sum") %>%
encode_color("gender", "nominal") %>%
scale_x_ordinal(band_size=17) %>%
scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>%
mark_bar(stack="normalize")
## ------------------------------------------------------------------------
vegalite() %>%
cell_size(300, 300) %>%
add_data("https://vega.github.io/vega-editor/app/data/unemployment-across-industries.json") %>%
encode_x("date", "temporal") %>%
encode_y("count", "quantitative", aggregate="sum") %>%
encode_color("series", "nominal") %>%
scale_x_time(nice="month") %>%
scale_color_nominal(range="category20b") %>%
axis_x(axisWidth=0, format="%Y", labelAngle=0) %>%
axis_y(remove=TRUE) %>%
timeunit_x("yearmonth") %>%
mark_area(stack="normalize")
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
add_filter("datum.year == 2000") %>%
calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
encode_x("age", "ordinal") %>%
encode_y("people", "quantitative", aggregate="sum") %>%
encode_color("gender", "nominal") %>%
scale_x_ordinal(band_size=17) %>%
scale_color_nominal(range=c("#e377c2","#1f77b4")) %>%
axis_y(title="Population") %>%
mark_bar(opacity=0.6, stack="none")
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
add_filter("datum.year == 2000") %>%
calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
encode_x("age", "ordinal") %>%
encode_y("people", "quantitative", aggregate="sum") %>%
encode_color("gender", "nominal") %>%
facet_row("gender", "nominal") %>%
scale_x_ordinal(band_size=17) %>%
scale_color_nominal(range=c("#EA98D2","#659CCA")) %>%
axis_y(title="Population") %>%
mark_bar()
## ------------------------------------------------------------------------
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/barley.json") %>%
encode_x("yield", "quantitative", aggregate="sum") %>%
encode_y("variety", "nominal") %>%
encode_color("site", "nominal") %>%
facet_col("year", "ordinal") %>%
mark_bar()
## ------------------------------------------------------------------------
vegalite(viewport_height=700) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("*", "quantitative", aggregate="count") %>%
encode_color("Origin", "nominal") %>%
facet_row("Origin", "nominal") %>%
bin_x(maxbins=15) %>%
mark_bar()
## ------------------------------------------------------------------------
vegalite(viewport_height=1200) %>%
add_data("https://vega.github.io/vega-editor/app/data/barley.json") %>%
encode_x("yield", "quantitative", aggregate="mean") %>%
encode_y("variety", "ordinal", sort=sort_def("yield", "mean")) %>%
encode_color("year", "nominal") %>%
facet_row("site", "ordinal") %>%
scale_y_ordinal(band_size=12) %>%
mark_point()
## ------------------------------------------------------------------------
vegalite(viewport_width=300, viewport_height=300) %>%
cell_size(300, 300) %>%
add_data("https://vega.github.io/vega-editor/app/data/driving.json") %>%
encode_x("miles", "quantitative") %>%
encode_y("gas", "quantitative") %>%
encode_path("year", "temporal") %>%
scale_x_linear(zero=FALSE) %>%
scale_y_linear(zero=FALSE) %>%
mark_line()
## ------------------------------------------------------------------------
vegalite(viewport_width=200, viewport_height=200) %>%
cell_size(200, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Miles_per_Gallon", "quantitative") %>%
encode_color("Origin", "nominal") %>%
encode_order("Origin", "ordinal", sort="descending") %>%
mark_point()
## ------------------------------------------------------------------------
vegalite(viewport_width=200, viewport_height=200) %>%
cell_size(200, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/stocks.csv") %>%
encode_x("date", "temporal") %>%
encode_y("price", "quantitative") %>%
encode_detail("symbol", "nominal") %>%
mark_line()
## ------------------------------------------------------------------------
vegalite() %>%
cell_size(200, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative", aggregate="mean") %>%
encode_y("Displacement", "quantitative", aggregate="mean") %>%
encode_detail("Origin", "nominal") %>%
mark_point()
/scratch/gouwar.j/cran-all/cranData/vegalite/inst/doc/intro_to_vegalite.R
---
title: "Introduction to vegalite"
author: "Bob Rudis"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Introduction to vegalite}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
`vegalite` is an R `htmlwidget` interface to the [Vega-Lite](https://vega.github.io/vega-lite/) JavaScript visualization library.
What is "Vega" and why "-Lite"? Vega is _"a full declarative visualization grammar, suitable for expressive custom interactive visualization design and programmatic generation."_ Vega-Lite _"provides a higher-level grammar for visual analysis, comparable to ggplot or Tableau, that generates complete Vega specifications."_ Vega-Lite compiles to Vega and is more compact and accessible than Vega. Both are just JSON data files with a particular schema that let you encode the data, encodings and aesthetics for statistical charts.
The following is a gallery of code & examples to help you get started with the package.
```{r}
library(vegalite)
```
### bar mark
```{r}
dat <- jsonlite::fromJSON('[
{"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
{"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
{"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
]')
vegalite(viewport_height=250) %>%
cell_size(400, 200) %>%
add_data(dat) %>%
encode_x("a", "ordinal") %>%
encode_y("b", "quantitative") %>%
mark_bar()
```
### point mark
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
cell_size(400, 400) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Miles_per_Gallon", "quantitative") %>%
mark_point()
```
### circle mark
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
cell_size(400, 400) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Miles_per_Gallon", "quantitative") %>%
mark_circle()
```
### color and shape
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
cell_size(400, 400) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Miles_per_Gallon", "quantitative") %>%
encode_color("Origin", "nominal") %>%
encode_shape("Origin", "nominal") %>%
mark_point()
```
### size
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
cell_size(400, 400) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Miles_per_Gallon", "quantitative") %>%
encode_size("Acceleration", "quantitative") %>%
mark_point()
```
### filtered line
```{r}
vegalite(viewport_width=400, viewport_height=450) %>%
cell_size(400, 450) %>%
add_data("https://vega.github.io/vega-editor/app/data/stocks.csv") %>%
add_filter("datum.symbol==='GOOG'") %>%
encode_x("date", "temporal") %>%
encode_y("price", "quantitative") %>%
mark_line()
```
### ticks
```{r}
vegalite(viewport_height=200) %>%
cell_size(400, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Cylinders", "ordinal") %>%
mark_tick()
```
### multi-series line
```{r}
vegalite(viewport_height=500) %>%
cell_size(400, 400) %>%
add_data("https://vega.github.io/vega-editor/app/data/stocks.csv") %>%
encode_x("date", "temporal") %>%
encode_y("price", "quantitative") %>%
encode_color("symbol", "nominal") %>%
mark_line()
```
### facet col
```{r}
vegalite(viewport_height=350) %>%
add_data("https://vega.github.io/vega-editor/app/data/movies.json") %>%
encode_x("Worldwide_Gross", "quantitative") %>%
encode_y("US_DVD_Sales", "quantitative") %>%
facet_col("MPAA_Rating", "ordinal") %>%
mark_point()
```
### facet row
```{r}
vegalite(viewport_height=1400) %>%
add_data("https://vega.github.io/vega-editor/app/data/movies.json") %>%
encode_x("Worldwide_Gross", "quantitative") %>%
encode_y("US_DVD_Sales", "quantitative") %>%
facet_row("MPAA_Rating", "ordinal") %>%
mark_point()
```
### facet both
```{r}
vegalite(viewport_height=2900) %>%
add_data("https://vega.github.io/vega-editor/app/data/movies.json") %>%
encode_x("Worldwide_Gross", "quantitative") %>%
encode_y("US_DVD_Sales", "quantitative") %>%
facet_col("MPAA_Rating", "ordinal") %>%
facet_row("Major_Genre", "ordinal") %>%
mark_point()
```
### log scale
```{r}
dat <- jsonlite::fromJSON('[
{"x": 0, "y": 1}, {"x": 1, "y": 10},
{"x": 2, "y": 100}, {"x": 3, "y": 1000},
{"x": 4, "y": 10000}, {"x": 5, "y": 100000},
{"x": 6, "y": 1000000}, {"x": 7, "y": 10000000}
]')
vegalite(viewport_height=300) %>%
add_data(dat) %>%
encode_x("x", "quantitative") %>%
encode_y("y", "quantitative") %>%
mark_point() %>%
scale_y_log()
```
### aggregate bar chart
```{r}
vegalite(viewport_width=500, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
encode_x("people", "quantitative", aggregate="sum") %>%
encode_y("age", "ordinal") %>%
scale_y_ordinal(band_size=17) %>%
add_filter("datum.year == 2000") %>%
mark_bar()
```
### binned scatterplot
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/movies.json") %>%
encode_x("IMDB_Rating", "quantitative") %>%
encode_y("Rotten_Tomatoes_Rating", "quantitative") %>%
encode_size("*", "quantitative", aggregate="count") %>%
bin_x(maxbins=10) %>%
bin_y(maxbins=10) %>%
mark_point()
```
### slope graph
```{r}
vegalite(viewport_width=400, viewport_height=600) %>%
add_data("https://vega.github.io/vega-editor/app/data/barley.json") %>%
encode_x("year", "ordinal") %>%
encode_y("yield", "quantitative", aggregate="median") %>%
encode_color("site", "nominal") %>%
scale_x_ordinal(band_size=50, padding=0.5) %>%
mark_line()
```
### histogram
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/movies.json") %>%
encode_x("IMDB_Rating", "quantitative") %>%
encode_y("*", "quantitative", aggregate="count") %>%
bin_x(maxbins=10) %>%
mark_bar()
```
### stacked bar chart
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/seattle-weather.csv") %>%
encode_x("date", "temporal") %>%
encode_y("*", "quantitative", aggregate="count") %>%
encode_color("weather", "nominal") %>%
scale_color_nominal(domain=c("sun","fog","drizzle","rain","snow"),
range=c("#e7ba52","#c7c7c7","#aec7e8","#1f77b4","#9467bd")) %>%
timeunit_x("month") %>%
mark_bar()
```
### horizontal stacked bar chart
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/barley.json") %>%
encode_x("yield", "quantitative", aggregate="sum") %>%
encode_y("variety", "nominal") %>%
encode_color("site", "nominal") %>%
mark_bar()
```
### stacked area chart
```{r}
vegalite() %>%
cell_size(300, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/unemployment-across-industries.json") %>%
encode_x("date", "temporal") %>%
encode_y("count", "quantitative", aggregate="sum") %>%
encode_color("series", "nominal") %>%
scale_color_nominal(range="category20b") %>%
timeunit_x("yearmonth") %>%
scale_x_time(nice="month") %>%
axis_x(axisWidth=0, format="%Y", labelAngle=0) %>%
mark_area()
```
### streamgraph!
```{r}
vegalite() %>%
cell_size(300, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/unemployment-across-industries.json") %>%
encode_x("date", "temporal") %>%
encode_y("count", "quantitative", aggregate="sum") %>%
encode_color("series", "nominal") %>%
scale_color_nominal(range="category20b") %>%
timeunit_x("yearmonth") %>%
scale_x_time(nice="month") %>%
axis_x(axisWidth=0, format="%Y", labelAngle=0) %>%
mark_area(interpolate="basis", stack="center")
```
### scatter text
```{r}
vegalite() %>%
cell_size(300, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Miles_per_Gallon", "quantitative") %>%
encode_color("Origin", "nominal") %>%
calculate("OriginInitial", "datum.Origin[0]") %>%
encode_text("OriginInitial", "nominal") %>%
mark_text()
```
### area chart
```{r}
vegalite() %>%
cell_size(300, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/unemployment-across-industries.json") %>%
encode_x("date", "temporal") %>%
timeunit_x("yearmonth") %>%
axis_x(axisWidth=0, format="%Y", labelAngle=0) %>%
encode_y("count", "quantitative", aggregate="sum") %>%
mark_area()
```
### grouped bar chart
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
add_filter("datum.year == 2000") %>%
calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
encode_x("gender", "nominal") %>%
encode_y("people", "quantitative", aggregate="sum") %>%
encode_color("gender", "nominal") %>%
scale_x_ordinal(band_size=6) %>%
scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>%
facet_col("age", "ordinal", padding=4) %>%
axis_x(remove=TRUE) %>%
axis_y(title="population", grid=FALSE) %>%
axis_facet_col(orient="bottom", axisWidth=1, offset=-8) %>%
facet_cell(stroke_width=0) %>%
mark_bar()
```
### normalized stacked bar chart
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
encode_x("age", "ordinal") %>%
encode_y("people", "quantitative", aggregate="sum") %>%
encode_color("gender", "nominal") %>%
scale_x_ordinal(band_size=17) %>%
scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>%
mark_bar(stack="normalize")
```
### normalized stacked area chart
```{r}
vegalite() %>%
cell_size(300, 300) %>%
add_data("https://vega.github.io/vega-editor/app/data/unemployment-across-industries.json") %>%
encode_x("date", "temporal") %>%
encode_y("count", "quantitative", aggregate="sum") %>%
encode_color("series", "nominal") %>%
scale_x_time(nice="month") %>%
scale_color_nominal(range="category20b") %>%
axis_x(axisWidth=0, format="%Y", labelAngle=0) %>%
axis_y(remove=TRUE) %>%
timeunit_x("yearmonth") %>%
mark_area(stack="normalize")
```
### layered bar chart
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
add_filter("datum.year == 2000") %>%
calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
encode_x("age", "ordinal") %>%
encode_y("people", "quantitative", aggregate="sum") %>%
encode_color("gender", "nominal") %>%
scale_x_ordinal(band_size=17) %>%
scale_color_nominal(range=c("#e377c2","#1f77b4")) %>%
axis_y(title="Population") %>%
mark_bar(opacity=0.6, stack="none")
```
### trellis bar chart
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/population.json") %>%
add_filter("datum.year == 2000") %>%
calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>%
encode_x("age", "ordinal") %>%
encode_y("people", "quantitative", aggregate="sum") %>%
encode_color("gender", "nominal") %>%
facet_row("gender", "nominal") %>%
scale_x_ordinal(band_size=17) %>%
scale_color_nominal(range=c("#EA98D2","#659CCA")) %>%
axis_y(title="Population") %>%
mark_bar()
```
### trellis stacked bar chart
```{r}
vegalite(viewport_width=400, viewport_height=400) %>%
add_data("https://vega.github.io/vega-editor/app/data/barley.json") %>%
encode_x("yield", "quantitative", aggregate="sum") %>%
encode_y("variety", "nominal") %>%
encode_color("site", "nominal") %>%
facet_col("year", "ordinal") %>%
mark_bar()
```
### trellis histograms
```{r}
vegalite(viewport_height=700) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("*", "quantitative", aggregate="count") %>%
encode_color("Origin", "nominal") %>%
facet_row("Origin", "nominal") %>%
bin_x(maxbins=15) %>%
mark_bar()
```
### becker's barley trellis plot
```{r}
vegalite(viewport_height=1200) %>%
add_data("https://vega.github.io/vega-editor/app/data/barley.json") %>%
encode_x("yield", "quantitative", aggregate="mean") %>%
encode_y("variety", "ordinal", sort=sort_def("yield", "mean")) %>%
encode_color("year", "nominal") %>%
facet_row("site", "ordinal") %>%
scale_y_ordinal(band_size=12) %>%
mark_point()
```
### sorting line order
```{r}
vegalite(viewport_width=300, viewport_height=300) %>%
cell_size(300, 300) %>%
add_data("https://vega.github.io/vega-editor/app/data/driving.json") %>%
encode_x("miles", "quantitative") %>%
encode_y("gas", "quantitative") %>%
encode_path("year", "temporal") %>%
scale_x_linear(zero=FALSE) %>%
scale_y_linear(zero=FALSE) %>%
mark_line()
```
### sort layer scatterplot
```{r}
vegalite(viewport_width=200, viewport_height=200) %>%
cell_size(200, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative") %>%
encode_y("Miles_per_Gallon", "quantitative") %>%
encode_color("Origin", "nominal") %>%
encode_order("Origin", "ordinal", sort="descending") %>%
mark_point()
```
### detail lines
```{r}
vegalite(viewport_width=200, viewport_height=200) %>%
cell_size(200, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/stocks.csv") %>%
encode_x("date", "temporal") %>%
encode_y("price", "quantitative") %>%
encode_detail("symbol", "nominal") %>%
mark_line()
```
### detail points
```{r}
vegalite() %>%
cell_size(200, 200) %>%
add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
encode_x("Horsepower", "quantitative", aggregate="mean") %>%
encode_y("Displacement", "quantitative", aggregate="mean") %>%
encode_detail("Origin", "nominal") %>%
mark_point()
```
/scratch/gouwar.j/cran-all/cranData/vegalite/inst/doc/intro_to_vegalite.Rmd
### these functions are defined _ex machina_ for radline objects which
### inherit from glm. Here we define them for radfit objects where
### object$models is a list of radline objects
`AIC.radfit` <-
function (object, k = 2, ...)
{
sapply(object$models, AIC, k = k, ...)
}
`deviance.radfit` <-
function(object, ...)
{
sapply(object$models, deviance, ...)
}
`logLik.radfit` <-
function(object, ...)
{
sapply(object$models, logLik, ...)
}
### Define also for radfit.frames which are lists of radfit objects
`AIC.radfit.frame` <-
function(object, k = 2, ...)
{
sapply(object, AIC, k = k, ...)
}
`deviance.radfit.frame` <-
function(object, ...)
{
sapply(object, deviance, ...)
}
`logLik.radfit.frame` <-
function(object, ...)
{
sapply(object, logLik, ...)
}
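### Usage sketch (assumes vegan's 'BCI' example data):
### mod <- radfit(colSums(BCI))  # fit the candidate RAD models to pooled counts
### AIC(mod)                     # dispatches to AIC.radfit: one AIC per model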
/scratch/gouwar.j/cran-all/cranData/vegan/R/AIC.radfit.R
`CCorA` <-
function(Y, X, stand.Y = FALSE, stand.X = FALSE, permutations = 0, ...)
{
epsilon <- sqrt(.Machine$double.eps)
##
## BEGIN: Internal functions
##
cov.inv <- function(mat, no, epsilon) {
## This function returns:
## 1) mat = matrix F of the principal components (PCA object scores);
## 2) S.inv = the inverse of the covariance matrix;
## 3) m = the rank of matrix 'mat'
## The inverse of the PCA covariance matrix is the diagonal
## matrix of (1/eigenvalues). If ncol(mat) = 1, the
## inverse of the covariance matrix contains 1/var(mat).
mat <- as.matrix(mat) # 'mat' was centred before input to cov.inv
if(ncol(mat) == 1) {
S.inv <- as.matrix(1/var(mat))
m <- 1
} else {
S.svd <- svd(cov(mat))
m <- ncol(mat)
mm <- length(which(S.svd$d > max(epsilon, epsilon * S.svd$d[1L])))
if(mm < m) {
message(gettextf("matrix %d: rank=%d < order %d",
no, mm, m))
m <- mm
}
S.inv <- diag(1/S.svd$d[1:m])
mat <- mat %*% S.svd$u[,1:m] # S.svd$u = normalized eigenvectors
}
list(mat=mat, S.inv=S.inv, m=m)
}
## Check zero variances
var.null <- function (mat, no) {
problems <- diag(cov(mat)) <= 0
if (any(problems)) {
whichProbs <- paste(which(problems), collapse=", ")
warning("zero variance in variable(s) ", whichProbs)
stop("verify/modify your matrix No. ", no)
}
invisible(0)
}
probPillai <- function(Y.per, X, n, S11.inv, S22.inv, s, df1, df2, epsilon,
Fref, permat, ...) {
## Permutation test for Pillai's trace in CCorA.
## Reference: Brian McArdle's unpublished graduate course notes.
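## Only the cross-covariance changes under permutation of Y: S11 and
## S22 (hence S11.inv and S22.inv) are invariant to row order.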
S12.per <- cov(Y.per,X)
gross.mat <- S12.per %*% S22.inv %*% t(S12.per) %*% S11.inv
Pillai.per <- sum(diag(gross.mat))
Fper <- (Pillai.per*df2)/((s-Pillai.per)*df1)
Fper >= (Fref-epsilon)
}
## END: internal functions
##
Y <- as.matrix(Y)
var.null(Y,1)
nY <- nrow(Y)
p <- ncol(Y)
if(is.null(colnames(Y))) {
Ynoms <- paste("VarY", 1:p, sep="")
} else {
Ynoms <- colnames(Y)
}
X <- as.matrix(X)
var.null(X,2)
nX <- nrow(X)
q <- ncol(X)
if(is.null(colnames(X))) {
Xnoms <- paste("VarX", 1:q, sep="")
} else {
Xnoms <- colnames(X)
}
if(nY != nX) stop("different numbers of rows in Y and X")
n <- nY
if(is.null(rownames(X)) & is.null(rownames(Y))) {
rownoms <- paste("Obj", 1:n, sep="")
} else {
if(is.null(rownames(X))) {
rownoms <- rownames(Y)
} else {
rownoms <- rownames(X)
}
}
Y.c <- scale(Y, center = TRUE, scale = stand.Y)
X.c <- scale(X, center = TRUE, scale = stand.X)
## Check for identical matrices
if(p == q) {
if(sum(abs(Y-X)) < epsilon^2) stop("Y and X are identical")
if(sum(abs(Y.c-X.c)) < epsilon^2) stop("after centering, Y and X are identical")
}
## Replace Y.c and X.c by tables of their PCA object scores, computed by SVD
temp <- cov.inv(Y.c, 1, epsilon)
Y <- temp$mat
pp <- temp$m
rownames(Y) <- rownoms
temp <- cov.inv(X.c, 2, epsilon)
X <- temp$mat
qq <- temp$m
rownames(X) <- rownoms
## Correction PL, 26dec10
if(max(pp,qq) >= (n-1))
stop("not enough degrees of freedom: max(pp,qq) >= (n-1)")
## Covariance matrices, etc. from the PCA scores
S11 <- cov(Y)
if(sum(abs(S11)) < epsilon) return(0)
S22 <- cov(X)
if(sum(abs(S22)) < epsilon) return(0)
S12 <- cov(Y,X)
if(sum(abs(S12)) < epsilon) return(0)
S11.chol <- chol(S11)
S11.chol.inv <- solve(S11.chol)
S22.chol <- chol(S22)
S22.chol.inv <- solve(S22.chol)
## K summarizes the correlation structure between the two sets of variables
K <- t(S11.chol.inv) %*% S12 %*% S22.chol.inv
K.svd <- svd(K)
Eigenvalues <- K.svd$d^2
##
## Check for circular covariance matrix
if((p == q) & (var(K.svd$d) < epsilon))
warning("[nearly] circular covariance matrix - the solution may be meaningless")
## K.svd$u %*% diag(K.svd$d) %*% t(K.svd$v) # To check that K = U D V'
axenames <- paste("CanAxis",seq_along(K.svd$d),sep="")
U <- K.svd$u
V <- K.svd$v
A <- S11.chol.inv %*% U
B <- S22.chol.inv %*% V
Cy <- (Y %*% A) # Correction 27dec10: remove /sqrt(n-1)
Cx <- (X %*% B) # Correction 27dec10: remove /sqrt(n-1)
## Compute the 'Biplot scores of Y and X variables' a posteriori --
corr.Y.Cy <- cor(Y.c, Cy) # To plot Y in biplot in space Y
corr.Y.Cx <- cor(Y.c, Cx) # Available for plotting Y in space of X
corr.X.Cy <- cor(X.c, Cy) # Available for plotting X in space of Y
corr.X.Cx <- cor(X.c, Cx) # To plot X in biplot in space X
## Add row and column names
rownames(Cy) <- rownames(Cx) <- rownoms
colnames(Cy) <- colnames(Cx) <- axenames
rownames(corr.Y.Cy) <- rownames(corr.Y.Cx) <- Ynoms
rownames(corr.X.Cy) <- rownames(corr.X.Cx) <- Xnoms
colnames(corr.Y.Cy) <- colnames(corr.Y.Cx) <- axenames
colnames(corr.X.Cy) <- colnames(corr.X.Cx) <- axenames
## Compute the two redundancy statistics
RsquareY.X <- simpleRDA2(Y, X)
RsquareX.Y <- simpleRDA2(X, Y)
Rsquare.adj.Y.X <- RsquareAdj(RsquareY.X$Rsquare, n, RsquareY.X$m)
Rsquare.adj.X.Y <- RsquareAdj(RsquareX.Y$Rsquare, n, RsquareX.Y$m)
## Compute Pillai's trace = sum of the canonical eigenvalues
## = sum of the squared canonical correlations
S11.inv <- S11.chol.inv %*% t(S11.chol.inv)
S22.inv <- S22.chol.inv %*% t(S22.chol.inv)
gross.mat <- S12 %*% S22.inv %*% t(S12) %*% S11.inv
PillaiTrace <- sum(diag(gross.mat))
s <- min(pp, qq)
df1 <- max(pp,qq)
df2 <- (n - max(pp,qq) - 1)
Fval <- (PillaiTrace*df2)/((s-PillaiTrace)*df1)
p.Pillai <- pf(Fval, s*df1, s*df2, lower.tail=FALSE)
permat <- getPermuteMatrix(permutations, n, ...)
nperm <- nrow(permat)
if (ncol(permat) != n)
stop(gettextf("'permutations' have %d columns, but data have %d rows",
ncol(permat), n))
if (nperm > 0) {
p.perm <- sapply(seq_len(nperm), function(indx, ...)
probPillai(Y[permat[indx,],] , X, n, S11.inv, S22.inv, s,
df1, df2, epsilon, Fval, nperm, ...))
p.perm <- (sum(p.perm) +1)/(nperm + 1)
} else {
p.perm <- NA
}
out <- list(Pillai=PillaiTrace, Eigenvalues=Eigenvalues, CanCorr=K.svd$d,
Mat.ranks=c(RsquareX.Y$m, RsquareY.X$m),
RDA.Rsquares=c(RsquareY.X$Rsquare, RsquareX.Y$Rsquare),
RDA.adj.Rsq=c(Rsquare.adj.Y.X, Rsquare.adj.X.Y),
nperm=nperm, p.Pillai=p.Pillai, p.perm=p.perm, Cy=Cy, Cx=Cx,
corr.Y.Cy=corr.Y.Cy, corr.X.Cx=corr.X.Cx, corr.Y.Cx=corr.Y.Cx,
corr.X.Cy=corr.X.Cy, control = attr(permat, "control"),
call = match.call())
class(out) <- "CCorA"
out
}
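### Usage sketch (editorial, not part of vegan) with made-up matrices:
### X partly reproduces Y, so the leading canonical correlations are
### high. Guarded from being run when this file is sourced.
if (FALSE) {
    set.seed(7)
    Y <- matrix(rnorm(100), 20, 5)
    X <- Y[, 1:3] + matrix(rnorm(60, sd = 0.5), 20, 3)
    out <- CCorA(Y, X, permutations = 99)
    out$CanCorr # canonical correlations
}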
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/CCorA.R
|
### Internal function for double centring of a *matrix* of
### dissimilarities. We used .C("dblcen", ..., PACKAGE = "stats")
### which does not duplicate its argument, but it was removed from R
### in r60360 | ripley | 2012-08-22 07:59:00 UTC (Wed, 22 Aug 2012)
### "more conversion to .Call, clean up". Input 'x' *must* be a
### matrix. This was originally an internal function in betadisper.R
### (commit 7cbd4529 Thu Aug 23 08:45:31 2012 +0000)
GowerDblcen <- function(x, na.rm = TRUE)
{
cnt <- colMeans(x, na.rm = na.rm)
x <- sweep(x, 2L, cnt, check.margin = FALSE)
cnt <- rowMeans(x, na.rm = na.rm)
sweep(x, 1L, cnt, check.margin = FALSE)
}
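### Numerical check (editorial sketch, not part of vegan): after Gower
### double centring both row and column means vanish. Uses only base R
### and the function defined above; guarded from being sourced.
if (FALSE) {
    set.seed(42)
    d <- as.matrix(dist(matrix(rnorm(40), 10, 4)))
    g <- GowerDblcen(-0.5 * d^2) # centred matrix as used in PCoA
    c(max(abs(rowMeans(g))), max(abs(colMeans(g)))) # both effectively 0
}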
### Internal functions to find additive constants to non-diagonal
### dissimilarities so that there are no negative eigenvalues. The
### Cailliez constant is added to dissimilarities and the Lingoes
### constant is added to squared dissimilarities. Legendre & Anderson
### (Ecol Monogr 69, 1-24; 1999) recommend Lingoes, but
### stats::cmdscale() only provides Cailliez. Input parameter: 'd' is
### a matrix of dissimilarities.
addCailliez <- function(d)
{
n <- nrow(d)
q1 <- seq_len(n)
q2 <- n + q1
## Cailliez makes a 2x2 block matrix with blocks of n x n elements.
## Blocks anti-clockwise, upper left [0]
z <- matrix(0, 2*n, 2*n)
diag(z[q2,q1]) <- -1
z[q1,q2] <- -GowerDblcen(d^2)
z[q2,q2] <- GowerDblcen(2 * d)
## Largest real eigenvalue
e <- eigen(z, symmetric = FALSE, only.values = TRUE)$values
out <- max(Re(e))
max(out, 0)
}
addLingoes <- function(d)
{
## smallest negative eigenvalue (or zero)
d <- -GowerDblcen(d^2)/2
e <- eigen(d, symmetric = TRUE, only.values = TRUE)$values
out <- min(e)
max(-out, 0)
}
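### Usage sketch (editorial, not part of vegan): Bray-Curtis
### dissimilarities are typically non-Euclidean, so the constants are
### positive. Assumes the dune data; guarded from being sourced.
if (FALSE) {
    data(dune)
    d <- as.matrix(vegdist(dune))
    addCailliez(d) # constant added to the dissimilarities
    addLingoes(d)  # constant added to the squared dissimilarities
}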
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/GowerDblcen.R
|
### Rotates metaMDS or monoMDS result so that axis one is parallel to
### vector 'x'.
`MDSrotate` <-
function(object, vec, na.rm = FALSE, ...)
{
workswith <- c("metaMDS", "monoMDS", "GO")
if (!inherits(object, workswith))
stop(gettextf("function works only with the results of: %s",
paste(workswith, collapse = ", ")))
x <- object$points
if (is.null(object$species))
sp <- NA
else
sp <- object$species
N <- NCOL(x)
if (N < 2)
stop(gettextf("needs at least two dimensions"))
## check if vec is a factor and then use lda to find a matrix that
## separates optimally factor levels
if (is.factor(vec) || is.character(vec)) {
da <- lda(x, vec)
vec <- predict(da, dimen = N - 1)$x
        message(sprintf(ngettext(NCOL(vec),
                                 "factor replaced with discriminant axis",
                                 "factor replaced with %d discriminant axes"),
                        NCOL(vec)))
if (NCOL(vec) > 1)
message(gettextf("proportional traces: %.3f",
da$svd[1:NCOL(vec)]^2/sum(da$svd^2)))
}
vec <- as.matrix(vec)
NV <- NCOL(vec)
if (NV >= N)
stop(gettextf("you can have max %d vectors, but you had %d",
N-1, NV))
if (!is.numeric(vec))
stop(gettextf("'vec' must be numeric"))
## vectorfit finds the direction cosine. We rotate first axis to
## 'vec' which means that we make other axes orthogonal to 'vec'
## one by one
if (na.rm)
keep <- complete.cases(vec)
else
keep <- !logical(NROW(vec))
## Rotation loop
for(v in seq_len(NV)) {
for (k in (v+1):N) {
arrs <- vectorfit(x[keep,], vec[keep,v], permutations = 0)$arrows
rot <- arrs[c(v,k)]/sqrt(sum(arrs[c(v,k)]^2))
rot <- drop(rot)
## counterclockwise rotation matrix:
## [cos theta -sin theta]
## [sin theta cos theta]
rot <- rbind(rot, rev(rot))
rot[1,2] <- -rot[1,2]
## Rotation of points and species scores
x[, c(v,k)] <- x[, c(v,k)] %*% rot
if (!all(is.na(sp)))
sp[, c(v,k)] <- sp[, c(v,k)] %*% rot
}
}
## Two or more free axes are (optionally) rotated to PCs
if (N - NV > 1 && attr(object$points, "pc")) {
pc <- prcomp(x[,-seq_len(NV)])
x[,-seq_len(NV)] <- pc$x
if (!all(is.na(sp)))
sp[,-seq_len(NV)] <- sp[,-seq_len(NV)] %*% pc$rotation
}
## '[] <-' retains attributes
object$points[] <- x
object$species[] <- sp
attr(object$points, "pc") <- FALSE
object
}
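### Usage sketch (editorial, not part of vegan): rotate an NMDS so
### that its first axis is parallel to an environmental variable.
### Assumes the varespec/varechem data; guarded from being sourced.
if (FALSE) {
    data(varespec, varechem)
    m <- metaMDS(varespec, trace = FALSE)
    mrot <- MDSrotate(m, varechem$Baresoil)
    plot(mrot, display = "sites")
}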
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/MDSrotate.R
|
`MOStest` <-
function(x, y, interval, ...)
{
if (!missing(interval))
interval <- sort(interval)
x <- eval(x)
m0 <- glm(y ~ x + I(x^2), ...)
k <- coef(m0)
isHump <- unname(k[3] < 0)
hn <- if(isHump) "hump" else "pit"
hump <- unname(-k[2]/2/k[3])
if (missing(interval))
p1 <- min(x)
else
p1 <- interval[1]
if (missing(interval))
p2 <- max(x)
else
p2 <- interval[2]
test <- if (m0$family$family %in% c("binomial", "poisson")) "Chisq" else "F"
tmp <- glm(y ~ I(x^2 - 2*x*p1), ...)
## Chisq test has one column less than F test: extract statistic
## and its P value
statmin <- anova(tmp, m0, test = test)[2, (5:6) - (test == "Chisq")]
tmp <- glm(y ~ I(x^2 - 2*x*p2), ...)
statmax <- anova(tmp, m0, test = test)[2, (5:6) - (test == "Chisq")]
comb <- 1 - (1-statmin[2])*(1-statmax[2])
comb <- unlist(comb)
stats <- rbind(statmin, statmax)
rownames(stats) <- paste(hn, c("at min", "at max"))
stats <- cbind("min/max" = c(p1,p2), stats)
stats <- rbind(stats, "Combined" = c(NA, NA, comb))
vec <- c(p1, p2, hump)
names(vec) <- c("min", "max", hn)
vec <- sort(vec)
isBracketed <- names(vec)[2] == hn
out <- list(isHump = isHump, isBracketed = isBracketed,
hump = vec, family = family(m0), coefficients = stats,
mod = m0)
class(out) <- "MOStest"
out
}
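### Usage sketch (editorial, not part of vegan) with made-up data: a
### Poisson response humped along the gradient x. Guarded from being
### sourced.
if (FALSE) {
    set.seed(1)
    x <- seq(-2, 2, length.out = 50)
    y <- rpois(50, exp(1 - x^2))           # hump at x = 0
    mod <- MOStest(x, y, family = poisson)
    mod$isHump # TRUE: second-degree coefficient is negative
    mod$hump   # location of the hump within the studied interval
}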
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/MOStest.R
|
`RsquareAdj` <-
function(x, ...)
{
UseMethod("RsquareAdj")
}
`RsquareAdj.default` <-
function(x, n, m, ...)
{
r2 <- 1 - (1-x)*(n-1)/(n-m-1)
if (any(na <- m >= n-1))
r2[na] <- NA
r2
}
## Use this with rda() results
`RsquareAdj.rda` <-
function(x, ...)
{
R2 <- x$CCA$tot.chi/x$tot.chi
m <- x$CCA$qrank
n <- nrow(x$CCA$u)
if (is.null(x$pCCA)) {
radj <- RsquareAdj(R2, n, m)
} else {
## Partial model: same adjusted R2 as for component [a] in two
## source varpart model
R2p <- x$pCCA$tot.chi/x$tot.chi
p <- x$pCCA$QR$rank
radj <- RsquareAdj(R2 + R2p, n, m + p) - RsquareAdj(R2p, n, p)
}
list(r.squared = R2, adj.r.squared = radj)
}
## cca result: RsquareAdj is calculated similarly as in
## varpart(). This is similar "semipartial" model as for rda() and
## found as a difference of R2-adj values of combined model with
## constraints + conditions and only conditions.
`RsquareAdj.cca` <-
function (x, permutations = 1000, ...)
{
r2 <- x$CCA$tot.chi / x$tot.chi
if (is.null(x$pCCA)) {
p <- permutest(x, permutations, ...)
radj <- 1 - ((1 - r2) / (1 - mean(p$num / x$tot.chi)))
} else {
p <- getPermuteMatrix(permutations, nobs(x))
Y <- ordiYbar(x, "initial")
r2tot <- (x$pCCA$tot.chi + x$CCA$tot.chi) / x$tot.chi
r2null <- mean(sapply(seq_len(nrow(p)), function(i)
sum(qr.fitted(x$CCA$QR, Y[p[i,],])^2)))
r2tot <- 1 - ((1-r2tot)/(1-r2null/x$tot.chi))
r2p <- x$pCCA$tot.chi / x$tot.chi
r2null <- mean(sapply(seq_len(nrow(p)), function(i)
sum(qr.fitted(x$pCCA$QR, Y[p[i,],])^2)))
r2p <- 1 - ((1-r2p)/(1-r2null/x$tot.chi))
radj <- r2tot - r2p
}
list(r.squared = r2, adj.r.squared = radj)
}
## Linear model: take the result from the summary
RsquareAdj.lm <-
function(x, ...)
{
summary(x)[c("r.squared", "adj.r.squared")]
}
## Generalized linear model: R2-adj only with Gaussian model
RsquareAdj.glm <-
function(x, ...)
{
if (family(x)$family == "gaussian")
summary.lm(x)[c("r.squared", "adj.r.squared")]
else
NA
}
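### Usage sketch (editorial, not part of vegan): the default method
### takes a raw R2 with the number of observations (n) and predictors
### (m); the rda method extracts these from a fitted model. Assumes
### the varespec/varechem data; guarded from being sourced.
if (FALSE) {
    RsquareAdj(0.8, n = 20, m = 5)
    data(varespec, varechem)
    RsquareAdj(rda(varespec ~ Al + P + K, varechem))
}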
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/RsquareAdj.R
|
SSarrhenius <-
selfStart(~ k*area^z,
function(mCall, data, LHS, ...)
{
xy <- sortedXyData(mCall[["area"]], LHS, data)
value <- as.vector(coef(lm(log(pmax(xy[,"y"],1)) ~ log(xy[,"x"]))))
value[1] <- exp(value[1])
names(value) <- mCall[c("k","z")]
value
},
c("k","z"))
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/SSarrhenius.R
|
SSgitay <-
selfStart(~ (k + slope*log(area))^2,
function(mCall, data, LHS, ...)
{
xy <- sortedXyData(mCall[["area"]], LHS, data)
value <- as.vector(coef(lm(sqrt(xy[,"y"]) ~ log(xy[,"x"]))))
names(value) <- mCall[c("k","slope")]
value
},
c("k","slope"))
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/SSgitay.R
|
SSgleason <-
selfStart(~ k + slope*log(area),
function(mCall, data, LHS, ...)
{
## Gleason is a linear model: starting values are final ones
xy <- sortedXyData(mCall[["area"]], LHS, data)
value <- as.vector(coef(lm(xy[,"y"] ~ log(xy[,"x"]))))
names(value) <- mCall[c("k","slope")]
value
},
c("k","slope"))
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/SSgleason.R
|
SSlomolino <-
selfStart(~ Asym/(1 + slope^log(xmid/area)),
function(mCall, data, LHS, ...)
{
xy <- sortedXyData(mCall[["area"]], LHS, data)
## approximate with Arrhenius model on log-log
.p <- coef(lm(log(xy[["y"]]) ~ log(xy[["x"]])))
## Asym is value at max(x) but > max(y) and xmid is x which gives
## Asym/2
.Smax <- max(xy[["y"]])*1.1
.S <- exp(.p[1] + log(max(xy[["x"]])) * (.p[2]))
.S <- max(.S, .Smax)
.xmid <- exp((log(.S/2) - .p[1])/.p[2])
## approximate slope for log(Asym/y - 1) ~ log(xmid/x) + 0
.y <- log(.S/xy[["y"]] - 1)
.z <- log(.xmid/xy[["x"]])
.b <- coef(lm(.y ~ .z - 1))
value <- c(.S, .xmid, exp(.b))
names(value) <- mCall[c("Asym","xmid", "slope")]
value
},
c("Asym","xmid","slope"))
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/SSlomolino.R
|
`TukeyHSD.betadisper` <- function(x, which = "group", ordered = FALSE,
conf.level = 0.95, ...) {
df <- data.frame(distances = x$distances, group = x$group)
mod.aov <- aov(distances ~ group, data = df)
TukeyHSD(mod.aov, which = which, ordered = ordered,
conf.level = conf.level, ...)
}
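### Usage sketch (editorial, not part of vegan), following the
### betadisper() example data: pairwise comparisons of group
### dispersions. Guarded from being sourced.
if (FALSE) {
    data(varespec)
    d <- vegdist(varespec)
    groups <- factor(c(rep(1, 16), rep(2, 8)),
                     labels = c("grazed", "ungrazed"))
    mod <- betadisper(d, groups)
    TukeyHSD(mod)
}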
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/TukeyHSD.betadisper.R
|
`add1.cca`<-
function(object, scope, test = c("none", "permutation"),
permutations = how(nperm = 199), ...)
{
if (inherits(object, "prc"))
stop("'step'/'add1' cannot be used for 'prc' objects")
if (is.null(object$terms))
stop("ordination model must be fitted using formula")
test <- match.arg(test)
## Default add1
out <- NextMethod("add1", object, test = "none", ...)
cl <- class(out)
## Loop over terms in 'scope' and do anova.cca
if (test == "permutation") {
## Avoid nested Condition(Condition(x) + z)
hasfla <- update(terms(object$terminfo), . ~ Condition(.))
if (!is.character(scope))
scope <- add.scope(object, update.formula(object, scope))
ns <- length(scope)
adds <- matrix(0, ns+1, 2)
adds[1, ] <- NA
for (i in 1:ns) {
tt <- scope[i]
## Condition(.) previous terms (if present)
if (!is.null(object$CCA)) {
fla <- update(hasfla, paste("~ . +", tt))
nfit <- update(object, fla)
}
else
nfit <- update(object,
as.formula(paste(". ~ . +", tt)))
tmp <- anova(nfit, permutations = permutations, ...)
adds[i+1,] <- unlist(tmp[1,3:4])
}
colnames(adds) <- colnames(tmp)[3:4]
out <- cbind(out, adds)
## check for redundant (0 Df) terms
if (any(nas <- out[,1] < 1, na.rm = TRUE)) {
out[[3]][nas] <- NA
out[[4]][nas] <- NA
}
class(out) <- cl
}
out
}
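### Usage sketch (editorial, not part of vegan): start from the null
### model and permutation-test candidate terms. Assumes the dune data;
### guarded from being sourced.
if (FALSE) {
    data(dune, dune.env)
    mod0 <- cca(dune ~ 1, dune.env)
    add1(mod0, scope = ~ Moisture + Management + A1,
         test = "permutation")
}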
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/add1.cca.R
|
adipart <-
function (...)
{
UseMethod("adipart")
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/adipart.R
|
`adipart.default` <-
function(y, x, index=c("richness", "shannon", "simpson"),
weights=c("unif", "prop"), relative = FALSE, nsimul=99,
method = "r2dtable", ...)
{
## evaluate formula
lhs <- as.matrix(y)
if (missing(x))
        x <- cbind(level_1=seq_len(nrow(lhs)),
                   level_2=rep(1, nrow(lhs)))
rhs <- data.frame(x)
rhs[] <- lapply(rhs, as.factor)
rhs[] <- lapply(rhs, droplevels, exclude = NA)
nlevs <- ncol(rhs)
if (nlevs < 2)
stop("provide at least two-level hierarchy")
if (any(rowSums(lhs) == 0))
stop("data matrix contains empty rows")
if (any(lhs < 0))
stop("data matrix contains negative entries")
if (is.null(colnames(rhs)))
colnames(rhs) <- paste("level", 1:nlevs, sep="_")
tlab <- colnames(rhs)
## check proper design of the model frame
l1 <- sapply(rhs, function(z) length(unique(z)))
    if (!any(sapply(2:nlevs, function(z) l1[z] <= l1[z-1])))
        stop("number of levels is inappropriate, check sequence")
rval <- list()
rval[[1]] <- rhs[,nlevs]
nCol <- nlevs - 1
for (i in 2:nlevs) {
rval[[i]] <- interaction(rhs[,nCol], rval[[(i-1)]], drop=TRUE)
nCol <- nCol - 1
}
rval <- as.data.frame(rval[rev(seq_along(rval))])
l2 <- sapply(rval, function(z) length(unique(z)))
if (any(l1 != l2))
stop("levels are not perfectly nested")
## aggregate response matrix
    fullgamma <- nlevels(rhs[, nlevs]) == 1
ftmp <- vector("list", nlevs)
for (i in seq_len(nlevs)) {
ftmp[[i]] <- as.formula(paste("~", tlab[i], "- 1"))
}
## is there burnin/thin in ... ?
burnin <- if (is.null(list(...)$burnin))
0 else list(...)$burnin
thin <- if (is.null(list(...)$thin))
1 else list(...)$thin
base <- if (is.null(list(...)$base))
exp(1) else list(...)$base
## evaluate other arguments
index <- match.arg(index)
weights <- match.arg(weights)
switch(index,
"richness" = {
divfun <- function(x) rowSums(x > 0)},
"shannon" = {
divfun <- function(x) diversity(x, index = "shannon", MARGIN = 1, base=base)},
"simpson" = {
divfun <- function(x) diversity(x, index = "simpson", MARGIN = 1)})
## this is the function passed to oecosimu
wdivfun <- function(x) {
## matrix sum *can* change in oecosimu (but default is constant sumMatr)
sumMatr <- sum(x)
if (fullgamma) {
tmp <- lapply(seq_len(nlevs-1), function(i) t(model.matrix(ftmp[[i]], rhs)) %*% x)
tmp[[nlevs]] <- matrix(colSums(x), nrow = 1, ncol = ncol(x))
} else {
tmp <- lapply(seq_len(nlevs), function(i) t(model.matrix(ftmp[[i]], rhs)) %*% x)
}
## weights will change in oecosimu thus need to be recalculated
if (weights == "prop")
wt <- lapply(seq_len(nlevs), function(i) apply(tmp[[i]], 1, function(z) sum(z) / sumMatr))
else wt <- lapply(seq_len(nlevs), function(i) rep(1 / NROW(tmp[[i]]), NROW(tmp[[i]])))
a <- sapply(seq_len(nlevs), function(i) sum(divfun(tmp[[i]]) * wt[[i]]))
if (relative)
a <- a / a[length(a)]
b <- sapply(2:nlevs, function(i) a[i] - a[(i-1)])
c(a, b)
}
if (nsimul > 0) {
sim <- oecosimu(lhs, wdivfun, method = method, nsimul=nsimul,
burnin=burnin, thin=thin)
} else {
sim <- wdivfun(lhs)
tmp <- rep(NA, length(sim))
sim <- list(statistic = sim,
oecosimu = list(z = tmp, pval = tmp, method = NA, statistic = sim))
}
nam <- c(paste("alpha", seq_len(nlevs-1), sep="."), "gamma",
paste("beta", seq_len(nlevs-1), sep="."))
names(sim$statistic) <- attr(sim$oecosimu$statistic, "names") <- nam
call <- match.call()
call[[1]] <- as.name("adipart")
attr(sim, "call") <- call
attr(sim$oecosimu$simulated, "index") <- index
attr(sim$oecosimu$simulated, "weights") <- weights
attr(sim, "n.levels") <- nlevs
attr(sim, "terms") <- tlab
attr(sim, "model") <- rhs
class(sim) <- c("adipart", class(sim))
sim
}
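### Usage sketch (editorial, not part of vegan): with 'x' missing the
### default method builds a two-level hierarchy (sites within the
### pooled data). Assumes the mite data; a small nsimul keeps the
### sketch fast. Guarded from being sourced.
if (FALSE) {
    data(mite)
    adipart(mite, index = "richness", nsimul = 19)
}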
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/adipart.default.R
|
`adipart.formula` <-
function(formula, data, index=c("richness", "shannon", "simpson"),
weights=c("unif", "prop"), relative = FALSE, nsimul=99,
method = "r2dtable", ...)
{
## evaluate formula
if (missing(data))
data <- parent.frame()
tmp <- hierParseFormula(formula, data)
## run simulations
sim <- adipart.default(tmp$lhs, tmp$rhs, index = index, weights = weights,
relative = relative, nsimul = nsimul,
method = method, ...)
call <- match.call()
call[[1]] <- as.name("adipart")
attr(sim, "call") <- call
sim
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/adipart.formula.R
|
### this is the original adonis function from vegan 2.5-7. It will be
### deprecated in favour of adonis2.
`adonis` <-
function(formula, data=NULL, permutations=999, method="bray", strata=NULL,
contr.unordered="contr.sum", contr.ordered="contr.poly",
parallel = getOption("mc.cores"), ...)
{
    ## nobody seems to believe that adonis is deprecated: give up
## .Deprecated("adonis2", package="vegan")
message("'adonis' will be deprecated: use 'adonis2' instead")
EPS <- sqrt(.Machine$double.eps) ## use with >= in permutation P-values
## formula is model formula such as Y ~ A + B*C where Y is a data
## frame or a matrix, and A, B, and C may be factors or continuous
## variables. data is the data frame from which A, B, and C would
## be drawn.
TOL <- 1e-7
Terms <- terms(formula, data = data)
lhs <- formula[[2]]
lhs <- eval(lhs, data, parent.frame()) # to force evaluation
formula[[2]] <- NULL # to remove the lhs
rhs.frame <- model.frame(formula, data, drop.unused.levels = TRUE) # to get the data frame of rhs
op.c <- options()$contrasts
options( contrasts=c(contr.unordered, contr.ordered) )
rhs <- model.matrix(formula, rhs.frame) # and finally the model.matrix
options(contrasts=op.c)
grps <- attr(rhs, "assign")
qrhs <- qr(rhs)
## Take care of aliased variables and pivoting in rhs
rhs <- rhs[, qrhs$pivot, drop=FALSE]
rhs <- rhs[, 1:qrhs$rank, drop=FALSE]
grps <- grps[qrhs$pivot][1:qrhs$rank]
u.grps <- unique(grps)
nterms <- length(u.grps) - 1
if (nterms < 1)
stop("right-hand-side of formula has no usable terms")
H.s <- lapply(2:length(u.grps),
function(j) {Xj <- rhs[, grps %in% u.grps[1:j] ]
qrX <- qr(Xj, tol=TOL)
Q <- qr.Q(qrX)
tcrossprod(Q[,1:qrX$rank])
})
if (inherits(lhs, "dist")) {
if (any(lhs < -TOL))
stop("dissimilarities must be non-negative")
dmat <- as.matrix(lhs^2)
} else if ((is.matrix(lhs) || is.data.frame(lhs)) &&
isSymmetric(unname(as.matrix(lhs)))) {
dmat <- as.matrix(lhs^2)
lhs <- as.dist(lhs) # crazy: need not to calculate beta.sites
} else {
dist.lhs <- as.matrix(vegdist(lhs, method=method, ...))
dmat <- dist.lhs^2
}
n <- nrow(dmat)
## G is -dmat/2 centred by rows
G <- -sweep(dmat, 1, rowMeans(dmat))/2
SS.Exp.comb <- sapply(H.s, function(hat) sum( G * t(hat)) )
SS.Exp.each <- c(SS.Exp.comb - c(0,SS.Exp.comb[-nterms]) )
H.snterm <- H.s[[nterms]]
## t(I - H.snterm) is needed several times and we calculate it
## here
tIH.snterm <- t(diag(n)-H.snterm)
if (length(H.s) > 1)
for (i in length(H.s):2)
H.s[[i]] <- H.s[[i]] - H.s[[i-1]]
SS.Res <- sum( G * tIH.snterm)
df.Exp <- sapply(u.grps[-1], function(i) sum(grps==i) )
df.Res <- n - qrhs$rank
## Get coefficients both for the species (if possible) and sites
if (inherits(lhs, "dist")) {
beta.sites <- qr.coef(qrhs, as.matrix(lhs))
beta.spp <- NULL
} else {
beta.sites <- qr.coef(qrhs, dist.lhs)
beta.spp <- qr.coef(qrhs, as.matrix(lhs))
}
colnames(beta.spp) <- colnames(lhs)
colnames(beta.sites) <- rownames(lhs)
F.Mod <- (SS.Exp.each/df.Exp) / (SS.Res/df.Res)
f.test <- function(tH, G, df.Exp, df.Res, tIH.snterm) {
## HERE I TRY CHANGING t(H) TO tH, and
## t(I - H.snterm) to tIH.snterm, so that we don't have
## to do those calculations for EACH iteration.
## This is the function we have to do for EACH permutation.
## G is an n x n centered distance matrix
## H is the hat matrix from the design (X)
## note that for R, * is element-wise multiplication,
## whereas %*% is matrix multiplication.
(sum(G * tH)/df.Exp) /
(sum(G * tIH.snterm)/df.Res) }
### Old f.test
### f.test <- function(H, G, I, df.Exp, df.Res, H.snterm){
## (sum( G * t(H) )/df.Exp) /
## (sum( G * t(I-H.snterm) )/df.Res) }
SS.perms <- function(H, G, I){
c(SS.Exp.p = sum( G * t(H) ),
S.Res.p=sum( G * t(I-H) )
) }
## Permutations
p <- getPermuteMatrix(permutations, n, strata = strata)
permutations <- nrow(p)
if (permutations) {
tH.s <- lapply(H.s, t)
## Apply permutations for each term
## This is the new f.test (2011-06-15) that uses fewer arguments
## Set first parallel processing for all terms
if (is.null(parallel))
parallel <- 1
hasClus <- inherits(parallel, "cluster")
isParal <- hasClus || parallel > 1
isMulticore <- .Platform$OS.type == "unix" && !hasClus
if (isParal && !isMulticore && !hasClus) {
parallel <- makeCluster(parallel)
}
if (isParal) {
if (isMulticore) {
f.perms <-
sapply(1:nterms, function(i)
unlist(mclapply(1:permutations, function(j)
f.test(tH.s[[i]], G[p[j,], p[j,]],
df.Exp[i], df.Res, tIH.snterm),
mc.cores = parallel)))
} else {
f.perms <-
sapply(1:nterms, function(i)
parSapply(parallel, 1:permutations, function(j)
f.test(tH.s[[i]], G[p[j,], p[j,]],
df.Exp[i], df.Res, tIH.snterm)))
}
} else {
f.perms <-
sapply(1:nterms, function(i)
sapply(1:permutations, function(j)
f.test(tH.s[[i]], G[p[j,], p[j,]],
df.Exp[i], df.Res, tIH.snterm)))
}
## Close socket cluster if created here
if (isParal && !isMulticore && !hasClus)
stopCluster(parallel)
P <- (rowSums(t(f.perms) >= F.Mod - EPS)+1)/(permutations+1)
} else { # no permutations
f.perms <- P <- rep(NA, nterms)
}
SumsOfSqs = c(SS.Exp.each, SS.Res, sum(SS.Exp.each) + SS.Res)
tab <- data.frame(Df = c(df.Exp, df.Res, n-1),
SumsOfSqs = SumsOfSqs,
MeanSqs = c(SS.Exp.each/df.Exp, SS.Res/df.Res, NA),
F.Model = c(F.Mod, NA,NA),
R2 = SumsOfSqs/SumsOfSqs[length(SumsOfSqs)],
P = c(P, NA, NA))
rownames(tab) <- c(attr(attr(rhs.frame, "terms"), "term.labels")[u.grps],
"Residuals", "Total")
colnames(tab)[ncol(tab)] <- "Pr(>F)"
attr(tab, "heading") <- c(howHead(attr(p, "control")),
"Terms added sequentially (first to last)\n")
class(tab) <- c("anova", class(tab))
out <- list(aov.tab = tab, call = match.call(),
coefficients = beta.spp, coef.sites = beta.sites,
f.perms = f.perms, model.matrix = rhs, terms = Terms)
class(out) <- "adonis"
out
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/adonis-deprecated.R
|
`adonis2` <-
function(formula, data, permutations = 999, method = "bray",
sqrt.dist = FALSE, add = FALSE, by = "terms",
parallel = getOption("mc.cores"), na.action = na.fail,
strata = NULL, ...)
{
## handle missing data
if (missing(data))
data <- model.frame(delete.response(terms(formula)),
na.action = na.action)
## we accept only by = "terms", "margin" or NULL
if (!is.null(by))
by <- match.arg(by, c("terms", "margin", "onedf"))
## evaluate lhs
YVAR <- formula[[2]]
lhs <- eval(YVAR, environment(formula), globalenv())
environment(formula) <- environment()
## Take care that input lhs are dissimilarities
if ((is.matrix(lhs) || is.data.frame(lhs)) &&
isSymmetric(unname(as.matrix(lhs))))
lhs <- as.dist(lhs)
if (!inherits(lhs, "dist"))
lhs <- vegdist(as.matrix(lhs), method=method, ...)
## adjust distances if requested
if (sqrt.dist)
lhs <- sqrt(lhs)
if (is.logical(add) && isTRUE(add))
add <- "lingoes"
if (is.character(add)) {
add <- match.arg(add, c("lingoes", "cailliez"))
if (add == "lingoes") {
ac <- addLingoes(as.matrix(lhs))
lhs <- sqrt(lhs^2 + 2 * ac)
}
else if (add == "cailliez") {
ac <- addCailliez(as.matrix(lhs))
lhs <- lhs + ac
}
}
## adonis0 & anova.cca should see only dissimilarities (lhs)
if (!missing(data)) # expand and check terms
formula <- terms(formula, data=data)
if (is.null(attr(data, "terms"))) # not yet a model.frame?
data <- model.frame(delete.response(terms(formula)), data,
na.action = na.action)
formula <- update(formula, lhs ~ .)
sol <- adonis0(formula, data = data, method = method)
## handle permutations
perm <- getPermuteMatrix(permutations, NROW(data), strata = strata)
out <- anova(sol, permutations = perm, by = by,
parallel = parallel)
## attributes will be lost when adding a new column
att <- attributes(out)
## add traditional adonis output on R2
out <- rbind(out, "Total" = c(nobs(sol)-1, sol$tot.chi, NA, NA))
out <- cbind(out[,1:2], "R2" = out[,2]/sol$tot.chi, out[,3:4])
## Fix output header to show the adonis2() call instead of adonis0()
att$heading[2] <- deparse(match.call(), width.cutoff = 500L)
att$names <- names(out)
att$row.names <- rownames(out)
attributes(out) <- att
out
}
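### Usage sketch (editorial, not part of vegan), mirroring the
### adonis2() documentation example with the dune data. Guarded from
### being sourced.
if (FALSE) {
    data(dune, dune.env)
    adonis2(dune ~ Management * A1, data = dune.env, by = "terms")
}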
`adonis0` <-
function(formula, data=NULL, method="bray")
{
## First we collect info for the uppermost level of the analysed
## object
Trms <- terms(data)
sol <- list(call = match.call(),
method = "adonis",
terms = Trms,
terminfo = list(terms = Trms))
sol$call$formula <- formula(Trms)
TOL <- 1e-7
lhs <- formula[[2]]
lhs <- eval(lhs, environment(formula)) # to force evaluation
formula[[2]] <- NULL # to remove the lhs
rhs <- model.matrix(formula, data) # and finally the model.matrix
assign <- attr(rhs, "assign") ## assign attribute
sol$terminfo$assign <- assign[assign > 0]
rhs <- rhs[,-1, drop=FALSE] # remove the (Intercept) to get rank right
rhs <- scale(rhs, scale = FALSE, center = TRUE) # center
qrhs <- qr(rhs)
## input lhs should always be dissimilarities
if (!inherits(lhs, "dist"))
stop("internal error: contact developers")
if (any(lhs < -TOL))
stop("dissimilarities must be non-negative")
## if there was an na.action for rhs, we must remove the same rows
## and columns from the lhs (initDBRDA later will work similarly
## for distances and matrices of distances).
if (!is.null(nas <- na.action(data))) {
lhs <- as.matrix(lhs)[-nas,-nas, drop=FALSE]
n <- nrow(lhs)
} else
n <- attr(lhs, "Size")
## G is -dmat/2 centred
G <- initDBRDA(lhs)
## preliminaries are over: start working
Gfit <- qr.fitted(qrhs, G)
Gres <- qr.resid(qrhs, G)
## collect data for the fit
if(!is.null(qrhs$rank) && qrhs$rank > 0)
CCA <- list(rank = qrhs$rank,
qrank = qrhs$rank,
tot.chi = sum(diag(Gfit)),
QR = qrhs)
else
CCA <- NULL # empty model
## collect data for the residuals
CA <- list(rank = n - max(qrhs$rank, 0) - 1,
u = matrix(0, nrow=n),
tot.chi = sum(diag(Gres)))
## all together
sol$tot.chi <- sum(diag(G))
sol$adjust <- 1
sol$Ybar <- G
sol$CCA <- CCA
sol$CA <- CA
class(sol) <- c("adonis2", "dbrda", "rda", "cca")
sol
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/adonis.R
|
`alias.cca` <-
function (object, names.only = FALSE, ...)
{
if (is.null(object$CCA))
stop("no constrained component, 'alias' cannot be applied")
if (is.null(object$CCA$alias))
stop("no aliased terms")
if (names.only)
return(object$CCA$alias)
CompPatt <- function(x, ...) {
x[abs(x) < 1e-06] <- 0
class(x) <- "mtable"
x[abs(x) < 1e-06] <- NA
x
}
Model <- object$terms
attributes(Model) <- NULL
value <- list(Model = Model)
R <- object$CCA$QR$qr
R <- R[1:min(dim(R)), , drop = FALSE]
R[lower.tri(R)] <- 0
d <- dim(R)
rank <- object$CCA$QR$rank
p <- d[2]
value$Complete <- if (is.null(p) || rank == p)
NULL
else {
p1 <- 1:rank
X <- R[p1, p1]
Y <- R[p1, -p1, drop = FALSE]
beta12 <- as.matrix(qr.coef(qr(X), Y))
CompPatt(t(beta12))
}
class(value) <- "listof"
value
}
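### Usage sketch (editorial, not part of vegan) with a made-up,
### perfectly collinear constraint: 'b' should be reported as aliased.
### Assumes the varespec data (24 sites); guarded from being sourced.
if (FALSE) {
    data(varespec)
    df <- data.frame(a = seq_len(24), b = 2 * seq_len(24))
    m <- rda(varespec ~ a + b, df)
    alias(m, names.only = TRUE)
}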
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/alias.cca.R
|
`anosim` <-
function (x, grouping, permutations = 999,
distance = "bray", strata = NULL, parallel = getOption("mc.cores"))
{
EPS <- sqrt(.Machine$double.eps)
if (!inherits(x, "dist")) { # x is not "dist": try to change it
if ((is.matrix(x) || is.data.frame(x)) &&
isSymmetric(unname(as.matrix(x)))) {
x <- as.dist(x)
attr(x, "method") <- "user supplied square matrix"
}
else
x <- vegdist(x, method = distance)
}
if (any(x < -sqrt(.Machine$double.eps)))
warning("some dissimilarities are negative - is this intentional?")
sol <- c(call = match.call())
grouping <- as.factor(grouping)
## check that dims match
if (length(grouping) != attr(x, "Size"))
stop(
gettextf("dissimilarities have %d observations, but grouping has %d",
attr(x, "Size"), length(grouping)))
if (length(levels(grouping)) < 2)
stop("there should be more than one class level")
matched <- function(irow, icol, grouping) {
grouping[irow] == grouping[icol]
}
x.rank <- rank(x)
N <- attr(x, "Size")
div <- length(x)/2
irow <- as.vector(as.dist(row(matrix(nrow = N, ncol = N))))
icol <- as.vector(as.dist(col(matrix(nrow = N, ncol = N))))
within <- matched(irow, icol, grouping)
## check that there is replication
if (!any(within))
stop("there should be replicates within groups")
aver <- tapply(x.rank, within, mean)
statistic <- -diff(aver)/div
cl.vec <- rep("Between", length(x))
take <- as.numeric(irow[within])
cl.vec[within] <- levels(grouping)[grouping[take]]
cl.vec <- factor(cl.vec, levels = c("Between", levels(grouping)))
ptest <- function(take, ...) {
cl.perm <- grouping[take]
tmp.within <- matched(irow, icol, cl.perm)
tmp.ave <- tapply(x.rank, tmp.within, mean)
-diff(tmp.ave)/div
}
permat <- getPermuteMatrix(permutations, N, strata = strata)
if (ncol(permat) != N)
stop(gettextf("'permutations' have %d columns, but data have %d rows",
ncol(permat), N))
permutations <- nrow(permat)
if (permutations) {
## Parallel processing
if (is.null(parallel))
parallel <- 1
hasClus <- inherits(parallel, "cluster")
if (hasClus || parallel > 1) {
if(.Platform$OS.type == "unix" && !hasClus) {
perm <- unlist(mclapply(1:permutations,
function(i, ...)
ptest(permat[i,]),
mc.cores = parallel))
} else {
if (!hasClus) {
parallel <- makeCluster(parallel)
}
perm <- parRapply(parallel, permat, ptest)
if (!hasClus)
stopCluster(parallel)
}
} else {
perm <- sapply(1:permutations, function(i) ptest(permat[i,]))
}
p.val <- (1 + sum(perm >= statistic - EPS))/(1 + permutations)
} else { # no permutations
p.val <- perm <- NA
}
sol$signif <- p.val
sol$perm <- perm
sol$permutations <- permutations
sol$statistic <- as.numeric(statistic)
sol$class.vec <- cl.vec
sol$dis.rank <- x.rank
sol$dissimilarity <- attr(x, "method")
sol$control <- attr(permat, "control")
class(sol) <- "anosim"
sol
}
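### Usage sketch (editorial, not part of vegan), following the
### anosim() documentation example. Guarded from being sourced.
if (FALSE) {
    data(dune, dune.env)
    anosim(vegdist(dune), dune.env$Management, permutations = 999)
}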
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/anosim.R
|
`anova.betadisper` <- function(object, ...)
{
model.dat <- with(object, data.frame(Distances = distances,
Groups = group))
n.grps <- with(model.dat, length(unique(as.numeric(Groups))))
if(n.grps < 2)
stop("anova() only applicable to two or more groups")
anova(lm(Distances ~ Groups, data = model.dat))
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/anova.betadisper.R
|
`anova.cca` <-
function(object, ..., permutations = how(nperm=999), by = NULL,
model = c("reduced", "direct", "full"),
parallel = getOption("mc.cores"), strata = NULL,
cutoff = 1, scope = NULL)
{
EPS <- sqrt(.Machine$double.eps) # for permutation P-values
model <- match.arg(model)
## permutation matrix
N <- nrow(object$CA$u)
permutations <- getPermuteMatrix(permutations, N, strata = strata)
seed <- attr(permutations, "seed")
control <- attr(permutations, "control")
## see if this was a list of ordination objects
dotargs <- list(...)
## we do not want to give dotargs to anova.ccalist, but we
## evaluate 'parallel' and 'model' here
if (length(dotargs)) {
isCCA <- sapply(dotargs, function(z) inherits(z, "cca"))
if (any(isCCA)) {
dotargs <- dotargs[isCCA]
object <- c(list(object), dotargs)
sol <-
anova.ccalist(object,
permutations = permutations,
model = model,
parallel = parallel)
attr(sol, "Random.seed") <- seed
attr(sol, "control") <- control
return(sol)
}
}
## We only have a single model: check if it is empty
if (is.null(object$CA) || is.null(object$CCA) ||
object$CCA$rank == 0 || object$CA$rank == 0)
return(anova.ccanull(object))
## by cases
if (!is.null(by)) {
by <- match.arg(by, c("terms", "margin", "axis", "onedf"))
if (is.null(object$terms))
stop("model must be fitted with formula interface")
sol <- switch(by,
"terms" = anova.ccabyterm(object,
permutations = permutations,
model = model, parallel = parallel),
"margin" = anova.ccabymargin(object,
permutations = permutations,
model = model, parallel = parallel,
scope = scope),
"axis" = anova.ccabyaxis(object,
permutations = permutations,
model = model, parallel = parallel,
cutoff = cutoff),
"onedf" = anova.ccaby1df(object,
permutations = permutations,
model = model, parallel = parallel)
)
attr(sol, "Random.seed") <- seed
attr(sol, "control") <- control
return(sol)
}
## basic overall test: pass other arguments except 'strata'
## because 'permutations' already is a permutationMatrix
tst <- permutest.cca(object, permutations = permutations,
model = model, parallel = parallel, ...)
Fval <- c(tst$F.0, NA)
Pval <- (sum(tst$F.perm >= tst$F.0 - EPS) + 1)/(tst$nperm + 1)
Pval <- c(Pval, NA)
table <- data.frame(tst$df, tst$chi, Fval, Pval)
if (inherits(object, c("capscale", "dbrda")) && object$adjust == 1)
varname <- "SumOfSqs"
else if (inherits(object, "rda"))
varname <- "Variance"
else
varname <- "ChiSquare"
colnames(table) <- c("Df", varname, "F", "Pr(>F)")
head <- paste0("Permutation test for ", tst$method, " under ",
tst$model, " model\n", howHead(control))
mod <- paste("Model:", c(object$call))
structure(table, heading = c(head, mod), Random.seed = seed,
control = control, F.perm = tst$F.perm,
class = c("anova.cca", "anova", "data.frame"))
}
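### Usage sketch (editorial, not part of vegan): overall and
### decomposed permutation tests for a constrained ordination.
### Assumes the varespec/varechem data; guarded from being sourced.
if (FALSE) {
    data(varespec, varechem)
    m <- cca(varespec ~ Al + P + K, varechem)
    anova(m)                # overall test
    anova(m, by = "terms")  # sequential decomposition
    anova(m, by = "margin") # marginal effects
}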
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/anova.cca.R
|
### Implementation of by-cases for anova.cca. These are all internal
### functions that are not intended to be called by users in normal
### sessions, but they should be called from anova.cca. Therefore the
### user interface is rigid and input is not checked. The
### 'permutations' should be a permutation matrix.
### by = "terms" calls directly permutest.cca which decomposes the
### inertia between successive terms within compiled C code.
`anova.ccabyterm` <-
function(object, permutations, model, parallel)
{
## The result
sol <- permutest(object, permutations = permutations,
model = model, by = "terms", parallel = parallel)
## Reformat
EPS <- sqrt(.Machine$double.eps)
Pval <- (colSums(sweep(sol$F.perm, 2, sol$F.0 - EPS, ">=")) + 1) /
(sol$nperm + 1)
out <- data.frame(sol$df, sol$chi, c(sol$F.0, NA), c(Pval, NA))
if (inherits(object, c("capscale", "dbrda")) && object$adjust == 1)
varname <- "SumOfSqs"
else if (inherits(object, "rda"))
varname <- "Variance"
else
varname <- "ChiSquare"
dimnames(out) <- list(c(sol$termlabels, "Residual"),
c("Df", varname, "F", "Pr(>F)"))
head <- paste0("Permutation test for ", object$method, " under ",
model, " model\n",
"Terms added sequentially (first to last)\n",
howHead(attr(permutations, "control")))
mod <- paste("Model:", c(object$call))
attr(out, "heading") <- c(head, mod)
attr(out, "F.perm") <- sol$F.perm
class(out) <- c("anova.cca", "anova","data.frame")
out
}
## by = "margin": we omit each term in turn and compare against the
## complete model. This does not involve partial terms (Conditions) on
## other variables, but the permutations remain similar in "direct"
## and "reduced" (default) models (perhaps this model should not be
## used with "full" models?). This is basically similar decomposition
## as by="term", but compares models without each term in turn against
## the complete model in separate calls to permutest.cca. From vegan
## 2.5-0 this does not update model formula -- this avoids scoping
## issues and makes the function more robust when embedded in other
## functions. Instead, we call ordConstrained with method="pass" with
## modified constraint matrix.
`anova.ccabymargin` <-
function(object, permutations, scope, ...)
{
EPS <- sqrt(.Machine$double.eps)
nperm <- nrow(permutations)
## We need term labels but without Condition() terms
if (!is.null(scope) && is.character(scope))
trms <- scope
else
trms <- drop.scope(object)
## Condition() not considered marginal
alltrms <- intersect(attr(terms(object$terminfo), "term.labels"),
attr(terms(object), "term.labels"))
trmlab <- intersect(alltrms, trms)
if (length(trmlab) == 0)
stop("the scope was empty: no available marginal terms")
## baseline: all terms
big <- permutest(object, permutations, ...)
dfbig <- big$df[2]
chibig <- big$chi[2]
scale <- big$den/dfbig
## Collect all marginal models. This differs from old version
## (vegan 2.0) where other but 'nm' were partialled out within
## Condition(). Now we only fit the model without 'nm' and compare
## the difference against the complete model.
Y <- ordiYbar(object, "init")
X <- model.matrix(object)
## we must have Constraints to get here, but we may also have
## Conditions
if (!is.null(object$pCCA)) {
Z <- X$Conditions
X <- X$Constraints
} else {
Z <- NULL
}
ass <- object$terminfo$assign
if (is.null(ass))
stop("old style result object: update() your model")
    ## analyse only terms of 'ass' that are in scope
scopeterms <- which(alltrms %in% trmlab)
mods <- lapply(scopeterms, function(i, ...)
permutest(ordConstrained(Y, X[, ass != i, drop=FALSE], Z, "pass"),
permutations, ...), ...)
    ## Change in df
Df <- sapply(mods, function(x) x$df[2]) - dfbig
## F of change
Chisq <- sapply(mods, function(x) x$chi[2]) - chibig
Fstat <- (Chisq/Df)/(chibig/dfbig)
## Simulated F-values
Fval <- sapply(mods, function(x) x$num)
## Had we an empty model we need to clone the denominator
if (length(Fval) == 1)
Fval <- matrix(Fval, nrow = nperm)
Fval <- sweep(-Fval, 1, big$num, "+")
Fval <- sweep(Fval, 2, Df, "/")
Fval <- sweep(Fval, 1, scale, "/")
## Simulated P-values
Pval <- (colSums(sweep(Fval, 2, Fstat - EPS, ">=")) + 1)/(nperm + 1)
## Collect results to anova data.frame
out <- data.frame(c(Df, dfbig), c(Chisq, chibig),
c(Fstat, NA), c(Pval, NA))
if (inherits(object, c("capscale", "dbrda")) && object$adjust == 1)
varname <- "SumOfSqs"
else if (inherits(object, "rda"))
varname <- "Variance"
else
varname <- "ChiSquare"
dimnames(out) <- list(c(trmlab, "Residual"),
c("Df", varname, "F", "Pr(>F)"))
head <- paste0("Permutation test for ", object$method, " under ",
big$model, " model\n",
"Marginal effects of terms\n",
howHead(attr(permutations, "control")))
mod <- paste("Model:", c(object$call))
attr(out, "heading") <- c(head, mod)
attr(out, "F.perm") <- Fval
class(out) <- c("anova.cca", "anova", "data.frame")
out
}
### by = "axis" uses partial model: we use the original constraints,
### but add previous axes 1..(k-1) to Conditions when evaluating the
### significance of axis k which is compared against the first
### eigenvalue of the permutations. To avoid scoping issues, this
### calls directly ordConstrained() with modified Conditions (Z) and
### original Constraints (X) instead of updating formula. This
### corresponds to "forward" model in Legendre, Oksanen, ter Braak
### (2011).
### In 2.2-x to 2.4-3 we used "marginal model" where original
### Constraints were replaced with LC scores axes (object$CCA$u), and
### all but axis k were used as Conditions when evaluating the
### significance of axis k. My (J.Oksanen) simulations showed that
### this gave somewhat biased results.
`anova.ccabyaxis` <-
function(object, permutations, model, parallel, cutoff = 1)
{
EPS <- sqrt(.Machine$double.eps)
## On 29/10/15 (983ba7726) we assumed that dbrda(d ~ dbrda(d ~
## x)$CCA$u) is not equal to dbrda(d ~ x) when there are negative
## eigenvalues, but it seems that it is OK if constrained
## eigenvalues are non-negative
if (inherits(object, "dbrda") && any(object$CCA$eig < 0))
stop("by = 'axis' cannot be used when constraints have negative eigenvalues")
nperm <- nrow(permutations)
## Observed F-values and Df
eig <- object$CCA$eig
resdf <- nobs(object) - length(eig) - max(object$pCCA$QR$rank, 0) - 1
Fstat <- eig/object$CA$tot.chi*resdf
Df <- rep(1, length(eig))
## collect header and varname here: 'object' is modified later
if (inherits(object, c("capscale", "dbrda")) && object$adjust == 1)
varname <- "SumOfSqs"
else if (inherits(object, "rda"))
varname <- "Variance"
else
varname <- "ChiSquare"
head <- paste0("Permutation test for ", object$method, " under ",
model, " model\n",
"Forward tests for axes\n",
howHead(attr(permutations, "control")))
head <- c(head, paste("Model:", c(object$call)))
## constraints and model matrices
Y <- object$Ybar
if (is.null(Y))
stop("old style result object: update() your model")
    ## model.matrix() returns a list of Conditions and Constraints
    ## matrices when the model has a partial (pCCA) component
    X <- model.matrix(object)
    if (!is.null(object$pCCA)) {
        Z <- X$Conditions
        X <- X$Constraints
    } else {
        Z <- NULL
    }
LC <- object$CCA$u
Pvals <- rep(NA, ncol(LC))
F.perm <- matrix(ncol = ncol(LC), nrow = nperm)
for (i in seq_along(eig)) {
if (i > 1) {
object <- ordConstrained(Y, X, cbind(Z, LC[, seq_len(i-1)]), "pass")
}
if (length(eig) == i) {
mod <- permutest(object, permutations, model = model,
parallel = parallel)
} else {
mod <- permutest(object, permutations, model = model,
parallel = parallel, first = TRUE)
}
Pvals[i] <- (sum(mod$F.perm >= mod$F.0 - EPS) + 1) / (nperm + 1)
F.perm[ , i] <- mod$F.perm
if (Pvals[i] > cutoff)
break
}
out <- data.frame(c(Df, resdf), c(eig, object$CA$tot.chi),
c(Fstat, NA), c(Pvals,NA))
rownames(out) <- c(names(eig), "Residual")
colnames(out) <- c("Df", varname, "F", "Pr(>F)")
attr(out, "heading") <- head
attr(out, "F.perm") <- F.perm
class(out) <- c("anova.cca", "anova", "data.frame")
out
}
### Wrap permutest.cca(..., by="onedf") in a anova.cca form
`anova.ccaby1df` <-
function(object, permutations, model, parallel)
{
## Compute
sol <- permutest(object, permutations = permutations,
model = model, by = "onedf", parallel = parallel)
## Reformat
EPS <- sqrt(.Machine$double.eps)
Pval <- (colSums(sweep(sol$F.perm, 2, sol$F.0 - EPS, ">=")) + 1) /
(sol$nperm + 1)
out <- data.frame(sol$df, sol$chi, c(sol$F.0, NA), c(Pval, NA))
if (inherits(object, c("capscale", "dbrda")) && object$adjust == 1)
varname <- "SumOfSqs"
else if (inherits(object, "rda"))
varname <- "Variance"
else
varname <- "ChiSquare"
dimnames(out) <- list(c(sol$termlabels, "Residual"),
c("Df", varname, "F", "Pr(>F)"))
head <- paste0("Permutation test for ", object$method, " under ",
model, " model\n",
"Sequential test for contrasts\n",
howHead(attr(permutations, "control")))
mod <- paste("Model:", c(object$call))
attr(out, "heading") <- c(head, mod)
attr(out, "F.perm") <- sol$F.perm
class(out) <- c("anova.cca", "anova","data.frame")
out
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/anova.ccabyterm.R
|
`anova.ccalist` <-
function(object, permutations, model, parallel)
{
EPS <- sqrt(.Machine$double.eps)
## 'object' *must* be a list of cca objects, and 'permutations'
## *must* be a permutation matrix -- we assume that calling
## function takes care of this, and this function is not directly
## called by users.
nmodels <- length(object)
## check that input is valid
## 1. All models must be fitted with the same method
method <- sapply(object, function(z) z$method)
if (!all(method == method[1]))
stop("same ordination method must be used in all models")
else
method <- method[1]
## 2. All models must be fitted with formula interface
if (any(sapply(object, function(x) is.null(x$terms))))
stop("all models must be fitted with formula interface")
## 3. Same response
resp <- sapply(object, function(z) deparse(formula(z)[[2]]))
if (!all(resp == resp[1]))
stop("response must be same in all models")
## 4. Same no. of observations
N <- sapply(object, nobs)
if (!all(N == N[1]))
stop("number of observations must be same in all models")
else
N <- N[1]
## 5. Terms must be nested
trms <- lapply(object, function(z) labels(terms(z)))
o <- order(sapply(trms, length))
for (i in 2:nmodels)
if (!all(trms[[o[i-1]]] %in% trms[[o[i]]]))
stop("models must be nested")
## Check permutation matrix
nperm <- nrow(permutations)
## check
    if (ncol(permutations) != N)
        stop(gettextf("'permutations' have %d columns, but data have %d rows",
                      ncol(permutations), N))
## All models are evaluated in permutest.cca with identical
## permutations so that the differences of single permutations can
## be used to assess the significance of differences of fitted
## models. This strictly requires nested models (not checked
## here): all terms of the smaller model must be included in the
## larger model.
mods <- lapply(object, function(z)
permutest.cca(z, permutations = permutations,
model = model, parallel = parallel))
dfs <- sapply(mods, function(z) z$df)
dev <- sapply(mods, function(z) z$chi)
resdf <- dfs[2,]
df <- -diff(resdf)
resdev <- dev[2,]
changedev <- -diff(resdev)
big <- which.min(resdf)
scale <- resdev[big]/resdf[big]
fval <- changedev/df/scale
## Collect permutation results: denominator of F varies in each
## permutation.
pscale <- mods[[big]]$den/resdf[big]
## Numerator of F
pfvals <- sapply(mods, function(z) z$num)
if (is.list(pfvals))
pfvals <- do.call(cbind, pfvals)
pfvals <- apply(pfvals, 1, diff)
## dropped to vector?
if (!is.matrix(pfvals))
pfvals <- matrix(pfvals, nrow = 1, ncol = nperm)
pfvals <- sweep(pfvals, 1, df, "/")
pfvals <- sweep(pfvals, 2, pscale, "/")
pval <- rowSums(sweep(pfvals, 1, fval - EPS, ">="))
pval <- (pval + 1)/(nperm + 1)
## collect table
table <- data.frame(resdf, resdev, c(NA, df),
c(NA,changedev), c(NA,fval), c(NA,pval))
if (inherits(object, c("capscale", "dbrda")) && object$adjust == 1)
varname <- "SumOfSqs"
else if (inherits(object, "rda"))
varname <- "Variance"
else
varname <- "ChiSquare"
dimnames(table) <- list(1L:nmodels,
c("ResDf", paste0("Res", varname), "Df",
varname, "F", "Pr(>F)"))
## Collect header information
formulae <- sapply(object,
function(z) deparse(formula(z), width.cutoff = 500))
head <- paste0("Permutation tests for ", method, " under ",
mods[[big]]$model, " model\n",
howHead(attr(permutations, "control")))
topnote <- paste("Model ", format(1L:nmodels), ": ", formulae,
sep = "", collapse = "\n")
structure(table, heading = c(head,topnote),
F.perm = t(pfvals),
class = c("anova.cca", "anova", "data.frame"))
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/anova.ccalist.R
|
### anova.cca cannot be performed if residuals or constraints are
### NULL, and this function handles these cases (but it doesn't test
### that these are the cases).
`anova.ccanull` <-
function(object, ...)
{
table <- matrix(0, nrow = 2, ncol = 4)
if (object$CA$rank == 0) {
table[1,] <- c(object$CCA$qrank, object$CCA$tot.chi, NA, NA)
table[2,] <- c(0,0,NA,NA)
}
else {
table[1,] <- c(0,0,0,NA)
table[2,] <- c(nrow(object$CA$u) - 1, object$CA$tot.chi, NA, NA)
}
rownames(table) <- c("Model", "Residual")
if (inherits(object, c("capscale", "dbrda")) && object$adjust == 1)
varname <- "SumOfSqs"
else if (inherits(object, "rda"))
varname <- "Variance"
else
varname <- "ChiSquare"
colnames(table) <- c("Df", varname, "F", "Pr(>F)")
table <- as.data.frame(table)
if (object$CA$rank == 0)
head <- "No residual component\n"
else if (is.null(object$CCA) || object$CCA$rank == 0)
head <- "No constrained component\n"
else
head <- c("!!!!! ERROR !!!!!\n")
head <- c(head, paste("Model:", c(object$call)))
if (exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE))
seed <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE)
else
seed <- NULL
structure(table, heading = head, Random.seed = seed,
class = c("anova.cca", "anova", "data.frame"))
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/anova.ccanull.R
|
`anova.prc` <-
function(object, ...)
{
## if user specified 'by', cast prc() to an rda() and call anova
## on its result
extras <- match.call(expand.dots = FALSE)
if ("by" %in% names(extras$...)) {
Y <- as.character(object$call$response)
X <- as.character(object$call$treatment)
Z <- as.character(object$call$time)
fla <- paste(Y, "~", X, "*", Z, "+ Condition(", Z, ")")
fla <- as.formula(fla)
## get extras
m <- match(c("data", "scale", "subset", "na.action"),
names(object$call), 0)
call <- object$call[c(1,m)]
call$formula <- fla
call[[1]] <- as.name("rda")
object <- eval(call, parent.frame())
anova(object, ...)
} else {
NextMethod("anova", object, ...)
}
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/anova.prc.R
|
`as.fisher` <-
function (x, ...)
{
if (inherits(x, "fisher"))
return(x)
## is not fisher but a 1 x n data.frame or matrix: matrix is faster
x <- as.matrix(x)
if (!identical(all.equal(x, round(x)), TRUE))
stop("function accepts only integers (counts)")
freq <- x[x > 0]
freq <- table(freq, deparse.level = 0)
nm <- names(freq)
freq <- as.vector(freq)
names(freq) <- nm
class(freq) <- "fisher"
freq
}
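### Usage sketch (editorial, not part of vegan): Fisher's frequency
### classes of pooled counts. Assumes the BCI data; guarded from being
### sourced.
if (FALSE) {
    data(BCI)
    as.fisher(colSums(BCI))
}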
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/as.fisher.R
|
### Casts a vegan spantree object into single linkage dendrogram of
### class hclust. The non-trivial items in "hclust" object are a
### 'merge' matrix for fusions of points and/or clusters, a 'height'
### vector which gives the heights of each fusion, and an 'order'
### vector that gives the order of leaves in the plotted
### dendrogram. The 'height's are only sorted spantree segment
### distances, but for 'merge' we need to establish cluster
### memberships, and for 'order' we must traverse the tree.
`as.hclust.spantree` <-
function(x, ...)
{
## Order by the lengths of spanning tree links
o <- order(x$dist)
npoints <- x$n
if(npoints < 2)
stop("needs at least two points")
## Ordered indices of dads and kids
dad <- (2:npoints)[o]
kid <- x$kid[o]
## merge matrix of hclust has negative index when a single point
## is added to a tree and a positive index when a group is joined
## to a tree, and the group is numbered by the level it was
## formed.
labs <- -seq_len(npoints)
merge <- matrix(0, nrow=npoints-1, ncol=2)
for(i in 1:nrow(merge)) {
merge[i, ] <- c(labs[dad[i]], labs[kid[i]])
## update labs for the current group and its kids
labs[labs %in% labs[c(dad[i], kid[i])]] <- i
}
order <- hclustMergeOrder(merge)
out <- list(merge = merge, height = x$dist[o], order = order,
labels = x$labels, method = "spantree", call =
match.call())
class(out) <- "hclust"
out
}
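### Usage sketch (editorial, not part of vegan): cast a minimum
### spanning tree to a dendrogram. Assumes the dune data; guarded from
### being sourced.
if (FALSE) {
    data(dune)
    tr <- spantree(vegdist(dune))
    plot(as.hclust(tr)) # single linkage dendrogram of the tree
}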
### Internal vegan function to get the 'order' from a merge matrix of
### an hclust tree
`hclustMergeOrder` <-
function(merge)
{
## Get order of leaves with recursive search from the root
order <- numeric(nrow(merge)+1)
ind <- 0
## "<<-" updates data only within hclustMergeOrder, but outside
## the visit() function.
visit <- function(i, j) {
if (merge[i,j] < 0) {
ind <<- ind+1
order[ind] <<- -merge[i,j]
} else {
visit(merge[i,j], 1)
visit(merge[i,j], 2)
}
}
visit(nrow(merge), 1)
visit(nrow(merge), 2)
return(order)
}
### Reorder an hclust tree. Basic R provides reorder.dendrogram, but
### this function works with 'hclust' objects, and also differs in
### implementation. We use either weighted mean, min or max or
### sum. The dendrogram is always ordered in ascending order, so that
### with max the left kid always has lower value. So with 'max' the
### largest value is smaller in leftmost group. The choice 'sum'
### hardly makes sense, but it is the default in
### reorder.dendrogram. The ordering with 'mean' differs from
### reorder.dendrogram which uses unweighted means, but here we weight
### means by group sizes so that the mean of an internal node is the
### mean of its leaves.
`reorder.hclust` <-
function(x, wts,
agglo.FUN = c("mean", "min", "max", "sum", "uwmean"),
...)
{
agglo.FUN <- match.arg(agglo.FUN)
merge <- x$merge
nlev <- nrow(merge)
stats <- numeric(nlev)
counts <- numeric(nlev)
pair <- numeric(2)
pairw <- numeric(2)
## Go through merge, order each level and update the statistic.
for(i in 1:nlev) {
for(j in 1:2) {
if (merge[i,j] < 0) {
pair[j] <- wts[-merge[i,j]]
pairw[j] <- 1
} else {
pair[j] <- stats[merge[i,j]]
pairw[j] <- counts[merge[i,j]]
}
}
## reorder
merge[i,] <- merge[i, order(pair)]
## statistic for this merge level
stats[i] <-
switch(agglo.FUN,
"mean" = weighted.mean(pair, pairw),
"min" = min(pair),
"max" = max(pair),
"sum" = sum(pair),
"uwmean" = mean(pair))
counts[i] <- sum(pairw)
}
## Get the 'order' of the reordered dendrogram
order <- hclustMergeOrder(merge)
x$merge <- merge
x$order <- order
x$value <- stats
x
}
### Trivial function to reverse the order of an hclust tree (why this
### is not in base R?)
`rev.hclust` <-
function(x)
{
x$order <- rev(x$order)
x
}
### Get coordinates for internal or terminal nodes (leaves) that would
### be used in plot.hclust
`scores.hclust` <-
function(x, display = "internal", ...)
{
extnam <- c("leaves", "terminal")
intnam <- c("internal")
display <- match.arg(display, c(extnam, intnam))
## Terminal nodes (leaves): plot.hclust scales x-axis for n points
## as 1..n. The y-value is the 'height' where the terminal node
## was fused to the tree.
if(display %in% extnam) {
merge <- x$merge
y <- numeric(nrow(merge) + 1)
for(i in 1:nrow(merge))
for(j in 1:2)
if(merge[i,j] < 0)
y[-merge[i,j]] <- x$height[i]
xx <- order(x$order)
xy <- cbind(`x` = xx, `height` = y)
} else {
## Internal nodes are given in the order they were fused which
## also is the order of 'height'
xx <- reorder(x, order(x$order), agglo.FUN = "uwmean")$value
xy <- cbind(`x`= xx, `height` = x$height)
}
xy
}
## variant of stats::cutree() that numbers the clusters in the order
## they appear in the dendrogram (left to right). The stats::cutree
## numbers the clusters in the order the elements appear in the data
## set.
`cutreeord` <-
function(tree, k = NULL, h = NULL)
{
cut <- cutree(tree, k, h)
## order of classes in the tree
if (!is.matrix(cut)) {
cut <- order(unique(cut[tree$order]))[cut]
names(cut) <- tree$labels
} else {
for(i in seq_len(ncol(cut))) {
cut[,i] <- order(unique(cut[tree$order,i]))[cut[,i]]
}
}
cut
}
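### Usage sketch (editorial, not part of vegan): reorder a dendrogram
### by leaf weights (dispatches to reorder.hclust above) and number
### the cut-level classes from left to right. Assumes the dune data;
### guarded from being sourced.
if (FALSE) {
    data(dune)
    cl <- hclust(vegdist(dune), method = "average")
    cl <- reorder(cl, rowSums(dune), agglo.FUN = "mean")
    cutreeord(cl, k = 3)
}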
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/as.hclust.spantree.R
|
`as.mcmc.oecosimu` <-
function(x)
{
## Deprecated in favour of toCoda: using as an S3 method would
## need importFrom(coda, as.mcmc) and that would add dependence on
## coda
.Deprecated("toCoda", package = "vegan")
## mcmc only for sequential methods
if (!x$oecosimu$isSeq)
stop("as.mcmc available only for sequential null models")
## named variables
rownames(x$oecosimu$simulated) <- names(x$oecosimu$z)
chains <- attr(x$oecosimu$simulated, "chains")
## chains: will make each chain as an mcmc object and combine
## these to an mcmc.list
if (!is.null(chains) && chains > 1) {
x <- x$oecosimu$simulated
nsim <- dim(x)[2]
niter <- nsim / chains
## iterate over chains
x <- lapply(1:chains, function(i) {
z <- x[, ((i-1) * niter + 1):(i * niter), drop = FALSE]
attr(z, "mcpar") <-
c(attr(x, "burnin") + attr(x, "thin"),
attr(x, "burnin") + attr(x, "thin") * niter,
attr(x, "thin"))
attr(z, "class") <- c("mcmc", class(z))
t(z)
})
## combine list of mcmc objects to a coda mcmc.list
#x <- as.mcmc.list(x)
class(x) <- "mcmc.list"
} else { # one chain: make to a single mcmc object
x <- as.ts(x)
mcpar <- attr(x, "tsp")
mcpar[3] <- round(1/mcpar[3])
attr(x, "mcpar") <- mcpar
class(x) <- c("mcmc", class(x))
}
x
}
`as.mcmc.permat` <- as.mcmc.oecosimu
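## Illustrative usage (a sketch; assumes vegan data 'dune' and the coda
## package; toCoda() is the supported replacement for as.mcmc()):
## data(dune)
## nm <- oecosimu(dune, nestedchecker, "swap", nsimul = 99)
## library(coda)
## plot(toCoda(nm))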
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/as.mcmc.oecosimu.R
|
`as.preston` <-
function (x, tiesplit = TRUE, ...)
{
if (inherits(x, "preston"))
return(x)
## practically integer
if (!identical(all.equal(x, round(x)), TRUE))
stop("function accepts only integers (counts)")
## need exact integers, since, e.g., sqrt(2)^2 - 2 = 4.4e-16 and
## tie breaks fail
if (!is.integer(x))
x <- round(x)
x <- x[x > 0]
if (tiesplit) {
## Assume log2(2^k) == k *exactly* for integer k
xlog2 <- log2(x)
ties <- xlog2 == ceiling(xlog2)
tiefreq <- table(xlog2[ties])
notiefreq <- table(ceiling(xlog2[!ties]))
itie <- as.numeric(names(tiefreq)) + 1
nitie <- as.numeric(names(notiefreq)) + 1
freq <- numeric(max(itie+1, nitie))
## split tied values between two adjacent octaves
freq[itie] <- tiefreq/2
freq[itie+1] <- freq[itie+1] + tiefreq/2
freq[nitie] <- freq[nitie] + notiefreq
} else {
xlog2 <- ceiling(log2(x))
tmp <- table(xlog2)
indx <- as.numeric(names(tmp)) + 1
freq <- numeric(max(indx))
freq[indx] <- tmp
}
names(freq) <- seq_along(freq) - 1
## remove empty octaves
freq <- freq[freq>0]
class(freq) <- "preston"
freq
}
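## Illustrative usage (sketch; assumes vegan data 'BCI'):
## data(BCI)
## as.preston(colSums(BCI)) # octave frequencies with tie splitting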
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/as.preston.R
|
`as.rad` <-
function(x)
{
if (inherits(x, "rad"))
return(x)
## recursive call for several observations
if (isTRUE(nrow(x) > 1)) {
comm <- apply(x, 1, as.rad)
class(comm) <- "rad.frame"
return(comm)
}
take <- x > 0
nm <- names(x)
comm <- x[take]
names(comm) <- nm[take]
comm <- sort(comm, decreasing = TRUE, index.return = TRUE)
## ordered index of included taxa
index <- which(take)[comm$ix]
comm <- comm$x
attr(comm, "index") <- index
class(comm) <- "rad"
comm
}
## do not print 'index' attribute
`print.rad` <-
function(x, ...)
{
print(as.table(x), ...)
invisible(x)
}
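## Illustrative usage (sketch with made-up counts):
## x <- c(sp1 = 10, sp2 = 4, sp3 = 0, sp4 = 1)
## as.rad(x) # ranked abundances of the non-zero taxa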
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/as.rad.R
|
`as.ts.oecosimu` <-
function(x, ...)
{
if (!x$oecosimu$isSeq)
stop("as.ts available only for sequential methods")
chains <- attr(x$oecosimu$simulated, "chains")
if (!is.null(chains) && chains > 1)
stop("as.ts available only for single chain")
thin <- attr(x$oecosimu$simulated, "thin")
startval <- attr(x$oecosimu$simulated, "burnin") + thin
out <- ts(t(x$oecosimu$simulated), start = startval, deltat=thin,
names = names(x$oecosimu$z))
attr(out, "burnin") <- NULL
attr(out, "thin") <- NULL
out
}
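## Illustrative usage (a sketch; assumes vegan data 'dune'):
## data(dune)
## nm <- oecosimu(dune, nestedchecker, "swap", nsimul = 99, thin = 10)
## plot(as.ts(nm))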
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/as.ts.oecosimu.R
|
`as.ts.permat` <-
function(x, type = "bray", ...)
{
type <- match.arg(type, c("bray", "chisq"))
out <- summary(x)[[type]]
if (!is.ts(out)) {
seqmethods <- sapply(make.commsim(), function(z) make.commsim(z)$isSeq)
seqmethods <- names(seqmethods)[seqmethods]
## seqmethods <- c("swap", "tswap", "abuswap")
stop(gettextf("as.ts available only for sequential methods %s",
paste(seqmethods, collapse=", ")))
}
out
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/as.ts.permat.R
|
`avgdist` <-
function(x, sample, distfun = vegdist, meanfun = mean,
transf = NULL, iterations = 100, dmethod = "bray",
diag = TRUE, upper = TRUE, ...)
{
if (missing(sample)) {
stop("Subsampling depth must be supplied via argument 'sample'")
} else {
if (!(is.numeric(sample) && sample > 0L)) {
stop("Invalid subsampling depth; 'sample' must be positive & numeric")
}
}
if (!is.numeric(iterations)) {
stop("Invalid iteration count; must be numeric")
}
inputcast <- x
distfun <- match.fun(distfun)
if (!is.null(transf)) {
transf <- match.fun(transf)
}
    ## warn here if the data do not look like observed counts:
    ## observed count data usually contain singletons
    minobs <- min(x[x > 0])
    if (minobs > 1)
        warning(gettextf("observed count data usually have singletons (count 1), but the smallest count here is %d", minobs))
# Get the list of iteration matrices
distlist <- lapply(seq_len(iterations), function(i) {
        # Suppress warnings because rrarefy would otherwise warn repeatedly
        # about the subsampling depth not being met; we deal with that below
        # by removing samples that do not meet the threshold.
inputcast <- suppressWarnings(rrarefy(inputcast, sample = sample))
# Remove those that did not meet the depth cutoff
inputcast <- inputcast[c(rowSums(inputcast) >= sample), ]
if (!is.null(transf)) {
inputcast <- transf(inputcast)
}
outdist <- distfun(inputcast, method = dmethod,
diag = TRUE, upper = TRUE, ...)
as.matrix(outdist)
})
# Use the dist list to get the average values
meanfun <- match.fun(meanfun)
# Save row names from distlist
    # Take from the first element since they should all be the same
rnames <- row.names(distlist[[1]])
afunc <- array(
unlist(as.matrix(distlist)),
c(dim(as.matrix(distlist[[1]])), length(distlist)))
output <- apply(afunc, 1:2, meanfun, ...)
# Set the names on the matrix
colnames(output) <- rownames(output) <- rnames
    # Warn about any samples that were removed
dropsamples <- setdiff(row.names(inputcast), row.names(output))
if (length(dropsamples) > 0L) {
warning(gettextf(
"The following sampling units were removed because they were below sampling depth: %s",
paste(dropsamples, collapse = ", ")))
}
output <- as.dist(output, diag = diag, upper = upper)
attr(output, "call") <- match.call()
attr(output, "method") <- "avgdist"
output
}
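## Illustrative usage (sketch; assumes vegan data 'BCI'; few iterations
## only to keep the example fast):
## data(BCI)
## avg <- avgdist(BCI, sample = 50, iterations = 20)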
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/avgdist.R
|
##############################################################
## COMPUTES BEALS SMOOTHING FOR ALL SPECIES IN TABLE #
## This is a more complete function than the previous one #
## in the vegan package. The parameter values that give the #
## equivalence are 'beals(x, NA, x, 0, include=TRUE)' #
## #
## 'x' matrix to be replaced by beals values #
## 'reference' matrix to be used as source for joint occurrences#
## 'type' sets the way to use abundance values #
## 0 - presence/absence #
## 1 - abundances for conditioned probabilities #
## 2 - abundances for weighted average #
## 3 - abundances for both #
## 'species' a column index used to compute Beals function #
## for a single species. The default (NA) indicates #
## all species. #
## 'include' flag to include target species in the computation#
##############################################################
`beals` <-
function(x, species=NA, reference=x, type=0, include=TRUE)
{
refX <- reference
## this checks whether it was chosen from available options
mode <- as.numeric(match.arg(as.character(type), c("0","1","2","3")))
spIndex <- species
incSp <- include
refX <- as.matrix(refX)
x <- as.matrix(x)
if (!(is.numeric(x) || is.logical(x)))
stop("input data must be numeric")
if(mode==0 || mode ==2) refX <- ifelse(refX > 0, 1, 0)
if(mode==0 || mode ==1) x <- ifelse(x > 0, 1, 0)
##Computes conditioned probabilities
if(is.na(spIndex)){
M <- crossprod(ifelse(refX > 0, 1, 0),refX)
C <-diag(M)
M <- sweep(M, 2, replace(C,C==0,1), "/")
if(!incSp)
for (i in 1:ncol(refX))
M[i,i] <- 0
} else {
C <- colSums(refX)
M <- crossprod(refX,ifelse(refX > 0, 1, 0)[,spIndex])
M <- M/replace(C,C==0,1)
if(!incSp)
M[spIndex] <- 0
}
##Average of conditioned probabilities
S <- rowSums(x)
if(is.na(spIndex)) {
b <-x
for (i in 1:nrow(x)) {
b[i, ] <- rowSums(sweep(M, 2, x[i, ], "*"))
}
SM <- rep(S,ncol(x))
if(!incSp)
SM <- SM-x
b <- b/replace(SM,SM==0,1)
} else {
b <-rowSums(sweep(x,2,M,"*"))
if(!incSp)
S <- S-x[,spIndex]
b <- b/replace(S,S==0,1)
}
b
}
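## Illustrative usage (sketch; assumes vegan data 'dune'); the older
## vegan behaviour corresponds to beals(x, NA, x, 0, include = TRUE):
## data(dune)
## bdune <- beals(dune)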
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/beals.R
|
`betadisper` <-
function(d, group, type = c("median","centroid"), bias.adjust=FALSE,
sqrt.dist = FALSE, add = FALSE)
{
## inline function for double centring. We used .C("dblcen", ...,
    ## PACKAGE = "stats") which does not duplicate its argument, but
## it was removed from R in r60360 | ripley | 2012-08-22 07:59:00
## UTC (Wed, 22 Aug 2012) "more conversion to .Call, clean up".
dblcen <- function(x, na.rm = TRUE) {
cnt <- colMeans(x, na.rm = na.rm)
x <- sweep(x, 2L, cnt, check.margin = FALSE)
cnt <- rowMeans(x, na.rm = na.rm)
sweep(x, 1L, cnt, check.margin = FALSE)
}
## inline function for spatial medians
spatialMed <- function(vectors, group, pos) {
axes <- seq_len(NCOL(vectors))
spMedPos <- ordimedian(vectors, group, choices = axes[pos])
spMedNeg <- ordimedian(vectors, group, choices = axes[!pos])
cbind(spMedPos, spMedNeg)
}
## inline function for centroids
centroidFUN <- function(vec, group) {
cent <- apply(vec, 2,
function(x, group) tapply(x, INDEX = group, FUN = mean),
group = group)
if(!is.matrix(cent)) { ## if only 1 group, cent is vector
cent <- matrix(cent, nrow = 1,
dimnames = list(as.character(levels(group)),
paste0("Dim", seq_len(NCOL(vec)))))
}
cent
}
## inline function for distance computation
Resids <- function(x, c) {
if(is.matrix(c))
d <- x - c
else
d <- sweep(x, 2, c)
rowSums(d^2)
}
## Tolerance for zero Eigenvalues
TOL <- sqrt(.Machine$double.eps)
## uses code from stats:::cmdscale by R Core Development Team
if(!inherits(d, "dist"))
stop("distances 'd' must be a 'dist' object")
    ## Someone really tried to analyse a correlation-like object in range -1..+1
if (any(d < -TOL, na.rm = TRUE))
stop("dissimilarities 'd' must be non-negative")
## adjust to avoid negative eigenvalues (if they disturb you)
if (sqrt.dist)
d <- sqrt(d)
if (is.logical(add) && isTRUE(add))
add <- "lingoes"
if (is.character(add)) {
add <- match.arg(add, c("lingoes", "cailliez"))
if (add == "lingoes") {
ac <- addLingoes(as.matrix(d))
d <- sqrt(d^2 + 2 * ac)
}
else if (add == "cailliez") {
ac <- addCailliez(as.matrix(d))
d <- d + ac
}
}
if(missing(type))
type <- "median"
type <- match.arg(type)
## checks for groups - need to be a factor for later
group <- if(!is.factor(group)) {
as.factor(group)
} else { ## if already a factor, drop empty levels
droplevels(group, exclude = NA) # need exclude = NA under Rdevel r71113
}
n <- attr(d, "Size")
x <- matrix(0, ncol = n, nrow = n)
x[row(x) > col(x)] <- d^2
## site labels
labs <- attr(d, "Labels")
## remove NAs in group
if(any(gr.na <- is.na(group))) {
group <- group[!gr.na]
x <- x[!gr.na, !gr.na]
## update n otherwise C call crashes
n <- n - sum(gr.na)
## update labels
labs <- labs[!gr.na]
message("missing observations due to 'group' removed")
}
## remove NA's in d
if(any(x.na <- apply(x, 1, function(x) any(is.na(x))))) {
x <- x[!x.na, !x.na]
group <- group[!x.na]
## update n otherwise C call crashes
n <- n - sum(x.na)
## update labels
labs <- labs[!x.na]
message("missing observations due to 'd' removed")
}
x <- x + t(x)
x <- dblcen(x)
e <- eigen(-x/2, symmetric = TRUE)
vectors <- e$vectors
eig <- e$values
## Remove zero eigenvalues
eig <- eig[(want <- abs(eig) > max(TOL, TOL * eig[1L]))]
## scale Eigenvectors
vectors <- vectors[, want, drop = FALSE] %*% diag(sqrt(abs(eig)),
nrow = length(eig))
## store which are the positive eigenvalues
pos <- eig > 0
## group centroids in PCoA space
centroids <-
switch(type,
centroid = centroidFUN(vectors, group),
median = spatialMed(vectors, group, pos)
)
## for each of the groups, calculate distance to centroid for
## observation in the group
## Uses in-line Resids function as we want LAD residuals for
## median method, and LSQ residuals for centroid method
dist.pos <- Resids(vectors[, pos, drop=FALSE],
centroids[group, pos, drop=FALSE])
dist.neg <- 0
if(any(!pos))
dist.neg <- Resids(vectors[, !pos, drop=FALSE],
centroids[group, !pos, drop=FALSE])
## zij are the distances of each point to its group centroid
if (any(dist.neg > dist.pos)) {
## Negative squared distances give complex valued distances:
## take only the real part (which is zero). Github issue #306.
warning("some squared distances are negative and changed to zero")
zij <- Re(sqrt(as.complex(dist.pos - dist.neg)))
} else {
zij <- sqrt(dist.pos - dist.neg)
}
if (bias.adjust) {
n.group <- as.vector(table(group))
zij <- zij*sqrt(n.group[group]/(n.group[group]-1))
}
## pre-compute group mean distance to centroid/median for `print` method
grp.zij <- tapply(zij, group, "mean")
## add in correct labels
if (any(want))
colnames(vectors) <- names(eig) <-
paste("PCoA", seq_along(eig), sep = "")
if(is.matrix(centroids))
colnames(centroids) <- names(eig)
else
names(centroids) <- names(eig)
rownames(vectors) <- names(zij) <- labs
retval <- list(eig = eig, vectors = vectors, distances = zij,
group = group, centroids = centroids,
group.distances = grp.zij, call = match.call())
class(retval) <- "betadisper"
attr(retval, "method") <- attr(d, "method")
attr(retval, "type") <- type
attr(retval, "bias.adjust") <- bias.adjust
retval
}
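## Illustrative usage (a sketch with the 'varespec' data; the grouping
## into 16 grazed + 8 ungrazed sites is an assumption of the example):
## data(varespec)
## dis <- vegdist(varespec)
## groups <- factor(rep(c("grazed", "ungrazed"), c(16, 8)))
## mod <- betadisper(dis, groups)
## anova(mod)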
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/betadisper.R
|
`betadiver` <-
function(x, method = NA, order = FALSE, help = FALSE, ...)
{
beta <- list("w"="(b+c)/(2*a+b+c)", "-1"="(b+c)/(2*a+b+c)", "c"="(b+c)/2",
"wb"="b+c", "r"="2*b*c/((a+b+c)^2-2*b*c)",
"I"="log(2*a+b+c) - 2*a*log(2)/(2*a+b+c) - ((a+b)*log(a+b) + (a+c)*log(a+c)) / (2*a+b+c)",
"e"="exp(log(2*a+b+c) - 2*a*log(2)/(2*a+b+c) - ((a+b)*log(a+b) + (a+c)*log(a+c)) / (2*a+b+c))-1",
"t"="(b+c)/(2*a+b+c)", "me"="(b+c)/(2*a+b+c)",
"j"="a/(a+b+c)", "sor"="2*a/(2*a+b+c)",
"m"="(2*a+b+c)*(b+c)/(a+b+c)",
"-2"="pmin(b,c)/(pmax(b,c)+a)",
"co"="(a*c+a*b+2*b*c)/(2*(a+b)*(a+c))",
"cc"="(b+c)/(a+b+c)", "g"="(b+c)/(a+b+c)",
"-3"="pmin(b,c)/(a+b+c)", "l"="(b+c)/2",
"19"="2*(b*c+1)/(a+b+c)/(a+b+c-1)",
"hk"="(b+c)/(2*a+b+c)", "rlb"="a/(a+c)",
"sim"="pmin(b,c)/(pmin(b,c)+a)",
"gl"="2*abs(b-c)/(2*a+b+c)",
"z"="(log(2)-log(2*a+b+c)+log(a+b+c))/log(2)"
)
if (help) {
for (i in seq_along(beta))
writeLines(strwrap(paste(i, " \"", names(beta[i]),
"\" = ", beta[[i]], "\n", sep="")))
return(invisible(NULL))
}
x <- ifelse(x > 0, 1, 0)
if (order) {
x <- x[order(rowSums(x)),]
}
d <- tcrossprod(x)
a <- as.dist(d)
S <- diag(d)
N <- length(S)
b <- as.dist(matrix(rep(S, N), nrow=N)) - a
c <- as.dist(matrix(rep(S, each=N), nrow=N)) - a
if (is.na(method) || is.null(method) || is.logical(method) && !method) {
out <- list(a = a, b = b, c = c)
class(out) <- "betadiver"
return(out)
}
out <- eval(parse(text=beta[[method]]))
out <- as.dist(out)
mxdist <- c(1,1,NA,NA,1,log(2),1,1,1,0,0,NA,1,1,1,1,NA,NA,NA,1,0,1,NA,1)
names(mxdist) <- names(beta)
attr(out, "maxdist") <- unname(mxdist[method])
attr(out, "method") <- paste("beta", names(beta[method]), sep=".")
attr(out, "call") <- match.call()
out
}
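## Illustrative usage (sketch; assumes vegan data 'dune'):
## betadiver(help = TRUE)    # list the available indices
## data(dune)
## z <- betadiver(dune, "z") # Arrhenius z as a dissimilarity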
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/betadiver.R
|
bgdispersal <- function (mat, PAonly = FALSE, abc = FALSE)
{
mat <- as.matrix(mat)
names <- rownames(mat)
if (sum((mat - decostand(mat, "pa"))) == 0) {
PAonly <- TRUE
mat1 <- mat
}
else {
mat1 <- decostand(mat, "pa")
if (PAonly == FALSE) mat2 <- mat
}
n <- nrow(mat)
## p <- ncol(mat) # unused
a <- mat1 %*% t(mat1)
b <- mat1 %*% (1 - t(mat1))
c <- (1 - mat1) %*% t(mat1)
## d <- ncol(mat1) - a - b - c # unused
DD1 <- (a * (b - c))/((a + b + c)^2)
DD2 <- (2 * a * (b - c))/((2 * a + b + c) * (a + b + c))
## McNemar <- (abs(b - c) - 1)^2/(b + c) # Old code
## diag(McNemar) <- 0 # Old code
McNemar <- matrix(NA, n, n, dimnames=list(names,names))
pP.Mc <- matrix(NA, n, n, dimnames=list(names,names))
for (j in 1:(n - 1)) {
for (jj in (j + 1):n) {
bb = b[j, jj]
cc = c[j, jj]
if ((bb + cc) == 0) {
McNemar[j, jj] = 0
pP.Mc[j, jj] = 1
} else {
if(bb == 0) { B = 0 } else { B = bb*log(bb) }
if(cc == 0) { C = 0 } else { C = cc*log(cc) }
## Williams correction
q = 1 + 1/(2*(bb+cc))
## McNemar = 2*(b*log(b) + c*log(c) - (b+c)*log((b+c)/2))
McNemar[j, jj] = 2*(B + C - (bb+cc)*log((bb+cc)/2)) / q
pP.Mc[j, jj] <- pchisq(McNemar[j, jj], 1, lower.tail = FALSE)
if ((b[j, jj] + c[j, jj]) == 0)
pP.Mc[j, jj] <- 1
}
}
}
if (!PAonly) {
DD3 <- matrix(0, n, n, dimnames=list(names,names))
DD4 <- matrix(0, n, n, dimnames=list(names,names))
row.sum <- rowSums(mat2)
for (j in 1:(n - 1)) {
for (jj in (j + 1):n) {
W <- sum(apply(mat2[c(j, jj), ], 2, min))
A <- row.sum[j]
B <- row.sum[jj]
temp3 <- W * (A - B)/((A + B - W)^2)
temp4 <- 2 * W * (A - B)/((A + B) * (A + B - W))
DD3[j, jj] <- temp3
DD3[jj, j] <- -temp3
DD4[j, jj] <- temp4
DD4[jj, j] <- -temp4
}
}
out <- list(DD1 = DD1, DD2 = DD2, DD3 = DD3, DD4 = DD4,
McNemar = McNemar, prob.McNemar = pP.Mc)
}
else {
out <- list(DD1 = DD1, DD2 = DD2, McNemar = McNemar,
prob.McNemar = pP.Mc)
}
if (abc) {
out$a <- a
out$b <- b
out$c <- c
}
out
}
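## Illustrative usage (sketch; assumes vegan data 'mite'):
## data(mite)
## bg <- bgdispersal(mite)
## bg$DD1[1:5, 1:5]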
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/bgdispersal.R
|
"bioenv" <-
function(...)
{
UseMethod("bioenv")
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/bioenv.R
|
`bioenv.default` <-
function (comm, env, method = "spearman", index = "bray", upto = ncol(env),
trace = FALSE, partial = NULL,
metric = c("euclidean", "mahalanobis", "manhattan", "gower"),
parallel = getOption("mc.cores"),
...)
{
metric <- match.arg(metric)
method <- match.arg(method, eval(formals(cor)$method))
if (any(sapply(env, is.factor)) && metric != "gower")
stop("you have factors in 'env': only 'metric = \"gower\"' is allowed")
if (is.null(partial)) {
corfun <- function(dx, dy, dz, method, ...) {
cor(dx, dy, method=method, ...)
}
} else {
corfun <- function(dx, dy, dz, method, ...) {
rxy <- cor(dx, dy, method=method, ...)
rxz <- cor(dx, dz, method=method, ...)
ryz <- cor(dy, dz, method=method, ...)
(rxy - rxz*ryz)/sqrt(1-rxz*rxz)/sqrt(1-ryz*ryz)
}
}
if (!is.null(partial))
partpart <- deparse(substitute(partial))
else
partpart <- NULL
if (!is.null(partial) && !inherits(partial, "dist"))
partial <- dist(partial)
if (!is.null(partial) && !pmatch(method, c("pearson", "spearman"),
nomatch=FALSE))
stop(gettextf("method %s is invalid in partial bioenv", method))
## remove constant variables
constant <- apply(env, 2, function(x) length(unique(x))) <= 1
if (any(constant)) {
warning(
gettextf("the following variables are constant and were removed: %s",
paste(colnames(env)[constant], collapse=", ")))
env <- env[, !constant, drop = FALSE]
}
n <- ncol(env)
if (n < 1)
stop("no usable variables in 'env'")
ntake <- 2^n - 1
ndone <- 0
upto <- min(upto, n)
if (n > 8 || trace) {
if (upto < n)
cat("Studying", sum(choose(n, 1:upto)), "of ")
cat(ntake, "possible subsets (this may take time...)\n")
flush.console()
}
## Check metric and adapt data and distance function
if (metric == "euclidean") {
x <- scale(env, scale = TRUE)
distfun <- function(x) dist(x)
} else if (metric == "mahalanobis") {
x <- as.matrix(scale(env, scale = FALSE))
distfun <- function(x) dist(veganMahatrans(x))
} else if (metric == "gower") {
x <- env
distfun <- function(x) daisy(x, metric = "gower")
} else if (metric == "manhattan") {
x <- decostand(env, "range")
distfun <- function(x) dist(x, "manhattan")
} else {
stop("unknown metric")
}
best <- list()
if (inherits(comm, "dist")) {
comdis <- comm
index <- attr(comdis, "method")
if (is.null(index))
index <- "unspecified"
} else if ((is.matrix(comm) || is.data.frame(comm)) &&
isSymmetric(unname(as.matrix(comm)))) {
comdis <- as.dist(comm)
index <- "supplied square matrix"
} else {
comdis <- vegdist(comm, method = index)
}
## Prepare for parallel processing
if (is.null(parallel))
parallel <- 1
hasClus <- inherits(parallel, "cluster")
isParal <- hasClus || parallel > 1
isMulticore <- .Platform$OS.type == "unix" && !hasClus
if (isParal && !isMulticore && !hasClus) {
parallel <- makeCluster(parallel)
on.exit(stopCluster(parallel))
}
## get the number of clusters
if (inherits(parallel, "cluster"))
nclus <- length(parallel)
else
nclus <- parallel
CLUSLIM <- 8
## The proper loop
for (i in 1:upto) {
if (trace) {
nvar <- choose(n, i)
cat("No. of variables ", i, ", No. of sets ", nvar,
"...", sep = "")
flush.console()
}
sets <- t(combn(1:n, i))
if (!is.matrix(sets))
sets <- as.matrix(t(sets))
if (isParal && nrow(sets) >= CLUSLIM*nclus) {
if (isMulticore) {
est <- unlist(mclapply(1:nrow(sets), function(j)
corfun(comdis,
distfun(x[,sets[j,],drop = FALSE]),
partial, method = method, ...),
mc.cores = parallel))
} else {
est <- parSapply(parallel, 1:nrow(sets), function(j)
corfun(comdis, distfun(x[,sets[j,],drop = FALSE]),
partial, method = method, ...))
}
} else {
est <- sapply(1:nrow(sets), function(j)
corfun(comdis, distfun(x[,sets[j,], drop=FALSE ]),
partial, method = method, ...))
}
best[[i]] <- list(best = sets[which.max(est), ], est = max(est))
if (trace) {
ndone <- ndone + nvar
cat(" done (", round(100 * ndone/ntake, 1), "%)\n",
sep = "")
flush.console()
}
}
whichbest <- which.max(lapply(best, function(tmp) tmp$est))
out <- list(names = colnames(env), method = method, index = index,
metric = metric, upto = upto, models = best,
whichbest = whichbest,
partial = partpart, x = x, distfun = distfun)
out$call <- match.call()
out$call[[1]] <- as.name("bioenv")
class(out) <- "bioenv"
out
}
## Function to extract the environmental distances used within
## bioenv. The default is to take the best model, but any model can be
## specified by its number.
`bioenvdist` <-
function(x, which = "best")
{
## any non-numeric argument is regarded as "best"
if(!is.numeric(which))
which <- x$whichbest
x$distfun(x$x[, x$models[[which]]$best, drop = FALSE])
}
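## Illustrative usage (a sketch in the spirit of the package examples;
## assumes vegan data 'varespec' and 'varechem'):
## data(varespec); data(varechem)
## sol <- bioenv(wisconsin(varespec) ~ log(N) + P + K + Ca + pH + Al, varechem)
## bioenvdist(sol, which = "best")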
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/bioenv.default.R
|
`bioenv.formula` <-
function (formula, data, ...)
{
if (missing(data))
data <- environment(formula)
fla <- formula
comm <- formula[[2]]
comm <- eval(comm, environment(formula), parent.frame())
formula[[2]] <- NULL
env <- model.frame(formula, data, na.action = NULL)
out <- bioenv(comm, env, ...)
out$formula <- fla
out$call <- match.call()
out$call[[1]] <- as.name("bioenv")
out
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/bioenv.formula.R
|
`biplot.CCorA` <-
function(x, plot.type="ov", xlabs, plot.axes = 1:2, int=0.5, col.Y="red", col.X="blue", cex=c(0.7,0.9), ...)
{
## Function sets par(): reset them on.exit
opar <- par(no.readonly = TRUE)
on.exit(par(opar))
#### Internal function
larger.frame <- function(mat, percent=0.10)
# Produce an object plot 10% larger than strictly necessary
{
range.mat <- apply(mat,2,range)
z <- apply(range.mat, 2, function(x) x[2]-x[1])
range.mat[1,] <- range.mat[1,]-z*percent
range.mat[2,] <- range.mat[2,]+z*percent
range.mat
}
####
TYPE <- c("objects","variables","ov","biplots")
type <- pmatch(plot.type, TYPE)
if(is.na(type)) stop("Invalid plot.type")
epsilon <- sqrt(.Machine$double.eps)
if(length(which(x$Eigenvalues > epsilon)) == 1)
stop(gettextf(
"axes (%s) not plotted because the solution has only one dimension",
paste(plot.axes, collapse =",")))
if(max(plot.axes) > length(which(x$Eigenvalues > epsilon)))
stop(gettextf(
"axes (%s) not plotted because the solution has fewer dimensions",
paste(plot.axes, collapse=",")))
if (missing(xlabs))
xlabs <- rownames(x$Cy)
    else if (!is.null(xlabs) && all(is.na(xlabs)))
xlabs <- rep(NA, nrow(x$Cy))
else if (is.null(xlabs))
xlabs <- 1:nrow(x$Cy)
#
lf.Y <- larger.frame(x$Cy[,plot.axes])
lf.X <- larger.frame(x$Cx[,plot.axes])
#
# Four plot types are available
if(type == 1) { # Object plots
# cat('plot.type = objects')
par(mfrow=c(1,2), pty = "s")
        plot(lf.Y, asp=1, xlab=colnames(x$Cy)[plot.axes[1]], ylab=colnames(x$Cy)[plot.axes[2]], type="n")
points(x$Cy[,plot.axes], col=col.Y) # Solid dot: pch=19
text(x$Cy[,plot.axes],labels=xlabs, pos=3, col=col.Y, cex=cex[1])
title(main = c("CCorA object plot","First data table (Y)"), line=2)
#
        plot(lf.X, asp=1, xlab=colnames(x$Cy)[plot.axes[1]], ylab=colnames(x$Cy)[plot.axes[2]], type="n")
points(x$Cx[,plot.axes], col=col.X) # Solid dot: pch=19
text(x$Cx[,plot.axes],labels=xlabs, pos=3, col=col.X, cex=cex[1])
title(main = c("CCorA object plot","Second data table (X)"), line=2)
###
###
} else if(type == 2) { # Variable plots
# cat('plot.type = variables')
par(mfrow=c(1,2), pty = "s")
        plot(x$corr.Y.Cy[,plot.axes], asp=1, xlim=c(-1,1), ylim=c(-1,1), xlab=colnames(x$Cy)[plot.axes[1]],
             ylab=colnames(x$Cy)[plot.axes[2]], type="n")
text(x$corr.Y.Cy[,plot.axes],labels=rownames(x$corr.Y.Cy), pos=3, col=col.Y, cex=cex[2])
arrows(0,0,x$corr.Y.Cy[,plot.axes[1]],x$corr.Y.Cy[,plot.axes[2]], length=0.05, col=col.Y)
abline(h=0, v=0)
lines(cos(seq(0, 2*pi, l=100)), sin(seq(0, 2*pi, l=100)))
lines(int * cos(seq(0, 2*pi, l=100)), int * sin(seq(0, 2*pi, l=100)))
title(main = c("CCorA variable plot","First data table (Y)"), line=2)
#
        plot(x$corr.X.Cx[,plot.axes], asp=1, xlim=c(-1,1), ylim=c(-1,1), xlab=colnames(x$Cy)[plot.axes[1]],
             ylab=colnames(x$Cy)[plot.axes[2]], type="n")
text(x$corr.X.Cx[,plot.axes],labels=rownames(x$corr.X.Cx), pos=3, col=col.X, cex=cex[2])
arrows(0,0,x$corr.X.Cx[,plot.axes[1]],x$corr.X.Cx[,plot.axes[2]], length=0.05, col=col.X)
abline(h=0, v=0)
lines(cos(seq(0, 2*pi, l=100)), sin(seq(0, 2*pi, l=100)))
lines(int * cos(seq(0, 2*pi, l=100)), int * sin(seq(0, 2*pi, l=100)))
title(main = c("CCorA variable plot","Second data table (X)"), line=2)
###
###
} else if(type == 3) { # Object and variable plots
# cat('plot.type = ov')
# par(mfrow=c(2,2), mar=c(4.5,3.5,2,1))
layout(matrix(c(1,2,3,4), ncol = 2, nrow = 2,
byrow = TRUE), widths = 1, heights = c(0.5,0.5))
par(pty = "s", mar = c(4.5,3.5,2,1))
#
        plot(lf.Y, asp=1, xlab=colnames(x$Cy)[plot.axes[1]], ylab=colnames(x$Cy)[plot.axes[2]], type="n")
points(x$Cy[,plot.axes], col=col.Y) # Solid dot: pch=19
text(x$Cy[,plot.axes],labels=xlabs, pos=3, col=col.Y, cex=cex[1])
title(main = c("First data table (Y)"), line=1)
#
        plot(lf.X, asp=1, xlab=colnames(x$Cy)[plot.axes[1]], ylab=colnames(x$Cy)[plot.axes[2]], type="n")
points(x$Cx[,plot.axes], col=col.X) # Solid dot: pch=19
text(x$Cx[,plot.axes],labels=xlabs, pos=3, col=col.X, cex=cex[1])
title(main = c("Second data table (X)"), line=1)
#
        plot(x$corr.Y.Cy[,plot.axes], asp=1, xlim=c(-1,1), ylim=c(-1,1), xlab=colnames(x$Cy)[plot.axes[1]],
             ylab=colnames(x$Cy)[plot.axes[2]], type="n")
text(x$corr.Y.Cy[,plot.axes],labels=rownames(x$corr.Y.Cy), pos=3, col=col.Y, cex=cex[2])
arrows(0,0,x$corr.Y.Cy[,plot.axes[1]],x$corr.Y.Cy[,plot.axes[2]], length=0.05, col=col.Y)
abline(h=0, v=0)
lines(cos(seq(0, 2*pi, l=100)), sin(seq(0, 2*pi, l=100)))
lines(int * cos(seq(0, 2*pi, l=100)), int * sin(seq(0, 2*pi, l=100)))
#
        plot(x$corr.X.Cx[,plot.axes], asp=1, xlim=c(-1,1), ylim=c(-1,1), xlab=colnames(x$Cy)[plot.axes[1]],
             ylab=colnames(x$Cy)[plot.axes[2]], type="n")
text(x$corr.X.Cx[,plot.axes],labels=rownames(x$corr.X.Cx), pos=3, col=col.X, cex=cex[2])
arrows(0,0,x$corr.X.Cx[,plot.axes[1]],x$corr.X.Cx[,plot.axes[2]], length=0.05, col=col.X)
abline(h=0, v=0)
lines(cos(seq(0, 2*pi, l=100)), sin(seq(0, 2*pi, l=100)))
lines(int * cos(seq(0, 2*pi, l=100)), int * sin(seq(0, 2*pi, l=100)))
###
###
} else if(type == 4) { # Biplots
# cat('plot.type = biplot')
par(mfrow=c(1,2), pty = "s")
biplot(x$Cy[,plot.axes], x$corr.Y.Cy[,plot.axes], col=c("black",col.Y), xlim=lf.Y[,1], ylim=lf.Y[,2],
xlabs = xlabs, arrow.len=0.05, cex=cex, ...)
title(main = c("CCorA biplot","First data table (Y)"), line=4)
#
biplot(x$Cx[,plot.axes], x$corr.X.Cx[,plot.axes], col=c("black",col.X), xlim=lf.X[,1], ylim=lf.X[,2],
xlabs = xlabs, arrow.len=0.05, cex=cex, ...)
title(main = c("CCorA biplot","Second data table (X)"), line=4)
}
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/biplot.CCorA.R
|
## biplot.rda
##
## draws pca biplots with species as arrows
##
`biplot.cca` <-
function(x, ...)
{
if (!inherits(x, "rda"))
stop("biplot can be used only with linear ordination (e.g., PCA)")
else
NextMethod("biplot", x, ...)
}
`biplot.rda` <- function(x, choices = c(1, 2), scaling = "species",
display = c("sites", "species"),
type, xlim, ylim, col = c(1,2), const,
correlation = FALSE, ...) {
if(!inherits(x, "rda"))
stop("'biplot.rda' is only for objects of class 'rda'")
if(!is.null(x$CCA))
stop("'biplot.rda' not suitable for models with constraints")
TYPES <- c("text", "points", "none")
display <- match.arg(display, several.ok = TRUE)
if (length(col) == 1)
col <- c(col,col)
g <- scores(x, choices = choices, display = display,
scaling = scaling, correlation = correlation, const)
if (!is.list(g)) {
g <- list(default = g)
names(g) <- display
}
if (missing(type)) {
nitlimit <- 80
nit <- max(nrow(g$species), nrow(g$sites))
if (nit > nitlimit)
type <- rep("points", 2)
else type <- rep("text", 2)
}
else type <- match.arg(type, TYPES, several.ok = TRUE)
if(length(type) < 2)
type <- rep(type, 2)
if (missing(xlim))
xlim <- range(g$species[, 1], g$sites[, 1], na.rm = TRUE)
if (missing(ylim))
ylim <- range(g$species[, 2], g$sites[, 2], na.rm = TRUE)
plot(g[[1]], xlim = xlim, ylim = ylim, type = "n", asp = 1,
...)
abline(h = 0, lty = 3)
abline(v = 0, lty = 3)
if (!is.null(g$species)) {
if (type[1] == "points")
arrlen <- 1
else
arrlen <- 0.85
if (type[1] != "none")
arrows(0, 0, g$species[,1] * arrlen, g$species[, 2] * arrlen,
col = col[2], length = 0.05)
if (type[1] == "text")
text(g$species, rownames(g$species),
col = col[2], cex = 0.7)
}
if (!is.null(g$sites)) {
if (type[2] == "text")
text(g$sites, rownames(g$sites), cex = 0.7, col = col[1])
else if (type[2] == "points")
points(g$sites, pch = 1, cex = 0.7, col = col[1])
}
class(g) <- "ordiplot"
invisible(g)
}
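## Illustrative usage (sketch; assumes vegan data 'dune'):
## data(dune)
## mod <- rda(dune, scale = TRUE)
## biplot(mod, scaling = "symmetric")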
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/biplot.rda.R
|
`boxplot.betadisper` <- function(x, ylab = "Distance to centroid", ...) {
tmp <- boxplot(x$distances ~ x$group, ylab = ylab, ...)
invisible(tmp)
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/boxplot.betadisper.R
|
`boxplot.specaccum` <-
function(x, add=FALSE, ...)
{
if (x$method != "random")
stop("boxplot available only for method=\"random\"")
if (!add) {
plot(x$sites, x$richness, type="n", xlab="Sites", ylab="Species",
ylim=c(1, max(x$richness, na.rm = TRUE)), ...)
}
tmp <- boxplot(data.frame(t(x$perm)), add=TRUE, at=x$sites, axes=FALSE, ...)
invisible(tmp)
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/boxplot.specaccum.R
|
`bstick` <-
function(n, ...) UseMethod("bstick")
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/bstick.R
|
`bstick.cca` <-
function(n, ...)
{
if(!inherits(n, c("rda", "cca")))
stop("'n' not of class \"cca\" or \"rda\"")
if(!is.null(n$CCA) && n$CCA$rank > 0)
stop("'bstick' only for unconstrained models")
## No idea how to define bstick for dbrda or capscale with
## negative eigenvalues
if (inherits(n, c("dbrda", "capscale")) &&
(!is.null(n$CA$imaginary.u) || !is.null(n$CA$imaginary.u.eig)))
stop(gettextf("'bstick' cannot be used for '%s' with negative eigenvalues",
class(n)[1]))
## need to select appropriate total inertia
tot.chi <- n$CA$tot.chi
n.comp <- n$CA$rank
res <- bstick.default(n.comp, tot.chi, ...)
names(res) <- names(n$CA$eig)
res
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/bstick.cca.R
|
`bstick.decorana` <-
function(n, ...)
{
tot.chi <- n$totchi
## assume full rank input
n.comp <- min(nrow(n$rproj), nrow(n$cproj)) - 1
res <- bstick.default(n.comp, tot.chi, ...)
## only four axes in decorana
res <- res[1:4]
names(res) <- names(n$evals)
res
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/bstick.decorana.R
|
`bstick.default` <-
function(n, tot.var = 1, ...)
{
res <- rev(cumsum(tot.var/n:1)/n)
    names(res) <- paste("Stick", seq_len(n), sep="")
res
}
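## Illustrative usage (sketch; assumes vegan data 'dune'):
## data(dune)
## pc <- rda(dune)
## bstick(pc)                    # broken-stick expectations
## screeplot(pc, bstick = TRUE)  # compare with observed eigenvalues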
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/bstick.default.R
|
`bstick.prcomp` <-
function(n, ...)
{
if(!inherits(n, "prcomp"))
stop("'n' not of class \"prcomp\"")
tot.chi <- sum(n$sdev^2)
n.comp <- length(n$sdev)
res <- bstick.default(n.comp, tot.chi, ...)
names(res) <- dimnames(n$rotation)[[2]]
res
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/bstick.prcomp.R
|
`bstick.princomp` <-
function(n, ...)
{
if(!inherits(n, "princomp"))
stop("'n' not of class \"princomp\"")
tot.chi <- sum(n$sdev^2)
n.comp <- length(n$sdev)
res <- bstick.default(n.comp, tot.chi, ...)
names(res) <- dimnames(n$loadings)[[2]]
res
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/bstick.princomp.R
|
`cIndexKM` <-
function (y, x, index = "all")
{
kmeans_res <- y
#############################################
gss <- function(x, clsize, withins)
{
allmean <- colMeans(x)
dmean <- sweep(x, 2, allmean, "-")
allmeandist <- sum(dmean^2)
wgss <- sum(withins)
bgss <- allmeandist - wgss
list(wgss = wgss, bgss = bgss)
}
#############################################
### Function modified by SD and PL from the original "cIndexKM" in "cclust"
### to accommodate a single response variable as well as singleton groups
### and remove unwanted index.
### The index
################################################
calinski <- function(zgss, clsize)
{
n <- sum(clsize)
k <- length(clsize)
## undefined 0/0 for one class (or fewer in error cases)
if (k <= 1)
NA
else
zgss$bgss/(k - 1)/(zgss$wgss/(n - k))
}
################################################
ssi <- function(centers, clsize)
{
ncl <- dim(centers)[1]
nvar <- dim(centers)[2]
cmax <- apply(centers, 2, max)
cmin <- apply(centers, 2, min)
cord <- apply(centers, 2, order)
cmaxi <- cord[ncl, ]
cmini <- cord[1, ]
meanmean <- mean(centers)
absmdif <- abs(apply(centers, 2, mean) - meanmean)
span <- cmax - cmin
csizemax <- clsize[cmaxi]
csizemin <- clsize[cmini]
hiest <- nvar
hiestw <- hiest * max(max(csizemax), max(csizemin)) *
exp(-min(absmdif))
sist <- sum(span)/hiest
sistw <- (span * exp(-absmdif)) %*% sqrt(csizemax * csizemin)/hiestw
list(ssi = sist, ssiw = sistw)
}
################################################
zgss <- gss(x, kmeans_res$size, kmeans_res$withinss)
index <- pmatch(index, c("calinski", "ssi", "all"))
if (is.na(index))
stop("invalid clustering index")
if (index == -1)
stop("ambiguous index")
vecallindex <- numeric(3)
if (any(index == 1) || (index == 3))
vecallindex[1] <- calinski(zgss, kmeans_res$size)
if (any(index == 2) || (index == 3))
vecallindex[2] <- ssi(kmeans_res$centers, kmeans_res$size)$ssiw
names(vecallindex) <- c("calinski", "ssi")
if (index < 3)
vecallindex <- vecallindex[index]
vecallindex
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/cIndexKM.R
|
`calibrate` <-
function(object, ...)
{
UseMethod("calibrate")
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/calibrate.R
|
`calibrate.cca` <-
function(object, newdata, rank = "full", ...)
{
## inversion solve(b) requires a square matrix, and we should
## append imaginary dims to get those in dbrda with negative
    ## constrained eigenvalues. Work is needed to verify this can be
## done, and therefore we just disable calibrate with negative
## eigenvalues in constraints.
if (inherits(object, "dbrda") && object$CCA$poseig < object$CCA$qrank)
stop("cannot be used with 'dbrda' with imaginary constrained dimensions")
if (!is.null(object$pCCA))
stop("does not work with conditioned (partial) models")
if (is.null(object$CCA) || object$CCA$rank == 0)
stop("needs constrained model")
if (object$CCA$rank < object$CCA$qrank)
stop("rank of constraints is higher than rank of dependent data")
if (rank != "full")
rank <- min(rank, object$CCA$rank)
else
rank <- object$CCA$rank
if (missing(newdata))
wa <- object$CCA$wa
else
wa <- predict(object, type="wa", newdata=newdata)
qrank <- object$CCA$qrank
b <- (coef(object))[object$CCA$QR$pivot[1:qrank], , drop=FALSE]
b <- solve(b)
pred <- wa[ , 1:rank, drop=FALSE] %*% b[1:rank, , drop =FALSE]
envcen <- object$CCA$envcentre[object$CCA$QR$pivot]
envcen <- envcen[1:object$CCA$qrank]
sweep(pred, 2, envcen, "+")
}
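## Illustrative usage (sketch; assumes vegan data 'varespec', 'varechem'):
## data(varespec); data(varechem)
## mod <- cca(varespec ~ Al + P + K, varechem)
## head(calibrate(mod)) # constraints predicted back from the ordination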
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/calibrate.cca.R
|
`calibrate.ordisurf` <-
function(object, newdata, ...)
{
if (missing(newdata))
fit <- predict(object, type = "response", ...)
else {
## Got only a vector of two coordinates
if (is.vector(newdata) && length(newdata) == 2)
newdata = data.frame(x1 = newdata[1], x2 = newdata[2])
## Got a matrix or a data frme
else{
if (NCOL(newdata) < 2)
stop("needs a matrix or a data frame with two columns")
newdata <- data.frame(x1 = newdata[,1], x2 = newdata[,2])
}
fit <- predict(object, newdata = newdata, type = "response", ...)
}
fit
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/calibrate.ordisurf.R
|
`capscale` <-
function (formula, data, distance = "euclidean", sqrt.dist = FALSE,
comm = NULL, add = FALSE, dfun = vegdist,
metaMDSdist = FALSE, na.action = na.fail, subset = NULL, ...)
{
if (!inherits(formula, "formula"))
stop("needs a model formula")
if (missing(data)) {
data <- parent.frame()
}
else {
data <- eval(match.call()$data, environment(formula),
enclos = .GlobalEnv)
}
formula <- formula(terms(formula, data = data))
## The following line was eval'ed in environment(formula), but
## that made update() fail. Rethink the line if capscale() fails
## mysteriously at this point.
X <- eval(formula[[2]], envir=environment(formula),
enclos = globalenv())
## see if user supplied dissimilarities as a matrix
if ((is.matrix(X) || is.data.frame(X)) &&
isSymmetric(unname(as.matrix(X))))
X <- as.dist(X)
if (!inherits(X, "dist")) {
comm <- X
vdata <- as.character(formula[[2]])
dfun <- match.fun(dfun)
if (metaMDSdist) {
commname <- as.character(formula[[2]])
X <- metaMDSdist(comm, distance = distance, zerodist = "ignore",
commname = commname, distfun = dfun, ...)
commname <- attr(X, "commname")
comm <- eval.parent(parse(text=commname))
} else {
X <- dfun(X, distance)
}
} else { # vdata name
if (missing(comm))
vdata <- NULL
else
vdata <- deparse(substitute(comm))
}
inertia <- attr(X, "method")
if (is.null(inertia))
inertia <- "unknown"
inertia <- paste(toupper(substr(inertia, 1, 1)),
substring(inertia, 2), sep = "")
inertia <- paste(inertia, "distance")
if (!sqrt.dist)
inertia <- paste("squared", inertia)
## postpone info on euclidification till we have done so
## evaluate formula: ordiParseFormula will return dissimilarities
## as a symmetric square matrix (except that some rows may be
## deleted due to missing values)
d <- ordiParseFormula(formula,
data,
na.action = na.action,
subset = substitute(subset),
X = X)
## ordiParseFormula subsets rows of dissimilarities: do the same
## for columns ('comm' is handled later). ordiParseFormula
## returned the original data, but we use instead the potentially
## changed X and discard d$X.
if (!is.null(d$subset)) {
X <- as.matrix(X)[d$subset, d$subset, drop = FALSE]
}
## Delete columns if rows were deleted due to missing values
if (!is.null(d$na.action)) {
X <- as.matrix(X)[-d$na.action, -d$na.action, drop = FALSE]
}
X <- as.dist(X)
k <- attr(X, "Size") - 1
if (sqrt.dist)
X <- sqrt(X)
if (max(X) >= 4 + .Machine$double.eps) {
inertia <- paste("mean", inertia)
adjust <- sqrt(k)
X <- X/adjust
}
else {
adjust <- 1
}
nm <- attr(X, "Labels")
## wcmdscale, optionally with additive adjustment
X <- wcmdscale(X, x.ret = TRUE, add = add)
if(any(dim(X$points) == 0)) # there may be no positive dims
X$points <- matrix(0, NROW(X$points), 1)
## this may have been euclidified: update inertia
if (!is.na(X$ac) && X$ac > sqrt(.Machine$double.eps))
inertia <- paste(paste0(toupper(substring(X$add, 1, 1)),
substring(X$add, 2)),
"adjusted", inertia)
if (is.null(rownames(X$points)))
rownames(X$points) <- nm
sol <- ordConstrained(X$points, d$Y, d$Z, method = "capscale")
## update for negative eigenvalues
if (any(X$eig < 0)) {
negax <- X$eig[X$eig < 0]
sol$CA$imaginary.chi <- sum(negax)
sol$tot.chi <- sol$tot.chi + sol$CA$imaginary.chi
sol$CA$imaginary.rank <- length(negax)
sol$CA$imaginary.u.eig <- X$negaxes
}
if (!is.null(comm)) {
sol$vdata <- vdata
comm <- scale(comm, center = TRUE, scale = FALSE)
sol$colsum <- apply(comm, 2, sd)
## take a 'subset' of the community after scale()
if (!is.null(d$subset))
comm <- comm[d$subset, , drop = FALSE]
## NA action after 'subset'
if (!is.null(d$na.action))
comm <- comm[-d$na.action, , drop = FALSE]
if (!is.null(sol$pCCA) && sol$pCCA$rank > 0)
comm <- qr.resid(sol$pCCA$QR, comm)
if (!is.null(sol$CCA) && sol$CCA$rank > 0) {
v.eig <- t(comm) %*% sol$CCA$u/sqrt(k)
sol$CCA$v <- decostand(v.eig, "normalize", MARGIN = 2)
comm <- qr.resid(sol$CCA$QR, comm)
}
if (!is.null(sol$CA) && sol$CA$rank > 0) {
v.eig <- t(comm) %*% sol$CA$u/sqrt(k)
sol$CA$v <- decostand(v.eig, "normalize", MARGIN = 2)
}
} else {
## input data were dissimilarities, and no 'comm' defined:
## species scores make no sense and are made NA
sol$CA$v[] <- NA
if (!is.null(sol$CCA))
sol$CCA$v[] <- NA
sol$colsum <- NA
}
if (!is.null(sol$CCA) && sol$CCA$rank > 0)
sol$CCA$centroids <- centroids.cca(sol$CCA$wa, d$modelframe)
if (!is.null(sol$CCA$alias))
sol$CCA$centroids <- unique(sol$CCA$centroids)
if (!is.null(sol$CCA$centroids)) {
rs <- rowSums(sol$CCA$centroids^2)
sol$CCA$centroids <- sol$CCA$centroids[rs > 1e-04, ,
drop = FALSE]
if (nrow(sol$CCA$centroids) == 0)
sol$CCA$centroids <- NULL
}
sol$call <- match.call()
sol$terms <- terms(formula, "Condition", data = data)
sol$terminfo <- ordiTerminfo(d, data)
sol$call$formula <- formula(d$terms, width.cutoff = 500)
sol$call$formula[[2]] <- formula[[2]]
sol$sqrt.dist <- sqrt.dist
if (!is.na(X$ac) && X$ac > 0) {
sol$ac <- X$ac
sol$add <- X$add
}
sol$adjust <- adjust
sol$inertia <- inertia
if (metaMDSdist)
sol$metaMDSdist <- commname
sol$subset <- d$subset
sol$na.action <- d$na.action
class(sol) <- c("capscale", "rda", "cca")
if (!is.null(sol$na.action))
sol <- ordiNAexclude(sol, d$excluded)
sol
}
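## Illustrative usage (sketch; assumes vegan data 'varespec', 'varechem'):
## data(varespec); data(varechem)
## mod <- capscale(varespec ~ N + P + K, varechem, distance = "bray")
## mod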
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/capscale.R
|
"cascadeKM" <-
function(data, inf.gr, sup.gr, iter = 100, criterion="calinski",
parallel = getOption("mc.cores"))
{
### DESCRIPTION
### This function uses the 'kmeans' function of the 'stats' package to create
### a cascade of partitions from K = inf.gr to K = sup.gr
### INPUT
###
### data       The data matrix; the objects are the rows
### inf.gr     Number of groups (K) for the first partition (min)
### sup.gr     Number of groups (K) for the last partition (max)
### iter       The number of random starting configurations for each value of K
### criterion  The criterion that will be used to select the best
###            partition. See the 'clustIndex' function in PACKAGE = cclust
### OUTPUT
###
### The same as from the 'kmeans' function
### EXAMPLE
###
### result <- cascadeKM(donnee, 2, 30, iter = 50, criterion = 'calinski')
###
### data = data table
### 2 = lowest number of groups for K-means
### 30 = highest number of groups for K-means
### iter = 50: start kmeans 50 times using different random configurations
### criterion = 'calinski': the Calinski-Harabasz (1974) criterion to determine
### the best value of K for the data set. 'Best' is in the least-squares sense.
###
### Main function
data <- as.matrix(data)
if(!is.null(nrow(data))){
partition <- matrix(NA, nrow(data), sup.gr - inf.gr + 1)
} else {
partition <- matrix(NA, length(data), sup.gr - inf.gr + 1)
}
results <- matrix(NA, 2, sup.gr - inf.gr + 1)
size <- matrix(NA, sup.gr, sup.gr - inf.gr + 1)
    ## For all requested numbers of groups (K)
h <- 1
## Parallelise K-means
if (is.null(parallel))
parallel <- 1
hasClus <- inherits(parallel, "cluster")
if(!hasClus && parallel <= 1) { # NO parallel computing
tmp <- lapply(inf.gr:sup.gr, function (ii) {
kmeans(data, ii, iter.max = 50, nstart = iter)
})
} else {
if(hasClus || .Platform$OS.type == "windows") {
if(!hasClus)
cl <- makeCluster(parallel)
tmp <- parLapply(cl, inf.gr:sup.gr, function (ii)
kmeans(data, ii, iter.max = 50, nstart = iter))
if (!hasClus)
stopCluster(cl)
} else { # "unix"
tmp <- mclapply(inf.gr:sup.gr, function (ii)
kmeans(data, ii, iter.max = 50, nstart = iter),
mc.cores = parallel)
}
}
    #Store group sizes, partitions and criterion values from the K-means results
for(ii in inf.gr:sup.gr)
{
#Index for tmp object
idx <- ii - inf.gr + 1
j <- ii - inf.gr + 1
#tmp <- kmeans(data, ii, iter.max = 50, nstart=iter)
size[1:ii,h] <- tmp[[idx]]$size
h <- h + 1
partition[, j] <- tmp[[idx]]$cluster
## Compute SSE statistic
results[1, j] <- sum(tmp[[idx]]$withinss)
## Compute stopping criterion
results[2, j] <- cIndexKM(tmp[[idx]], data, index = tolower(criterion))
}
colnames(partition) <- paste(inf.gr:sup.gr, "groups")
tmp <- rownames(data)
if(is.null(tmp)){
r.name <- c(1:nrow(partition))
}else{
r.name <- tmp
}
rownames(partition) <- r.name
colnames(results) <- paste(inf.gr:sup.gr, "groups")
rownames(results)<-c("SSE", criterion)
colnames(size) <- paste(inf.gr:sup.gr, "groups")
rownames(size) <- paste("Group", 1:sup.gr)
tout<-list(partition=partition, results=results, criterion=criterion, size=size)
class(tout) <- "cascadeKM"
tout
}
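## Illustrative usage (sketch; assumes vegan data 'dune'):
## data(dune)
## ccas <- cascadeKM(decostand(dune, "range"), 2, 6, iter = 100)
## plot(ccas, sortg = TRUE)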
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/cascadeKM.R
|
"cca" <-
function (...)
{
UseMethod("cca")
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/cca.R
|
`cca.default` <-
function (X, Y = NULL, Z = NULL, ...)
{
## Protect against grave misuse: some people have used
## dissimilarities instead of data
if (inherits(X, "dist") || NCOL(X) == NROW(X) &&
isTRUE(all.equal(X, t(X))))
stop("function cannot be used with (dis)similarities")
X <- as.matrix(X)
if (!is.null(Y)) {
if (is.data.frame(Y) || is.factor(Y))
Y <- model.matrix(~ ., as.data.frame(Y))[,-1,drop=FALSE]
Y <- as.matrix(Y)
}
if (!is.null(Z)) {
if (is.data.frame(Z) || is.factor(Z))
Z <- model.matrix(~ ., as.data.frame(Z))[,-1,drop=FALSE]
Z <- as.matrix(Z)
}
if (any(rowSums(X) <= 0))
stop("all row sums must be >0 in the community data matrix")
if (any(tmp <- colSums(X) <= 0)) {
exclude.spec <- seq(along=tmp)[tmp]
names(exclude.spec) <- colnames(X)[tmp]
class(exclude.spec) <- "exclude"
X <- X[, !tmp, drop = FALSE]
}
sol <- ordConstrained(X, Y, Z, method = "cca")
if (exists("exclude.spec")) {
if (!is.null(sol$CCA$v))
attr(sol$CCA$v, "na.action") <- exclude.spec
if (!is.null(sol$CA$v))
attr(sol$CA$v, "na.action") <- exclude.spec
}
call <- match.call()
call[[1]] <- as.name("cca")
sol <- c(list(call = call,
inertia = "scaled Chi-square"),
sol)
class(sol) <- "cca"
sol
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/cca.default.R
|
`cca.formula` <-
function (formula, data, na.action = na.fail, subset = NULL, ...)
{
if (missing(data)) {
data <- parent.frame()
} else {
data <- eval(match.call()$data, environment(formula),
enclos = .GlobalEnv)
}
d <- ordiParseFormula(formula, data = data, na.action = na.action,
subset = substitute(subset))
sol <- cca.default(d$X, d$Y, d$Z)
if (!is.null(sol$CCA) && sol$CCA$rank > 0) {
centroids <- centroids.cca(sol$CCA$wa, d$modelframe,
sol$rowsum)
if (!is.null(sol$CCA$alias))
centroids <- unique(centroids)
## See that there really are centroids
if (!is.null(centroids)) {
rs <- rowSums(centroids^2)
centroids <- centroids[rs > 1e-04,, drop = FALSE]
if (length(centroids) == 0)
centroids <- NULL
}
if (!is.null(centroids))
sol$CCA$centroids <- centroids
}
## replace cca.default call
call <- match.call()
call[[1]] <- as.name("cca")
call$formula <- formula(d$terms)
sol$call <- call
if (!is.null(d$na.action)) {
sol$na.action <- d$na.action
sol <- ordiNAexclude(sol, d$excluded)
}
if (!is.null(d$subset))
sol$subset <- d$subset
## drops class in c()
sol <- c(sol,
list(terms = d$terms,
terminfo = ordiTerminfo(d, d$modelframe)))
class(sol) <- "cca"
sol
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/cca.formula.R
|
`centroids.cca` <-
function(x, mf, wt)
{
if (is.null(mf) || is.null(x))
return(NULL)
facts <- sapply(mf, is.factor) | sapply(mf, is.character)
if (!any(facts))
return(NULL)
mf <- mf[, facts, drop = FALSE]
## Explicitly exclude NA as a level
mf <- droplevels(mf, exclude = NA)
if (missing(wt))
wt <- rep(1, nrow(mf))
ind <- seq_len(nrow(mf))
workhorse <- function(x, wt)
colSums(x * wt) / sum(wt)
## As NA not a level, centroids only for non-NA levels of each factor
tmp <- lapply(mf, function(fct)
tapply(ind, fct, function(i) workhorse(x[i,, drop=FALSE], wt[i])))
tmp <- lapply(tmp, function(z) sapply(z, rbind))
pnam <- labels(tmp)
out <- NULL
if (ncol(x) == 1) {
nm <- unlist(sapply(pnam,
function(nm) paste(nm, names(tmp[[nm]]), sep="")),
use.names=FALSE)
out <- matrix(unlist(tmp), nrow=1, dimnames = list(NULL, nm))
} else {
for (i in seq_along(tmp)) {
colnames(tmp[[i]]) <- paste(pnam[i], colnames(tmp[[i]]),
sep = "")
out <- cbind(out, tmp[[i]])
}
}
out <- t(out)
colnames(out) <- colnames(x)
out
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/centroids.cca.R
|
## internal function for checking select arguments in ordination plotting
## functions
.checkSelect <- function(select, scores) {
## check `select` and length of scores match
if(is.logical(select) &&
!isTRUE(all.equal(length(select), NROW(scores)))) {
warning("length of 'select' does not match the number of scores: ignoring 'select'")
} else {
scores <- if(is.matrix(scores)) {
scores[select, , drop = FALSE]
} else {
scores[select]
}
}
scores
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/checkSelect.R
|
## CLAM, reproduction of software described in Chazdon et al. 2011
## Ecology, 92, 1332--1343
clamtest <-
function(comm, groups, coverage.limit = 10,
specialization = 2/3, npoints = 20, alpha = 0.05/20)
{
## inital checks
comm <- as.matrix(comm)
if (NROW(comm) < 2)
stop("'comm' must have at least two rows")
if (nrow(comm) > 2 && missing(groups))
stop("'groups' is missing")
if (nrow(comm) == 2 && missing(groups))
groups <- if (is.null(rownames(comm)))
c("Group.1", "Group.2") else rownames(comm)
if (length(groups) != nrow(comm))
stop("length of 'groups' must equal to 'nrow(comm)'")
if (length(unique(groups)) != 2)
stop("number of groups must be two")
glabel <- as.character(unique(groups))
if (is.null(colnames(comm)))
colnames(comm) <- paste("Species", 1:ncol(comm), sep=".")
if (any(colSums(comm) <= 0))
stop("'comm' contains columns with zero sums")
spp <- colnames(comm)
## reproduced from Chazdon et al. 2011, Ecology 92, 1332--1343
S <- ncol(comm)
if (nrow(comm) == 2) {
Y <- comm[glabel[1],]
X <- comm[glabel[2],]
} else {
Y <- colSums(comm[which(groups==glabel[1]),])
X <- colSums(comm[which(groups==glabel[2]),])
}
names(X) <- names(Y) <- NULL
#all(ct$Total_SG == Y)
#all(ct$Total_OG == X)
m <- sum(Y)
n <- sum(X)
if (sum(Y) <= 0 || sum(X) <= 0)
stop("group totals of zero are not allowed")
## check if comm contains integer, especially for singletons
if (any(X[X>0] < 1) || any(Y[Y>0] < 1))
warning("non-integer values <1 detected: analysis may not be meaningful")
if (abs(sum(X,Y) - sum(as.integer(X), as.integer(Y))) > 10^-6)
warning("non-integer values detected")
C1 <- 1 - sum(X==1)/n
C2 <- 1 - sum(Y==1)/m
## this stands for other than 2/3 cases
uu <- specialization/(1-specialization)
## critical level
Zp <- qnorm(alpha, lower.tail=FALSE)
#p_i=a
#pi_i=b
## function to calculate test statistic from Appendix D
## (Ecological Archives E092-112-A4)
## coverage limit is count, not freq !!!
testfun <- function(p_i, pi_i, C1, C2, n, m) {
C1 <- ifelse(p_i*n < coverage.limit, C1, 1)
C2 <- ifelse(pi_i*m < coverage.limit, C2, 1)
Var <- C1^2*(p_i*(1-p_i)/n) + uu^2*C2^2*(pi_i*(1-pi_i)/m)
C1*p_i - C2*pi_i*uu - Zp*sqrt(Var)
}
    ## root finding for iso-lines (instead of iterative search)
rootfun <- function(pi_i, C1, C2, n, m, upper) {
f <- function(p_i) testfun(p_i/n, pi_i/m, C1, C2, n, m)
if (length(unique(sign(c(f(1), f(upper))))) > 1)
ceiling(uniroot(f, lower=1, upper=upper)$root) else NA
}
## sequences for finding Xmin and Ymin values
Xseq <- as.integer(trunc(seq(1, max(X), len=npoints)))
Yseq <- as.integer(trunc(seq(1, max(Y), len=npoints)))
## finding Xmin and Ymin values for Xseq and Yseq
Xmins <- sapply(Yseq, function(z) rootfun(z, C1, C2, n, m, upper=max(X)))
Ymins <- sapply(Xseq, function(z) rootfun(z, C2, C1, m, n, upper=max(Y)))
## needed to tweak original set of rules (extreme case reported
## by Richard Telford failed here)
if (all(is.na(Xmins)))
Xmins[1] <- 1
if (all(is.na(Ymins)))
Ymins[1] <- 1
minval <- list(data.frame(x=Xseq[!is.na(Ymins)], y=Ymins[!is.na(Ymins)]),
data.frame(x=Xmins[!is.na(Xmins)], y=Yseq[!is.na(Xmins)]))
## shared but too rare
Ymin <- Ymins[1]
Xmin <- Xmins[1]
sr <- X < Xmin & Y < Ymin
## consequence of manually setting Xmin/Ymin resolved here
tmp1 <- if (Xmin==1)
list(x=1, y=Xmin) else approx(c(Xmin, 1), c(1, Ymin), xout=1:Xmin)
tmp2 <- if (Ymin==1)
list(x=1, y=Ymin) else approx(c(1, Ymin), c(Xmin, 1), xout=1:Ymin)
for (i in 1:S) {
if (X[i] %in% tmp1$x)
sr[i] <- Y[i] < tmp1$y[which(X[i]==tmp1$x)]
if (Y[i] %in% tmp2$x)
sr[i] <- X[i] < tmp2$y[which(Y[i]==tmp2$x)]
}
## classification
a <- ifelse(X==0, 1, X)/n # \hat{p_i}
b <- ifelse(Y==0, 1, Y)/m # \hat{\pi_i}
specX <- !sr & testfun(a, b, C1, C2, n, m) > 0
specY <- !sr & testfun(b, a, C2, C1, m, n) > 0
gen <- !sr & !specX & !specY
## crosstable
tmp <- ifelse(cbind(gen, specY, specX, sr), 1, 0)
classes <- factor((1:4)[rowSums(tmp*col(tmp))], levels=1:4)
levels(classes) <- c("Generalist", paste("Specialist", glabel[1], sep="_"),
paste("Specialist", glabel[2], sep="_"), "Too_rare")
tab <- data.frame(Species=spp, y=Y, x=X, Classes=classes)
colnames(tab)[2:3] <- paste("Total", glabel, sep="_")
rownames(tab) <- NULL
class(tab) <- c("clamtest","data.frame")
attr(tab, "settings") <- list(labels = glabel,
coverage.limit = coverage.limit, specialization = specialization,
npoints = npoints, alpha = alpha)
attr(tab, "minv") <- minval
attr(tab, "coverage") <- structure(c(C2, C1), .Names=glabel)
tab
}
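## Illustrative usage (a sketch following the oribatid mite example;
## assumes vegan data 'mite' and 'mite.env'):
## data(mite); data(mite.env)
## sol <- clamtest(mite, mite.env$Shrub == "None", alpha = 0.005)
## summary(sol)
## plot(sol)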
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/clamtest.R
|
`coef.cca` <-
function (object, norm = FALSE, ...)
{
if(is.null(object$CCA))
stop("unconstrained models do not have coefficients")
Q <- object$CCA$QR
u <- object$CCA$u
## if rank==0, the next would fail, but this kluge gives
## consistent results with coef.rda and vegan 2.4
if (ncol(u))
u <- sqrt(object$rowsum) * u
## scores.cca uses na.predict and may add missing NA rows to u,
## but Q has no missing cases
if (nrow(Q$qr) < nrow(u) && inherits(object$na.action, "exclude"))
u <- u[-object$na.action,, drop=FALSE]
b <- qr.coef(Q, u)
if (norm)
b <- sqrt(colSums(qr.X(Q)^2)) * b
b
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/coef.cca.R
|
`coef.radfit` <-
function (object, ...)
{
    out <- sapply(object$models, function(x)
        if (length(coef(x)) < 3)
            c(coef(x), rep(NA, 3 - length(coef(x))))
        else coef(x))
out <- t(out)
colnames(out) <- paste("par", 1:3, sep = "")
out
}
`coef.radfit.frame` <-
function(object, ...)
{
lapply(object, coef, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/coef.radfit.R
|
`coef.rda` <-
function (object, norm = FALSE, ...)
{
if(is.null(object$CCA))
stop("unconstrained models do not have coefficients")
Q <- object$CCA$QR
u <- object$CCA$u
## scores.cca uses na.predict and may add missing NA rows to u,
## but Q has no missing cases
if (nrow(Q$qr) < nrow(u) && inherits(object$na.action, "exclude"))
u <- u[-object$na.action,, drop=FALSE]
b <- qr.coef(Q, u)
if (norm)
b <- sqrt(colSums(qr.X(Q)^2)) * b
b
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/coef.rda.R
|
## this is function to create a commsim object, does some checks
## there is a finite number of useful arguments here
## but I added ... to allow for unforeseen algorithms,
## or being able to reference to external objects
commsim <-
function(method, fun, binary, isSeq, mode)
{
fun <- if (!missing(fun))
match.fun(fun) else stop("'fun' missing")
if (any(!(names(formals(fun)) %in%
c("x", "n", "nr", "nc", "rs", "cs", "rf", "cf", "s", "fill", "thin", "..."))))
stop("unexpected arguments in 'fun'")
out <- structure(list(method = if (!missing(method))
as.character(method)[1L] else stop("'method' missing"),
binary = if (!missing(binary))
as.logical(binary)[1L] else stop("'binary' missing"),
isSeq = if (!missing(isSeq))
as.logical(isSeq)[1L] else stop("'isSeq' missing"),
mode = if (!missing(mode))
match.arg(as.character(mode)[1L],
c("integer", "double")) else stop("'mode' missing"),
fun = fun), class = "commsim")
out
}
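## Illustrative usage (sketch): a trivial non-sequential binary null
## model that shuffles the whole matrix (cf. the r00 model):
## f <- function(x, n, ...) array(replicate(n, sample(x)), c(dim(x), n))
## cs <- commsim("r00_model", fun = f, binary = TRUE, isSeq = FALSE,
##               mode = "integer")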
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/commsim.R
|
`confint.MOStest` <-
function (object, parm = 1, level = 0.95, ...)
{
confint(profile(object), level = level, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/confint.MOStest.R
|
## Contribution diversity
## Lu, H.P., H.H. Wagner and X.Y. Chen (2007).
## A contribution diversity approach to evaluate species diversity.
## Basic and Applied Ecology 8: 1 -12.
`contribdiv` <-
function(comm, index = c("richness", "simpson"), relative = FALSE,
scaled = TRUE, drop.zero = FALSE)
{
index <- match.arg(index)
comm <- as.matrix(comm) # faster than data.frame
x <- comm[rowSums(comm) > 0, colSums(comm) > 0]
n <- nrow(x)
S <- ncol(x)
if (index == "richness") {
n.i <- colSums(x > 0)
S.k <- rowSums(x > 0)
alpha <- S.k / n
beta <- apply(x, 1, function(z) sum((n - n.i[z > 0]) / (n * n.i[z > 0])))
denom <- 1
} else {
P.ik <- decostand(x, "total")
P.i <- apply(P.ik, 2, function(z) sum(z) / n)
P.i2 <- matrix(P.i, n, S, byrow=TRUE)
alpha <- diversity(x, "simpson")
beta <- rowSums(P.ik * (P.ik - P.i2))
denom <- n
}
gamma <- alpha + beta
D <- sum(beta) / sum(gamma)
if (relative) {
denom <- if (scaled)
{denom * sum(gamma)} else 1
alpha <- (alpha - mean(alpha)) / denom
beta <- (beta - mean(beta)) / denom
gamma <- (gamma - mean(gamma)) / denom
}
rval <- data.frame(alpha = alpha, beta = beta, gamma = gamma)
if (!drop.zero && nrow(comm) != n) {
nas <- rep(NA, nrow(comm))
rval2 <- data.frame(alpha = nas, beta = nas, gamma = nas)
rval2[rowSums(comm) > 0, ] <- rval
rval <- rval2
}
attr(rval, "diff.coef") <- D
attr(rval, "index") <- index
attr(rval, "relative") <- relative
attr(rval, "scaled") <- scaled
class(rval) <- c("contribdiv", "data.frame")
rval
}
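## Illustrative usage (sketch; assumes vegan data 'mite'):
## data(mite)
## cd <- contribdiv(mite, "richness")
## plot(cd)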
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/contribdiv.R
|
`cophenetic.spantree` <-
function(x)
{
n <- x$n
mat <- matrix(NA, nrow=n, ncol=n)
if (n < 2)
return(as.dist(mat))
ind <- apply(cbind(2:n, x$kid), 1, sort)
ind <- t(ind[2:1,])
mat[ind] <- x$dist
d <- as.dist(mat)
attr(d, "Labels") <- x$labels
stepacross(d, path = "extended", toolong=0, trace=FALSE)
}
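## Illustrative usage sketch (editorial addition), assuming vegan is
## attached: cophenetic distances along a minimum spanning tree of
## Bray-Curtis dissimilarities.
data(dune)
tr <- spantree(vegdist(dune))
cophenetic(tr)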
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/cophenetic.spantree.R
|
`coverscale` <-
function (x, scale = c("Braun.Blanquet", "Domin", "Hult", "Hill",
"fix", "log"), maxabund, character = TRUE)
{
scale <- match.arg(scale)
sol <- as.data.frame(x)
x <- as.matrix(x)
switch(scale, Braun.Blanquet = {
codes <- c("r", "+", as.character(1:5))
lims <- c(0, 0.1, 1, 5, 25, 50, 75, 100)
}, Domin = {
codes <- c("+", as.character(1:9), "X")
lims <- c(0, 0.01, 0.1, 1, 5, 10, 25, 33, 50, 75, 90,
100)
}, Hult = {
codes <- as.character(1:5)
lims <- c(0, 100/2^(4:1), 100)
}, Hill = {
codes <- as.character(1:5)
lims <- c(0, 2, 5, 10, 20, Inf)
}, fix = {
codes <- c("+", as.character(1:9), "X")
lims <- c(0:10, 11 - 10 * .Machine$double.eps)
}, log = {
codes <- c("+", as.character(1:9))
if (missing(maxabund))
maxabund <- max(x)
lims <- c(0, maxabund/2^(9:1), maxabund)
})
for (i in 1:nrow(x)) {
if (!character)
codes <- FALSE
tmp <- x[i, ] > 0
sol[i, tmp] <- cut(x[i, tmp], breaks = lims, labels = codes,
right = FALSE, include.lowest = TRUE)
}
attr(sol, "scale") <-
if (scale == "log") paste("log, with maxabund", maxabund) else scale
sol
}
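## Illustrative sketch (editorial addition): convert invented percent cover
## values to Braun-Blanquet class codes.
m <- matrix(c(0, 0.05, 0.5, 3, 30, 80), nrow = 2,
            dimnames = list(c("plot1", "plot2"), c("sp1", "sp2", "sp3")))
coverscale(m, "Braun.Blanquet")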
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/coverscale.R
|
`dbrda` <-
function (formula, data, distance = "euclidean",
sqrt.dist = FALSE, add = FALSE, dfun = vegdist,
metaMDSdist = FALSE, na.action = na.fail,
subset = NULL, ...)
{
if (!inherits(formula, "formula"))
stop("needs a model formula")
if (missing(data)) {
data <- parent.frame()
}
else {
data <- eval(match.call()$data, environment(formula),
enclos = .GlobalEnv)
}
formula <- formula(terms(formula, data = data))
## The following line was eval'ed in environment(formula), but
## that made update() fail. Rethink the line if dbrda() fails
## mysteriously at this point.
X <- eval(formula[[2]], envir=environment(formula),
enclos = globalenv())
if ((is.matrix(X) || is.data.frame(X)) &&
isSymmetric(unname(as.matrix(X))))
X <- as.dist(X)
if (!inherits(X, "dist")) {
comm <- X
dfun <- match.fun(dfun)
if (metaMDSdist) {
commname <- as.character(formula[[2]])
X <- metaMDSdist(comm, distance = distance, zerodist = "ignore",
commname = commname, distfun = dfun, ...)
commname <- attr(X, "commname")
comm <- eval.parent(parse(text=commname))
} else {
X <- dfun(X, distance)
}
}
## get the name of the inertia
inertia <- attr(X, "method")
if (is.null(inertia))
inertia <- "unknown"
inertia <- paste(toupper(substr(inertia, 1, 1)),
substring(inertia, 2), sep = "")
inertia <- paste(inertia, "distance")
## evaluate formula: ordiParseFormula will return dissimilarities
## as a symmetric square matrix (except that some rows may be
## deleted due to missing values)
d <- ordiParseFormula(formula,
data,
na.action = na.action,
subset = substitute(subset),
X = X)
## ordiParseFormula subsets rows of dissimilarities: do the same
## for columns ('comm' is handled later). ordiParseFormula
## returned the original data, but we use instead the potentially
## changed X and discard d$X.
if (!is.null(d$subset)) {
X <- as.matrix(X)[d$subset, d$subset, drop = FALSE]
}
## Delete columns if rows were deleted due to missing values
if (!is.null(d$na.action)) {
X <- as.matrix(X)[-d$na.action, -d$na.action, drop = FALSE]
}
X <- as.matrix(X)
k <- NROW(X) - 1
## sqrt & add adjustments
if (sqrt.dist)
X <- sqrt(X)
if (is.logical(add) && isTRUE(add))
add <- "lingoes"
if (is.character(add)) {
add <- match.arg(add, c("lingoes", "cailliez"))
if (add == "lingoes") {
ac <- addLingoes(X)
X <- sqrt(X^2 + 2 * ac)
} else if (add == "cailliez") {
ac <- addCailliez(X)
X <- X + ac
}
diag(X) <- 0
} else {
ac <- 0
}
## update the name of the inertia
if (!sqrt.dist)
inertia <- paste("squared", inertia)
if (ac > sqrt(.Machine$double.eps))
inertia <- paste(paste0(toupper(substring(add, 1, 1)),
substring(add, 2)), "adjusted", inertia)
if (max(X) >= 4 + .Machine$double.eps) {
inertia <- paste("mean", inertia)
adjust <- sqrt(k)
X <- X/adjust
}
else {
adjust <- 1
}
## Get components of inertia with negative eigenvalues following
## McArdle & Anderson (2001), section "Theory". G is their
## double-centred Gower matrix, but instead of hat matrix, we use
## QR decomposition to get the components of inertia.
sol <- ordConstrained(X, d$Y, d$Z, method = "dbrda")
sol$colsum <- NA
## separate eigenvectors associated with negative eigenvalues from
## u into imaginary.u
if (!is.null(sol$CCA) && sol$CCA$rank > sol$CCA$poseig) {
sol$CCA$imaginary.u <- sol$CCA$u[, -seq_len(sol$CCA$poseig),
drop = FALSE]
sol$CCA$u <- sol$CCA$u[, seq_len(sol$CCA$poseig), drop = FALSE]
}
if (!is.null(sol$CA) && sol$CA$rank > sol$CA$poseig) {
sol$CA$imaginary.u <- sol$CA$u[, -seq_len(sol$CA$poseig),
drop = FALSE]
sol$CA$u <- sol$CA$u[, seq_len(sol$CA$poseig), drop = FALSE]
}
if (!is.null(sol$CCA) && sol$CCA$rank > 0)
sol$CCA$centroids <-
centroids.cca(sol$CCA$u, d$modelframe)
if (!is.null(sol$CCA$alias))
sol$CCA$centroids <- unique(sol$CCA$centroids)
if (!is.null(sol$CCA$centroids)) {
rs <- rowSums(sol$CCA$centroids^2)
sol$CCA$centroids <- sol$CCA$centroids[rs > 1e-04, ,
drop = FALSE]
if (nrow(sol$CCA$centroids) == 0)
sol$CCA$centroids <- NULL
}
sol$call <- match.call()
sol$terms <- terms(formula, "Condition", data = data)
sol$terminfo <- ordiTerminfo(d, data)
sol$call$formula <- formula(d$terms, width.cutoff = 500)
sol$call$formula[[2]] <- formula[[2]]
sol$sqrt.dist <- sqrt.dist
if (!is.na(ac) && ac > 0) {
sol$ac <- ac
sol$add <- add
}
sol$adjust <- adjust
sol$inertia <- inertia
if (metaMDSdist)
sol$metaMDSdist <- commname
if (!is.null(d$subset))
sol$subset <- d$subset
if (!is.null(d$na.action)) {
sol$na.action <- d$na.action
## dbrda cannot add WA scores in na.exclude, and the following
## does nothing except add residuals.zombie
sol <- ordiNAexclude(sol, d$excluded)
}
class(sol) <- c("dbrda", "rda", "cca")
sol
}
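## Illustrative usage sketch (editorial addition), assuming vegan and its
## 'varespec' and 'varechem' data sets are attached: distance-based RDA on
## Bray-Curtis dissimilarities.
data(varespec)
data(varechem)
dbrda(varespec ~ N + P + K, varechem, distance = "bray")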
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/dbrda.R
|
`decorana` <-
function (veg, iweigh = 0, iresc = 4, ira = 0, mk = 26, short = 0,
before = NULL, after = NULL)
{
## constants
Const2 <- 5
Const3 <- 1e-11
ZEROEIG <- 1e-7 # same limit as in the C function do_decorana
## data
veg <- as.matrix(veg)
if (any(veg < 0))
stop("'decorana' cannot handle negative data entries")
## optional data transformation
if (!is.null(before)) {
veg <- beforeafter(veg, before, after)
}
if (iweigh) {
veg <- downweight(veg, Const2)
}
v <- attr(veg, "v")
v.fraction <- attr(veg, "fraction")
## marginal sums after optional data transformations
aidot <- rowSums(veg)
if (any(aidot <= 0))
stop("all row sums must be >0 in the community matrix: remove empty sites")
adotj <- colSums(veg)
if (any(adotj <= 0))
warning("some species were removed because they were missing in the data")
adotj[adotj < Const3] <- Const3
## check arguments
if (mk < 10)
mk <- 10
if (mk > 46)
mk <- 46
if (ira)
iresc <- 0
## Start analysis
CA <- .Call(do_decorana, veg, ira, iresc, short, mk, as.double(aidot),
as.double(adotj))
if (ira)
dnames <- paste("RA", 1:4, sep = "")
else dnames <- paste("DCA", 1:4, sep = "")
dimnames(CA$rproj) <- list(rownames(veg), dnames)
dimnames(CA$cproj) <- list(colnames(veg), dnames)
names(CA$evals) <- dnames
origin <- apply(CA$rproj, 2, weighted.mean, aidot)
vegChi <- initCA(veg) # needed for eigenvalues & their sum
totchi <- sum(vegChi^2)
if (ira) {
evals.decorana <- NULL
evals.ortho <- NULL
}
else {
evals.decorana <- CA$evals
if (any(ze <- evals.decorana <= 0))
CA$evals[ze] <- 0
## centred and weighted scores
x0 <- scale(CA$rproj, center = origin, scale = FALSE)
x0 <- sqrt(aidot/sum(aidot)) * x0
y0 <- scale(CA$cproj, center = origin, scale = FALSE)
y0 <- sqrt(adotj/sum(adotj)) * y0
## eigenvalue: shrinking of scores y0 --WA--> x0
evals <- colSums(x0^2)/colSums(y0^2)
evals[evals < ZEROEIG | !is.finite(evals)] <- 0
CA$evals <- evals
## decorana finds row scores from species scores, and for
## additive eigenvalues we need orthogonalized species
## scores. Q of QR decomposition will be orthonormal and if we
## use it for calculating row scores, these directly give
## additive eigenvalues.
qy <- qr.Q(qr(y0))
evals.ortho <- numeric(4) # qy can have < 4 columns
evals.ortho[seq_len(ncol(qy))] <- colSums(crossprod(t(vegChi), qy)^2)
evals.ortho[evals.ortho < ZEROEIG | !is.finite(evals.ortho)] <- 0
names(evals.ortho) <- names(evals.decorana)
}
additems <- list(totchi = totchi, evals.ortho = evals.ortho,
evals.decorana = evals.decorana, origin = origin,
v = v, fraction = v.fraction, iweigh = iweigh,
before = before, after = after,
call = match.call())
CA <- c(CA, additems)
class(CA) <- "decorana" # c() strips class
CA
}
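## Illustrative usage sketch (editorial addition), assuming vegan and
## 'varespec' are attached:
data(varespec)
ord <- decorana(varespec)
ord  # print method shows eigenvalues and axis lengths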
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/decorana.R
|
`decostand` <-
function (x, method, MARGIN, range.global, logbase = 2, na.rm = FALSE, ...)
{
wasDataFrame <- is.data.frame(x)
x <- as.matrix(x)
METHODS <- c("total", "max", "frequency", "normalize", "range", "rank",
"rrank", "standardize", "pa", "chi.square", "hellinger",
"log", "clr", "rclr", "alr")
method <- match.arg(method, METHODS)
if (any(x < 0, na.rm = TRUE)) {
k <- min(x, na.rm = TRUE)
if (method %in% c("total", "frequency", "pa", "chi.square", "rank",
"rrank", "rclr")) {
warning("input data contains negative entries: result may be non-sense")
}
}
else k <- .Machine$double.eps
attr <- NULL
switch(method, total = {
if (missing(MARGIN))
MARGIN <- 1
tmp <- pmax(k, apply(x, MARGIN, sum, na.rm = na.rm))
x <- sweep(x, MARGIN, tmp, "/")
attr <- list("total" = tmp, "margin" = MARGIN)
}, max = {
if (missing(MARGIN))
MARGIN <- 2
tmp <- pmax(k, apply(x, MARGIN, max, na.rm = na.rm))
x <- sweep(x, MARGIN, tmp, "/")
attr <- list("max" = tmp, "margin" = MARGIN)
}, frequency = {
if (missing(MARGIN))
MARGIN <- 2
tmp <- pmax(k, apply(x, MARGIN, sum, na.rm = na.rm))
fre <- apply(x > 0, MARGIN, sum, na.rm = na.rm)
tmp <- fre/tmp
x <- sweep(x, MARGIN, tmp, "*")
attr <- list("scale" = tmp, "margin" = MARGIN)
}, normalize = {
if (missing(MARGIN))
MARGIN <- 1
tmp <- apply(x^2, MARGIN, sum, na.rm = na.rm)
tmp <- pmax(.Machine$double.eps, sqrt(tmp))
x <- sweep(x, MARGIN, tmp, "/")
attr <- list("norm" = tmp, "margin" = MARGIN)
}, range = {
if (missing(MARGIN))
MARGIN <- 2
if (missing(range.global))
xtmp <- x
else {
if (dim(range.global)[MARGIN] != dim(x)[MARGIN])
stop("range matrix does not match data matrix")
xtmp <- as.matrix(range.global)
}
tmp <- apply(xtmp, MARGIN, min, na.rm = na.rm)
ran <- apply(xtmp, MARGIN, max, na.rm = na.rm)
ran <- ran - tmp
ran <- pmax(k, ran, na.rm = na.rm)
x <- sweep(x, MARGIN, tmp, "-")
x <- sweep(x, MARGIN, ran, "/")
attr <- list("min" = tmp, "range" = ran, "margin" = MARGIN)
}, rank = {
if (missing(MARGIN)) MARGIN <- 1
x[x==0] <- NA
x <- apply(x, MARGIN, rank, na.last = "keep")
if (MARGIN == 1) # gives transposed x
x <- t(x)
x[is.na(x)] <- 0
attr <- list("margin" = MARGIN)
}, rrank = {
if (missing(MARGIN)) MARGIN <- 1
x <- decostand(x, "rank", MARGIN = MARGIN)
x <- sweep(x, MARGIN, specnumber(x, MARGIN = MARGIN), "/")
attr <- list("margin" = MARGIN)
}, standardize = {
if (!missing(MARGIN) && MARGIN == 1)
x <- t(scale(t(x)))
else {
x <- scale(x)
MARGIN <- 2
}
attr <- list("center" = attr(x, "scaled:center"),
"scale" = attr(x, "scaled:scale"),
"margin" = MARGIN)
}, pa = {
x <- ifelse(x > 0, 1, 0)
}, chi.square = {
if (missing(MARGIN))
MARGIN <- 1
## MARGIN 2 transposes the result!
if (MARGIN == 2)
x <- t(x)
rs <- pmax(k, rowSums(x, na.rm = na.rm))
cs <- pmax(k, colSums(x, na.rm = na.rm))
tot <- sum(x, na.rm = na.rm)
x <- sqrt(tot) * x/outer(rs, sqrt(cs))
attr <- list("tot" = tot, "rsum" = rs, "csum" = cs, margin = MARGIN)
}, hellinger = {
x <- sqrt(decostand(x, "total", MARGIN = MARGIN, na.rm = na.rm))
attr <- attr(x, "parameters")
}, log = {### Marti Anderson logs, after Etienne Laliberte
if (!isTRUE(all.equal(as.integer(x), as.vector(x)))) {
minpos <- min(x[x > 0], na.rm = TRUE)
x <- x / minpos
warning("non-integer data: divided by smallest positive value",
call. = FALSE)
} else {
minpos <- 1
}
x[x > 0 & !is.na(x)] <- log(x[x > 0 & !is.na(x)], base = logbase) + 1
attr <- list("logbase" = logbase, minpos = minpos)
}, alr = {
if (missing(MARGIN))
MARGIN <- 1
if (MARGIN == 1)
x <- t(.calc_alr(t(x), ...))
else x <- .calc_alr(x, ...)
attr <- attr(x, "parameters")
attr$margin <- MARGIN
}, clr = {
if (missing(MARGIN))
MARGIN <- 1
if (MARGIN == 1)
x <- .calc_clr(x, ...)
else x <- t(.calc_clr(t(x), ...))
attr <- attr(x, "parameters")
attr$margin <- MARGIN
}, rclr = {
if (missing(MARGIN))
MARGIN <- 1
if (MARGIN == 1)
x <- .calc_rclr(x, ...)
else x <- t(.calc_rclr(t(x), ...))
attr <- attr(x, "parameters")
attr$margin <- MARGIN
})
if (any(is.nan(x)))
    warning("result contains NaN, perhaps due to impossible mathematical operation")
if (wasDataFrame)
x <- as.data.frame(x)
attr(x, "parameters") <- attr
attr(x, "decostand") <- method
x
}
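## Illustrative usage sketch (editorial addition): Hellinger standardization
## of the vegan 'varespec' data, assuming vegan is attached.
data(varespec)
sptrans <- decostand(varespec, "hellinger")
rowSums(sptrans^2)[1:3]  # Hellinger-transformed rows have unit sum of squares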
## Modified from the original version in mia R package
.calc_clr <-
function(x, pseudocount=0, na.rm = TRUE)
{
# Add pseudocount
x <- x + pseudocount
# Error with negative values
if (any(x <= 0, na.rm = na.rm)) {
stop("'clr' cannot be used with non-positive data: use pseudocount > ",
-min(x, na.rm = na.rm) + pseudocount, call. = FALSE)
}
# In every sample, calculate the log of individual entries.
# Then calculate
# the sample-specific mean value and subtract every entries'
# value with that.
clog <- log(x)
means <- rowMeans(clog)
clog <- clog - means
attr(clog, "parameters") <- list("means" = means,
"pseudocount" = pseudocount)
clog
}
# Modified from the original version in mia R package
.calc_rclr <-
function(x, na.rm = TRUE)
{
# Error with negative values
if (any(x < 0, na.rm = na.rm)) {
stop("'rclr' cannot be used with negative data", call. = FALSE)
}
# Log transform
clog <- log(x)
# Convert zeros to NAs in rclr
clog[is.infinite(clog)] <- NA
# Calculate log of geometric mean for every sample, ignoring the NAs
mean_clog <- rowMeans(clog, na.rm = na.rm)
# Divide all values by their sample-wide geometric means
# Log and transpose back to original shape
xx <- log(x) - mean_clog
# If there were zeros, there are infinite values after logarithmic transform.
# Convert those to zero.
xx[is.infinite(xx)] <- 0
attr(xx, "parameters") <- list("means" = mean_clog)
xx
}
.calc_alr <-
function (x, reference = 1, pseudocount = 0, na.rm = TRUE)
{
# Add pseudocount
x <- x + pseudocount
# If there is negative values, gives an error.
if (any(x < 0, na.rm = na.rm)) {
stop("'alr' cannot be used with negative data: use pseudocount >= ",
-min(x, na.rm = na.rm) + pseudocount, call. = FALSE)
}
## name must be changed to numeric index for [, -reference] to work
if (is.character(reference)) {
reference <- which(reference == colnames(x))
if (!length(reference)) # found it?
stop("'reference' name was not found in data", call. = FALSE)
}
if (reference > ncol(x) || reference < 1)
stop("'reference' should be a name or index 1 to ",
ncol(x), call. = FALSE)
clog <- log(x)
refvector <- clog[, reference]
clog <- clog[, -reference] - refvector
attr(clog, "parameters") <- list("reference" = refvector,
"index" = reference,
"pseudocount" = pseudocount)
clog
}
`decobackstand` <-
function(x, zap = TRUE)
{
method <- attr(x, "decostand")
if (is.null(method))
stop("function can be used only with 'decostand' standardized data")
para <- attr(x, "parameters")
if(is.null(para)) # for old results & "pa"
stop("object has no information to backtransform data")
x <- switch(method,
"total" = sweep(x, para$margin, para$total, "*"),
"max" = sweep(x, para$margin, para$max, "*"),
"frequency" = sweep(x, para$margin, para$scale, "/"),
"normalize" = sweep(x, para$margin, para$norm, "*"),
"range" = { x <- sweep(x, para$margin, para$range, "*")
sweep(x, para$margin, para$min, "+")},
"standardize" = {x <- sweep(x, para$margin, para$scale, "*")
sweep(x, para$margin, para$center, "+") },
"hellinger" = sweep(x^2, para$margin, para$total, "*"),
"chi.square" = { rc <- outer(para$rsum, sqrt(para$csum))
x <- x * rc /sqrt(para$tot)
if (para$margin == 1) x else t(x) },
"log" = { x[x > 0 & !is.na(x)] <-
para$logbase^(x[x > 0 & !is.na(x)] - 1)
x * para$minpos},
"clr" = exp(sweep(x, para$margin, para$means, "+")) -
para$pseudocount,
"rclr" = { x[x == 0] <- -Inf # x==0 was set: should be safe
exp(sweep(x, para$margin, para$means, "+"))},
"wisconsin" = { x <- sweep(x, 1, para$total, "*")
sweep(x, 2, para$max, "*") },
stop("no backtransformation available for method ",
sQuote(method))
)
if (zap)
x[abs(x) < sqrt(.Machine$double.eps)] <- 0
x
}
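## Illustrative round-trip sketch (editorial addition), assuming vegan is
## attached: standardize and back-transform, recovering the original data
## up to rounding error.
data(varespec)
x.hel <- decostand(varespec, "hellinger")
range(decobackstand(x.hel) - varespec)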
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/decostand.R
|
## evaluate user-defined dissimilarity function.
`designdist` <-
function (x, method = "(A+B-2*J)/(A+B)",
terms = c("binary", "quadratic", "minimum"),
abcd = FALSE, alphagamma = FALSE, name, maxdist)
{
terms <- match.arg(terms)
if ((abcd || alphagamma) && terms != "binary")
warning("perhaps terms should be 'binary' with 'abcd' or 'alphagamma'?")
x <- as.matrix(x)
## only do numeric data for which "pa", minimum and quadratic make sense
if (!(is.numeric(x) || is.logical(x)))
stop("input data must be numeric")
N <- nrow(x)
P <- ncol(x)
if (terms == "binary")
x <- ifelse(x > 0, 1, 0)
if (terms == "binary" || terms == "quadratic")
x <- tcrossprod(x)
if (terms == "minimum")
x <- .Call(do_minterms, as.matrix(x))
d <- diag(x)
A <- as.dist(outer(rep(1, N), d))
B <- as.dist(outer(d, rep(1, N)))
J <- as.dist(x)
## 2x2 contingency table notation
if (abcd) {
a <- J
b <- A - J
c <- B - J
d <- P - A - B + J
}
## beta diversity notation
if (alphagamma) {
alpha <- (A + B)/2
gamma <- A + B - J
delta <- abs(A - B)/2
}
dis <- eval(parse(text = method))
attributes(dis) <- attributes(J)
attr(dis, "call") <- match.call()
if (missing(name))
attr(dis, "method") <- paste(terms, method)
else attr(dis, "method") <- name
if (!missing(maxdist)) {
if (!is.na(maxdist) && any(dis > maxdist)) {
warning("'maxdist' was lower than some distances: setting to NA")
maxdist <- NA
}
attr(dis, "maxdist") <- maxdist
}
dis
}
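## Illustrative sketch (editorial addition), assuming vegan is attached:
## Sorensen dissimilarity written as a designdist formula with binary terms.
data(dune)
d.sor <- designdist(dune, "(A+B-2*J)/(A+B)", terms = "binary")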
## similar to designdist, but uses Chao's terms U & V instead of J, A,
## B (or their derived terms) in designdist. I considered having this
## as an option 'terms = "chao"' in designdist, but there really is so
## little in common and too many if's needed.
`chaodist` <-
function(x, method = "1 - 2*U*V/(U+V)", name)
{
x <- as.matrix(x)
## need integer data
if (!identical(all.equal(x, round(x)), TRUE))
stop("function accepts only integers (counts)")
N <- nrow(x)
## do_chaoterms returns a list with U, V which are non-classed
## vectors where the order of terms matches 'dist' objects
vu <- .Call(do_chaoterms, x)
U <- vu$U
V <- vu$V
## dissimilarities
dis <- eval(parse(text = method))
dis <- structure(dis, Size = N, Labels = rownames(x), Diag = FALSE,
Upper = FALSE, call = match.call(), class = "dist")
if (missing(name))
attr(dis, "method") <- paste("chao", method)
else
attr(dis, "method") <- name
dis
}
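## Illustrative usage sketch (editorial addition), assuming vegan and its
## 'BCI' count data are attached: Chao's abundance-based dissimilarity.
data(BCI)
chaodist(BCI[1:5, ], "1 - 2*U*V/(U+V)")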
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/designdist.R
|
`deviance.cca` <-
function(object, ...)
{
object$CA$tot.chi * object$grand.total
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/deviance.cca.R
|
`deviance.rda` <-
function(object, ...)
{
object$CA$tot.chi * (nobs(object) - 1)
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/deviance.rda.R
|
`dispindmorisita` <-
function(x, unique.rm=FALSE, crit=0.05, na.rm=FALSE)
{
x <- as.matrix(x)
n <- nrow(x)
p <- ncol(x)
Imor <- apply(x, 2, function(y) n * ((sum(y^2) - sum(y)) / (sum(y)^2 - sum(y))))
Smor <- Imor
chicr <- qchisq(c(0+crit/2, 1-crit/2), n-1, lower.tail=FALSE)
Muni <- apply(x, 2, function(y) (chicr[2] - n + sum(y)) / (sum(y) - 1))
Mclu <- apply(x, 2, function(y) (chicr[1] - n + sum(y)) / (sum(y) - 1))
rs <- colSums(x, na.rm=na.rm)
pchi <- pchisq(Imor * (rs - 1) + n - rs, n-1, lower.tail=FALSE)
for (i in 1:p) {
if (rs[i] > 1) {
if (Imor[i] >= Mclu[i] && Mclu[i] > 1)
Smor[i] <- 0.5 + 0.5 * ((Imor[i] - Mclu[i]) / (n - Mclu[i]))
if (Mclu[i] > Imor[i] && Imor[i] >=1)
Smor[i] <- 0.5 * ((Imor[i] - 1) / (Mclu[i] - 1))
if (1 > Imor[i] && Imor[i] > Muni[i])
Smor[i] <- -0.5 * ((Imor[i] - 1) / (Muni[i] - 1))
if (1 > Muni[i] && Muni[i] > Imor[i])
Smor[i] <- -0.5 + 0.5 * ((Imor[i] - Muni[i]) / Muni[i])
}
}
out <- data.frame(imor = Imor, mclu = Mclu, muni = Muni,
imst = Smor, pchisq = pchi)
usp <- which(colSums(x > 0) == 1)
if (unique.rm && length(usp) != 0)
out <- out[-usp,]
out
}
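## Illustrative usage sketch (editorial addition) on invented count data:
## 5 sites, 3 species.
set.seed(1)
cnt <- matrix(rpois(15, lambda = 3), nrow = 5)
dispindmorisita(cnt)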
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/dispindmorisita.R
|
`dispweight` <-
function(comm, groups, nsimul = 999, nullmodel = "c0_ind",
plimit = 0.05)
{
## no groups?
if (missing(groups))
groups <- rep(1, nrow(comm))
## Remove empty levels of 'groups' or this fails cryptically (and
## take care 'groups' is a factor)
groups <- factor(groups)
## Statistic is the sum of squared differences by 'groups'
means <- apply(comm, 2, function(x) tapply(x, groups, mean))
## handle 1-level factors: all sites belong to the same 'groups'
if (is.null(dim(means)))
means <- matrix(means, nrow=1, ncol = length(means),
dimnames = list(levels(groups), names(means)))
## expand to matrix of species means
fitted <- means[groups,]
dhat <- colSums((comm - fitted)^2/fitted, na.rm = TRUE)
## Get df for non-zero blocks of species. Completely ignoring
## all-zero blocks for species sounds strange, but was done in the
## original paper, and we follow it here. However, this was not done
## for significance tests, and only concerns 'D' and 'weights'.
nreps <- table(groups)
div <- colSums(sweep(means > 0, 1, nreps - 1, "*"))
## "significance" of overdispersion is assessed from Chi-square
## evaluated separately for each species. This means fixing only
## marginal totals for species but letting row marginals vary
## freely, unlike in standard Chi-square where both margins are
## fixed. In vegan this is achieved by nullmodel 'c0_ind'. Instead
## of one overall simulation, the null model is generated separately
## for each level of 'groups'.
chisq <- function(x) {
fitted <- colMeans(x)
colSums(sweep(x, 2, fitted)^2, na.rm = TRUE) / fitted
}
simulated <- matrix(0, nrow = ncol(comm), ncol = nsimul)
for (lev in levels(groups)) {
nm <- nullmodel(comm[groups == lev,], nullmodel)
if (nm$commsim$binary)
stop("'binary' nullmodel cannot be used")
tmp <- apply(simulate(nm, nsimul), 3, chisq)
ok <- !is.na(tmp)
simulated[ok] <- simulated[ok] + tmp[ok]
}
## p value based on raw dhat, then we divide
p <- (rowSums(dhat <= simulated) + 1) / (nsimul + 1)
dhat <- dhat/div
weights <- ifelse(p <= plimit, 1/dhat, 1)
comm <- sweep(comm, 2, weights, "*")
attr(comm, "D") <- dhat
attr(comm, "df") <- div
attr(comm, "p") <- p
attr(comm, "weights") <- weights
attr(comm, "nsimul") <- nsimul
attr(comm, "nullmodel") <- nullmodel
class(comm) <- c("dispweight", class(comm))
comm
}
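## Illustrative usage sketch (editorial addition), assuming vegan and its
## 'mite' data sets are attached; few simulations only to keep this fast.
data(mite, mite.env)
dw <- dispweight(mite, mite.env$Shrub, nsimul = 99)
attr(dw, "D")[1:5]  # dispersion statistics for the first five species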
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/dispweight.R
|
`distconnected` <-
function(dis, toolong = 1, trace = TRUE)
{
n <- attr(dis, "Size")
out <- .C(stepabyss, dis = as.double(dis), n = as.integer(n),
toolong = as.double(toolong), val = integer(n),
NAOK = TRUE)$val
if (trace) {
cat("Connectivity of distance matrix with threshold dissimilarity",
toolong,"\n")
n <- length(unique(out))
if (n == 1)
cat("Data are connected\n")
else {
cat("Data are disconnected:", n, "groups\n")
print(table(out, dnn="Group sizes"))
}
}
out
}
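## Illustrative usage sketch (editorial addition), assuming vegan is
## attached: check whether a Bray-Curtis dissimilarity matrix is connected
## at threshold 0.9.
data(dune)
distconnected(vegdist(dune), toolong = 0.9)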
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/distconnected.R
|
`diversity` <-
function (x, index = "shannon", groups, equalize.groups = FALSE,
MARGIN = 1, base = exp(1))
{
x <- drop(as.matrix(x))
if (!is.numeric(x))
stop("input data must be numeric")
if (any(x < 0, na.rm = TRUE))
stop("input data must be non-negative")
## sum communities for groups
if (!missing(groups)) {
if (MARGIN == 2)
x <- t(x)
if (length(groups) == 1) # total for all SU
groups <- rep(groups, NROW(x))
if (equalize.groups)
x <- decostand(x, "total")
x <- aggregate(x, list(groups), sum) # pool SUs by groups
rownames(x) <- x[,1]
x <- x[,-1, drop=FALSE]
if (MARGIN == 2)
x <- t(x)
}
INDICES <- c("shannon", "simpson", "invsimpson")
index <- match.arg(index, INDICES)
if (length(dim(x)) > 1) {
total <- apply(x, MARGIN, sum)
x <- sweep(x, MARGIN, total, "/")
} else {
x <- x/(total <- sum(x))
}
if (index == "shannon")
x <- -x * log(x, base)
else
x <- x*x
if (length(dim(x)) > 1)
H <- apply(x, MARGIN, sum, na.rm = TRUE)
else
H <- sum(x, na.rm = TRUE)
if (index == "simpson")
H <- 1 - H
else if (index == "invsimpson")
H <- 1/H
## check NA in data
if (any(NAS <- is.na(total)))
H[NAS] <- NA
H
}
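## Illustrative usage sketch (editorial addition), assuming vegan and its
## 'BCI' data are attached:
data(BCI)
H <- diversity(BCI)                 # Shannon entropy per site
D2 <- diversity(BCI, "invsimpson")  # inverse Simpson per site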
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/diversity.R
|
### Support functions for decorana
## Hill's downweighting
## An exported function that can be called outside decorana
`downweight` <-
function (veg, fraction = 5)
{
Const1 <- 1e-10
if (fraction < 1)
fraction <- 1/fraction
veg <- as.matrix(veg)
yeig1 <- colSums(veg)
y2 <- colSums(veg^2) + Const1
y2 <- yeig1^2/y2
amax <- max(y2)/fraction
v <- rep(1, ncol(veg))
downers <- y2 < amax
v[downers] <- (y2/amax)[downers]
veg <- sweep(veg, 2, v, "*")
attr(veg, "v") <- v
attr(veg, "fraction") <- fraction
veg
}
## Hill's piecewise transformation. Values of 'before' are replaced with
## values of 'after', and intermediate values with linear interpolation.
## Not exported: if you think you need something like this, find a
## better tool in R.
`beforeafter` <-
function(x, before, after)
{
if (is.null(before) || is.null(after))
stop("both 'before' and 'after' must be given", call. = FALSE)
if (is.unsorted(before))
stop("'before' must be sorted", call. = FALSE)
if (length(before) != length(after))
stop("'before' and 'after' must have same lengths", call. = FALSE)
for(i in seq_len(nrow(x))) {
k <- x[i,] > 0
x[i, k] <- approx(before, after, x[i, k], rule = 2)$y
}
x
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/downweight.R
|
`drop1.cca` <-
function(object, scope, test = c("none", "permutation"),
permutations = how(nperm = 199), ...)
{
if (inherits(object, "prc"))
stop("'step'/'drop1' cannot be used for 'prc' objects")
if (is.null(object$terms))
stop("ordination model must be fitted using formula")
test <- match.arg(test)
out <- NextMethod("drop1", object, test="none", ...)
cl <- class(out)
if (test == "permutation") {
rn <- rownames(out)[-1]
if (missing(scope))
scope <- rn
else if (!is.character(scope))
scope <- drop.scope(scope)
adds <- anova(object, by = "margin", scope = scope,
permutations = permutations, ...)
out <- cbind(out, rbind(NA, adds[rn,3:4]))
class(out) <- cl
}
out
}
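## Illustrative usage sketch (editorial addition), assuming vegan,
## 'varespec' and 'varechem' are attached: drop single terms with a
## permutation test; few permutations only to keep this fast.
data(varespec, varechem)
m <- cca(varespec ~ Al + P + K, varechem)
drop1(m, test = "permutation", permutations = how(nperm = 99))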
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/drop1.cca.R
|
"eigengrad" <-
function (x, w)
{
attr(wascores(x, w, expand=TRUE), "shrinkage")
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/eigengrad.R
|
# Extract eigenvalues from an object that has them
`eigenvals` <-
function(x, ...)
{
UseMethod("eigenvals")
}
`eigenvals.default`<-
function(x, ...)
{
## svd and eigen return an unspecified 'list'; see if this could be
## either of them (as does cmdscale)
out <- NA
if (is.list(x)) {
## eigen
if (length(x) == 2 && all(names(x) %in% c("values", "vectors")))
out <- x$values
## svd: return squares of singular values
else if (length(x) == 3 && all(names(x) %in% c("d", "u", "v")))
out <- x$d^2
## cmdscale() will return all eigenvalues from R 2.12.1
else if (all(c("points","eig","GOF") %in% names(x)))
out <- x$eig
}
class(out) <- "eigenvals"
out
}
## squares of sdev
`eigenvals.prcomp` <-
function(x, ...)
{
out <- x$sdev^2
names(out) <- colnames(x$rotation)
## honour prcomp(..., rank.=) which only requests rank. eigenvalues
if (ncol(x$rotation) < length(out)) {
sumev <- sum(out)
out <- out[seq_len(ncol(x$rotation))]
attr(out, "sumev") <- sumev
}
class(out) <- "eigenvals"
out
}
## squares of sdev
`eigenvals.princomp` <-
function(x, ...)
{
out <- x$sdev^2
class(out) <- "eigenvals"
out
}
## concatenate constrained and unconstrained eigenvalues in cca, rda
## and capscale (vegan) -- ignore pCCA component
`eigenvals.cca` <- function(x, model = c("all", "unconstrained", "constrained"),
constrained = NULL, ...)
{
out <- if (!is.null(constrained)) {
## old behaviour
message("Argument `constrained` is deprecated; use `model` instead.")
if (constrained) {
x$CCA$eig
} else {
c(x$CCA$eig, x$CA$eig)
}
} else {
## new behaviour
model <- match.arg(model)
if (identical(model, "all")) {
c(x$CCA$eig, x$CA$eig)
} else if (identical(model, "unconstrained")) {
x$CA$eig
} else {
x$CCA$eig
}
}
if (!is.null(out)) {
class(out) <- "eigenvals"
}
out
}
## wcmdscale (in vegan)
`eigenvals.wcmdscale` <-
function(x, ...)
{
out <- x$eig
class(out) <- "eigenvals"
out
}
## pcnm (in vegan)
`eigenvals.pcnm` <-
function(x, ...)
{
out <- x$values
class(out) <- "eigenvals"
out
}
## betadisper (vegan)
`eigenvals.betadisper` <- function(x, ...) {
out <- x$eig
class(out) <- "eigenvals"
out
}
## dudi objects of ade4
`eigenvals.dudi` <-
function(x, ...)
{
out <- x$eig
class(out) <- "eigenvals"
out
}
## labdsv::pco
`eigenvals.pco` <-
function(x, ...)
{
out <- x$eig
class(out) <- "eigenvals"
out
}
## labdsv::pca
`eigenvals.pca` <-
function(x, ...)
{
out <- x$sdev^2
## pca() may return only some first eigenvalues
if ((seig <- sum(out)) < x$totdev) {
names(out) <- paste("PC", seq_along(out), sep="")
out <- c(out, "Rest" = x$totdev - seig)
}
class(out) <- "eigenvals"
out
}
`eigenvals.decorana` <-
function(x, kind = c("additive", "axiswise", "decorana"), ...)
{
kind <- match.arg(kind)
if (x$ira == 1) {
out <- x$evals
attr(out, "sumev") <- x$totchi
} else {
out <- switch(kind,
"additive" = x$evals.ortho,
"axiswise" = x$evals,
"decorana" = x$evals.decorana)
if (kind == "additive")
attr(out, "sumev") <- x$totchi
else
attr(out, "sumev") <- NA
}
class(out) <- "eigenvals"
out
}
`print.eigenvals` <-
function(x, ...)
{
print(zapsmall(unclass(x), ...))
invisible(x)
}
`summary.eigenvals` <- function(object, ...) {
## dbRDA can have negative eigenvalues: do not give cumulative
## proportions
if (!is.null(attr(object, "sumev"))) {
sumev <- attr(object, "sumev")
} else {
sumev <- sum(object)
}
vars <- object/sumev
cumvars <- if (!anyNA(vars) && all(vars >= 0)) {
cumsum(vars)
} else {
NA
}
out <- rbind(`Eigenvalue` = object,
`Proportion Explained` = abs(vars),
`Cumulative Proportion` = cumvars)
class(out) <- c("summary.eigenvals", "matrix")
out
}
## before R svn commit 70391 we used print.summary.prcomp, but now we
## need our own version that is similar to pre-70391 R function
`print.summary.eigenvals` <-
function(x, digits = max(3L, getOption("digits") - 3L), ...)
{
cat("Importance of components:\n")
class(x) <- "matrix"
print(x, digits = digits, ...)
invisible(x)
}
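## Illustrative usage sketch (editorial addition), assuming vegan and
## 'varespec' are attached:
data(varespec)
ev <- eigenvals(cca(varespec))
summary(ev)  # eigenvalues with proportions explained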
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/eigenvals.R
|
"envfit" <-
function(...)
{
UseMethod("envfit")
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/envfit.R
|
`envfit.default` <-
function (ord, env, permutations = 999, strata = NULL, choices = c(1, 2),
display = "sites", w = weights(ord, display), na.rm = FALSE, ...)
{
weights.default <- function(object, ...) NULL
w <- eval(w)
vectors <- NULL
factors <- NULL
na.action <- NULL
X <- scores(ord, display = display, choices = choices, ...)
keep <- complete.cases(X) & complete.cases(env)
if (any(!keep)) {
if (!na.rm)
stop("missing values in data: consider na.rm = TRUE")
X <- X[keep,, drop=FALSE]
## drop any lost levels, explicitly don't include NA as a level
env <- droplevels(env[keep,, drop=FALSE], exclude = NA)
w <- w[keep]
na.action <- structure(seq_along(keep)[!keep], class="omit")
}
## make permutation matrix for all variables handled in the next loop
nr <- nrow(X)
permat <- getPermuteMatrix(permutations, nr, strata = strata)
if (ncol(permat) != nr)
stop(gettextf("'permutations' have %d columns, but data have %d rows",
ncol(permat), nr))
if (is.data.frame(env)) {
vects <- sapply(env, is.numeric)
if (any(!vects)) { # have factors
Pfac <- env[, !vects, drop = FALSE]
P <- env[, vects, drop = FALSE]
if (length(P)) { # also have vectors
vectors <- vectorfit(X, P, permutations, strata,
choices, w = w, ...)
}
factors <- factorfit(X, Pfac, permutations, strata,
choices, w = w, ...)
sol <- list(vectors = vectors, factors = factors)
}
else vectors <- vectorfit(X, env, permutations, strata,
choices, w = w, ...)
}
else vectors <- vectorfit(X, env, permutations, strata,
choices, w = w, ...)
sol <- list(vectors = vectors, factors = factors)
if (!is.null(na.action))
sol$na.action <- na.action
class(sol) <- "envfit"
sol
}
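## Illustrative usage sketch (editorial addition), assuming vegan,
## 'varespec' and 'varechem' are attached; few permutations only to keep
## this fast.
data(varespec, varechem)
ord <- metaMDS(varespec, trace = 0)
envfit(ord, varechem, permutations = 199)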
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/envfit.default.R
|
`envfit.formula` <-
function(formula, data, ...)
{
if (missing(data))
data <- environment(formula)
X <- formula[[2]]
X <- eval(X, environment(formula), enclos = .GlobalEnv)
formula[[2]] <- NULL
P <- model.frame(formula, data, na.action = na.pass)
envfit.default(X, P, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/envfit.formula.R
|
##" Individual based accumulation model. Similar to poolaccum but uses
##estimateR. Inherits from "poolaccum" class and uses its methods.
`estaccumR` <-
function(x, permutations = 100, parallel = getOption("mc.cores"))
{
n <- nrow(x)
N <- seq_len(n)
estFun <- function(idx) {
estimateR(apply(x[idx,], 2, cumsum))[c(1,2,4),]
}
permat <- getPermuteMatrix(permutations, n)
nperm <- nrow(permat)
## parallel processing
if (is.null(parallel))
parallel <- 1
hasClus <- inherits(parallel, "cluster")
if (hasClus || parallel > 1) {
if(.Platform$OS.type == "unix" && !hasClus) {
tmp <- mclapply(1:nperm, function(i)
estFun(permat[i,]),
mc.cores = parallel)
} else {
if (!hasClus) {
parallel <- makeCluster(parallel)
}
tmp <- parLapply(parallel, 1:nperm, function(i) estFun(permat[i,]))
if (!hasClus)
stopCluster(parallel)
}
} else {
tmp <- lapply(1:nperm, function(i) estFun(permat[i,]))
}
S <- sapply(tmp, function(x) x[1,])
chao <- sapply(tmp, function(x) x[2,])
ace <- sapply(tmp, function(x) x[3,])
means <- cbind(N = N, S = rowMeans(S), Chao = rowMeans(chao),
ACE = rowMeans(ace))
out <- list(S = S, chao = chao, ace = ace, N = N, means = means)
attr(out, "control") <- attr(permat, "control")
class(out) <- c("estaccumR", "poolaccum")
out
}
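## Illustrative usage sketch (editorial addition), assuming vegan and 'BCI'
## are attached; a small number of permutations keeps this fast.
data(BCI)
est <- estaccumR(BCI, permutations = 49)
head(est$means)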
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/estaccumR.R
|
"estimateR" <-
function(x, ...) UseMethod("estimateR")
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/estimateR.R
|
"estimateR.data.frame" <-
function(x, ...) apply(x, 1, estimateR, ...)
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/estimateR.data.frame.R
|
`estimateR.default` <-
function (x, ...)
{
gradF <- function(a, i) {
.expr4 <- sum(i * a)
.expr7 <- 1 - a[1]/(1 - sum(i * a))
.expr8 <- 1/.expr7
.expr10 <- sum(a)
.expr12 <- sum(i * (i - 1) * a)
.expr13 <- sum(a) * sum(i * (i - 1) * a)
.expr14 <- .expr7 * .expr4
.expr15 <- .expr4 - 1
.expr16 <- .expr14 * .expr15
.expr18 <- .expr13/.expr16 - 1
.expr20 <- sum(a) + a[1] * .expr18
.expr23 <- (1 - sum(i * a))^2
.expr25 <- 1/(1 - sum(i * a)) + a[1]/(1 - sum(i * a))^2
.expr26 <- .expr7^2
.expr35 <- .expr16^2
Grad <- a[1] * i/(.expr23 * .expr26) * .expr20 + .expr8 *
(1 + a[1] * ((.expr12 + (.expr10 * i * (i - 1)))/.expr16 -
.expr13 * ((.expr7 * i - (a[1] * i/.expr23) *
.expr4) * .expr15 + .expr14 * i)/.expr35))
Grad[1] <- .expr25/.expr26 * .expr20 + .expr8 * (1 +
(.expr18 + a[1] * (.expr12/.expr16 - .expr13 * ((.expr7 -
.expr25 * .expr4) * .expr15 + .expr14)/.expr35)))
Grad
}
## we need integers
if (!identical(all.equal(x, round(x)), TRUE))
stop("function accepts only integers (counts)")
## and they must be exact
if (!is.integer(x))
x <- round(x)
X <- x[x > 0]
## N <- sum(X) # do NOT use small-sample correction
SSC <- 1 # (N-1)/N # do NOT use small-sample correction
T.X <- table(X)
S.obs <- length(X)
S.rare <- sum(T.X[as.numeric(names(T.X)) <= 10])
S.abund <- sum(T.X[as.numeric(names(T.X)) > 10])
N.rare <- sum(X[X < 11])
i <- 1:10
COUNT <- function(i, counts) {
length(counts[counts == i])
}
a <- sapply(i, COUNT, X)
## EstimateS uses basic Chao only if a[2] > 0, and switches to
## bias-corrected version only if a[2] == 0. However, we always
## use the bias-corrected form. The switching code is commented out so
## that it is easy to put back.
##if (a[2] > 0)
## S.Chao1 <- S.obs + SSC * a[1]^2/2/a[2]
##else if (a[1] > 0)
##
S.Chao1 <- S.obs + SSC * a[1]*(a[1]-1) / (a[2]+1)/2
##else
## S.Chao1 <- S.obs
Deriv.Ch1 <- gradF(a, i)
## The commonly used variance estimator is wrong for bias-reduced
## Chao estimate. It is based on the variance estimator of basic
## Chao estimate, but replaces the basic terms with corresponding
## terms in the bias-reduced estimate. The following is directly
## derived from the bias-reduced estimate.
## The commonly used one (for instance, in EstimateS):
##sd.Chao1 <-
## sqrt(SSC*(a[1]*(a[1]-1)/2/(a[2]+1) +
## SSC*(a[1]*(2*a[1]-1)^2/4/(a[2]+1)^2 +
## a[1]^2*a[2]*(a[1]-1)^2/4/(a[2]+1)^4)))
sd.Chao1 <- (a[1]*((-a[2]^2+(-2*a[2]-a[1])*a[1])*a[1] +
(-1+(-4+(-5-2*a[2])*a[2])*a[2] +
(-2+(-1+(2*a[2]+2)*a[2])*a[2] +
(4+(6+4*a[2])*a[2] + a[1]*a[2])*a[1])*a[1])*S.Chao1))/
4/(a[2]+1)^4/S.Chao1
sd.Chao1 <- sqrt(sd.Chao1)
C.ace <- 1 - a[1]/N.rare
i <- seq_along(a)
thing <- i * (i - 1) * a
Gam <- sum(thing) * S.rare/(C.ace * N.rare * (N.rare - 1)) -
1
S.ACE <- S.abund + S.rare/C.ace + max(Gam, 0) * a[1]/C.ace
sd.ACE <- sqrt(sum(Deriv.Ch1 %*% t(Deriv.Ch1) * (diag(a) -
a %*% t(a)/S.ACE)))
out <- list(S.obs = S.obs, S.chao1 = S.Chao1, se.chao1 = sd.Chao1,
S.ACE = S.ACE, se.ACE = sd.ACE)
out <- unlist(out)
out
}
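## Illustrative usage sketch (editorial addition), assuming vegan and 'BCI'
## are attached: Chao1 and ACE richness estimates for the first two plots.
data(BCI)
estimateR(BCI[1:2, ])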
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/estimateR.default.R
|
"estimateR.matrix" <-
function(x, ...) apply(x, 1, estimateR, ...)
|
/scratch/gouwar.j/cran-all/cranData/vegan/R/estimateR.matrix.R
|