# Copyright (c) 2016 - 2024, Adrian Dusa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, in whole or in part, are permitted provided that the
# following conditions are met:
# * Redistributions of enclosed data must cite this package according to
# the citation("venn") command specific to this R package, along with the
# appropriate weblink to the CRAN package "venn".
# * Redistributions of enclosed data in other R packages must list package
# "venn" as a hard dependency in the Imports: field.
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The names of its contributors may NOT be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL ADRIAN DUSA BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
`extractInfo` <- function(
x, what = c("counts", "intersections", "both"), use.names = FALSE
) {
what <- match.arg(what)
if (!is.list(x)) {
admisc::stopError("Argument x should be a list")
}
if (length(x) > 7) {
x <- x[seq(7)]
}
nofsets <- length(x)
if (is.null(names(x)) || any(names(x) == "")) {
names(x) <- LETTERS[seq(nofsets)]
}
snames <- names(x)
tt <- sapply(
rev(seq(nofsets)),
function(x) {
rep.int(
c(sapply(0:1, function(y) rep.int(y, 2^(x - 1)))),
2^nofsets / 2^x
)
}
)
colnames(tt) <- snames
intersections <- apply(tt, 1,
function(y) {
setdiff(Reduce(intersect, x[y == 1]), unlist(x[y == 0]))
}
)
if (!isTRUE(use.names)) {
snames <- seq(length(snames))
}
names(intersections) <- apply(
tt,
1,
function(x) paste(snames[x == 1], collapse = ":")
)
ttcts <- unlist(lapply(intersections, length))
intersections <- intersections[ttcts > 0]
tt <- as.data.frame(cbind(tt, counts = ttcts))
if (what == "counts") {
return(tt)
}
if (what == "intersections") {
return(intersections)
}
return(list(tt, intersections))
}
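# Usage sketch (not part of the package source; set names are illustrative):
# given a list of (optionally named) vectors, extractInfo() returns the truth
# table with a "counts" column, the named list of disjoint intersections, or both.
# x <- list(A = c("a", "b", "c"), B = c("b", "c", "d"))
# extractInfo(x, what = "counts")         # data frame: one row per region
# extractInfo(x, what = "intersections")  # e.g. $`A:B` contains "b" and "c"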
|
/scratch/gouwar.j/cran-all/cranData/venn/R/extractInfo.R
|
# Copyright (c) 2016 - 2024, Adrian Dusa. All rights reserved. License terms as in the header above.
`getCentroid` <-
function(data) {
return(lapply(data, function(x) {
if (all(is.na(x[nrow(x), ]))) {
x <- x[-nrow(x), ]
}
if (nrow(x) > 10) {
vals <- seq(1, nrow(x), by = floor(nrow(x)/10))
x <- x[c(vals, nrow(x)), ]
}
asum <- cxsum <- cysum <- 0
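# Shoelace (Green's theorem) accumulation: asum ends up as twice the signed
# polygon area, so the centroid is (cxsum, cysum) / (3 * asum) = / (6 * area).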
for (i in seq(2, nrow(x))) {
asum <- asum + x$x[i - 1]*x$y[i] - x$x[i]*x$y[i - 1]
cxsum <- cxsum + (x$x[i - 1] + x$x[i])*(x$x[i - 1]*x$y[i] - x$x[i]*x$y[i - 1])
cysum <- cysum + (x$y[i - 1] + x$y[i])*(x$x[i - 1]*x$y[i] - x$x[i]*x$y[i - 1])
}
return(c((1/(3*asum))*cxsum, (1/(3*asum))*cysum))
}))
}
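# Usage sketch (assumption: `data` is a list of data frames with numeric
# columns x and y tracing a closed polygon, repeating the first vertex):
# square <- data.frame(x = c(0, 1, 1, 0, 0), y = c(0, 0, 1, 1, 0))
# getCentroid(list(square))  # approximately c(0.5, 0.5)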
|
/scratch/gouwar.j/cran-all/cranData/venn/R/getCentroid.R
|
# Copyright (c) 2016 - 2024, Adrian Dusa. All rights reserved. License terms as in the header above.
`getZones` <- function(area, snames, ellipse = FALSE) {
funargs <- unlist(lapply(match.call(), deparse)[-1])
if (is.character(area)) {
x <- gsub("[[:space:]]", "", area)
if (!all(gsub("0|1|-", "", x) == "")) {
if (any(grepl("\\$solution", funargs["area"]))) {
obj <- get(unlist(strsplit(funargs["area"], split = "[$]"))[1])
snames <- obj$tt$options$conditions
x <- paste(x, collapse = " + ")
}
x <- gsub("[[:space:]]", "", x)
if (!all(gsub("0|1|-|\\+", "", x) == "")) {
x <- admisc::translate(x, snames = snames)
snames <- colnames(x)
x <- paste(apply(x, 1, function(y) {
y[y < 0] <- "-"
paste(y, collapse="")
}), collapse = "+")
}
if (!all(gsub("0|1|-|\\+", "", x) == "")) {
cat("\n")
stop("Invalid specification of the area.\n\n", call. = FALSE)
}
area <- unlist(strsplit(x, split="\\+"))
}
nofsets <- unique(nchar(area))
if (length(nofsets) > 1) {
cat("\n")
stop("Different numbers of sets in the area.\n\n", call. = FALSE)
}
if (!identical(unique(gsub("1|0|-", "", area)), "")) {
cat("\n")
stop("The arguent \"area\" should only contain \"1\"s, \"0\"s and dashes \"-\".\n\n", call. = FALSE)
}
area <- sort(unique(unlist(lapply(strsplit(area, split = ""), function(x) {
dashes <- x == "-"
if (any(dashes)) {
sumdash <- sum(dashes)
tt <- sapply(rev(seq(sumdash)), function(x) {
rep.int(c(sapply(0:1, function(y) rep.int(y, 2^(x - 1)))), 2^sumdash/2^x)})
for (i in as.numeric(x[!dashes])) {
tt <- cbind(tt, i)
}
mbase <- rev(c(1, cumprod(rev(rep(2, ncol(tt))))))[-1]
tt <- tt[, match(seq(ncol(tt)), c(which(dashes), which(!dashes)))]
return(as.vector(tt %*% mbase))
}
else {
x <- as.numeric(x)
mbase <- rev(c(1, cumprod(rev(rep(2, length(x))))))[-1]
return(sum(x * mbase))
}
}))))
}
else {
nofsets <- snames
}
area <- area + 1
if (nofsets < 4 | nofsets > 5) {
ellipse <- FALSE
}
if (identical(area, 1)) {
area <- seq(2^nofsets)[-1]
}
if (length(area) > 1) {
checkz <- logical(length(area))
names(checkz) <- area
checkz[1] <- TRUE
result <- list()
while(!all(checkz)) {
checkz <- checkZone(as.numeric(names(checkz)[1]), area, checkz, nofsets, ib, ellipse)
result[[length(result) + 1]] <- as.numeric(names(checkz)[checkz])
area <- area[!checkz]
checkz <- checkz[!checkz]
if (length(checkz) > 0) {
checkz[1] <- TRUE
}
}
}
else {
result <- list(area)
}
result <- lapply(result, function(x) {
b <- ib$b[ib$s == nofsets & ib$v == as.numeric(ellipse) & is.element(ib$i, x)]
if (any(duplicated(b))) {
b <- setdiff(b, b[duplicated(b)])
}
v2 <- borders[borders$s == nofsets & borders$v == as.numeric(ellipse) & borders$b == b[1], c("x", "y")]
v2 <- v2[-nrow(v2), ]
ends <- as.numeric(v2[nrow(v2), ])
checkb <- logical(length(b))
names(checkb) <- b
checkb[1] <- TRUE
counter <- 0
while(!all(checkb)) {
for (i in which(!checkb)) {
temp <- borders[borders$s == nofsets & borders$v == as.numeric(ellipse) & borders$b == b[i], c("x", "y")]
flag <- FALSE
if (all(ends == as.numeric(temp[1, ]))) {
v2 <- rbind(v2, temp[-nrow(temp), ])
checkb[i] <- TRUE
}
else if (all(ends == as.numeric(temp[nrow(temp) - 1, ]))) {
temp <- temp[-nrow(temp), ]
v2 <- rbind(v2, temp[seq(nrow(temp), 1), ])
checkb[i] <- TRUE
}
if (checkb[i]) {
ends <- as.numeric(v2[nrow(v2), ])
}
}
counter <- counter + 1
if (counter > length(checkb)) {
cat("\n")
stop("Unknown error.\n\n", call. = FALSE)
}
}
return(rbind(v2, rep(NA, 2)))
})
return(result)
}
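# Usage sketch (assumption: the package-internal `ib` and `borders` data
# objects are available, as when the package namespace is loaded):
# getZones("1--", snames = c("A", "B", "C"))  # region where A = 1, B and C free
# The result is a list of x/y coordinate blocks, one per contiguous zone.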
|
/scratch/gouwar.j/cran-all/cranData/venn/R/getZones.R
|
# Copyright (c) 2016 - 2024, Adrian Dusa. All rights reserved. License terms as in the header above.
`openPlot` <- function(plotsize = 15, par = TRUE, ggplot = FALSE, ...) {
if (ggplot) {
cf <- ggplot2::coord_fixed()
cf$default <- TRUE
return(ggplot2::ggplot() + ggplot2::geom_blank() +
cf +
ggplot2::coord_fixed(xlim = c(0, 1000), ylim = c(0, 1000)) +
ggplot2::theme(axis.line = ggplot2::element_blank(),
axis.text.x = ggplot2::element_blank(),
axis.text.y = ggplot2::element_blank(),
axis.ticks = ggplot2::element_blank(),
axis.title.x = ggplot2::element_blank(),
axis.title.y = ggplot2::element_blank(),
legend.position = "none",
panel.background = ggplot2::element_blank(),
panel.border = ggplot2::element_blank(),
panel.grid.major = ggplot2::element_blank(),
panel.grid.minor = ggplot2::element_blank(),
plot.background = ggplot2::element_blank(),
axis.ticks.length.x = ggplot2::unit(.25, "cm"),
axis.ticks.length.y = ggplot2::unit(.25, "cm"),
plot.title = ggplot2::element_text(size = 0),
plot.subtitle = ggplot2::element_text(size = 0),
plot.tag = ggplot2::element_text(size = 0),
plot.caption = ggplot2::element_text(size = 0)))
}
else {
if (par) {
if (dev.cur() == 1) {
dev.new(width = (plotsize + 1)/2.54, height = (plotsize + 1)/2.54)
}
par(new = FALSE, xpd = TRUE, mai = c(0.05, 0.05, 0.05, 0.05))
}
dots <- list(...)
plot(x = 0:1000, type = "n", axes = FALSE, asp = 1, xlab = "", ylab = "")
if (!is.null(dots$main)) {
title(main = dots$main, line = dots$line)
}
}
}
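# Usage sketch: opens an empty, axis-free canvas spanning roughly 0..1000 on
# both axes, returning a ggplot object when ggplot = TRUE (needs ggplot2):
# openPlot(plotsize = 15)          # base graphics device
# p <- openPlot(ggplot = TRUE)     # ggplot canvas to add layers onto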
|
/scratch/gouwar.j/cran-all/cranData/venn/R/openPlot.R
|
# Copyright (c) 2016 - 2024, Adrian Dusa. All rights reserved. License terms as in the header above.
`plotRules` <- function (
rules, zcolor = "bw", ellipse = FALSE, opacity = 0.3, allborders = TRUE,
box = TRUE, gvenn = NULL, ...
) {
zeroset <- matrix(c(0, 1000, 1000, 0, 0, 0, 0, 1000, 1000, 0), ncol = 2)
colnames(zeroset) <- c("x", "y")
default <- identical(zcolor, "style")
allsets <- TRUE
x <- NULL
y <- NULL
if (is.list(rules)) {
if (identical(zcolor, "bw")) {
zcolor <- rep("#96bc72", length.out = length(rules))
}
else if (identical(zcolor, "style")) {
zcolor <- colorRampPalette(c("red", "blue", "green", "yellow"))(length(rules))
}
else {
zcolor <- rep(zcolor, length.out = length(rules))
}
nofsets <- unique(unlist(lapply(rules, function(x) {
nchar(unlist(strsplit(x, split = "\\+")))
})))
tt <- sapply(rev(seq(nofsets)), function(x) {
rep(c(sapply(0:1, function(y) rep(y, 2^(x - 1)))), 2^nofsets/2^x)
})
rownames(tt) <- seq(nrow(tt)) - 1
rowns <- lapply(rules, function(x) {
sort(unique(unlist(lapply(strsplit(x, split = "\\+"), function(x) {
unlist(lapply(strsplit(x, split = ""), function(x) {
ttc <- tt
for (j in seq(length(x))) {
if (x[j] != "-") {
ttc <- subset(ttc, ttc[, j] == x[j])
}
}
return(as.numeric(rownames(ttc)))
}))
}))))
})
wholesets <- unlist(lapply(rules, function(x) {
ifelse(nchar(gsub("-", "", x)) == 1, as.vector(regexpr("[0-9]", x)), 0)
}))
allwhole <- all(wholesets > 0)
allsets <- length(rules) == nofsets & allwhole
if (nofsets < 4 | nofsets > 5) {
ellipse <- FALSE
}
zones <- vector("list", length(wholesets))
irregular <- unlist(lapply(rowns, function(x) any(x == 0)))
if (any(irregular)) {
for (i in which(irregular)) {
zones[[i]] <- getZones(rowns[[i]], nofsets, ellipse)
polygons <- rbind(zeroset, rep(NA, 2), zones[[i]][[1]])
polygons <- polygons[-nrow(polygons), ]
if (is.null(gvenn)) {
polypath(polygons, rule = "evenodd", col = adjustcolor(zcolor[i], alpha.f = opacity), border = NA)
} else {
gvenn <- gvenn + ggpolypath::geom_polypath(polygons, rule = "evenodd", col = adjustcolor(zcolor[i], alpha.f = opacity))
}
}
}
if (any(!irregular)) {
if (any(wholesets > 0)) {
for (i in which(wholesets > 0)) {
zones[[i]][[1]] <- sets[
sets$s == nofsets &
sets$v == as.numeric(ellipse) &
sets$n == wholesets[i],
c("x", "y")
]
}
}
if (any(wholesets == 0)) {
for (i in which(wholesets == 0 & !irregular)) {
zones[[i]] <- getZones(rowns[[i]], nofsets, ellipse)
}
}
for (i in seq(length(zones))) {
if (!irregular[i]) {
for (j in seq(length(zones[[i]]))) {
if (is.null(gvenn)) {
polygon(zones[[i]][[j]], col = adjustcolor(zcolor[i], alpha.f = opacity), border = NA)
} else {
gvenn <- gvenn + ggplot2::geom_polygon(data = zones[[i]][[j]], ggplot2::aes(x, y), fill = adjustcolor(zcolor[i], alpha.f = opacity))
}
}
}
}
}
}
else if (is.numeric(rules)) {
nofsets <- rules
allsets <- TRUE
allwhole <- TRUE
if (identical(zcolor, "style")) {
zcolor <- colorRampPalette(c("red", "yellow", "green", "blue"))(nofsets)
}
else if (!identical(zcolor, "bw")) {
zcolor <- rep(zcolor, length.out = nofsets)
}
if (nofsets < 4 | nofsets > 5) {
ellipse <- FALSE
}
}
else {
admisc::stopError("Something went wrong.")
}
other.args <- list(...)
if (box) {
if (is.null(gvenn)) {
lines(zeroset)
}
else {
gvenn <- gvenn + ggplot2::geom_path(data = as.data.frame(zeroset), ggplot2::aes(x, y))
}
}
if (!identical(zcolor, "bw")) {
bcolor <- rgb(t(col2rgb(zcolor)/1.4), maxColorValue = 255)
}
else {
bcolor <- "#000000"
}
if (allsets & allwhole) {
temp <- sets[sets$s == nofsets & sets$v == as.numeric(ellipse), c("x", "y")]
if (is.numeric(rules) & !identical(zcolor, "bw")) {
if (is.null(gvenn)) {
polygon(temp, col = adjustcolor(zcolor, alpha.f = opacity), border = NA)
}
else {
breaks <- which(apply(temp, 1, function(x) any(is.na(x))))
start <- 1
for (b in seq(length(breaks))) {
if (b > 1) start <- breaks[b - 1] + 1
gvenn <- gvenn + ggplot2::geom_polygon(data = temp[seq(start, breaks[b] - 1), ], ggplot2::aes(x, y), fill = adjustcolor(zcolor[b], alpha.f = opacity))
}
}
}
if (default) {
for (i in seq(nofsets)) {
temp <- sets[sets$s == nofsets & sets$v == as.numeric(ellipse) & sets$n == i, c("x", "y")]
if (is.null(gvenn)) {
suppressWarnings( lines(temp, col = bcolor[i]))
}
else {
breaks <- which(apply(temp, 1, function(x) any(is.na(x))))
start <- 1
for (b in seq(length(breaks))) {
if (b > 1) start <- breaks[b - 1] + 1
gvenn <- gvenn + ggplot2::geom_path(ggplot2::aes(x, y), data = temp[seq(start, breaks[b] - 1), ], col = bcolor[i])
}
}
}
}
else {
if (length(other.args) > 0) {
other.args <- lapply(other.args, function(x) {
rep(x, length.out = nofsets)
})
for (i in seq(nofsets)) {
plotdata <- sets[sets$s == nofsets & sets$v == as.numeric(ellipse) & sets$n == i, c("x", "y")]
if (is.null(gvenn)) {
seplines <- list(as.name("lines"), x = plotdata)
suppress <- list(as.name("suppressWarnings"))
for (j in names(other.args)) {
seplines[[j]] <- other.args[[j]][i]
}
suppress[[2]] <- as.call(seplines)
eval(as.call(suppress))
}
else {
seplines <- list(ggplot2::geom_path)
if (all(is.na(tail(plotdata, 1)))) {
plotdata <- plotdata[-nrow(plotdata), , drop = FALSE]
}
seplines$mapping <- ggplot2::aes(x, y)
seplines$data <- plotdata
for (j in names(other.args)) {
seplines[[j]] <- other.args[[j]][i]
}
gvenn <- gvenn + eval(as.call(seplines))
}
}
}
else {
temp <- sets[sets$s == nofsets & sets$v == as.numeric(ellipse), c("x", "y")]
if (is.null(gvenn)) {
suppressWarnings(lines(temp))
}
else {
breaks <- which(apply(temp, 1, function(x) any(is.na(x))))
start <- 1
for (b in seq(length(breaks))) {
if (b > 1) start <- breaks[b - 1] + 1
gvenn <- gvenn + ggplot2::geom_path(ggplot2::aes(x, y), data = temp[seq(start, breaks[b] - 1), ])
}
}
}
}
}
else {
if (allborders) {
temp <- sets[sets$s == nofsets & sets$v == as.numeric(ellipse), c("x", "y")]
if (is.null(gvenn)) {
suppressWarnings(lines(temp))
}
else {
breaks <- which(apply(temp, 1, function(x) any(is.na(x))))
start <- 1
for (b in seq(length(breaks))) {
if (b > 1) start <- breaks[b - 1] + 1
gvenn <- gvenn + ggplot2::geom_path(ggplot2::aes(x, y), data = temp[seq(start, breaks[b] - 1), ])
}
}
}
else {
if (!is.element("col", names(other.args))) {
other.args$col <- "black"
}
}
if (default) {
for (i in seq(length(zones))) {
for (j in seq(length(zones[[i]]))) {
if (is.null(gvenn)) {
suppressWarnings(lines(zones[[i]][[j]], col = bcolor[i]))
}
else {
temp <- zones[[i]][[j]]
breaks <- which(apply(temp, 1, function(x) any(is.na(x))))
start <- 1
for (b in seq(length(breaks))) {
if (b > 1) start <- breaks[b - 1] + 1
gvenn <- gvenn + ggplot2::geom_path(ggplot2::aes(x, y), data = temp[seq(start, breaks[b] - 1), ], col = bcolor[i])
}
}
}
}
}
else {
if (length(other.args) > 0) {
other.args <- lapply(other.args, function(x) {
rep(x, length.out = length(rules))
})
for (i in seq(length(zones))) {
for (j in seq(length(zones[[i]]))) {
if (is.null(gvenn)) {
seplines <- list(as.name("lines"), x = zones[[i]][[j]])
suppress <- list(as.name("suppressWarnings"))
if (any(names(other.args) == "col")) {
other.args$col <- admisc::splitstr(other.args$col)
}
for (nm in names(other.args)) {
seplines[[nm]] <- other.args[[nm]][i]
}
suppress[[2]] <- as.call(seplines)
eval(as.call(suppress))
}
else {
temp <- zones[[i]][[j]]
breaks <- which(apply(temp, 1, function(x) any(is.na(x))))
start <- 1
for (b in seq(length(breaks))) {
if (b > 1) start <- breaks[b - 1] + 1
seplines <- list(ggplot2::geom_path)
seplines[["data"]] <- temp[seq(start, breaks[b] - 1), ]
seplines[["mapping"]] <- ggplot2::aes(x, y)
if (any(names(other.args) == "col")) {
other.args$col <- admisc::splitstr(other.args$col)
}
for (nm in names(other.args)) {
seplines[[nm]] <- other.args[[nm]][i]
}
gvenn <- gvenn + eval(as.call(seplines))
}
}
}
}
}
}
}
if (!is.null(gvenn)) {
return(gvenn)
}
}
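# Usage sketch (assumption: the package-internal `sets` coordinate data is
# available, as when the namespace is loaded):
# openPlot()
# plotRules(3, zcolor = "style")  # shade and outline three whole sets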
|
/scratch/gouwar.j/cran-all/cranData/venn/R/plotRules.R
|
# Copyright (c) 2016 - 2024, Adrian Dusa. All rights reserved. License terms as in the header above.
`getBorders` <- function() return(borders)
`getIB` <- function() return(ib)
`getICoords` <- function() return(icoords)
`getSCoords` <- function() return(scoords)
`getInts` <- function() return(ints)
`getSets` <- function() return(sets)
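# These accessors expose the package-internal layout data, e.g.:
# head(getSets())  # x/y outline coordinates, keyed by number of sets and shape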
|
/scratch/gouwar.j/cran-all/cranData/venn/R/utils.R
|
# Copyright (c) 2016 - 2024, Adrian Dusa. All rights reserved. License terms as in the header above.
`venn` <- function(
x, snames = "", ilabels = NULL, ellipse = FALSE, zcolor = "bw",
opacity = 0.3, plotsize = 15, ilcs = 0.6, sncs = 0.85, borders = TRUE,
box = TRUE, par = TRUE, ggplot = FALSE, ...
) {
if (missing(x)) {
admisc::stopError("Argument <x> is missing.")
}
dots <- list(...)
counts <- dots$counts
cts <- NULL
tjqca <- is.element("trajectory", names(dots))
trajectory <- dots$trajectory
tjcases <- names(trajectory)
dots$trajectory <- NULL
if (!is.null(ilabels)) {
if (identical(ilabels, "counts")) {
counts <- TRUE
ilabels <- NULL
}
else {
if (isTRUE(ilabels)) {
counts <- NULL
}
else {
if (is.atomic(ilabels) && !is.logical(ilabels)) {
cts <- ilabels
counts <- NULL
ilabels <- NULL
}
}
}
}
if (is.null(counts)) {
counts <- FALSE
}
else {
if (is.atomic(counts) && !is.logical(counts)) {
cts <- counts
counts <- TRUE
}
counts <- isTRUE(counts)
}
if (ggplot) {
ilcs <- ilcs * 2.5 / 0.6
sncs <- sncs * 3.5 / 0.85
if (
!requireNamespace("ggplot2", quietly = TRUE) |
!requireNamespace("ggpolypath", quietly = TRUE)
) {
admisc::stopError(
paste(
"Packages \"ggplot2\" and \"ggpolypath\" are needed",
"to make this work, please install."
)
)
}
}
funargs <- unlist(lapply(match.call(), deparse)[-1])
if (!is.element("cexil", names(funargs))) {
names(funargs)[which(names(funargs) == "cexil")] <- "ilcs"
}
if (!is.element("cexsn", names(funargs))) {
names(funargs)[which(names(funargs) == "cexsn")] <- "sncs"
}
if (inherits(tryCatch(eval(x), error = function(e) e), "error")) {
x <- funargs["x"]
}
if (is.numeric(x)) {
if (length(x) > 1) {
admisc::stopError(
"Argument <x> can be a single digit, for up to 7 sets."
)
}
}
if (!identical(zcolor, "bw") & !identical(zcolor, "style")) {
zcolor <- admisc::splitstr(zcolor)
testcolor <- tryCatch(col2rgb(zcolor), error = function(e) e)
if (!is.matrix(testcolor)) {
admisc::stopError("Invalid color(s) in argument <zcolor>.")
}
}
nofsets <- 0
if (!identical(snames, "")) {
if (!is.character(snames)) {
admisc::stopError("The argument <snames> should be character.")
}
if (length(snames) == 1) snames <- admisc::splitstr(snames)
nofsets <- length(snames)
}
ttqca <- FALSE
listx <- FALSE
if (any(is.element(c("qca", "QCA_min", "tt", "QCA_tt"), class(x)))) {
ttqca <- TRUE
otype <- "input"
if (any(is.element(c("tt", "QCA_tt"), class(x)))) {
QCA <- all(
which(
is.element(
c("minmat", "DCC", "options", "neg.out", "opts"),
names(x)
)
) < 4
)
otype <- "truth table"
tt <- x$tt
snames <- unlist(
strsplit(
gsub("[[:space:]]", "", x$options$conditions),
split = ","
)
)
noflevels <- x$noflevels
rnms <- rownames(x$initial.data)
ttcases <- x$tt$cases
}
else {
QCA <- all(
which(
is.element(
c("minmat", "DCC", "options", "neg.out", "opts"),
names(x$tt)
)
) < 4
)
otype <- "minimization"
oq <- TRUE
tt <- x$tt$tt
snames <- unlist(
strsplit(
gsub("[[:space:]]", "", x$tt$options$conditions),
split = ","
)
)
noflevels <- x$tt$noflevels
rnms <- rownames(x$tt$initial.data)
ttcases <- x$tt$tt$cases
}
if (tjqca) {
if (!identical(
sort(tjcases),
sort(unique(gsub("[0-9]", "", rnms)))
)) {
admisc::stopError("Case names do not match the truth table.")
}
}
if (!QCA) {
admisc::stopError(
sprintf(
"Please create a proper %s object with package QCA.",
otype
)
)
}
if (any(noflevels != 2)) {
admisc::stopError(
"Venn diagrams are not possible for multivalue data."
)
}
if (nofsets == 0) {
nofsets <- length(snames)
}
if (nofsets > 7) {
admisc::stopError(
"Venn diagrams can only be drawn up to 7 explanatory conditions."
)
}
if (nofsets < 4 | nofsets > 5) {
ellipse <- FALSE
}
ttcolors <- c(
"0" = "#ffd885",
"1" = "#96bc72",
"C" = "#1c8ac9",
"?" = "#ffffff" # white
)
if (identical(zcolor, "style")) {
zcolor <- "bw"
}
else if (!identical(zcolor, "bw")) {
if (is.character(zcolor) & length(zcolor) >= 3) {
ttcolors[c("0", "1", "C")] <- zcolor[1:3]
}
}
individual <- length(opacity) == nrow(tt)
gvenn <- do.call(
openPlot,
c(
list(plotsize, par = par, ggplot = ggplot),
dots
)
)
if (individual) {
for (i in seq(nrow(tt))) {
if (tt$OUT[i] != "?") {
color <- adjustcolor(
ttcolors[tt$OUT[i]],
alpha.f = as.numeric(opacity[i])
)
if (i == 1) {
zeroset <- matrix(
c(0, 1000, 1000, 0, 0, 0, 0, 1000, 1000, 0),
ncol = 2
)
colnames(zeroset) <- c("x", "y")
polygons <- rbind(
zeroset,
rep(NA, 2),
getZones(0, nofsets, ellipse)[[1]]
)
polygons <- polygons[-nrow(polygons), ]
if (is.null(gvenn)) {
polypath(
polygons,
rule = "evenodd",
col = color,
border = NA
)
}
else {
gvenn <- gvenn + ggpolypath::geom_polypath(
polygons,
rule = "evenodd",
col = color
)
}
}
else {
plotdata <- ints[
ints$s == nofsets &
ints$v == as.numeric(ellipse) &
ints$i == i,
c("x", "y")
]
if (is.null(gvenn)) {
polygon(plotdata, col = color)
}
else {
gvenn <- gvenn + ggplot2::geom_polygon(
data = plotdata,
ggplot2::aes(x, y),
fill = color
)
}
}
}
}
}
else {
for (i in names(ttcolors)[1:3]) {
zones <- as.numeric(rownames(tt[tt$OUT == i, ]))
if (length(zones) > 0) {
if (any(zones == 1)) {
zeroset <- matrix(
c(0, 1000, 1000, 0, 0, 0, 0, 1000, 1000, 0),
ncol = 2
)
colnames(zeroset) <- c("x", "y")
polygons <- rbind(
zeroset,
rep(NA, 2),
getZones(0, nofsets, ellipse)[[1]]
)
polygons <- polygons[-nrow(polygons), ]
if (is.null(gvenn)) {
polypath(
polygons,
rule = "evenodd",
col = ttcolors[i],
border = NA
)
}
else {
gvenn <- gvenn + ggpolypath::geom_polypath(
polygons,
rule = "evenodd",
col = ttcolors[i]
)
}
zones <- zones[-1]
}
plotdata <- ints[
ints$s == nofsets & ints$v == as.numeric(ellipse) &
is.element(ints$i, zones),
c("x", "y")
]
if (is.null(gvenn)) {
polygon(plotdata, col = ttcolors[i])
}
else {
gvenn <- gvenn + ggplot2::geom_polygon(
data = plotdata,
ggplot2::aes(x, y),
fill = ttcolors[i]
)
}
}
}
}
if (isTRUE(counts) & is.null(cts)) {
cts <- tt$n
}
x <- nofsets
}
else if (is.numeric(x)) {
nofsets <- x
if (!identical(snames, "")) {
if (length(snames) != nofsets) {
admisc::stopError(
"Number of sets not equal with the number of set names."
)
}
}
}
else if (is.character(x)) {
if (any(grepl("\\$solution", funargs["x"]))) {
obj <- get(unlist(strsplit(funargs["x"], split = "[$]"))[1])
snames <- obj$tt$options$conditions
nofsets <- length(snames)
}
x <- unlist(strsplit(gsub("[[:space:]]", "", x), split = ","))
if (all(grepl("[A-Za-z]", x))) {
if (identical(snames, "")) {
y <- admisc::translate(
paste(x, collapse = "+"),
snames = snames
)
snames <- colnames(y)
nofsets <- length(snames)
}
x <- lapply(x, function(x) {
return(paste(apply(
admisc::translate(x, snames = snames),
1,
function(x) {
x[x < 0] <- "-"
return(paste(x, collapse = ""))
}),
collapse = "+"
))
})
}
if (!is.list(x)) {
if (!all(gsub("0|1|-|\\+", "", x) == "")) {
admisc::stopError("Invalid codes in the rule(s).")
}
if (nofsets == 0) {
nofsets <- unique(nchar(unlist(strsplit(x, split = "\\+"))))
}
x <- as.list(x)
}
}
else if (is.data.frame(x)) {
if (!is.null(names(x))) {
if (all(names(x) != "")) {
snames <- names(x)
}
}
if (!all(is.element(unique(unlist(x)), c(0, 1)))) {
admisc::stopError(
"As a dataframe, argument <x> can only contain values 0 and 1."
)
}
if (nofsets == 0) {
nofsets <- length(x)
}
if (isTRUE(counts) & is.null(cts)) {
cts <- apply(
sapply(
rev(seq(nofsets)),
function(x) {
rep.int(
c(sapply(0:1, function(y) rep.int(y, 2^(x - 1)))),
2^nofsets / 2^x
)
}
),
1,
function(l1) {
sum(apply(x, 1, function(l2) {
all(l1 == l2)
}))
}
)
}
x <- nofsets
}
else if (is.list(x)) {
if (any(grepl("\\$solution", funargs["x"]))) {
obj <- get(
unlist(
strsplit(funargs["x"], split = "[$]")
)[1]
)
snames <- obj$tt$options$conditions
nofsets <- length(snames)
x <- admisc::translate(
paste(unlist(x), collapse = " + "),
snames = snames
)
x <- as.list(apply(x, 1, function(y) {
y[y < 0] <- "-"
return(paste(y, collapse = ""))
}))
}
else {
listx <- TRUE
if (length(x) > 7) {
x <- x[seq(7)]
}
if (!is.null(names(x))) {
if (all(names(x) != "")) {
snames <- names(x)
}
}
if (identical(snames, "")) {
snames <- LETTERS[seq(length(x))]
}
if (nofsets == 0) {
nofsets <- length(x)
}
tt <- sapply(
rev(seq(nofsets)),
function(x) {
rep.int(
c(sapply(0:1, function(y) rep.int(y, 2^(x - 1)))),
2^nofsets / 2^x
)
}
)
colnames(tt) <- snames
intersections <- apply(tt, 1,
function(y) {
setdiff(Reduce(intersect, x[y == 1]), unlist(x[y == 0]))
}
)
names(intersections) <- apply(
tt,
1,
function(x) paste(snames[x == 1], collapse = ":")
)
ttcts <- unlist(lapply(intersections, length))
intersections <- intersections[ttcts > 0]
tt <- as.data.frame(cbind(tt, counts = ttcts))
attr(tt, "intersections") <- intersections
if (isTRUE(counts) & is.null(cts)) {
cts <- ttcts
}
x <- nofsets
}
}
else {
admisc::stopError("Unrecognised argument <x>.")
}
if (length(cts) != 2^nofsets) {
cts <- NULL
counts <- NULL
}
if (nofsets > 7) {
admisc::stopError("Venn diagrams can only be drawn up to 7 sets.")
}
else if (nofsets < 4 | nofsets > 5) {
ellipse <- FALSE
}
if (identical(snames, "")) {
snames <- LETTERS[seq(nofsets)]
}
else {
if (length(snames) != nofsets) {
admisc::stopError(
"Length of set names does not match the number of sets."
)
}
}
if (!is.element("ilcs", names(funargs))) {
if (!ggplot) {
ilcs <- ilcs - ifelse(nofsets > 5, 0.1, 0) - ifelse(nofsets > 6, 0.05, 0)
}
}
if (!ttqca) {
gvenn <- openPlot(plotsize, par = par, ggplot = ggplot, ... = ...)
}
gvenn <- plotRules(
x, zcolor, ellipse, opacity, allborders = borders, box = box,
gvenn = gvenn, ... = ...
)
if (isTRUE(ilabels) | !is.null(cts) | tjqca) {
if (isTRUE(ilabels)) {
ilabels <- icoords$l[
icoords$s == nofsets & icoords$v == as.numeric(ellipse)
]
} else if (!is.null(cts)) {
if (isTRUE(counts)) {
cts[cts == 0] <- ""
}
ilabels <- cts
}
icoords <- icoords[
icoords$s == nofsets & icoords$v == as.numeric(ellipse),
c("x", "y")
]
if (!is.null(ilabels)) {
if (ggplot) {
for (i in which(ilabels != "")) {
gvenn <- gvenn + ggplot2::annotate("text",
x = icoords$x[i], y = icoords$y[i],
label = ilabels[i],
size = ilcs
)
}
}
else {
text(icoords, labels = ilabels, cex = ilcs)
}
}
if (tjqca) {
ttcases <- strsplit(gsub(";", ",", ttcases), split = ",")
caselist <- lapply(tjcases, function(x) {
rnms <- rnms[is.element(gsub("[0-9]", "", rnms), x)]
rnmsindex <- c()
for (i in seq(length(rnms))) {
rnmsindex <- c(
rnmsindex,
which(sapply(ttcases, function(x) {
any(x == rnms[i])
}))
)
}
return(rle(rnmsindex))
})
for (case in seq(length(tjcases))) {
rlecase <- caselist[[case]]
lengths <- rlecase$lengths
values <- rlecase$values
uvalues <- unique(values)
jx <- jitter(icoords$x[uvalues], factor = 2)
jy <- jitter(icoords$y[uvalues], factor = 2)
x <- jx[match(values, uvalues)]
y <- jy[match(values, uvalues)]
tcase <- trajectory[[tjcases[case]]]
if (is.null(tcase$length)) {
tcase$length <- 0.12
}
if (is.null(tcase$lwd)) {
tcase$lwd <- 2
}
if (is.null(tcase$col)) {
tcase$col <- "black"
}
if (length(values) == 1) {
points(
x,
y,
pch = ifelse(is.null(tcase$pch), 20, tcase$pch),
cex = ifelse(is.null(tcase$cex), 2, tcase$cex),
col = tcase$col
)
}
else {
i <- 1
j <- 2
while (i <= length(values) - 1) {
if (i == 1 & lengths[1] > 1) {
points(
x[1],
y[1],
pch = ifelse(is.null(tcase$pch), 20, tcase$pch),
cex = ifelse(is.null(tcase$cex), 1.5, tcase$cex),
col = tcase$col
)
}
back <- FALSE
while (j <= length(values)) {
if (j < length(values)) {
back <- values[j + 1] == values[i]
}
callist <- c(
list(x[i], y[i], x[j], y[j]),
tcase
)
callist$code <- 2
do.call(graphics::arrows, callist)
j <- j + 1 + back
}
i <- i + 1 + back
}
}
}
}
}
scoords <- scoords[
scoords$s == nofsets & scoords$v == as.numeric(ellipse),
c("x", "y")
]
if (ggplot) {
for (i in seq(length(snames))) {
gvenn <- gvenn + ggplot2::annotate("text",
x = scoords$x[i], y = scoords$y[i],
label = snames[i],
size = sncs
)
}
}
else {
text(scoords, labels = snames, cex = sncs)
}
if (ttqca) {
if (is.null(gvenn)) {
points(
seq(10, 340, length.out = 4),
rep(-25, 4),
pch = 22,
bg = ttcolors,
cex = 1.75
)
text(
seq(40, 370, length.out = 4),
rep(-26, 4),
names(ttcolors),
cex = 0.85
)
}
else {
gvenn <- gvenn +
ggplot2::annotate("rect",
xmin = 10, xmax = 32, ymin = -44, ymax = -22,
fill = ttcolors[1],
col = "black"
) +
ggplot2::annotate("rect",
xmin = 120, xmax = 142, ymin = -44, ymax = -22,
fill = ttcolors[2],
col = "black"
) +
ggplot2::annotate("rect",
xmin = 230, xmax = 252, ymin = -44, ymax = -22,
fill = ttcolors[3],
col = "black"
) +
ggplot2::annotate("rect",
xmin = 340, xmax = 362, ymin = -44, ymax = -22,
fill = ttcolors[4],
col = "black"
) +
ggplot2::annotate("text",
x = 50, y = -34,
label = names(ttcolors)[1]
) +
ggplot2::annotate("text",
x = 160, y = -34,
label = names(ttcolors)[2]
) +
ggplot2::annotate("text",
x = 270, y = -34,
label = names(ttcolors)[3]
) +
ggplot2::annotate("text",
x = 380, y = -34,
label = names(ttcolors)[4]
)
}
}
if (ggplot) {
return(gvenn)
}
if (listx) {
return(invisible(tt))
}
}
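# Usage sketches, mirroring the argument handling above (set names and
# numbers are illustrative):
# venn(3)                                           # empty three-set diagram
# venn(list(A = 1:5, B = 3:8), ilabels = "counts")  # region counts from a list
# venn("1-1", snames = "A, B, C", zcolor = "style") # shade where A = 1 and C = 1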
|
/scratch/gouwar.j/cran-all/cranData/venn/R/venn.R
|
venneuler <- function(combinations, weights, ...) {
if (missing(combinations)) stop("combinations must be specified")
if (inherits(combinations, "table")) {
if (!missing(weights)) warning("combinations is a table yet weights are also specified - ignoring weights")
weights <- as.vector(combinations)
rnm <- rep(rownames(combinations), dim(combinations)[2])
cnm <- rep(colnames(combinations), each=dim(combinations)[1])
names(weights) <- paste(rnm, cnm, sep="&")
if (all(weights == 0)) stop("all weights are zero")
weights <- weights[weights != 0]
combinations <- names(weights)
} else if (missing(weights) && is.numeric(combinations) && is.null(dim(combinations))) {
if (is.null(names(combinations))) stop("combinations are a numeric vector but without names")
weights <- combinations
combinations <- names(combinations)
}
if (is.data.frame(combinations)) combinations <- as.matrix(combinations)
if (is.matrix(combinations) && (is.numeric(combinations) || is.logical(combinations))) {
if (is.null(colnames(combinations))) colnames(combinations) <- LETTERS[seq.int(dim(combinations)[2])]
## aggregate all entries using a hashed environment -- we could probably devise a smarter way if we cared ...
e <- new.env(TRUE, emptyenv())
cn <- colnames(combinations)
if (is.logical(combinations)) { for (i in seq.int(dim(combinations)[1])) if (any(combinations[i,])) {
ec <- paste(cn[combinations[i,]], collapse='&')
e[[ec]] <- if (is.null(e[[ec]])) 1L else (e[[ec]] + 1L)
} } else if (is.numeric(combinations)) for (i in seq.int(dim(combinations)[1])) if (any(combinations[i,] != 0)) {
ec <- paste(cn[combinations[i,] != 0], collapse='&')
e[[ec]] <- (if (is.null(e[[ec]])) 0 else e[[ec]]) + sum(combinations[i,])
}
en <- ls(e, all.names=TRUE)
weights <- as.numeric(unlist(lapply(en, get, e)))
combinations <- as.character(en)
}
if (is.matrix(combinations) && is.character(combinations) && dim(combinations)[2] == 2) {
vd <- .jnew("edu/uic/ncdm/venn/data/VennData", as.character(combinations[,1]), as.character(combinations[,2]))
} else {
if (!is.character(combinations)) stop("combinations must be either a character vector, a table, a named numeric vector or a character matrix with two columns")
if (missing(weights)) weights <- rep(1, length(combinations))
vd <- .jnew("edu/uic/ncdm/venn/data/VennData", as.character(combinations), as.double(weights))
}
a <- .jnew("edu/uic/ncdm/venn/VennAnalytic")
g <- .jcall(a, "Ledu/uic/ncdm/venn/VennDiagram;", "compute", vd)
ct <- lapply(.jevalArray(.jfield(g, "[[D", "centers", convert=FALSE)), .jevalArray)
n <- length(ct)
ct <- matrix(unlist(ct), ncol=2, byrow=TRUE)
colnames(ct) <- c("x", "y")
diam <- .jevalArray(.jfield(g, "[D", "diameters", convert=FALSE))
areas <- .jevalArray(.jfield(g, "[D", "areas", convert=FALSE))
res <- .jevalArray(.jfield(g, "[D", "residuals", convert=FALSE))
col <- .jevalArray(.jfield(g, "[D", "colors", convert=FALSE))
lab <- .jevalArray(.jfield(g, "[Ljava/lang/String;", "circleLabels", convert=FALSE))
rownames(ct) <- lab
names(diam) <- lab
names(col) <- lab
names(res) <- .jevalArray(.jfield(g, "[Ljava/lang/String;", "residualLabels", convert=FALSE))
structure(list(centers=ct, diameters=diam, colors=col, labels=lab, residuals=res,
stress=.jfield(g, "D", "stress"), stress01=.jfield(g, "D", "stress01"),
stress05=.jfield(g, "D", "stress05")), class="VennDiagram")
}
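## Usage sketch (requires Java via rJava, since the computation is delegated
## to the bundled VennAnalytic Java class):
## vd <- venneuler(c(A = 1, B = 1, "A&B" = 0.5))
## plot(vd)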
## Note: in col.fn we need more chroma and less luminance than usual, because we'll be plotting with reduced alpha
plot.VennDiagram <- function(x, col, col.fn = function(col) hcl(col * 360, 130, 60), alpha=0.3, main=NULL, edges=200, border=NA, col.txt=1, cex=1, lwd=1, lty=1, font=NULL, family="", ...) {
if (length(edges) != 1 || !is.numeric(edges) || !is.finite(edges) || edges < 3)
stop("invalid number of edges specified")
# calculate total extents
xtp <- x$centers + x$diameters / 2
xtm <- x$centers - x$diameters / 2
xr <- range(c(xtp[,1], xtm[,1]))
yr <- range(c(xtp[,2], xtm[,2]))
# create canvas
plot.new()
plot.window(xr, yr, "", asp = 1)
# adjust alpha for all colors if specified
n <- length(x$diameters)
if (missing(col)) col <- col.fn(x$colors)
if (length(col) < n) col <- rep(col, length.out=n)
if (!any(is.na(alpha))) {
col <- col2rgb(col) / 255
col <- rgb(col[1,], col[2,], col[3,], alpha)
}
# prepare circle coordinates
s <- seq.int(edges) / edges * 2 * pi
sx <- cos(s) / 2 # VD uses diameter, not radius
sy <- sin(s) / 2
if (!is.null(border)) border <- rep(border, length.out=n)
lty <- rep(lty, length.out=n)
lwd <- rep(lwd, length.out=n)
# plot all circles
for (i in seq.int(n))
polygon(x$centers[i, 1] + x$diameters[i] * sx, x$centers[i, 2] + x$diameters[i] * sy,
col = col[i], border = border[i], lty = lty[i], lwd = lwd[i])
# if col.txt is not NA, plot the circle text
if (!all(is.na(col.txt))) text(x$centers, labels=x$labels, col=col.txt, cex=cex, font=font, family=family)
# finish with title
title(main = main, ...)
invisible(NULL)
}
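## The circles are approximated by `edges`-sided polygons; a larger value
## gives smoother outlines at a modest cost, e.g.:
## plot(vd, edges = 500, border = "grey40")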
|
/scratch/gouwar.j/cran-all/cranData/venneuler/R/venneuler.R
|
.onLoad <- function(libname, pkgname)
.jpackage("venneuler", "venneuler.jar")
|
/scratch/gouwar.j/cran-all/cranData/venneuler/R/zzz.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
lossCpp <- function(xy, lambda, radius, ED, ThreeD, ToleranceofLoss, maximumStep, ToleranceofStepsize, proportional, ALPHA, Bool) {
.Call('vennplot_loop_R', PACKAGE = 'vennplot', xy, lambda, radius, ED, ThreeD, ToleranceofLoss, maximumStep, ToleranceofStepsize, proportional, ALPHA, Bool)
}
transCpp <- function(xy, radius, radiusvec, radiusall) {
.Call('vennplot_trans_R', PACKAGE = 'vennplot', xy, radius, radiusvec, radiusall)
}
allDisjointCpp <- function(xy1, xy2, radius1, radius2, delta) {
.Call('vennplot_alldis_R', PACKAGE = 'vennplot', xy1, xy2, radius1, radius2, delta)
}
closeCpp <- function(xy1, xy2, radius1, radius2, delta, direc) {
.Call('vennplot_close_R', PACKAGE = 'vennplot', xy1, xy2, radius1, radius2, delta, direc)
}
binaryIndexCpp <- function(M, xy, radius, k, yuan, xuan, num) {
.Call('vennplot_binaryIndexCpp', PACKAGE = 'vennplot', M, xy, radius, k, yuan, xuan, num)
}
goThroughPixelCpp <- function(myList, m, num) {
.Call('vennplot_goThroughPixelCpp', PACKAGE = 'vennplot', myList, m, num)
}
countCpp <- function(M, Me) {
.Call('vennplot_countCpp', PACKAGE = 'vennplot', M, Me)
}
getRidofZeroCpp <- function(M) {
.Call('vennplot_getRidofZeroCpp', PACKAGE = 'vennplot', M)
}
binaryIndexThreeDCpp <- function(myList, xy, radius, k, yuan, xuan, zuan, num) {
.Call('vennplot_binaryIndexThreeDCpp', PACKAGE = 'vennplot', myList, xy, radius, k, yuan, xuan, zuan, num)
}
goThroughPixelThreeDCpp <- function(list, m, num) {
.Call('vennplot_goThroughPixelThreeDCpp', PACKAGE = 'vennplot', list, m, num)
}
allConnectedCpp <- function(xy, radius, ThreeD) {
.Call('vennplot_allConnectedCpp', PACKAGE = 'vennplot', xy, radius, ThreeD)
}
distanceCpp <- function(r1, r2, theta1, theta2, S, ThreeD) {
.Call('vennplot_distanceCpp', PACKAGE = 'vennplot', r1, r2, theta1, theta2, S, ThreeD)
}
BoolScaleNMCpp <- function(proportional, value, LAMBDA, STRESS) {
.Call('vennplot_BoolScaleNMCpp', PACKAGE = 'vennplot', proportional, value, LAMBDA, STRESS)
}
BoolScaleLCpp <- function(proportional, value, stress_n, stress) {
.Call('vennplot_BoolScaleLCpp', PACKAGE = 'vennplot', proportional, value, stress_n, stress)
}
BoolDistanceCpp <- function(proportional, value, f1, f2, thetanew, theta) {
.Call('vennplot_BoolDistanceCpp', PACKAGE = 'vennplot', proportional, value, f1, f2, thetanew, theta)
}
|
/scratch/gouwar.j/cran-all/cranData/vennplot/R/RcppExports.R
|
#' Data on human encounters with great white sharks.
#'
#' @format A dataset with 65 rows and 11 columns.
#' \describe{
#' \item{Year}{Year of the encounter}
#' \item{Sex}{Sex of the victim}
#' \item{Age}{Age of the victim}
#' \item{Time}{Whether the encounter occurred in the AM or PM}
#' \item{Australia}{Whether the encounter occurred in Australia}
#' \item{USA}{Whether the encounter occurred in the United States}
#' \item{Surfing}{Whether it was a surfing incident}
#' \item{Scuba}{Whether it was a scuba-diving incident}
#' \item{Fatality}{Whether or not there was a fatality}
#' \item{Injury}{Whether or not there was an injury}
#' \item{Length}{Length of the shark}
#' }
#' @source \url{http://sharkattackinfo.com/shark_attack_news_sas.html}. Data collected by Professor Pierre-Jerome Bergeron, University of Ottawa.
#' @examples
#' vennplot(disjoint.combinations = sharks, vars = c("Au","USA","Fa","Ti"))
"sharks"
|
/scratch/gouwar.j/cran-all/cranData/vennplot/R/sharks.R
|
#' Draw Venn and Euler diagram in 2D or 3D
#'
#' @param disjoint.combinations Named numeric vector or a data.frame whose columns should be factors. See Details.
#' @param vars Variables of the data.frame to extract as \code{disjoint.combinations}. If \code{vars = NULL}, all columns of the data.frame are used.
#' @param Delta The step length for method "lineSearch", or the initial interval of test points for method "NelderMead".
#' @param ThreeD Draw the Venn diagram in 3D. See Examples.
#' @param lambda Either \code{NULL} or a numeric vector. If \code{lambda = NULL}, the loss function optimizes lambda; otherwise the loss function computes the stress for each given lambda and returns the minimum stress with the corresponding lambda.
#' @param stressWay If the data set can be separated into several groups, there are two ways to report stress: sum the group stresses (named "sum"; default), or divide the total RSS by the total TSS (named "combine").
#' @param delta Closeness between groups.
#' @param weight The weights of \code{disjoint.combinations}. Must have the same length as \code{disjoint.combinations}.
#' @param expand Used when some balls should not intersect but the layout fails to separate them; the layout can then be expanded manually, at the cost of a higher stress.
#' @param twoWayGenerate Logical; if \code{FALSE}, any missing intersections are set to zero.
#' @param scaleSearch Methods available to optimize the scale lambda. The default is "NelderMead". See Details.
#' @param twoWaySearch If two-way intersections are missing, several methods are available to generate them. The default is "lineSearch". See Details.
#' @param scaleSeachTolerance A list with a tolerance value and a boolean "proportional". The NelderMead and lineSearch loops in \code{scaleSearch} end when the difference (or proportional difference) falls below the tolerance value.
#' @param distanceTolerance A list with a tolerance value and a boolean "proportional". The Newton search for the distance ends when the difference (or proportional difference) falls below the tolerance value.
#' @param lossTolerance A list with ToleranceofLoss, maximumStep, ALPHA, ToleranceofStepsize and a boolean "proportional". If ALPHA is \code{NULL}, the step size is found by Newton's method, stopping when the step count reaches maximumStep or the difference falls below ToleranceofStepsize; otherwise the step size is fixed at ALPHA. The loss iteration ends when the difference, the proportional difference, or the total loss falls below ToleranceofLoss.
#' @param stressBound The NelderMead loop stops when the stress falls below \code{stressBound}.
#' @param maximumStep The maximum number of search steps for method NelderMead and for the Newton distance calculation.
#' @param planeSize The plane size used to compute disjoint intersections numerically.
#' @param lower The lower bound of the interval to be searched for "goldenSectionSearch", "L-BFGS-B" and "Brent". See Details.
#' @param upper The upper bound of the interval to be searched for "goldenSectionSearch", "L-BFGS-B" and "Brent". See Details.
#' @param control A list of control parameters. See Details.
#' @param hessian Logical; whether a numerically differentiated Hessian matrix should be returned. See Details.
#' @param mar Plot margins.
#' @param cols Colors of the balls. If \code{NULL}, a rainbow palette is used.
#' @param alpha Color darkness.
#' @param smooth For a 3D plot, if \code{TRUE}, the balls are rendered much more smoothly; however, at that resolution, rotating the window becomes sluggish when there are many balls.
#' @param ... Any further graphical parameters to be passed to the \code{plot} function.
#'
#' @details
#' 1. One-way sets must be given in \code{disjoint.combinations}: e.g. \code{disjoint.combinations = c(B=2, AB=0.5)} is not allowed, while \code{disjoint.combinations = c(A = 0, B=2, AB=0.5)} works.
#' 2. Apart from "NelderMead" and "lineSearch", the method "goldenSectionSearch" in \code{scaleSearch} and \code{twoWaySearch} is based on \code{\link{optimize}}, and the remaining methods are based on \code{\link{optim}}.
#' 3. \code{lower}, \code{upper}, \code{control} and \code{hessian} share the same parameters with \code{\link{optim}}; \code{lower} and \code{upper} can also be used in \code{\link{optimize}}.
#'
#' @author Zehao Xu and Wayne Oldford
#' @return An object of the class \code{vennplot} with following components:
#' \describe{
#' \item{xy}{centres of the balls (columns are (\code{x}, \code{y}) or (\code{x}, \code{y}, \code{z}) coordinates).}
#' \item{radius}{radii of the balls.}
#' \item{loss}{total loss of \code{vennplot}.}
#' \item{stress}{stress value for solution.}
#' }
#' @examples
#' # 3D Venn plot with arbitrary sets
#' disjoint.combinations = c(A=80, B=50,C=100, D = 100,E = 100,
#' "A&C"=30, "A&D"= 30,"B&E" = 30, "A&E" = 40, h = 40, "B&h" = 10)
#' ve = vennplot(disjoint.combinations, ThreeD = TRUE)
#'
#' # data frame
#' vennplot(disjoint.combinations = sharks, vars = c("Au","USA","Fa","Sex"),
#' scaleSearch = "lineSearch", expand = 1.1)
#'
#' @export
vennplot <- function(disjoint.combinations = NULL, vars = NULL, Delta = 0.1,
ThreeD = FALSE, lambda = NULL, stressWay = c("sum","combine"),
delta = 0.01, weight = NULL, expand = NULL, twoWayGenerate = FALSE,
scaleSearch = c("NelderMead", "lineSearch", "goldenSectionSearch",
"BFGS", "CG", "L-BFGS-B", "SANN", "Brent"),
twoWaySearch = c("lineSearch", "NelderMead", "goldenSectionSearch",
"BFGS", "CG", "L-BFGS-B", "SANN", "Brent"),
scaleSeachTolerance = list(value = 1e-5, proportional = FALSE),
distanceTolerance = list(value = 1e-5, proportional = FALSE),
lossTolerance = list(ToleranceofLoss = 1e-10, maximumStep = 10, ALPHA = 1e-2,
ToleranceofStepsize = 1e-5, proportional = FALSE),
stressBound = 1e-3, maximumStep = 50, planeSize = 50,
lower= -Inf, upper = Inf, control = list(), hessian = FALSE,
mar = rep(1,4), cols = NULL, alpha = 0.3, smooth = FALSE, ...){
if(is.null(disjoint.combinations)) stop("combinations should not be empty")
if (is.data.frame(disjoint.combinations)){
disjoint.combinations <- extractCombinations(disjoint.combinations, vars = vars)
}
combinations <- disjoint2combinations(disjoint.combinations)
# reorder disjoint.combinations with ways
reOrder <- reorderDisjointCombinations(combinations, disjoint.combinations)
disjoint.combinations <- reOrder$newDisjointCombinations
# scale proportionately
combProp <- combinations/sum(disjoint.combinations)
disProp <- disjoint.combinations/sum(disjoint.combinations)
# Check weight vector
if(is.null(weight)){
weight <- rep(1,length(disjoint.combinations))
} else {
if(length(weight)!=length(disjoint.combinations)){
stop("weight must be NULL or the same length as the number of disjoint combinations")
}
}
weight <- weight[reOrder$newOrder]
disjointSetNames <- names(disProp)
names(weight) <- disjointSetNames
# Get the number of intersections
numWays <- str_count(disjointSetNames, pattern = "&") + 1
if(max(numWays)==2){
nonEmptyTwoWays <- which(combProp!=0)
weight <- weight[nonEmptyTwoWays]
disjointSetNames <- disjointSetNames[nonEmptyTwoWays]
numWays <- numWays[nonEmptyTwoWays]
combProp <- combProp[nonEmptyTwoWays]
disProp <- disProp[nonEmptyTwoWays]
}
oneWays <- which(numWays == 1)
# Check colours
if (is.null(cols)) {
cols <- rainbow(length(oneWays), alpha = alpha)
}
oneWaySetName <- disjointSetNames[oneWays]
oneWaySet <- combProp[oneWays]
m <- length(oneWays)
# and for those that are larger than one way set
if (length(disjointSetNames) == length(oneWays)) {
largerThanOneWaySetName <- NULL
} else {
largerThanOneWaySetName <- disjointSetNames[-oneWays]
# All names appearing in two and higher order ways must appear
# as individual input sets too
if (!all(unique(unlist(str_split(largerThanOneWaySetName,"&"))) %in% oneWaySetName)){
stop("Some intersection sets contain sets which do not appear individually.")
}
}
# Detect disjoint groups of sets and return groups in a list
groups <- groupDetection(largerThanOneWaySetName = largerThanOneWaySetName,
oneWaySetName= oneWaySetName)
# Order groups from largest to smallest
groupOrder <- order(sapply(groups, length), decreasing = TRUE)
groups <- groups[groupOrder]
ngroups <- length(groups)
# get the combinations and proportions separated by groups
combPropGroup <- list()
disPropGroup <- list()
for(i in 1:ngroups){
groupMember <- groups[[i]]
groupWay <- oneWaySetName[groupMember]
for(j in 1:length(disjointSetNames)){
if(any(groupWay %in% str_split(disjointSetNames[j],"&")[[1]])){
groupWay <- c(groupWay, disjointSetNames[j])}
}
groupWay <- unique(groupWay)
combPropGroup[[i]] <- combProp[which(disjointSetNames %in% groupWay)]
disPropGroup[[i]] <- disProp[which(disjointSetNames %in% groupWay)]
}
# Calculate radius
radius <- if (ThreeD){
(3*oneWaySet/(4*pi))^(1/3)
} else {
sqrt(oneWaySet/pi)
}
# Get radii within each group
radiusGroup <- lapply(groups,
function(grp){radius[grp]})
weightGroup <- lapply(disPropGroup,
function(prop){
                            weight[names(weight) %in% names(prop)]})
# lossFunction
groupCentre <- list()
if(length(lower) != length(upper)){
stop("the length of lower vector must be equal to the length of upper vector")
}
if(length(lower) == 1 && length(upper) == 1){
lower = rep(lower,2)
upper = rep(upper,2)
}
if(length(hessian) == 1){hessian = rep(hessian, 2)}
scaleSearch <- match.arg(scaleSearch)
twoWaySearch <- match.arg(twoWaySearch)
stressWay <- match.arg(stressWay)
stressGroup <- rep(0, ngroups)
RSSGroup <- rep(0, ngroups)
TSSGroup <- rep(0, ngroups)
loss <- 0
for (gn in 1:ngroups){
lossFunctionOutput <- lossFunction(gn = gn, combProp = combPropGroup[[gn]], Delta = Delta,
radius = radiusGroup[[gn]], disProp = disPropGroup[[gn]], lambda = lambda,
weight = weightGroup[[gn]], ThreeD = ThreeD,
method = c(scaleSearch, twoWaySearch), twoWayGenerate = twoWayGenerate,
lower= lower, upper = upper, control = control, hessian = hessian,
expand = expand, scaleSeachTolerance = scaleSeachTolerance,
distanceTolerance = distanceTolerance, lossTolerance = lossTolerance,
stressBound = stressBound, maximumStep = maximumStep, planeSize = planeSize)
groupCentre[[gn]] <- lossFunctionOutput$centre
stressGroup[gn] <- lossFunctionOutput$stress
RSSGroup[gn] <- lossFunctionOutput$RSS
TSSGroup[gn] <- lossFunctionOutput$TSS
loss <- loss + lossFunctionOutput$loss
}
if(stressWay == "sum"){
stress <- sum(stressGroup)
} else {
#combine stress
stress <- sum(RSSGroup)/sum(TSSGroup)
}
#combine all groups
if(ngroups > 1){
combinedGroups <- combineGroups(groupCentre = groupCentre, radiusGroup = radiusGroup,delta = delta)
xy <- combinedGroups$xy
radius <- combinedGroups$radius
oneWaySetName <- combinedGroups$namexy
} else {
xy <- groupCentre[[1]]
radius <- radiusGroup[[1]]
oneWaySetName <- names(radius)
}
if(ThreeD){
sphere(xy = xy, radius = radius, cols = cols, alpha = alpha, oneWaySetName = oneWaySetName, smooth = smooth)
} else {
plot.new()
plotCircle(xy, radius, oneWaySetName, cols, mar = mar)
}
list(xy = xy, radius = radius, loss = loss, stress = stress)
}
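# A minimal usage sketch, kept as a comment so the file still sources cleanly;
# the set names and counts below are illustrative assumptions, not package data:
#   disjoint <- c(A = 30, B = 20, C = 10, "A&B" = 8, "A&C" = 4)
#   fit <- vennplot(disjoint.combinations = disjoint)
#   fit$stress  # how far the drawn areas deviate from the requested areas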
#--- helper functions -----------------------------------------------------
#library(rgl)
#library(stringr)
#library(Rcpp)
#sourceCpp(file = "your path/vennCpp2.cpp")
#lossFunction, the main function of vennplot; return centres of each group
lossFunction <- function(gn, combProp, radius, Delta, lambda,disProp,
weight, ThreeD, method, twoWayGenerate,
lower, upper, control, hessian, expand, scaleSeachTolerance,
distanceTolerance, lossTolerance, stressBound, maximumStep, planeSize){
disjointSetNames <- names(combProp)
numWays <- str_count(disjointSetNames,pattern = "&")+1
#one way set
oneWaySetName <- disjointSetNames[which(numWays==1)]
m <- length(oneWaySetName)
if(m == 1){
if(ThreeD){centre <- matrix(c(0,0,0),nrow = 1)}else{centre <- matrix(c(0,0),nrow=1)}
rownames(centre) <- oneWaySetName
loss <- 0
oneWayStress <- calculateStress(centre, radius, disProp, weight, ThreeD, planeSize,twoWayGenerate)
RSS <- oneWayStress$RSS
TSS <- oneWayStress$TSS
stress <- oneWayStress$stress
}else{
complete <- TRUE
if(max(numWays)>=3){
firstGenerate <- twoWayGeneration(combProp = combProp, mu = 1)
      #Determine whether every two-way overlap is present (complete) or some must be generated
if(firstGenerate$resam != 0){complete <- FALSE}
newTworWaySet <- firstGenerate$newTworWaySet
}else{newTworWaySet <- combProp[which(numWays == 2)]}
EDandInitialLocation <- EuclideanDistance(newTworWaySet = newTworWaySet, oneWaySetName = oneWaySetName,
radius = radius, ThreeD = ThreeD, initial = TRUE,
expand = expand, distanceTolerance = distanceTolerance,
maximumStep = maximumStep)
ED <- EDandInitialLocation$ED
xy <- EDandInitialLocation$xy
if(twoWayGenerate == TRUE && complete == FALSE){
out <- solveWithMu(ED = ED, xy = xy, combProp = combProp, ThreeD = ThreeD,
Delta = Delta, radius = radius, disProp = disProp, weight = weight, method = method,
firstGenerate = firstGenerate,
lower= lower, upper = upper, control = control, hessian = hessian,
expand = expand, scaleSeachTolerance = scaleSeachTolerance,
distanceTolerance = distanceTolerance, lossTolerance = lossTolerance,
stressBound = stressBound, maximumStep = maximumStep, planeSize = planeSize,
twoWayGenerate = twoWayGenerate)
ED <- out$ED
xy <- out$xy
}
result <- solveWithLambda(ED = ED, xy = xy, ThreeD = ThreeD, lambda = lambda, Delta = Delta,
radius = radius, disProp = disProp, weight = weight, method = method,
lower= lower, upper = upper, control = control, hessian = hessian,
scaleSeachTolerance = scaleSeachTolerance, lossTolerance = lossTolerance,
stressBound = stressBound, maximumStep = maximumStep, planeSize = planeSize,
twoWayGenerate = twoWayGenerate)
centre <- result$xy
rownames(centre) <- oneWaySetName
stress <- result$stress
loss <- result$loss
RSS <- result$RSS
TSS <- result$TSS
}
list(centre = centre, loss = loss, stress = stress, RSS = RSS, TSS = TSS)
}
#optimize lambda to get centre
solveWithLambda <- function(ED , xy , ThreeD , lambda, Delta, radius ,
disProp, weight, method, lower, upper, control,
hessian, scaleSeachTolerance, lossTolerance,
stressBound, maximumStep, planeSize, twoWayGenerate) {
stress_0_Calculation <- calculateStress(xy = xy,radius = radius,
disProp = disProp,
weight = weight,ThreeD = ThreeD, planeSize = planeSize,
twoWayGenerate = twoWayGenerate)
stress_0 <- stress_0_Calculation$stress
lambda_1_Stress <- findOptimalStress(lambda = 1, xy = xy,weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance, planeSize = planeSize,
twoWayGenerate = twoWayGenerate)
stress_1 <- lambda_1_Stress$stress
if(ThreeD){offset <- 3}else{offset <- 2}
if(min(stress_0,stress_1) == stress_0 && dim(xy)[1]<=offset){
stress <- stress_0
loss <- 0
RSS <- stress_0_Calculation$RSS
TSS <- stress_0_Calculation$TSS
}else{
if(is.null(lambda)){
if(method[1] == "NelderMead"){
Centre <- list();
lambda_2_Stress <- findOptimalStress(lambda = 1+Delta, xy = xy, weight = weight,
radius = radius, disProp = disProp,
ED = ED, ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
stress_2 <- lambda_2_Stress$stress
lambda_3_Stress <- findOptimalStress(lambda = 1-Delta, xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
stress_3 <- lambda_3_Stress$stress
Centre[[1]] <- lambda_1_Stress$xy
Centre[[2]] <- lambda_2_Stress$xy
Centre[[3]] <- lambda_3_Stress$xy
STRESS <- c(stress_1,stress_2,stress_3)
xy <- Centre[[which(STRESS == min(STRESS))[1]]]
loss <- c(lambda_1_Stress$loss, lambda_2_Stress$loss, lambda_3_Stress$loss)[which(STRESS == min(STRESS))[1]]
LAMBDA <- c(1,1+Delta,1-Delta)[order(STRESS)]
STRESS <- sort(STRESS)
count <- 0
while (count< maximumStep && min(STRESS) > stressBound){
count <- count+1
lambda_0 <- mean(LAMBDA[1:2])
lambda_R <- lambda_0 + (lambda_0 - LAMBDA[3])
lambda_R_stress <- findOptimalStress(lambda = lambda_R, xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
stress_R <- lambda_R_stress$stress
#Reflection:
if(STRESS[1] <= stress_R && stress_R< STRESS[2]){
STRESS <- c(STRESS[1] , stress_R , STRESS[2])
LAMBDA <- c(LAMBDA[1], lambda_R, LAMBDA[2])
} else if (stress_R < STRESS[1]){
#Expansion
xy <- lambda_R_stress$xy
loss <- lambda_R_stress$loss
lambda_E <- lambda_0 + 2*(lambda_R - lambda_0)
lambda_E_stress <- findOptimalStress(lambda = lambda_E, xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
stress_E = lambda_E_stress$stress
if(stress_E<stress_R){
STRESS <- c(stress_E , stress_R , STRESS[1])
LAMBDA <- c(lambda_E, lambda_R, LAMBDA[1])
xy <- lambda_E_stress$xy
loss <- lambda_E_stress$loss
}else if(stress_R<=stress_E && stress_E <STRESS[1]){
STRESS <- c(stress_R, stress_E, STRESS[1])
LAMBDA <- c(lambda_R, lambda_E, LAMBDA[1])
}
else if(STRESS[1]<=stress_E && stress_E <STRESS[2]){
STRESS <- c(stress_R, STRESS[1],stress_E)
LAMBDA <- c(lambda_R, LAMBDA[1], lambda_E)
}else{
STRESS <- c(stress_R, STRESS[1],STRESS[2])
LAMBDA <- c(lambda_R, LAMBDA[1],LAMBDA[2])
}
} else if(stress_R >= STRESS[2]) {
#Contraction
lambda_C <- lambda_0 + 0.5*(LAMBDA[3] - lambda_0)
lambda_C_stress <- findOptimalStress(lambda = lambda_C, xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
stress_C <- lambda_C_stress$stress
if (stress_C < STRESS[3]){
STRESS <- c(STRESS[1],STRESS[2],stress_C)
LAMBDA <- c(LAMBDA[1],LAMBDA[2],lambda_C)[order(STRESS)]
STRESS <- sort(STRESS)
if(min(STRESS) == stress_C){
xy <- lambda_C_stress$xy
loss <- lambda_C_stress$loss
}
} else {
#shrink
LAMBDA[2] <- (LAMBDA[1]+LAMBDA[2])/2
STRESS[2] <- findOptimalStress(lambda = LAMBDA[2], xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize,twoWayGenerate = twoWayGenerate)$stress
LAMBDA[3] <- (LAMBDA[1]+LAMBDA[3])/2
STRESS[3] <- findOptimalStress(lambda = LAMBDA[3], xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize,twoWayGenerate = twoWayGenerate)$stress
}
}
if(allConnectedCpp(xy = xy, radius = radius, ThreeD = ThreeD) == FALSE){break}
            if(BoolScaleNMCpp(proportional = scaleSeachTolerance$proportional,
                              value = scaleSeachTolerance$value,
                              LAMBDA = LAMBDA, STRESS = STRESS) == FALSE){break}
}
if(allConnectedCpp(xy = xy, radius = radius, ThreeD = ThreeD) == FALSE) {
lambdaStress <- findOptimalStress(lambda = LAMBDA[2], xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
xy <- lambdaStress$xy
if(allConnectedCpp(xy = xy, radius = radius, ThreeD = ThreeD) == FALSE) {
lambdaStress <- findOptimalStress(lambda = LAMBDA[3], xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
xy <- lambdaStress$xy
}
stress <- lambdaStress$stress
loss <- lambdaStress$loss
RSS <- lambdaStress$RSS
TSS <- lambdaStress$TSS
} else {
lambdaStress <- findOptimalStress(lambda = LAMBDA[1], xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
xy <- lambdaStress$xy
stress <- lambdaStress$stress
loss <- lambdaStress$loss
RSS <- lambdaStress$RSS
TSS <- lambdaStress$TSS
}
} else if (method[1] == "lineSearch"){
lambda_2_Stress <- findOptimalStress(lambda = 1+Delta, xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
stress_2 <- lambda_2_Stress$stress
lambda_3_Stress <- findOptimalStress(lambda = 1-Delta, xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
stress_3 <- lambda_3_Stress$stress
if(min(stress_1,stress_2,stress_3) == stress_1){
xy <- lambda_1_Stress$xy
loss <- lambda_1_Stress$loss
stress <- stress_1
RSS <- lambda_1_Stress$RSS
TSS <- lambda_1_Stress$TSS
} else {
Center <- list()
Loss <- c()
if(min(stress_1,stress_2,stress_3) == stress_2){
stress_n <- stress_2
stress <- stress_2
Center[[1]] <- lambda_2_Stress$xy
Loss[1] <- lambda_2_Stress$loss
lambda <- 1+Delta
shrinkage <- TRUE
} else {
stress_n <- stress_3
stress <- stress_3
Center[[1]] <- lambda_3_Stress$xy
Loss[1] <- lambda_3_Stress$loss
lambda <- 1-Delta
shrinkage <- FALSE
}
RSSVec <- c()
TSSVec <- c()
n <- 1
while(stress_n <= stress){
stress <- stress_n
if(shrinkage){
lambda <- lambda + Delta
} else {
lambda <- lambda - Delta
}
n <- n+1
lambdaStress <- findOptimalStress(lambda = lambda, xy = Center[[n-1]], weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
Center[[n]] <- lambdaStress$xy
Loss[n] <- lambdaStress$loss
stress_n <- lambdaStress$stress
RSSVec[n] <- lambdaStress$RSS
TSSVec[n] <- lambdaStress$TSS
                        if(BoolScaleLCpp(proportional = scaleSeachTolerance$proportional,
                                         value = scaleSeachTolerance$value, stress_n = stress_n, stress = stress)){break}
}
xy <- Center[[n-1]]
loss <- Loss[n-1]
RSS <- RSSVec[n-1]
TSS <- TSSVec[n-1]
}
}else if(method[1] == "goldenSectionSearch"){
Optimization <- optimize(f = findOptimalStressLambda, lower= lower[1], upper = upper[1],
xy = lambda_1_Stress$xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
if(is.null(Optimization$minimum)||is.infinite(Optimization$minimum)){
stop("This method does not converge")
}
lambdaStress <- findOptimalStress(lambda = Optimization$minimum, xy = lambda_1_Stress$xy,weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
xy <- lambdaStress$xy
loss <- lambdaStress$loss
stress <- Optimization$objective
RSS <- lambdaStress$RSS
TSS <- lambdaStress$TSS
} else {
        ## method[1] must be a method appropriate for optim(...)
Optimization <- optim(par = 1, fn = findOptimalStressLambda,method = method[1],
lower = lower[1], upper = upper[1],
control = control, hessian = hessian[1],
xy = lambda_1_Stress$xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
if(is.null(Optimization$par)||is.infinite(Optimization$par)){
stop("This method does not converge")
}
lambdaStress <- findOptimalStress(lambda = Optimization$par, xy = lambda_1_Stress$xy,weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
xy <- lambdaStress$xy
loss <- lambdaStress$loss
stress <- Optimization$value
RSS <- lambdaStress$RSS
TSS <- lambdaStress$TSS
}
} else {
Centre <- list()
RSSVec <- c()
TSSVec <- c()
STRESS <- rep(0,length(lambda))
Loss <- rep(1,length(lambda))
for(i in 1:length(lambda)){
out <- findOptimalStress(lambda = lambda[i], xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
Centre[[i]] <- out$xy
Loss[i] <- out$loss
STRESS[i] <- out$stress
RSSVec[i] <- out$RSS
TSSVec[i] <- out$TSS
}
index <- which(STRESS == min(STRESS))[1]
stress <- STRESS[index]
xy <- Centre[[index]]
loss <- Loss[index]
RSS <- RSSVec[index]
TSS <- TSSVec[index]
}
}
list(xy = xy, loss = loss, stress = stress, RSS = RSS, TSS = TSS)
}
# if two-way intersections are missing, optimize mu to get the Euclidean distance
solveWithMu <- function(ED, xy, combProp, ThreeD, radius, disProp,
weight, Delta, method, firstGenerate,
lower, upper, control, hessian, expand, scaleSeachTolerance,
distanceTolerance, lossTolerance,
stressBound, maximumStep, planeSize, twoWayGenerate) {
stress_0 <- calculateStress(xy = xy,radius = radius,disProp = disProp,weight = weight,ThreeD = ThreeD,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)$stress
mu_1_Stress <- findOptimalStress(lambda = 1, xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
stress_1 <- mu_1_Stress$stress
if(min(stress_0,stress_1) != stress_0){
if(method[2] == "NelderMead"){
EDhat <- list()
Centre <- list()
EDplus <- newEuclideanDistance(firstGenerate = firstGenerate, mu = 1+Delta,
combProp = combProp, radius = radius, ThreeD = ThreeD,
expand = expand, distanceTolerance = distanceTolerance,
maximumStep = maximumStep)
mu_2_Stress <- findOptimalStress(lambda = 1, xy = xy, weight = weight,radius = radius, disProp = disProp,
ED = EDplus, ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
EDplusplus <- newEuclideanDistance(firstGenerate = firstGenerate, mu = 1+2*Delta,
combProp = combProp, radius = radius, ThreeD = ThreeD,
expand = expand, distanceTolerance = distanceTolerance,
maximumStep = maximumStep)
mu_3_Stress <- findOptimalStress(lambda = 1, xy = xy, weight = weight,radius = radius, disProp = disProp,
ED = EDplusplus, ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
Centre[[1]] <- mu_1_Stress$xy
Centre[[2]] <- mu_2_Stress$xy
Centre[[3]] <- mu_3_Stress$xy
EDhat[[1]] <- ED
EDhat[[2]] <- EDplus
EDhat[[3]] <- EDplusplus
STRESS <- c(stress_1, mu_2_Stress$stress, mu_3_Stress$stress)
ED <- EDhat[[which(STRESS == min(STRESS))[1]]]
centre <- Centre[[which(STRESS == min(STRESS))[1]]]
MU <- c(1,1+Delta,1+2*Delta)[order(STRESS)]
STRESS <- sort(STRESS)
count <- 0
while(count<maximumStep && min(STRESS)> stressBound) {
#reflection
count <- count+1
mu_0 <- mean(MU[1:2])
mu_R <- mu_0 + (mu_0 - MU[3])
EDreflection <- newEuclideanDistance(firstGenerate = firstGenerate, mu = mu_R,
combProp = combProp, radius = radius, ThreeD = ThreeD,
expand = expand,distanceTolerance = distanceTolerance,
maximumStep = maximumStep)
mu_R_stress <- findOptimalStress(lambda = 1, xy = xy, weight = weight, radius = radius, disProp = disProp,
ED = EDreflection,ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
stress_R <- mu_R_stress$stress
#Reflection:
if(STRESS[1] <= stress_R && stress_R< STRESS[2]){
STRESS <- c(STRESS[1] , stress_R , STRESS[2])
MU <- c(MU[1], mu_R, MU[2])
}else if(stress_R < STRESS[1]){
#Expansion
centre <- mu_R_stress$xy
ED <- EDreflection
mu_E <- mu_0 + 2*(mu_R - mu_0)
EDexpansion <- newEuclideanDistance(firstGenerate = firstGenerate, mu = mu_E,
combProp = combProp, radius = radius, ThreeD = ThreeD,
expand = expand, distanceTolerance = distanceTolerance,
maximumStep = maximumStep)
mu_E_stress <- findOptimalStress(lambda = 1, xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = EDexpansion,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
stress_E <- mu_E_stress$stress
if(stress_E<stress_R){
STRESS <- c(stress_E , stress_R , STRESS[1])
MU <- c(mu_E, mu_R, MU[1])
ED <- EDexpansion
centre <- mu_E_stress$xy
}else if(stress_R<=stress_E && stress_E <STRESS[1]){
STRESS <- c(stress_R, stress_E, STRESS[1])
MU <- c(mu_R, mu_E, MU[1])
}
else if(STRESS[1]<=stress_E && stress_E <STRESS[2]){
STRESS <- c(stress_R, STRESS[1],stress_E)
MU <- c(mu_R, MU[1], mu_E)
}else{
STRESS <- c(stress_R, STRESS[1],STRESS[2])
MU <- c(mu_R, MU[1],MU[2])
}
}else if(stress_R >= STRESS[2]){
#Contraction
mu_C <- mu_0 + 0.5*(MU[3] - mu_0)
EDcontraction <- newEuclideanDistance(firstGenerate = firstGenerate, mu = mu_C,
combProp = combProp,
radius = radius, ThreeD = ThreeD,
expand = expand, distanceTolerance = distanceTolerance,
maximumStep = maximumStep)
mu_C_stress <- findOptimalStress(lambda = 1, xy = xy, weight = weight,radius = radius, disProp = disProp,
ED = EDcontraction, ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
stress_C <- mu_C_stress$stress
if (stress_C < STRESS[3]){
STRESS <- c(STRESS[1],STRESS[2],stress_C)
            MU <- c(MU[1],MU[2],mu_C)[order(STRESS)]
STRESS <- sort(STRESS)
if(min(STRESS) == stress_C){
ED <- EDcontraction
centre <- mu_C_stress$xy}
}else{
#shrink
MU[2] <- (MU[1] + MU[2])/2
MU[3] <- (MU[1] + MU[3])/2
}
}
        if(BoolScaleNMCpp(proportional = scaleSeachTolerance$proportional,
                          value = scaleSeachTolerance$value,
                          LAMBDA = MU, STRESS = STRESS) == FALSE){break}
}
xy <- centre
} else if(method[2] == "lineSearch") {
stress_n <- stress_1
Center <- list()
Center[[1]] <- mu_1_Stress$xy
n <- 1
mu <- 1
EDhat <- list()
EDhat[[1]] <- ED
while (stress_n <= stress_1){
stress_1 <- stress_n
mu <- mu + Delta
ED <- newEuclideanDistance(firstGenerate = firstGenerate, mu = mu,
combProp = combProp, radius = radius, ThreeD = ThreeD,
expand = expand, distanceTolerance = distanceTolerance,
maximumStep = maximumStep)
L <- findOptimalStress(lambda = 1, xy = Center[[n]], weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
n <- n+1
stress_n <- L$stress
Center[[n]] <- L$xy
EDhat[[n]] <- ED
}
xy <- Center[[n-1]]
ED <- EDhat[[n-1]]
} else if(method[2] == "goldenSectionSearch"){
Optimization <- optimize(f = findOptimalStressMu, lower = lower[2], upper = upper[2],
xy = mu_1_Stress$xy, weight = weight,
firstGenerate = firstGenerate,combProp = combProp,
radius = radius, disProp = disProp, ED = ED,ThreeD = ThreeD,
expand = expand, lossTolerance = lossTolerance,
distanceTolerance = distanceTolerance,
maximumStep = maximumStep,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
if(is.null(Optimization$minimum) || is.infinite(Optimization$minimum)){
stop("This method does not converge")
}else{
ED <- newEuclideanDistance(firstGenerate = firstGenerate, mu = Optimization$minimum,
combProp = combProp, radius = radius, ThreeD = ThreeD,
expand = expand, distanceTolerance = distanceTolerance,
maximumStep = maximumStep)
muStress <- findOptimalStress(lambda = 1, xy = mu_1_Stress$xy,weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
xy <- muStress$xy
}
}else{
Optimization <- optim(par = 1, fn = findOptimalStressMu,method = method[2],
lower = lower[2], upper = upper[2],
control = control, hessian = hessian[2],
xy = mu_1_Stress$xy, weight = weight,
firstGenerate = firstGenerate,combProp = combProp,
radius = radius, disProp = disProp, ED = ED,ThreeD = ThreeD,
expand = expand, lossTolerance = lossTolerance,
distanceTolerance = distanceTolerance,
maximumStep = maximumStep,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
      if(is.null(Optimization$par) || is.infinite(Optimization$par)){
stop("This method does not converge")
}else{
        ED <- newEuclideanDistance(firstGenerate = firstGenerate, mu = Optimization$par,
combProp = combProp, radius = radius, ThreeD = ThreeD,
expand = expand, distanceTolerance = distanceTolerance,
maximumStep = maximumStep)
L <- findOptimalStress(lambda = 1, xy = mu_1_Stress$xy,weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
xy <- L$xy
}
}
}
list(xy = xy, ED = ED)
}
# draw sphere
sphere = function(xy, radius, cols, alpha, oneWaySetName, smooth){
open3d()
if(smooth){
n <- dim(xy)[1]
f <- function(s, t) cbind(r * cos(s) * cos(t) + x0,
r * sin(s) * cos(t) + y0,
r * sin(t) + z0)
for(i in 1:n){
x0 = xy[i,1]
y0 = xy[i,2]
z0 = xy[i,3]
r = radius[i]
persp3d(f, slim = c(0, pi), tlim = c(0, 2*pi), n = 101, add = T, col = cols[i], alpha = alpha)
}
} else {
spheres3d(xy[,1], xy[,2], xy[,3], radius = radius,
color = cols, alpha = alpha)
}
text3d(xy[,1], xy[,2], xy[,3], oneWaySetName)
}
# if two-way intersections are missing, compute a new Euclidean distance matrix for a given mu
newEuclideanDistance <- function(firstGenerate, mu, combProp, radius, ThreeD, expand,
distanceTolerance, maximumStep){
disjointSetNames <- names(combProp)
numWays <- str_count(disjointSetNames,pattern = "&")+1
#one way set
oneWaySetName <- disjointSetNames[which(numWays==1)]
oneWaySet <- combProp[which(numWays==1)]
nextGenerate <- lapply(firstGenerate$New,
                         function(a, oneWaySet, mu){
higherWay <- a[1];newGenerate <- a[-1]
newGenerateName <- names(newGenerate)
for(i in 1:length(newGenerate)){
newGenerate[i] <- higherWay*mu^(str_count(names(higherWay),pattern = "&")+1-2)
newGenerateNameSeparate <- str_split(newGenerateName,"&")[[i]]
minOneWay <- min(oneWaySet[which((names(oneWaySet) %in% newGenerateNameSeparate) == TRUE)])
if(newGenerate[i]> minOneWay){
newGenerate[i] <- runif(1,min = higherWay,max = minOneWay)
}
}
newGenerate
}, oneWaySet = oneWaySet, mu = mu)
newTworWaySet <- c(combProp[which(numWays == 2)], unlist(nextGenerate))
EuclideanDistance(newTworWaySet = newTworWaySet,
oneWaySetName = oneWaySetName, radius = radius,
ThreeD = ThreeD, initial = FALSE, expand = expand,
distanceTolerance = distanceTolerance, maximumStep = maximumStep)
}
# given lambda, optimize stress to get centres and corresponding values
findOptimalStress <- function(lambda, xy, weight, radius, disProp, ED, ThreeD, lossTolerance, planeSize, twoWayGenerate){
if(is.null(lossTolerance$ALPHA)){
bool <- TRUE
    # placeholder to satisfy the "double" input in Cpp; ALPHA will be generated through Newton's method
ALPHA <- 0.01
} else{
bool <- FALSE
ALPHA <- lossTolerance$ALPHA
}
L <- lossCpp(xy = xy,radius = radius, lambda = lambda, ED = ED,
ThreeD = ThreeD, ToleranceofLoss = lossTolerance$ToleranceofLoss, maximumStep = lossTolerance$maximumStep,
               ToleranceofStepsize = lossTolerance$ToleranceofStepsize, proportional = lossTolerance$proportional,
ALPHA = ALPHA, Bool = bool)
stressCalculation <- calculateStress(xy = L$xy,radius = radius,
disProp = disProp, weight = weight, ThreeD = ThreeD,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
stress <- stressCalculation$stress
RSS <- stressCalculation$RSS
TSS <- stressCalculation$TSS
## Return a list
list(xy = L$xy, stress = stress, loss = L$loss, RSS = RSS, TSS = TSS)
}
# used in optim(...) or optimize(...); finds the lambda with minimum stress
findOptimalStressLambda<- function(lambda, xy, weight, radius, disProp, ED, ThreeD, lossTolerance, planeSize, twoWayGenerate){
stressValues <- findOptimalStress(lambda = lambda, xy = xy,weight = weight,
radius = radius, disProp = disProp,
ED = ED,ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
## Return the stress only
stressValues$stress
}
# used in optim(...) or optimize(...); finds the mu with minimum stress
findOptimalStressMu<- function(mu, xy, weight, firstGenerate, combProp, radius, disProp, ED,
ThreeD, expand, lossTolerance, distanceTolerance, maximumStep, planeSize, twoWayGenerate){
ED <- newEuclideanDistance(firstGenerate = firstGenerate, mu = mu,
combProp = combProp, radius = radius, ThreeD = ThreeD,
expand = expand, distanceTolerance = distanceTolerance,
maximumStep = maximumStep)
stressValues <- findOptimalStress(lambda = 1, xy = xy, weight = weight,
radius = radius, disProp = disProp, ED = ED,
ThreeD = ThreeD, lossTolerance = lossTolerance,
planeSize = planeSize, twoWayGenerate = twoWayGenerate)
## Return the stress only
stressValues$stress
}
#transform disjoint combinations into overlapping (non-disjoint) combinations
disjoint2combinations <- function(combinations){
disjointSetNames <- names(combinations)
#numWays <- str_length(name)
numWays <- str_count(disjointSetNames,pattern = "&")+1
combinations <- combinations[order(numWays)]
numWays <- sort(numWays)
if(max(numWays)!=1){
for(i in 1:length(numWays)){
if(numWays[i] == max(numWays)){break} else {
Index <- sapply(str_split(names(combinations[-i]),pattern = "&"),
function(a){bool <- 0;
if(all(str_split(names(combinations[i]),pattern = "&")[[1]]%in%a))
{bool <- 1};
bool})
if(length(which(Index == 1))!=0){combinations[i] <- combinations[i] + sum(combinations[-i][which(Index == 1)])}
}
}
}
combinations
}
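# A worked sketch of the transformation above (illustrative numbers):
#   input (disjoint): c(A = 5, B = 4, "A&B" = 1)  # "A" counts only the part outside B
#   output          : c(A = 6, B = 5, "A&B" = 1)  # each set now includes its intersections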
#reorder
reorderDisjointCombinations <- function(combinations, disjoint.combinations){
m <- length(combinations)
newDisjointCombinations <- rep(0, m)
combinationsName <- names(combinations)
newOrder <- rep(0, m)
disjoint.combinationsName <- names(disjoint.combinations)
for(i in 1:m){
    wayOrder <- which(disjoint.combinationsName %in% combinationsName[i])
newDisjointCombinations[i] <- disjoint.combinations[wayOrder]
newOrder[i] <- wayOrder
}
names(newDisjointCombinations) <- combinationsName
list(newDisjointCombinations = newDisjointCombinations, newOrder = newOrder)
}
# If input is a data list(frame), extract combinations from it
extractCombinations <- function(data, vars) {
data <- as.data.frame(data)
#turn character to numeric
if(is.null(vars)==FALSE){
allColName <- colnames(data)
    vars <- match.arg(vars, allColName, several.ok = TRUE)
data <- data[, which(allColName%in%vars) ]
}
if(is.null(dim(data))){
data <- as.data.frame(data)
colnames(data) <- vars
}
if(dim(data)[2] == 1){
warning("Meaningless factor data frame")
}
colReduceForSure <- rep(0,dim(data)[2])
for(i in 1:dim(data)[2]){
if(is.numeric(data[,i]) && all(unique(data[,i])%in%c(0,1)) == FALSE){
colReduceForSure[i] <- i
} else if(length(unique(data[,i])) == 1){
colReduceForSure[i] <- i
}
}
# get rid of some numeric columns
if(all(colReduceForSure==0)==FALSE){
colReduceForSure <- colReduceForSure[which(colReduceForSure!=0)]
data <- data[,-colReduceForSure]
    warning(paste("Non-factor column(s)", toString(colReduceForSure),
                  "have been ignored"))
}
colName <- colnames(data)
n <- dim(data)[1]
p <- dim(data)[2]
colAdd <- list()
colReduce <- rep(0,p)
for(i in 1:p){
uniqueName <- unique(data[,i])
if(all(uniqueName%in%c(0,1))) {next
} else if (mode(levels(data[,i])) == "character"){
colReduce[i] <- i
newDataSet <- matrix(rep(0,n*length(uniqueName)),nrow = n)
for(j in 1:length(uniqueName)){
newDataSet[which(data[,i] == uniqueName[j]),j] <- 1
}
colnames(newDataSet) <- uniqueName
colAdd[[i]] <- newDataSet
}
}
colReduce <- colReduce[which(colReduce!=0)]
if(length(colReduce)!=0) {
newdata <- as.matrix(data[,-colReduce])
if(dim(newdata)[2] == 1){
colnames(newdata) <- colName[-colReduce]
}
for(i in 1:length(colAdd)){
colAddName <- colnames(colAdd[[i]])
sumAll <- apply(colAdd[[i]], 2, "sum")
deleteCol <- which(sumAll == min(sumAll))[1]
newInput <- as.matrix(colAdd[[i]][,-deleteCol])
if(dim(newInput)[2] == 1){
colnames(newInput) <- colAddName[-deleteCol]
}
newdata <- cbind(newdata, newInput)
}
} else {newdata <- data}
  rowSumZero <- which(apply(newdata,1,"sum")==0)
  if(length(rowSumZero)!=0){
    newdata <- as.matrix(newdata[-rowSumZero,])
  }
}
if(dim(newdata)[2] == 1) {
disjoint.combinations <- c(sum(newdata))
names(disjoint.combinations) <- colnames(newdata)
} else {
#OUTCOME is not disjoint
G <- list()
for(i in 1:dim(newdata)[2]){
G[[i]] <- t(combn(dim(newdata)[2],i))
}
#OUTCOME is disjoint
newColName <- colnames(newdata)
nameList <- apply(newdata,1, function(a){paste(newColName[which(a==1)],collapse = "&")})
disjointOutput <- aggregate(data.frame(count = nameList), list(name = nameList), length)
disjoint.combinations <- disjointOutput[,2]
names(disjoint.combinations) <- disjointOutput[,1]
numWays <- str_count(disjointOutput[,1], pattern = "&")+1
oneWays <- which(numWays == 1)
if(length(oneWays) != length(newColName)){
notIn <- which(newColName %in% disjointOutput[,1][oneWays] == FALSE)
newIn <- rep(0, length(notIn))
names(newIn) <- newColName[notIn]
disjoint.combinations <- c(disjoint.combinations, newIn)
}
}
disjoint.combinations
}
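# A hedged input/output sketch (column names and rows are assumptions):
#   df <- data.frame(A = c(1, 1, 0), B = c(1, 0, 1))
#   extractCombinations(df, vars = NULL)
#   # yields disjoint counts such as c(A = 1, "A&B" = 1, B = 1)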
# Calculates the centre-to-centre distance between two circles (2D) or spheres (3D) sharing overlap S
Distance <- function(r1,r2,S,ThreeD, expand, distanceTolerance, maximumStep) {
if (ThreeD){
if(S == 0){
if(is.null(expand)) {
d <- r1+r2
} else {
d <- (r1+r2)*expand
}
} else if(abs(S - min(4*pi/3*r1^3,4*pi/3*r2^3)) < distanceTolerance$value) {
d <- max(r1,r2) - min(r1,r2)
} else {
theta <- matrix(c(0,0),nrow = 2)
thetanew <- theta+1
f1 <- pi/3*r1^3*(1-cos(theta[1]))^2*(2+cos(theta[1])) +
pi/3*r2^3*(1-cos(theta[2]))^2*(2+cos(theta[2])) - S
f2 <- r1*sin(theta[1]) - r2*sin(theta[2])
k <- 0
while(k < maximumStep){
theta <- thetanew
f1 <- pi/3*r1^3*(1-cos(theta[1]))^2*(2+cos(theta[1])) +
pi/3*r2^3*(1-cos(theta[2]))^2*(2+cos(theta[2])) - S
f2 <- r1*sin(theta[1]) - r2*sin(theta[2])
f <- matrix(c(f1,f2),nrow = 2)
g <- matrix(c(pi*r1^3*sin(theta[1])^3, pi*r2^3*sin(theta[2])^3,
r1*cos(theta[1]),-r2*cos(theta[2])),nrow = 2, byrow = T)
g <- solve(g)
thetanew <- theta - g%*%f
k <- k+1
        if(BoolDistanceCpp(proportional = distanceTolerance$proportional,
                           value = distanceTolerance$value,
                           f1 = f1, f2 = f2,
                           thetanew = thetanew, theta = theta) == FALSE){break}
}
if(any(thetanew>pi) || any(thetanew<0) || k == maximumStep){
theta1 <- seq(0,pi, length = 200)
theta2 <- seq(0,pi, length = 200)
searchMatrix <- distanceCpp(r1, r2,theta1, theta2, S,ThreeD)
index <- which(searchMatrix== min(searchMatrix),arr.ind=T)[1,]
d <- r1*cos( theta1[index[1]]) + r2*cos(theta2[index[2]])
} else {
d <- r1*cos(thetanew[1]) + r2*cos(thetanew[2])
}
}
}
else{
if(S==0){
if(is.null(expand)){
d <- r1+r2
} else {
d <- (r1+r2)*expand
}
}else if( abs(S - min(pi*r1^2,pi*r2^2)) < distanceTolerance$value ){
d <- max(r1,r2) - min(r1,r2)
}else{
theta <- matrix(c(0,0),nrow = 2)
thetanew <- theta+1
f1 <- theta[1]*r1^2 - sin(2*theta[1])*r1^2/2 +theta[2]*r2^2 - sin(2*theta[2])*r2^2/2 - S
f2 <- r1*sin(theta[1]) - r2*sin(theta[2])
k <- 0
while(k < maximumStep){
theta <- thetanew
f1 <- theta[2]*r2^2 - sin(2*theta[2])*r2^2/2 + theta[1]*r1^2 - sin(2*theta[1])*r1^2/2 - S
f2 <- r1*sin(theta[1]) - r2*sin(theta[2])
f <- matrix(c(f1,f2),nrow = 2)
g <- matrix(c(r1^2-r1^2*cos(2*theta[1]), r2^2-r2^2*cos(2*theta[2]),
r1*cos(theta[1]),-r2*cos(theta[2])),nrow = 2, byrow = T)
g <- solve(g)
thetanew <- theta - g%*%f
k <- k+1
        if(BoolDistanceCpp(proportional = distanceTolerance$proportional,
                           value = distanceTolerance$value,
                           f1 = f1, f2 = f2,
                           thetanew = thetanew, theta = theta) == FALSE){break}
}
if(any(thetanew>pi) || any(thetanew<0) || k == maximumStep){
theta1 <- seq(0,pi, length = 200)
theta2 <- seq(0,pi, length = 200)
searchMatrix <- distanceCpp(r1, r2,theta1, theta2, S,ThreeD)
index <- which(searchMatrix== min(searchMatrix),arr.ind=T)[1,]
d <- r1*cos( theta1[index[1]]) + r2*cos(theta2[index[2]])
} else {
d <- r1*cos(thetanew[1]) + r2*cos(thetanew[2])
}
}
}
return(d)
}
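# A quick numeric sanity check (illustrative): two unit circles sharing an
# overlap of pi/2 (half of one circle) should sit closer than the tangent
# distance of 2; the Newton iteration above gives approximately 0.81:
#   Distance(1, 1, pi/2, ThreeD = FALSE, expand = NULL,
#            distanceTolerance = list(value = 1e-5, proportional = FALSE),
#            maximumStep = 50)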
# Plots circles of Venn diagram
plotCircle <- function(xy, radius,name, col, mar, ...) {
par(mar = mar)
a1 <- range(c((xy + radius)[,1], (xy - radius)[,1]))
a2 <- range(c((xy + radius)[,2], (xy - radius)[,2]))
plot.window(a1, a2, "", asp = 1)
theta <- seq.int(360)/360*2*pi
for (i in 1:length(radius)){
polygon(xy[i,1] + radius[i]*cos(theta), xy[i,2] + radius[i]*sin(theta), col = col[i],border = col[i])
}
text(xy, name)
}
#rotate centres until the two groups are totally separated
rotateCentres <- function(xy,transxy,radius1,radius2, delta){
for(i in 1:35){
theta <- i/18*pi
if(dim(xy)[2] == 3){
rotation1 <- matrix(c(1,0,0,0,cos(theta),-sin(theta),0,sin(theta),cos(theta)),nrow = 3,byrow = T)
rotation2 <- matrix(c(cos(theta),0,sin(theta),0,1,0,-sin(theta),0,cos(theta)),nrow = 3,byrow = T)
rotation3 <- matrix(c(cos(theta),-sin(theta),0,sin(theta),cos(theta),0,0,0,1),nrow = 3,byrow = T)
rotation <- rotation1%*%rotation2%*%rotation3}
else{
rotation <- matrix(c(cos(theta),-sin(theta),sin(theta),cos(theta)),nrow = 2,byrow = T)
}
newxy <- t(rotation%*%t(transxy))
newxy <- t(transxy[1,] - newxy[1,] + t(newxy))
Judgement <- allDisjointCpp(xy,newxy,radius1,radius2, delta)
if (Judgement!=0 ){break}
}
list(Judgement =Judgement, newxy = newxy)
}
# after finding centres, combine them with reasonable distance
combineGroups <- function(groupCentre, radiusGroup, delta){
groupNum <- length(groupCentre)
radius <- unlist(radiusGroup)
namexy <- lapply(radiusGroup,
function(a){
names(a)
})
groupxy <- groupCentre
radiusxy <- radiusGroup
for (gn in 1:(groupNum-1)){
#Randomly find a minimum x, minimum y, maximum x, or maximum y of the second group
cornerrandom = ceiling(runif(1,0,4))
if(cornerrandom == 1){
corner <- which(groupCentre[[gn+1]][,1] == min(groupCentre[[gn+1]][,1]))[1]
}else if(cornerrandom == 2){
corner <- which(groupCentre[[gn+1]][,1] == max(groupCentre[[gn+1]][,1]))[1]
}else if(cornerrandom == 3){
corner <- which(groupCentre[[gn+1]][,2] == min(groupCentre[[gn+1]][,2]))[1]
}else{
corner <- which(groupCentre[[gn+1]][,2] == max(groupCentre[[gn+1]][,2]))[1]
}
#corresponding radius
radiusvec <- radiusGroup[[gn+1]][corner]
    #generate a candidate centre so that the moved circle is totally disjoint from the former group
xyvec <- transCpp(xy = groupCentre[[gn]], radius = radiusGroup[[gn]], radiusvec = radiusvec, radiusall = radius)
#move the second group
transxy <- t(t(groupCentre[[gn+1]]) + xyvec - groupCentre[[gn+1]][corner,])
    #If the two groups are not totally separated, rotate and resample until they are
if(allDisjointCpp(groupCentre[[gn]],transxy,radiusGroup[[gn]],radiusGroup[[gn+1]],delta) == 0){
out <- rotateCentres(groupCentre[[gn]],transxy,radiusGroup[[gn]],radiusGroup[[gn+1]],delta)
Judgement <- out$Judgement
newxy <- out$newxy
while(Judgement == 0){
xyvec <- transCpp(xy = groupCentre[[gn]], radius = radiusGroup[[gn]],
radiusvec = radiusvec, radiusall = radius)
transxy <- t(t(groupCentre[[gn+1]]) + xyvec - groupCentre[[gn+1]][corner,])
        out <- rotateCentres(groupCentre[[gn]],transxy,radiusGroup[[gn]],radiusGroup[[gn+1]],delta)
Judgement <- out$Judgement
newxy <- out$newxy
}
rownames(newxy) <- namexy[[gn+1]]
}else{newxy <- transxy}
#renew the second group centre
groupxy[[gn+1]] <- newxy
}
#Move the last group to the former one with reasonable distance
for(gn in 1:(groupNum-1)){
center1 <- rep(0,dim(groupxy[[gn]])[2])
center2 <- rep(0,dim(groupxy[[gn+1]])[2])
for(j in 1:length(center1)){
center1[j] <- (max(groupxy[[gn]][,j]+radiusxy[[gn]])+
min(groupxy[[gn]][,j]-radiusxy[[gn]]))/2
center2[j] <- (max(groupxy[[gn+1]][,j]+radiusxy[[gn+1]])+
min(groupxy[[gn+1]][,j]-radiusxy[[gn+1]]))/2
}
direction <- center1-center2
if(delta==0){delta <- (1e-4)}
# scale it
direction <- direction/sqrt(sum(direction^2))*delta/10
newxy <- closeCpp(groupxy[[gn]],groupxy[[gn+1]],radiusxy[[gn]],
radiusxy[[gn+1]],delta = delta,direction)$xy
groupxy[[gn+1]] <- rbind(groupxy[[gn]],newxy)
radiusxy[[gn+1]] <- c(radiusxy[[gn]],radiusxy[[gn+1]])
namexy[[gn+1]] <- c(namexy[[gn]],namexy[[gn+1]])
}
list(xy = groupxy[[groupNum]], radius = radiusxy[[groupNum]],namexy = namexy[[groupNum]])
}
#given centres, numerically calculate each disjoint part's area and return stress
calculateStress <- function(xy, radius, disProp, weight, ThreeD, planeSize, twoWayGenerate){
m <- length(radius)
l <- list()
oneWaySetName <- names(radius)
if(ThreeD){
xuan <- (max(xy[,1]) - min(xy[,1])+2*max(radius))/planeSize
yuan <- (max(xy[,2]) - min(xy[,2])+2*max(radius))/planeSize
zuan <- (max(xy[,3]) - min(xy[,3])+2*max(radius))/planeSize
myList <- list()
for(k in 1:m){
for(i in 1:planeSize){
myList[[i]] <- matrix(rep(0,planeSize^2),nrow =planeSize)
}
l[[k]] <- binaryIndexThreeDCpp(myList, xy, radius,k ,yuan, xuan, zuan, planeSize)
}
numericArea <- goThroughPixelThreeDCpp(l, m, planeSize)
}else{
xuan <- (max(xy[,1]) - min(xy[,1])+2*max(radius))/planeSize
yuan <- (max(xy[,2]) - min(xy[,2])+2*max(radius))/planeSize
for (k in 1:m){
l[[k]] <- binaryIndexCpp(matrix(rep(0,planeSize^2),nrow =planeSize),xy,radius,k,yuan,xuan, planeSize)
}
numericArea <- goThroughPixelCpp(myList = l, m = m, num = planeSize)
}
if(m == 1){
TSS <- (sum(numericArea)/(planeSize*planeSize))^2*weight
RSS <- 0
stress <- 0
} else {
numericArea <- numericArea[which(getRidofZeroCpp(numericArea)!=0),]
numericAreaMatrix <- matrix(unlist(unique(as.data.frame(numericArea))),ncol = m)
numericAreaVector <- countCpp(numericArea,numericAreaMatrix)
numericAreaVectorName <- apply(numericAreaMatrix,1,
function(a, oneWaySetName)
{paste(oneWaySetName[which(a!=0)],collapse="&")
}, oneWaySetName = oneWaySetName)
names(numericAreaVector) <- numericAreaVectorName
lengthDisProp <- length(disProp)
cstar <- rep(0, lengthDisProp)
astar <- disProp
wstar <- weight
lengthNumericAreaVector <- length(numericAreaVector)
numericAreaVectorIndex <- rep(0,lengthDisProp)
numericAreaVectorNameSplit <- str_split(numericAreaVectorName,pattern = "&")
for(i in 1:lengthDisProp){
splitDisPropName <- str_split(names(disProp),pattern = "&")[[i]]
index <- 0
for (j in 1:lengthNumericAreaVector){
if(j %in% numericAreaVectorIndex) {next}
if(all(numericAreaVectorNameSplit[[j]] %in% splitDisPropName,
length(numericAreaVectorNameSplit[[j]]) == length(splitDisPropName))){
index <- j
numericAreaVectorIndex[i] <- index
break
}
}
if(index != 0){
cstar[i] <- numericAreaVector[index]
}
}
unnecessaryOverlayIndex <- which(c(1:lengthNumericAreaVector) %in% numericAreaVectorIndex == FALSE)
lengthofThisIndex <- length(unnecessaryOverlayIndex)
if(lengthofThisIndex != 0 && twoWayGenerate == FALSE){
cstar <- c(cstar, numericAreaVector[unnecessaryOverlayIndex])
astar <- c(disProp, rep(0, lengthofThisIndex))
wstar <- c(weight, rep(1, lengthofThisIndex))
}
fit <- lm(cstar~astar-1, weights = wstar)
RSS <- sum(fit$residuals^2*wstar)
TSS <- sum(cstar^2*wstar)
stress <- RSS/TSS
}
list(stress = stress, RSS = RSS, TSS = TSS)
}
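# For reference, the stress above is the relative weighted residual of a
# no-intercept regression of achieved areas (cstar) on requested areas (astar):
#   stress = sum(w_i * (c_i - beta * a_i)^2) / sum(w_i * c_i^2)
# where beta is the fitted scale; stress = 0 means the drawn diagram matches
# the requested disjoint areas exactly, up to one global scaling factor.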
#given combinations, detect groups
groupDetection <- function(largerThanOneWaySetName, oneWaySetName){
vertex <- list()
if(is.null(largerThanOneWaySetName)){
for(i in 1:length(oneWaySetName)){
vertex[[i]] <- i
}
} else {
largerThanOneWaySetLength <- length(largerThanOneWaySetName)
for(i in 1:largerThanOneWaySetLength){
Boolean1 <- rep(FALSE, largerThanOneWaySetLength)
for(j in i:largerThanOneWaySetLength){
Boolean1[j] <- any((str_split(largerThanOneWaySetName[i],"&")[[1]] %in% str_split(largerThanOneWaySetName[j],"&")[[1]])==TRUE)
}
Boolean1True <- which(Boolean1 == TRUE)
vertex[[i]] <- unique(unlist(str_split(largerThanOneWaySetName[Boolean1True],"&")))
if(i >1){
Boolean2 <- rep(FALSE, i-1)
for(j in 1:(i-1)){
Boolean2[j] <- any((vertex[[i]]%in%vertex[[j]])==TRUE)
}
Boolean2True <- which(Boolean2 == TRUE)
if(length(Boolean2True) == 1){
vertex[[Boolean2True]] <- unique(c(vertex[[Boolean2True]],vertex[[i]]))
vertex[[i]] <- NULL
}
}
}
if(length(which(sapply(vertex, is.null)))!=0){ vertex <- vertex[-which(sapply(vertex, is.null))]}
vertexLength <- length(vertex)
if(length(unlist(vertex)) != length(oneWaySetName)){
difference <- length(oneWaySetName) - length(unlist(vertex))
rest <- oneWaySetName[which((oneWaySetName%in%unlist(vertex))== FALSE)]
for(i in 1:difference){
vertex[[i+vertexLength]] <- rest[i]
}
}
vertex <- lapply(vertex,
function(a,oneWaySetName){
which((oneWaySetName%in%a)==TRUE)
}, oneWaySetName = oneWaySetName)
}
vertex
}
# if two way intersections are missing, generate them
twoWayGeneration <- function(combProp, mu){
#numWays <- str_length(names(combProp))
numWays <- str_count(names(combProp),pattern = "&")+1
oneWaySet <- combProp[which(numWays==1)]
twoWaySet <- combProp[which(numWays == 2)]
twoWaySetName <- names(twoWaySet)
#highWay gives way larger than 2
highWaySet <- sort(combProp[which(numWays > 2)],decreasing = T)
highWaySetName <- names(highWaySet)
highWays <- str_count(highWaySetName,pattern = "&")+1
resam <- 0
New <- list()
newName <- c()
for (i in 1:length(highWays)){
com <- combn(highWays[i],2)
L <- dim(com)[2]
newTwoWaySet <- rep(0,1e5)
newTwoWaySetName <- rep(0,1e5)
for (j in 1:L){
highWaySetNameSplit <- str_split(highWaySetName[i],"&")[[1]][com[,j]]
value_a <- oneWaySet[which(names(oneWaySet)==highWaySetNameSplit[1])]
value_b <- oneWaySet[which(names(oneWaySet)==highWaySetNameSplit[2])]
valuemin <- min(value_a,value_b)
highWaySetNamePaste <- paste(highWaySetNameSplit,collapse="&")
highWaySetNamePasteReorder <- paste(c(highWaySetNameSplit[2], highWaySetNameSplit[1]),collapse="&")
bool1 <- any( highWaySetNamePaste %in% twoWaySetName, highWaySetNamePasteReorder%in%twoWaySetName)
if(bool1 == FALSE){
bool2 <- any( highWaySetNamePaste %in% newName, highWaySetNamePasteReorder %in% newName )
if(bool2 == FALSE){
resam <- resam+1
newName[2*resam - 1] <- highWaySetNamePaste
newName[2*resam] <- highWaySetNamePasteReorder
if(highWaySet[i] == 0){
highWaySet[i] <- min(highWaySet[which(highWaySet!=0)])*0.01
}
if((highWaySet[i]*mu^(highWays[i]-2)) < valuemin){
newTwoWaySet[resam] <- (highWaySet[i]*mu^(highWays[i]-2))
}else{
newTwoWaySet[resam] <- runif(1,highWaySet[i],valuemin)
}
newTwoWaySetName[resam] <- highWaySetNamePaste
}
}
}
newTwoWayIndex <- which(newTwoWaySet!=0)
newTwoWaySet <- newTwoWaySet[newTwoWayIndex]
if(length(newTwoWaySet) != 0){
newTwoWaySetName <- newTwoWaySetName[newTwoWayIndex]
names(newTwoWaySet) <- newTwoWaySetName
New[[i]] <- c(highWaySet[i], newTwoWaySet)
}
}
if(length(New)== 0){
newTwoWaySet <- twoWaySet
} else {
anyNullList <- which(sapply(New, is.null))
if(length(anyNullList) !=0 ){
New <- New[-anyNullList]
}
newHighWaySet <- unlist(New)
if(any(is.na(newHighWaySet))){newHighWaySet <- newHighWaySet[-which(is.na(newHighWaySet))]}
numWays <- str_count(names(newHighWaySet),pattern = "&")+1
newTwoWaySet <- c(twoWaySet, newHighWaySet[which(numWays == 2)])
}
list(newTworWaySet = newTwoWaySet, resam = resam, New = New)
}
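# Sketch of the generation rule above (illustrative): given a three-way
# overlap s_ABC with no recorded two-way overlaps, each missing pair such as
# A&B is seeded as s_ABC * mu^(3 - 2); if that exceeds the smaller of the two
# one-way areas, it is resampled uniformly between s_ABC and that area. The
# scaling factor mu is later tuned by solveWithMu to minimise stress.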
#calculate the Euclidean distance matrix and initial location (based on the Gram matrix)
EuclideanDistance <- function(newTworWaySet, oneWaySetName, radius, ThreeD, initial, expand, distanceTolerance,
maximumStep){
m <- length(oneWaySetName)
newTworWaySetName <- names(newTworWaySet)
ED <- matrix(rep(0, m^2),ncol = m)
for(i in 1:m){
for(j in i:m){
if(j == i){next}else{
sij1 <- newTworWaySet[which(newTworWaySetName == paste(c(oneWaySetName[i],oneWaySetName[j]),collapse = "&"))]
sij2 <- newTworWaySet[which(newTworWaySetName == paste(c(oneWaySetName[j],oneWaySetName[i]),collapse = "&"))]
if(length(sij1) == 0 && length(sij2) == 0 ){
sij <- 0
} else { sij <- c(sij1,sij2) }
ED[i,j] <- Distance(radius[i], radius[j], sij, ThreeD = ThreeD, expand = expand,
distanceTolerance = distanceTolerance, maximumStep = maximumStep)
}
}
}
ED <- ED + t(ED)
rownames(ED) <- oneWaySetName
colnames(ED) <- oneWaySetName
if(initial == TRUE){
D2 <- ED^2
J <- diag(m)-1/m
G <- -0.5*J%*%D2%*%J
Em <- svd(G)
U <- (Em$u) %*% sqrt(diag(Em$d))
if(ThreeD){
if (dim(U)[1]>=3){xy <- U[,1:3]
}else{xy <- cbind(U,rep(0,2))}
}else{
xy <- U[,1:2]
}
step <- 1
    while(allConnectedCpp(xy,radius,ThreeD) == FALSE){
      meanvec <- apply(xy,2,"mean")
      direction <- t(t(xy) - meanvec)
      xy <- xy - 0.1*step*direction
step <- step + 1
}
out <- list(ED = ED, xy = xy)
}else{out <- ED}
out
}
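# The initial layout above is classical multidimensional scaling: from the
# target distance matrix ED it forms the Gram matrix G = -0.5 * J %*% ED^2 %*% J
# with centring matrix J = diag(m) - 1/m, then uses the leading singular
# vectors of G, scaled by the square roots of the singular values, as starting
# coordinates, shrinking them toward the centroid until the group is connected.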
|
/scratch/gouwar.j/cran-all/cranData/vennplot/R/vennplot.R
|
#' Turn vectors into lists with any specified linking word
#'
#' @param items A vector of items to turn into a list phrase (e.g. c("a", "b", "c")).
#' @param linking_word Defaults to "and". Can be anything.
#' @param oxford_comma `logical`. Defaults to `FALSE`. If TRUE, an oxford comma is added (e.g. "a, b, and c").
#'
#' @return A string in the form of a list (e.g. "a, b and c")
#' @export
#'
#' @examples listify(c("a", "b", "c"), "or")
#'
listify <- function(items,
linking_word = "and",
oxford_comma = FALSE) {
if(length(items) > 1) {
    paste0(paste0(items[-length(items)],
collapse = ", "),
ifelse(oxford_comma == TRUE, ", ", " "),
linking_word,
" ",
items[length(items)])
} else {
paste(items)
}
}
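# A further usage sketch (illustrative):
#   listify(c("a", "b", "c"))                       # "a, b and c"
#   listify(c("a", "b", "c"), oxford_comma = TRUE)  # "a, b, and c"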
|
/scratch/gouwar.j/cran-all/cranData/verbaliseR/R/listify.R
|
#' Spell out numbers if they are smaller than ten
#'
#' @param number Whole number as `numeric` or `integer`, to be turned into text. Numbers 1-10 are always written out in full,
#' regardless of their place in the sentence. Numbers 11-999 are written out in full if they are at the beginning of a sentence.
#' Numbers greater than 1000 are returned as numerals.
#' @param sentence_start Logical. If `TRUE`, numbers below 1000 are written out in full, and their first letter is capitalised.
#' @param zero_or_no Specify what to print when the number is 0. Defaults to "no". Can be any string.
#' @param uk_or_us Defaults to UK which adds an "and" between "hundred" and other numbers (e.g. "One hundred and five"). If "US"
#' is chosen, the "and" is removed (e.g. "One hundred five").
#' @param big_mark Defaults to "," (e.g. "1,999").
#'
#' @return A string
#' @export
#'
#' @examples num_to_text(3)
#' num_to_text(333, sentence_start = TRUE)
#'
num_to_text <- function(number,
sentence_start = FALSE,
zero_or_no = "no",
uk_or_us = "UK",
big_mark = ",") {
# Return numeral if no other conditions are met (x > 10, & not start of sentence or x > 100)
num_to_print <- number
uk_or_us <- toupper(uk_or_us)
x <- as.numeric(number)
if (is.na(x)) stop(paste0(number, " is not a number."))
if(x %% 1 != 0) warning(paste0(number, " is not a whole number. It is kept as a numeral."))
  if(x %% 1 == 0 & x > 1000) {
warning("Numbers greater than 1000 are returned as numerals, regardless of their place in the sentence.")
num_to_print <- format(x, big.mark = big_mark)
}
ones <- c("one", "two", "three", "four",
"five", "six", "seven", "eight", "nine")
teens <- c("ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
"sixteen", "seventeen", "eighteen", "nineteen")
tens <- c("twenty", "thirty", "forty", "fifty",
"sixty", "seventy", "eighty", "ninety")
if(x == 0) num_to_print <- zero_or_no
if(x !=0 & x %% 1 == 0 & x < 10) num_to_print <- ones[x]
if(x == 10) num_to_print <- "ten"
if(x == 100) num_to_print <- "one hundred"
if(x == 1000) num_to_print <- "one thousand"
if(sentence_start == TRUE & x %in% c(10:19)) {
num_to_print <- teens[x-9]
}
if(sentence_start == TRUE & x %in% c(20:999)) {
hundreds <- ifelse(x > 99,
paste(ones[x %/% 100], "hundred"),
"")
if(x %% 100 > 19) {
sub_hundreds <- paste0(tens[(x %% 100 %/% 10) - 1],
ifelse(x %% 10 != 0, "-", ""),
ones[x %% 10])
} else if (x %% 100 > 9) {
sub_hundreds <- paste0(teens[x %% 100 - 9])
} else if (x %% 100 > 0) {
sub_hundreds <- ones[x %% 100]
}
    num_to_print <- paste0(
      hundreds,
      ifelse(x > 99 & x %% 100 != 0,
             ifelse(uk_or_us == "UK", " and ", " "), ""),
      ifelse(x %% 100 != 0,
             sub_hundreds,
             "")
    )
}
if(sentence_start == TRUE) {
num_to_print <- stringr::str_to_sentence(num_to_print)
}
return(num_to_print)
}
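# Illustrative behaviour of the sentence_start rules above:
#   num_to_text(21)                         # 21 (kept as a numeral mid-sentence)
#   num_to_text(21, sentence_start = TRUE)  # "Twenty-one"
#   num_to_text(105, sentence_start = TRUE, uk_or_us = "US")  # "One hundred five"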
|
/scratch/gouwar.j/cran-all/cranData/verbaliseR/R/num_to_text.R
|
#' Pluralise words if their accompanying number is not 1
#'
#' @param word A word which should be returned as plural if `count` is not equal to 1.
#' @param count A number to apply to `word`
#' @param plural How to make the plural; defaults to an "s" which is added at the end of the word.
#' Can be anything. See `add_or_swap`.
#' @param add_or_swap Choose between `add` (add the plural form (e.g. "s") onto the end; e.g. `house`
#' becomes `houses`) and `swap` (swap for the plural form; e.g. `mouse` becomes `mice`)
#' @param include_number Logical. If `TRUE`, the number is included before the word and, if it is a whole number, turned into text as per `num_to_text()`.
#' @param sentence_start Logical. Defaults to `FALSE`, which results in only numbers 1-10 being written out in full.
#' If `TRUE`, numbers 11-999 are written out in full if included. (If `include_number` is `FALSE`, the first letter of `word` is capitalised.)
#' @param zero_or_no Preferred string to use where count == 0. Defaults to "no". Can be anything.
#' @param uk_or_us Only used if `include_number` == `TRUE`. Defaults to UK which adds an "and" between "hundred" and other numbers
#' (e.g. "One hundred and five"). If "US" is chosen, the "and" is removed (e.g. "One hundred five").
#' @param big_mark Passed to `num_to_text`. Defaults to "," (e.g. "1,999")
#'
#' @return A word which is pluralised or not based on the value of `count`
#' @export
#'
#' @examples pluralise("penguin", 3)
#' pluralise("bateau", 1234, "x")
#' pluralise("sheep", 333, "sheep", add_or_swap = TRUE, sentence_start = TRUE)
#'
pluralise <- function(word,
count,
plural = "s",
add_or_swap = "add",
include_number = TRUE,
sentence_start = FALSE,
zero_or_no = "no",
uk_or_us = "UK",
big_mark = ","){
if(count == 1) {
output_string <- word
} else {
output_string <- ifelse(add_or_swap == "swap",
plural,
paste0(word, plural))
}
if(include_number == FALSE & sentence_start == TRUE) {
output_string <- stringr::str_to_sentence(output_string)
}
if(include_number == TRUE) {
output_string <- paste(
num_to_text(number = count,
sentence_start = sentence_start,
zero_or_no = zero_or_no,
uk_or_us = uk_or_us,
big_mark = big_mark),
output_string
)
}
return(output_string)
}
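# One more usage sketch of add_or_swap (illustrative):
#   pluralise("mouse", 3, plural = "mice", add_or_swap = "swap")  # "three mice"
#   pluralise("mouse", 1, plural = "mice", add_or_swap = "swap")  # "one mouse"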
|
/scratch/gouwar.j/cran-all/cranData/verbaliseR/R/pluralise.R
|
#' Render ordinal dates in UK or US style
#'
#' @param date_to_format The date to use. It must either be of class `Date` or a string written as "YYYY-MM-DD" or "YYYY/MM/DD".
#' @param uk_or_us Defaults to "UK", which results in outputs like "12th September 2022"; if
#' "US", the output resembles "September 12th, 2022".
#' @param formal_or_informal Defaults to "informal", so the ordinals are included (e.g. "st", "nd", "rd", "th").
#' If "formal" is chosen, the ordinals are omitted (e.g. "12 September 2022").
#'
#' @return A string (e.g. "12th September 2022")
#' @export
#'
#' @examples prettify_date(Sys.Date(), "UK", "informal")
prettify_date <- function(date_to_format = Sys.Date(),
uk_or_us = "UK",
formal_or_informal = "informal") {
day_num <- as.numeric(format(as.Date(date_to_format), "%d"))
if (!(day_num %% 100 %in% c(11, 12, 13))) {
day_th <- switch(as.character(day_num %% 10),
"1" = {paste0(day_num, "st")},
"2" = {paste0(day_num, "nd")},
"3" = {paste0(day_num, "rd")},
paste0(day_num, "th"))
} else {
day_th <- paste0(day_num, "th")
}
num_to_use <- ifelse(formal_or_informal == "informal",
day_th,
day_num)
if(uk_or_us == "UK") {
paste(num_to_use, format(as.Date(date_to_format), "%B %Y"))
} else {
paste0(format(as.Date(date_to_format), "%B"), " ",
num_to_use, ", ", format(as.Date(date_to_format), "%Y"))
}
  }
}
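# Illustrative outputs for a fixed date (assuming an English locale for %B):
#   prettify_date(as.Date("2022-09-12"))                  # "12th September 2022"
#   prettify_date(as.Date("2022-09-12"), "US")            # "September 12th, 2022"
#   prettify_date(as.Date("2022-09-12"), "UK", "formal")  # "12 September 2022"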
|
/scratch/gouwar.j/cran-all/cranData/verbaliseR/R/prettify_date.R
|
#' Restore custom capitalisation in a string
#'
#' @param x A string in which capitalisation needs to be restored
#' @param items_to_capitalise Whole words or acronyms in which capitalisation must be retained; special characters can be included (e.g. "R2-D2")
#'
#' @return A string with restored capitals
#' @export
#'
#' @examples
#' x <- "Should i tell c-3po the french call him z-6po?"
#' restore_capitals(x, c("I", "C-3PO", "French", "Z-6PO"))
#'
restore_capitals <- function(x, items_to_capitalise) {
for(item in items_to_capitalise) {
x <- gsub(paste0("\\b", tolower(item), "\\b"),
item,
x)
}
return(x)
}
|
/scratch/gouwar.j/cran-all/cranData/verbaliseR/R/restore_capitals.R
|
#' Habsburg pedigree.
#'
#' A subset of the royal Habsburg family, showing the ancestry of (the
#' infamously inbred) King Charles II of Spain.
#'
#' @format A `ped` object containing a pedigree with 29 members.
#'
#' @source Adapted from \url{https://en.wikipedia.org/wiki/Habsburg_family_tree}
#'
#' @examples
#'
#' plot(habsburg, hatched = "Charles II", cex = 0.7)
#'
#' verbalise(habsburg, ids = parents(habsburg, "Charles II"))
#'
"habsburg"
|
/scratch/gouwar.j/cran-all/cranData/verbalisr/R/data.R
|
#' @importFrom ribd inbreeding
pathData = function(x, p1, p2, inb = NULL) {
p1 = as.character(p1)
p2 = as.character(p2)
if(p1[1] != p2[1])
stop2("Both paths must start at the common ancestor")
anc = as.character(p1[1])
# The two connected indivs
leaves = c(p1[length(p1)], p2[length(p2)]) # do this here to avoid empty paths
# Remove ancestor from paths
v1 = p1[-1]
v2 = p2[-1]
l1 = length(v1)
l2 = length(v2)
# Path type
type = if(l1 == 0 || l2 == 0)
"lineal"
else if(l1 == 1 && l2 == 1)
"sibling"
else if(l1 == 1 || l2 == 1)
"avuncular"
else
"cousin"
# Full or half?
if(type == "lineal")
full = NA
else {
pars1 = parents(x, v1[1]) # = character(0) if empty
pars2 = parents(x, v2[1])
full = setequal(pars1, pars2)
if(full)
anc = pars1
}
half = isFALSE(full) # for use below; not reported
# Degree/removal
nSteps = c(l1, l2)
degree = sum(nSteps) - as.integer(isTRUE(full)) # NB: not cousin degree
removal = abs(diff(nSteps))
# Number of great/grand (lineal & avuncular only)
ng = if(removal > 1) removal - 1 else 0
# Inbreeding of ancs
if(is.null(inb))
inb = ribd::inbreeding(x)
ancInb = inb[anc]
# Sexes along path from A to B (not inclusive). Include anc only for half rels.
pth = c(rev(v1[-l1]), if(half) anc, v2[-l2])
sex = getSex(x, named = TRUE)
sexPath = paste0(c("p", "m")[sex[pth]], collapse = "")
# top/bottom within leaves (used in lineal & avunc details below)
top = if(l1 < l2) leaves[1] else leaves[2]
bottom = setdiff(leaves, top)
details = NULL
# Relationship descriptions
switch(type,
lineal = {
code = paste0("lin", degree)
rel = paste("lineal of degree", degree)
# details
typ = switch(sex[top], "father", "mother")
if(ng > 0)
typ = paste0(strrep("great-", ng - 1), "grand", typ)
determ = if(ng > 0) "a" else "the"
details = sprintf("%s is %s %s of %s", top, determ, typ, bottom)
},
sibling = {
code = if(half) "hs" else "fs"
rel = paste(if(half) "half" else "full", "siblings")
},
avuncular = {
code = paste0(if(half) "h", if(ng > 0) "g", if(ng > 1) ng, "av")
rel = paste0(if(half) "half-",
if(ng > 1) strrep("great-", ng - 1),
if(ng > 0) "grand-",
"avuncular")
# details
typ = switch(sex[top], "uncle", "aunt")
if(ng > 0)
typ = paste0(strrep("great-", ng - 1), "grand", typ)
if(half)
typ = paste0("half-", typ)
determ = if(substr(typ, 1, 1) %in% c("a", "u")) "an" else "a"
details = sprintf("%s is %s %s of %s", top, determ, typ, bottom)
},
cousin = {
cousDeg = min(l1, l2) - 1
code = paste0(if(half) "h", "c", cousDeg, "r", removal)
rel = paste(ordinal(cousDeg), "cousins")
if(half) rel = paste("half", rel)
if(removal > 0) rel = paste(rel, numtimes(removal), "removed")
}
)
# Path string
ancBrack = sprintf("[%s]", paste0(anc, collapse = ","))
path = paste0(c(rev(v1), ancBrack, v2), collapse = "-")
list(v1 = v1, v2 = v2, leaves = leaves, anc = anc, full = full,
nSteps = nSteps, degree = degree, removal = removal,
ancInb = ancInb, sex = sex, sexPath = sexPath, path = path,
code = code, type = type, rel = rel, details = details)
}
unrelatedPair = function(x, ids) {
sex = getSex(x, ids = ids, named = TRUE)
emptypath = list(v1 = ids[1], v2 = ids[2], leaves = ids, anc = character(0), full = NA,
nSteps = c(Inf, Inf), degree = Inf, removal = 0,
ancInb = 0, sex = sex, sexPath = "", path = "",
code = "un", type = "unrelated", rel = "unrelated", details = NULL)
structure(list(emptypath), class = "pairrel")
}
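# Usage sketch (not part of the package): pathData() is internal, but its
# classification logic can be exercised directly. Assumes pedtools is attached;
# the pedigree and IDs below are hypothetical.
# x <- nuclearPed(2)                          # parents 1-2, children 3-4
# pathData(x, p1 = c("1", "3"), p2 = c("1", "4"))$rel
# # -> "full siblings", since both children share parents 1 and 2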
|
/scratch/gouwar.j/cran-all/cranData/verbalisr/R/pathData.R
|
#' Format relationship description
#'
#' This is the main formatting function called by `print.pairrel()`.
#'
#' @param x An output of [verbalise()].
#' @param cap A logical indicating if the first letter of each path description
#' should be capitalised. By default TRUE.
#' @param includePaths A logical indicating if the complete paths should be
#' included in the output. By default TRUE.
#' @param ... Not used.
#'
#' @export
format.pairrel = function(x, cap = TRUE, includePaths = TRUE, ...) {
if(length(x) == 1 && x[[1]]$type == "unrelated")
return("Unrelated")
# Descriptions: Relationship + details
descrips = vapply(x, FUN.VALUE = "", function(p)
if(is.null(p$details)) p$rel else paste0(p$rel, ": ", p$details))
# Collect path groups
uniq = unique.default(descrips)
paths = lapply(uniq, function(dsc)
sapply(x[descrips == dsc], function(p) p$path))
s = doublify(uniq, n = lengths(paths))
if(cap)
s = capit(s)
names(paths) = s
# Collect and print
if(includePaths)
s = unlist(lapply(s, function(r) c(r, paste(" ", paths[[r]]))))
s
}
#' @export
print.pairrel = function(x, ...) {
txt = format(x, ...)
cat(txt, sep = "\n")
invisible(x)
}
|
/scratch/gouwar.j/cran-all/cranData/verbalisr/R/print.R
|
# Preferred version of stop()
stop2 = function(...) {
a = lapply(list(...), toString)
a = append(a, list(call. = FALSE))
do.call(stop, a)
}
checkIds = function(x, ids, checkDups = TRUE, exactly = NULL, atleast = NULL, atmost = NULL) {
labs = if(is.character(x)) x else unlist(labels(x), use.names = FALSE)
if(!all(ids %in% labs))
stop2("Unknown ID label: ", setdiff(ids, labs))
if(!is.null(exactly) && length(ids) != exactly)
stop2("Argument `ids` must have length ", exactly)
if(!is.null(atleast) && length(ids) < atleast)
stop2("Argument `ids` must have length at least ", atleast)
if(!is.null(atmost) && length(ids) > atmost)
stop2("Argument `ids` must have length at most ", atmost)
if(checkDups && (d <- anyDuplicated.default(match(labs, ids), incomparables = NA)))
stop2("ID label is not unique: ", labs[d])
if(checkDups && (d <- anyDuplicated.default(ids)))
stop2("Repeated individual: ", ids[d])
}
removeEmpty = function(x) {
x[lengths(x) > 0]
}
ordinal = function(n) {
if(n < 0) stop2("`n` must be nonnegative")
switch(min(n, 4), "first", "second", "third", paste0(n, "'th"))
}
numtimes = function(n) {
if(n < 0) stop2("`n` must be nonnegative")
if(n == 0) return("")
if(n == 1) return("once")
if(n == 2) return("twice")
paste(n, "times")
}
tuple = function(n) {
if(n < 1) stop2("`n` must be positive")
if(n > 8) return(paste(n, "times"))
switch(n, "single", "double", "triple", "quadruple", "quintuple", "sextuple", "septuple", "octuple")
}
indent = function(x, level = 0, capit = as.logical(level == 0)) {
if(capit)
x[1] = capit(x[1])
paste0(strrep(" ", level), x)
}
# Replace duplications by prefixing "double" etc
#' @importFrom stats setNames
doublify = function(x, n = NULL) {
if(is.null(n))
tab = as.list(table(x))
else
tab = setNames(as.list(n), x)
y = lapply(names(tab), function(s) {
tup = tab[[s]]
if(tup > 1) {
DB = tuple(tup)
paste(DB, sub("(: .* is a)n?", paste("\\1", DB), s))
}
else s
})
unlist(y)
}
capit = function(x) {
substr(x, 1, 1) = toupper(substr(x, 1, 1))
x
}
isFull = function(path) {
isTRUE(path$full)
}
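# Sketch of the doublify() convention used by format.pairrel() above
# (hypothetical input): repeated relationship descriptions are collapsed
# into one entry prefixed with the tuple word.
# doublify(c("first cousins", "first cousins", "half siblings"))
# # -> "double first cousins" "half siblings"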
|
/scratch/gouwar.j/cran-all/cranData/verbalisr/R/utils.R
|
#' Describe a pairwise relationship
#'
#' The description includes all pedigree paths between the two individuals,
#' indicating with brackets the topmost common ancestors in each path.
#'
#' @param x A `ped` object, or a list of such.
#' @param ids A vector containing the names of two pedigree members.
#'
#' @return An object of class `pairrel`. This is essentially a list of lists,
#' where each inner list describes a single path.
#'
#' @examples
#'
#' # Example 1: Family quartet
#'
#' x = nuclearPed(2)
#' verbalise(x, 1:2)
#' verbalise(x, 2:3)
#' verbalise(x, 3:4)
#'
#' # Example 2: Complicated cousin pedigree
#'
#' y = doubleCousins(degree1 = 1, removal1 = 1, half1 = TRUE,
#' degree2 = 2, removal2 = 0, half2 = FALSE)
#' verbalise(y)
#'
#' # Example 3: Full sib mating
#'
#' z = fullSibMating(1)
#' verbalise(z)
#' verbalise(z, ids = c(1,5))
#'
#' # Example 4: Quad half first cousins
#'
#' w = quadHalfFirstCousins()
#' verbalise(w)
#'
#' @importFrom ribd kinship
#' @export
verbalise = function(x, ids = leaves(x)) {
ids = as.character(ids)
checkIds(x, ids, exactly = 2)
kinmat = kinship(x)
phi = kinmat[ids[1], ids[2]]
inb = 2 * diag(kinmat) - 1
### Unrelated: Return early
if(phi == 0)
return(unrelatedPair(x, ids))
# By now, if ped list, ids are from the same comp!
if(is.pedList(x))
x = x[[getComponent(x, ids[1])]]
id1 = ids[1]; id2 = ids[2]
SEX = getSex(x, named = TRUE)
# Vector of all common ancestors
comAnc = commonAncestors(x, ids, inclusive = TRUE)
# List of lists: All paths from each common ancestor
descPth = descentPaths(x, comAnc)
# Split into paths to each id
allpaths = lapply(descPth, function(plist) {
p1 = lapply(plist, function(p) p[seq_len(match(id1, p, nomatch = 0))])
p2 = lapply(plist, function(p) p[seq_len(match(id2, p, nomatch = 0))])
list(unique.default(removeEmpty(p1)),
unique.default(removeEmpty(p2)))
})
PATHS = list()
taken = character()
for(a in comAnc) {
a.to.id1 = allpaths[[a]][[1]]
a.to.id2 = allpaths[[a]][[2]]
for(p1 in a.to.id1) {
for(p2 in a.to.id2) {
# If intersection: Ignore
if(length(intersect(p1[-1], p2[-1])))
next
pd = pathData(x, p1, p2, inb = inb)
if(pd$path %in% taken)
next
PATHS = c(PATHS, list(pd))
taken = c(taken, pd$path)
}
}
}
# Sort
PATHS = PATHS[order(sapply(PATHS, function(p) p$degree),
sapply(PATHS, function(p) sum(p$nSteps)),
sapply(PATHS, function(p) -p$removal))]
structure(PATHS, class = "pairrel")
}
|
/scratch/gouwar.j/cran-all/cranData/verbalisr/R/verbalise.R
|
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
#' @import pedtools
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/verbalisr/R/verbalisr-package.R
|
wright = function(x) {
stopifnot(inherits(x, "pairrel"))
s = 0
for(p in x) {
expo = sum(p$nSteps) + 1
f = p$ancInb
s = s + 0.5^expo * (length(f) + sum(f)) # ok regardless of half/full
}
s
}
validateKinship = function(x, ids) {
v = verbalise(x, ids)
  if(!isTRUE(all.equal(wright(v), kinship(x, ids)))) stop("err")
}
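# wright() above implements Wright's path-counting formula:
# phi = sum over paths of (1/2)^(l1 + l2 + 1) * (1 + f_A),
# where l1, l2 are the step counts down to the two individuals and f_A is the
# inbreeding coefficient of the common ancestor A. For full relationships both
# parents are stored as ancestors, which is what the (length(f) + sum(f)) term
# accounts for. Consistency sketch (assumes pedtools and ribd are attached):
# x <- nuclearPed(2)
# wright(verbalise(x, 3:4))       # full sibs: 2 * 0.5^3 = 0.25
# kinship(x, ids = c(3, 4))       # -> 0.25 as well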
|
/scratch/gouwar.j/cran-all/cranData/verbalisr/R/wright.R
|
#' calculate_digit
#'
#' Calculates a single Verhoeff Check Digit. This function is exported, but it would usually
#' be called from one of the `verhoeff_*` wrapper functions
#'
#' @param number A number you want to calculate the check digit for
#' @param d5 The verhoeff d5 matrix. Retrievable with create_verhoeff_matrices()$d5
#' @param d5_p The verhoeff p matrix. Retrievable with create_verhoeff_matrices()$d5_p
#' @param inv_v The verhoeff inv matrix. Retrievable with create_verhoeff_matrices()$inv_v
#'
#' @return A single integer
#' @export
#'
#' @examples
#' dat <- verhoeff::create_verhoeff_matrices()
#' calculate_digit(5, dat$d5, dat$d5_p, dat$inv_v)
calculate_digit <- function(number, d5, d5_p, inv_v){
number <- prepare_number(number)
c <- 0
for (i in 1:length(number)){
c <- d5_calc(d5, c,
d5_p_calc(d5_p, i, number[i]))
}
final <- inv_v[c + 1]
return(final)
}
d5_p_calc <- function(d5_p, i, number) {
d5_p[(i %% 8) + 1, number + 1] + 1
}
d5_calc <- function(d5, c, d5_p_calc) {
d5[c + 1, d5_p_calc]
}
|
/scratch/gouwar.j/cran-all/cranData/verhoeff/R/calculate_digit.R
|
#' create_verhoeff_matrices
#'
#' @return An object with three fields; d5, d5_p, and inv_v
#' @export
#' @keywords internal
#'
#' @examples
#' verhoeff::create_verhoeff_matrices()$d5
#' verhoeff::create_verhoeff_matrices()$d5_p
#' verhoeff::create_verhoeff_matrices()$inv_v
create_verhoeff_matrices <- function(){
d5_matrix <- matrix(as.integer(c(
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
1, 2, 3, 4, 0, 6, 7, 8, 9, 5,
2, 3, 4, 0, 1, 7, 8, 9, 5, 6,
3, 4, 0, 1, 2, 8, 9, 5, 6, 7,
4, 0, 1, 2, 3, 9, 5, 6, 7, 8,
5, 9, 8, 7, 6, 0, 4, 3, 2, 1,
6, 5, 9, 8, 7, 1, 0, 4, 3, 2,
7, 6, 5, 9, 8, 2, 1, 0, 4, 3,
8, 7, 6, 5, 9, 3, 2, 1, 0, 4,
9, 8, 7, 6, 5, 4, 3, 2, 1, 0
)), ncol = 10, byrow = TRUE)
d5_p_matrix <- matrix(as.integer(c(
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
1, 5, 7, 6, 2, 8, 3, 0, 9, 4,
5, 8, 0, 3, 7, 9, 6, 1, 4, 2,
8, 9, 1, 6, 0, 4, 3, 5, 2, 7,
9, 4, 5, 3, 1, 2, 6, 8, 7, 0,
4, 2, 8, 6, 5, 7, 3, 9, 0, 1,
2, 7, 9, 3, 8, 0, 6, 4, 1, 5,
7, 0, 4, 6, 9, 1, 3, 2, 5, 8
)), ncol = 10, byrow = TRUE)
inv_vector <- as.integer(c(0, 4, 3, 2, 1, 5, 6, 7, 8, 9))
return(list(d5 = d5_matrix,
d5_p = d5_p_matrix,
inv_v = inv_vector))
}
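# Background note: d5_matrix is the Cayley (multiplication) table of the
# dihedral group D5, d5_p_matrix holds successive powers of a fixed permutation
# applied according to digit position, and inv_vector maps each group element
# to its inverse. A quick sanity check:
# m <- create_verhoeff_matrices()
# all(m$d5[cbind(1:10, m$inv_v + 1)] == 0)   # a * a^(-1) = identity (0)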
|
/scratch/gouwar.j/cran-all/cranData/verhoeff/R/matrices.R
|
#' prepare_number
#'
#' Takes a number and prepares it for input to the Verhoeff algorithm by reversing it
#'
#' @param number A single number that can be coerced to numeric
#'
#' @return A numeric vector of length equal to number of digits in the input
#' @export
#'
#' @examples
#' prepare_number(1234)
prepare_number <- function(number){
if (length(number) > 1){
stop("Only a single number can be passed. Please do not pass vectors of length > 1",
call. = FALSE)
}
if (is.logical(number)){
warning("Coercing logical values to numeric. Please ensure this is expected.")
}
number <- suppressWarnings(as.numeric(number))
if (is.na(number)){
stop("NAs were introduced. Please check your input can be coerced to numeric",
call. = FALSE)
}
rev_number <- reverse_number(number)
return(rev_number)
}
#' reverse_number
#'
#' Takes a numeric or character number and reverses it. Only called from within `prepare_number`
#'
#' @param number A numeric or something that can be coerced to numeric
#'
#' @return A numeric vector with one element per digit
#' @noRd
#'
#' @examples
#' reverse_number(1234)
reverse_number <- function(number){
# No arg checking is here, because it is always called from `prepare_number` which handles errors.
number <- as.character(number)
number <- strsplit(number, "")
number <- number[[1]]
number <- as.numeric(number)
number <- rev(number)
return(number)
}
|
/scratch/gouwar.j/cran-all/cranData/verhoeff/R/prepare_number.R
|
#' verhoeff_calculate
#'
#' @param number The vector of numbers you want a check digit for
#' @param as_list Return the results as a list? Defaults to false
#'
#' @return Vector or list of check digits
#' @export
#'
#' @examples
#' verhoeff_calculate(1234)
verhoeff_calculate <- function(number, as_list = FALSE){
v_matrices <- create_verhoeff_matrices()
check_digit <- lapply(number,
calculate_digit,
d5 = v_matrices$d5,
d5_p = v_matrices$d5_p,
inv_v = v_matrices$inv_v)
if (!as_list){
check_digit <- unlist(check_digit)
}
return(check_digit)
}
#' verhoeff_append
#'
#' Return a number with its check digit appended
#'
#' @param number The number to calculate a check digit for
#' @param sep A separator for the two numbers
#'
#' @return Numeric vector of length equal to its input
#' @export
#'
#' @examples
#' verhoeff::verhoeff_append(123)
verhoeff_append <- function(number, sep = "-"){
original_number <- number
check_digit <- verhoeff_calculate(number)
appended_number <- paste(original_number, check_digit, sep = sep)
return(appended_number)
}
#' verhoeff_validate
#'
#' Enter a number and an existing check digit. The function returns TRUE if the supplied check digit is a correct Verhoeff check digit for the given number
#'
#' @param number A numerical input
#' @param check_digit An existing check digit for the input number
#'
#' @return Logical vector
#' @export
#'
#' @examples
#' verhoeff::verhoeff_validate(123, 3)
verhoeff_validate <- function(number, check_digit){
check_digit <- as.integer(check_digit)
calc_check_digit <- verhoeff_calculate(number)
return(
identical(check_digit, calc_check_digit)
)
}
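# Round-trip sketch using only the functions defined above:
# verhoeff_calculate(123)      # -> 3
# verhoeff_append(123)         # -> "123-3"
# verhoeff_validate(123, 3)    # -> TRUE
# verhoeff_validate(123, 4)    # -> FALSE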
|
/scratch/gouwar.j/cran-all/cranData/verhoeff/R/verhoeff.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
attribute<- function(x, ...){
UseMethod("attribute")
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/attribute.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
attribute.default<- function(x, obar.i, prob.y=NULL, obar = NULL, class = "none", main = NULL, CI = FALSE, n.boot = 100, alpha = 0.05, tck = 0.01, freq = TRUE, pred = NULL, obs = NULL, thres = thres, bins = FALSE, ...){
## attribute plot as displayed in Wilks, p 264.
## If the first object is a prob.bin class, information derived from that.
old.par <- par(no.readonly = TRUE) # all par settings which
# could be changed.
# on.exit(par(old.par))
########################################
## if x is a verification object, bootstrapping is possible.
if(CI & class != "prob.bin" ) stop("x must be a 'prob.bin' object created by verify to compute confidence intervals" )
########################################
plot(x, obar.i, col = 2, lwd = 2, type = "n",
xlim = c(0,1), ylim = c(0,1),
xlab = expression( paste("Forecast probability, ", y[i] ) ),
ylab = expression( paste("Observed relative frequency, ", bar(o)[1] ))
)
################### need to put down shading before anything else.
if(!is.null(obar)){
a <- (1-obar)/2 + obar
b <- obar / 2
x.p <- c(obar, obar, 1, 1, 0, 0)
y.p <- c(0, 1, 1, a, b, 0)
polygon(x.p, y.p, col = "gray")
text(0.6, obar + (a-b)*(0.6 - obar), "No skill", pos = 1,
srt = atan( a - b )/(2*pi)*360 )
}
###########
ii <- is.finite(obar.i)
points(x[ii], obar.i[ii], type = "b", col = 2, lwd = 2)
####### bootstrap CI's
####### this causes a binding error since pred and obs is not introduced.
if(CI){
n <- length(pred)
OBAR <- matrix(NA, nrow = length(obar.i), ncol = n.boot)
for(i in 1:n.boot){
ind <- sample(1:n, replace = TRUE)
YY <- verify(obs[ind], pred[ind], show = FALSE, thresholds = thres, bins = bins)$obar.i
OBAR[,i] <- YY
} ## close 1:nboot
a<- apply(OBAR,1, quantile, alpha, na.rm = TRUE)
b<- apply(OBAR,1, quantile, 1-alpha, na.rm = TRUE)
for(i in 1:length(a) ){
lines(rep(x[i], 2), c(a[i], b[i] ), lwd = 1)
lines( c(x[i] - tck, x[i] + tck), rep(a[i],2),lwd = 1 )
lines( c(x[i] - tck, x[i] + tck), rep(b[i],2), lwd = 1 )
}
rm(OBAR, a,b)
} ## close if CI
## plot relative frequency of each forecast
if(freq){
ind<- x< 0.5
text(x[ind], obar.i[ind], formatC(prob.y[ind], format = "f", digits = 3),
pos = 3, offset = 2, srt = 90)
text(x[!ind], obar.i[!ind], formatC(prob.y[!ind], format = "f", digits = 3),
pos = 1, offset = 2, srt = 90)
}
if(is.null(main)){title("Attribute Diagram")}else
{title(main)}
abline(0,1)
## resolution line
if(!is.null(obar)){
abline(h = obar, lty = 2)
abline(v = obar, lty = 2)
text( 0.6, obar, "No resolution", pos = 3)
}
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/attribute.default.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
attribute.prob.bin<- function(x, ...){
# retreives data from a verify object.
# assign("obar.i", x$obar.i)
# assign("thres", x$thres)
# assign("prob.y", x$prob.y)
# assign("obar", x$obar)
# assign("class", "prob.bin")
# assign("obs", x$obs)
# assign("pred", x$pred)
# assign("bins", x$bins)
# assign("x", x$y.i)
# do.call("attribute.default", list(x, obar.i, prob.y, obar, class, obs=obs, pred = pred, thres = thres, bins = bins,...))
res <- attribute.default(x$y.i, obar.i=x$obar.i, prob.y=x$prob.y, obar=x$obar,
class="prob.bin", obs=x$obs, pred=x$pred, thres=x$thres, bins=x$bins, ...)
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/attribute.prob.bin.R
|
`brier` <-
function (obs, pred, baseline = NULL, thresholds = seq(0, 1,
0.1), bins = TRUE, ...)
{
id <- is.finite(obs) & is.finite(pred)
obs <- obs[id]
pred <- pred[id]
pred <- round(pred, 8)
thresholds <- round(thresholds, 8 )
if (max(pred) > 1 | min(pred) < 0) {
cat("Predictions outside [0,1] range. \n Are you certain this is a probability forecast? \n")
}
if (is.null(baseline)) {
obar <- mean(obs)
baseline.tf <- FALSE
}
else {
obar <- baseline
baseline.tf <- TRUE
}
bs.baseline <- mean((obar - obs)^2)
if (bins) {
XX <- probcont2disc(pred, bins = thresholds)
pred <- XX$new
new.mids <- XX$mids
}
else {
if (length(unique(pred)) > 20) {
warning("More than 20 unique probabilities. This could take awhile.")
}
}
N.pred <- aggregate(pred, by = list(pred), length)
N.obs <- aggregate(obs, by = list(pred), sum)
if (bins) {
XX <- data.frame(Group.1 = new.mids, zz = rep(0, length(thresholds) -
1))
XX$Group.1 <- as.factor(XX$Group.1)
N.pred$Group.1 <- as.factor(N.pred$Group.1)
N.obs$Group.1 <- as.factor(N.obs$Group.1)
N.pred <- merge(XX, N.pred, all.x = TRUE)
N.obs <- merge(XX, N.obs, all.x = TRUE)
}
else {
XX <- data.frame(Group.1 = thresholds, zz = rep(0, length(thresholds)))
XX$Group.1 <- as.factor(XX$Group.1)
N.pred$Group.1 <- as.factor(N.pred$Group.1)
N.obs$Group.1 <- as.factor(N.obs$Group.1)
N.pred <- merge(XX, N.pred, all.x = TRUE)
N.obs <- merge(XX, N.obs, all.x = TRUE)
}
obar.i <- N.obs$x/N.pred$x
y.i <- as.numeric(as.character(N.obs$Group.1))
bs <- mean((pred - obs)^2)
n <- length(obs)
ss <- 1 - bs/bs.baseline
bs.rel <- sum(N.pred$x * (y.i - obar.i)^2, na.rm = TRUE)/n
bs.res <- sum(N.pred$x * (obar.i - obar)^2, na.rm = TRUE)/n
bs.uncert <- obar * (1 - obar)
check <- bs.rel - bs.res + bs.uncert
prob.y <- N.pred$x/n
return(list(baseline.tf = baseline.tf, bs = bs, bs.baseline = bs.baseline,
ss = ss, bs.reliability = bs.rel, bs.resol = bs.res,
bs.uncert = bs.uncert, y.i = y.i, obar.i = obar.i, prob.y = prob.y,
obar = obar, thres = thresholds, check = check, bins = bins))
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/brier.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
conditional.quantile<- function(pred, obs, bins = NULL, thrs = c(10, 20), main = "Conditional Quantile Plot", ...){
## creates conditional probability plots as described in Murphy et al.
#set.seed(1)
#frcst <- round(runif(100, 20, 70))
#obs<- rnorm( 100, frcst, 10)
#bins <- seq(0,100,10)
#thrs<- c( 10, 20) # number of obs needed for a statistic to be printed; 1,4 quartile, 2,3 quartiles
old.par <- par(no.readonly = TRUE) # all par settings which # could be changed.
on.exit(par(old.par))
# check bin sizes; issue warning
if(!is.null(bins)){
if( min(bins)> min (obs) | max(bins)< max(obs) ){warning("Observations outside of bin range. \n")}
if( min(bins)> min (pred) | max(bins)< max(pred) ){warning("Forecasts outside of bin range. \n")}
} else {
dat <- c(obs,pred); min.d <- min(dat); max.d <- max(dat)
bins<- seq(floor(min.d), ceiling(max.d), length = 11)
} ## close bin check
## plot ranges
lo<- min(bins); hi<- max(bins)
## if selected, the quasi-continuous data is subsetted into larger
## bins so that quantile statistics might be calculated.
b<- bins[- length(bins)]
labs<- b + 0.5*diff(bins)
obs.cut<- cut(obs, breaks = bins, include.lowest = TRUE, labels = labs)
obs.cut[is.na(obs.cut)]<- labs[1] # place anything below the limit into first bin.
obs.cut<- as.numeric(as.character(obs.cut))
frcst.cut<- cut(pred, breaks = bins, include.lowest = TRUE, labels = labs)
frcst.cut[is.na(frcst.cut)]<- labs[1]
frcst.cut<- as.numeric(as.character(frcst.cut))
## calculate stats ext
n<- length(labs)
lng<- aggregate(obs, by = list(frcst.cut),length)
med<- aggregate(obs, by = list(frcst.cut),median)
q1 <- aggregate(obs, by = list(frcst.cut),quantile, 0.25)
q2 <- aggregate(obs, by = list(frcst.cut),quantile, 0.75)
q1$x[lng$x <= thrs[1]] <- NA
q2$x[lng$x <= thrs[1]] <- NA
q3 <- aggregate(obs, by = list(frcst.cut),quantile, 0.1)
q4 <- aggregate(obs, by = list(frcst.cut),quantile, 0.9)
q3$x[lng$x <= thrs[2]] <- NA
q4$x[lng$x <= thrs[2]] <- NA
par( mar = c(5,5,5,5) )
plot(frcst.cut, obs.cut, xlim = c(lo,hi), ylim = c(lo, hi), main = main,
type = 'n', ylab = "Observed Value", xlab = "Forecast Value", ... )
mtext("Sample Size", side = 4, adj = -1)
#### legend
legend.txt<- c("Median", "25th/75th Quantiles", "10th/90th Quantiles")
legend(min(pred) + 0.55*diff(range(pred)),
min(obs) + 0.25*diff(range(obs)), legend.txt, col = c(2,3,4),
lty = c(1,2,3), lwd = 3, cex = 0.7 )
abline(0,1)
X <- as.numeric(as.character(med$Group.1))
lines(X, med$x, col = 2, lwd = 3)
lines(X, q1$x,
col = 3, lty = 2, lwd = 3)
lines(X, q2$x,
col = 3, lty = 2, lwd = 3)
lines(X, q3$x,
col = 4, lty = 3, lwd = 3)
lines(X, q4$x,
col = 4, lty = 3, lwd = 3)
pp<- par("plt")
par("plt" = c(pp[1], pp[2], pp[3], 0.2))
par(new = TRUE)
hist(frcst.cut, breaks = bins, col = "blue",
main = "", axes = FALSE, xlim = c(lo, hi),
xlab = " " , ylab = " ")
axis(4, line = 0)
# mtext("Sample Size", side = 4, line = 1)
}
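# Usage sketch, mirroring the commented test data inside the function:
# set.seed(1)
# frcst <- round(runif(100, 20, 70))
# obs <- rnorm(100, frcst, 10)
# conditional.quantile(frcst, obs, bins = seq(0, 100, 10), thrs = c(10, 20))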
|
/scratch/gouwar.j/cran-all/cranData/verification/R/conditional.quantile.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/9/1 14:13:55
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
crps <- function(obs, pred, ...)
## Tilmann Gneiting's crps code, assumes pred is either a vector of length
## 2 (mu, sig) or a matrix of mu and sig if each forecast is different
{
if(is.null( dim(pred)) & length(pred)==2){mu <- pred[1];
sigma <- pred[2]} else {
mu<- as.numeric( pred[,1] ); sigma <- as.numeric( pred[,2]) }
z <- (obs-mu)/sigma ## center and scale
crps<- sigma * (z*(2*pnorm(z,0,1)-1) + 2*dnorm(z,0,1) - 1/sqrt(pi))
ign <- 0.5*log(2*pi*sigma^2) + (obs - mu)^2/(2*sigma^2)
pit <- pnorm(obs, mu,sigma )
return(list(crps = crps, CRPS = mean(crps), ign = ign, IGN = mean(ign), pit = pit) )
}
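# Usage sketch: CRPS of a N(0,1) forecast for pseudo-observations drawn from
# the same distribution. In this ideal case the expected CRPS is
# (sqrt(2) - 1)/sqrt(pi), roughly 0.23.
# set.seed(1)
# obs <- rnorm(500)
# crps(obs, c(0, 1))$CRPS    # close to 0.23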
|
/scratch/gouwar.j/cran-all/cranData/verification/R/crps.r
|
#----------------------------------------------------------
#
# Calculate CRPS decomposition.
# from alphas, betas and heavisides
# This function is called by crpsDecomposition
# Returns:
# CRPS: mean CRPS
# Reli: The reliability term of the CRPS
# CRPSpot: The potential CRPS
# Author: Ronald Frenette, Severe Weather Lab, Quebec region
# Jun 2009
#
#-----------------------------------------------------------
crpsFromAlphaBeta<-function(alpha,beta,heaviside0,heavisideN)
{
nMember=dim(alpha)[2] -1
Reli<-0
CRPSpot<-0
for (i in 0:nMember)
{
index<-i+1
meanoi<-0
meangi<-0
#Outlier
if (i==0)
{
meanbeta<-mean(beta[,index])
meanoi<-mean(heaviside0)
if (meanoi != 0)
meangi<-meanbeta/meanoi
}
if (i==nMember)
{
meanoi<-mean(heavisideN)
meanalpha<-mean(alpha[,index])
if (meanoi != 1)
meangi<-meanalpha/(1-meanoi)
}
#Non outliers
if (i>0 & i<nMember)
{
meanbeta<-mean(beta[,index])
meanalpha<-mean(alpha[,index])
meanoi<-meanbeta/(meanalpha+meanbeta)
meangi<-meanalpha+meanbeta
}
pi<-i/nMember
Reli<- Reli + meangi * (meanoi-pi) * (meanoi-pi)
CRPSpot<- CRPSpot + meangi * meanoi * (1.0 - meanoi)
}
CRPS<-Reli+CRPSpot
return(list(CRPS=CRPS,CRPSpot=CRPSpot,Reli=Reli))
}
#----------------------------------------------------------
#
# Calculate CRPS decomposition.
# from observations and ensemble forecast
# Returns:
# CRPS: mean CRPS
# Reli: The reliability term of the CRPS
# CRPSpot: The potential CRPS
# alpha: vector of alpha
# beta: vector of beta
# heaviside0: vector of heaviside values for the first outliers
# heavisideN: vector of heaviside values for the last outliers
# Author: Ronald Frenette, Severe Weather Lab, Quebec region
# Jun 2009
#
#-----------------------------------------------------------
crpsDecomposition<-function(obs,eps)
{
nMember=dim(eps)[2]
nObs<-length(obs)
alpha<-rep(0,nObs*(nMember+1))
beta<-rep(0,nObs*(nMember+1))
heaviside0<-rep(0,nObs)
heavisideN<-rep(0,nObs)
dim(alpha)<-c(nObs,nMember+1)
dim(beta)<-c(nObs,nMember+1)
prev<-t(apply(eps,1,sort))
# Calculate alpha and beta of observation
# heaviside for the two outliers
#1) Beta and alpha for Outliers
index<-which(obs < prev[,1])
beta[index,1]<-prev[index,1]-obs[index]
index<-which(obs > prev[,nMember])
alpha[index,nMember+1]<-obs[index]-prev[index,nMember]
#2) Heavisides for Outliers
index<-which(obs <= prev[,1])
heaviside0[index]<-1
index<-which(obs <= prev[,nMember])
heavisideN[index]<-1
#3) Non outlier
for (i in 1:(nMember-1))
{
index<-which(obs > prev[,i+1])
alpha[index,i+1]<-prev[index,i+1]-prev[index,i]
index<-which(obs < prev[,i])
beta[index,i+1]<-prev[index,i+1]-prev[index,i]
index<-which((prev[,i+1] > obs) & (obs > prev[,i]))
alpha[index,i+1]<-obs[index]-prev[index,i]
beta[index,i+1]<-prev[index,i+1]-obs[index]
}
crps<-crpsFromAlphaBeta(alpha,beta,heaviside0,heavisideN)
return(list(CRPS=crps$CRPS,CRPSpot=crps$CRPSpot,Reli=crps$Reli,alpha=alpha,beta=beta,heaviside0=heaviside0,heavisideN=heavisideN))
}
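# Usage sketch (hypothetical data): 50 observations and a 10-member ensemble.
# The decomposition satisfies CRPS = Reli + CRPSpot by construction.
# set.seed(42)
# obs <- rnorm(50)
# eps <- matrix(rnorm(50 * 10), nrow = 50)
# dec <- crpsDecomposition(obs, eps)
# dec$CRPS - (dec$Reli + dec$CRPSpot)    # ~ 0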
|
/scratch/gouwar.j/cran-all/cranData/verification/R/crpsDecomposition.r
|
#----------------------------------------------------------
#
# Calculate CRPS decomposition.
# from alphas, betas and heavisides
# This function is called by crpsDecomposition
# Returns:
# CRPS: mean CRPS
# Reli: The reliability term of the CRPS
# CRPSpot: The potential CRPS
# Author: Ronald Frenette, Severe Weather Lab, Quebec region
# Jun 2009
#
#-----------------------------------------------------------
crpsFromAlphaBeta<-function(alpha,beta,heaviside0,heavisideN)
{
nMember=dim(alpha)[2] -1
Reli<-0
CRPSpot<-0
for (i in 0:nMember)
{
index<-i+1
meanoi<-0
meangi<-0
#Outlier
if (i==0)
{
meanbeta<-mean(beta[,index])
meanoi<-mean(heaviside0)
if (meanoi != 0)
meangi<-meanbeta/meanoi
}
if (i==nMember)
{
meanoi<-mean(heavisideN)
meanalpha<-mean(alpha[,index])
if (meanoi != 1)
meangi<-meanalpha/(1-meanoi)
}
#Non outliers
if (i>0 & i<nMember)
{
meanbeta<-mean(beta[,index])
meanalpha<-mean(alpha[,index])
if ((meanalpha + meanbeta) != 0) {
meanoi <- meanbeta/(meanalpha + meanbeta)
}
# meanoi<-meanbeta/(meanalpha+meanbeta)
meangi<-meanalpha+meanbeta
}
pi<-i/nMember
Reli<- Reli + meangi * (meanoi-pi) * (meanoi-pi)
CRPSpot<- CRPSpot + meangi * meanoi * (1.0 - meanoi)
}
CRPS<-Reli+CRPSpot
return(list(CRPS=CRPS,CRPSpot=CRPSpot,Reli=Reli))
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/crpsFromAlphaBeta.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
"discrimination.plot" <- function(group.id, value, breaks = 11, main =
"Discrimination Plot", xlim = NULL, ylim = NULL, legend = FALSE,
leg.txt = paste("Model", sort(unique(group.id)) ), marginal =
TRUE, cols = seq(2, length(unique(group.id)) + 1 ), xlab = "Forecast", ... ){
dat <- data.frame(group.id = group.id, value = value)
groups <- sort(unique(group.id))
n.group <- length(groups )
# test data
# group.id <- dat$id2
# value <- dat$allmaxsev
# breaks <- 11
# main <- "Comparison of Distributions"
# leg.txt <- paste("Model", unique(dat$id2) )
# marginal <- TRUE
# cols <- seq(2, length(unique(dat$id2)) + 1 )
old.par <- par(no.readonly = TRUE) # original parameters
on.exit(par(old.par))
BRKS<- seq(min(value), max(value), length = breaks)
OUT <- matrix(NA, nrow = (breaks - 1), ncol = n.group)
for( i in 1:n.group){
XX <- hist(value[group.id == groups[i] ], plot = FALSE, breaks = BRKS)
OUT[,i]<- XX$counts/sum(XX$counts)
}
## limits for plots
mx.1 <- max(value)
mn.1 <- min(value)
mx.2 <- max(OUT)
mn.2 <- min(OUT)
if(!is.null(xlim) ){
mx.1 <- xlim[2]
mn.1 <- xlim[1]
}
if(!is.null(ylim) ){
mx.2 <- ylim[2]
mn.2 <- ylim[1]
}
if(marginal){
par(oma = c(0,0,2,0))
layout(matrix(1:2, nrow = 2), heights = c(1,4) )
if(legend){par(mar = c(0,4,1,9) )} else
{par(mar = c(0,4,1,1) ) }
boxplot(value~group.id, data = dat, horizontal = TRUE, axes = FALSE,
col = cols , ylim = c(mn.1, mx.1 ), ... )
axis(side = 2, at = 1:n.group, labels = leg.txt, las = 2 )
if(legend){par( mar = c(4,4,0,9))} else
{par(mar = c(4,4,0,1) )}
}else{if(legend){
par(mar = c(4,4,4,8) )
} ## close if legend
# par(mar = c(4,4,3,1) )
} ## close if marginal
plot(XX$mids, apply(OUT, 1, max) , type = "n", xlab = xlab, ylab
= "Relative Frequency", ylim = c(0, mx.2), xlim = c(mn.1, mx.1 ), ... )
for(i in 1:n.group){
points(XX$mids, OUT[,i], type = "b", col = cols[i], pch = 14+i )
}
if(marginal){mtext(main, outer = TRUE, ...)}else{title(main)}
abline(h = 0); abline(v=0)
if(legend){
par(xpd = NA)
xx <- mx.1 + 0.1*(mx.1 - mn.1)
yy <- mean(c(mx.2, mn.2))
legend(x= xx, y = yy, yjust = 0.5, legend = leg.txt, col= cols, pch = seq(15,
, 1, n.group), lty=1, merge=TRUE, cex = 0.8)
}# close if legend
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/discrimination.plot.R
|
fss <- function(obs, pred, w = 0, FUN = mean, ...){
    ### Compare matrices of observed and forecast values. Values can be
    ### calculated using different window sizes.
    ### with a window size of 0, obs is returned unchanged.
    obs.matrix <- matrix.func(DAT = obs, w = w, FUN = FUN)
    ### with a window size of 0, pred is returned unchanged.
    frcs.matrix <- matrix.func(DAT = pred, w = w, FUN = FUN)
    if(nrow(obs) != nrow(pred) | ncol(obs) != ncol(pred)) stop("Observation matrix and forecast matrix have different sizes")
n <- prod(dim(obs.matrix)) ### number of gridpoints
N <- sum((obs.matrix-frcs.matrix)^2, na.rm = TRUE)/n ### numerator
D <- (sum(obs.matrix^2, na.rm = TRUE) +sum(frcs.matrix^2, na.rm = TRUE))/n ### denominator
FSS <- 1 - N/D
return(FSS)
}
matrix.func <- function(DAT, w = 0, FUN = mean, ...){
    ### w is the "radius" of the window; e.g., w = 2 defines a 5 by 5 square
### define function
FUN <- match.fun(FUN)
### define output dimension
II <- nrow(DAT) - 2*w ## output row dimension
JJ <- ncol(DAT) - 2*w
if(JJ<=0|II <= 0) {stop("The window exceeds the size of the observation" ) }
OUT <- matrix(NA, nrow= II, ncol = JJ)
for(i in 1:II){
for(j in 1:JJ){
sub <- DAT[ i :(i + 2*w ),
j :(j + 2*w ) ] # subset data
OUT[i,j] <- FUN(sub,...)
} ## close J
} ## close I
return(OUT)
} ## close function
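# Usage sketch: the FSS of a field against itself is exactly 1, since the
# numerator vanishes. With w = 1 every grid point is replaced by the mean over
# its 3x3 neighbourhood before comparison.
# set.seed(1)
# obs <- matrix(rbinom(100, 1, 0.2), nrow = 10)
# fss(obs, obs, w = 1)    # -> 1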
|
/scratch/gouwar.j/cran-all/cranData/verification/R/fss.R
|
exponential <- function( sigma, theta, N ) {
x <- 0:round( N / 2 )
return( sigma^2 * exp( -3 * x / theta ) )
} # end of 'exponential' function.
WRSS.exp <- function( params, N, dcov, dat ) {
sigma <- params[1]
theta <- params[2]
acf.fit <- exponential( sigma, theta, N = N)
num.pairs <- N - dcov$lag
sum( num.pairs * ( acf.fit - dat$y )^2 )
} # end of 'WRSS.exp' function.
ORSS.exp <- function( params, N, dcov, dat ) {
sigma <- params[1]
theta <- params[2]
acf.fit <- exponential( sigma, theta, N = N)
sum(( acf.fit - dat$y )^2 )
} # end of 'ORSS.exp' function.
hg.test <- function( loss1, loss2, plot = FALSE , type) {
# Arguments (input):
#
# 'loss1', 'loss2', numeric vectors of equal length giving the two loss series
# (e.g., loss1 = abs( M1 - O ) and loss2 = abs( M2 - O ) ).
#
# type says whether the optimization uses WLS or OLS
# Value (output):
#
# numeric vector of length four giving the statistics and p-values calculated by:
# (1) fitting model using up to half the maximum lag and (2) setting the
# autocovariances to zero that correspond to small empirical ones.
# The loss differential series.
d <- loss1 - loss2
# length of the series.
N <- length( d )
# Calculate the autocovariances up to half the maximum lag.
dcov <- acf( d, type = "covariance", lag.max = round( N / 2 ), na.action = na.omit, plot = FALSE )
# Put the lags and estimated autocovariances into a data frame object.
dat <- data.frame( x = dcov$lag, y = dcov$acf[,, 1] )
# Get some kind of reasonable starting values for the exponential covariance parameters.
#theta.start <- ifelse( any( dat$y < 0.5 ), min( dat$x[ dat$y < 0.5 ] ), length( dat$x ) )
theta.start <- ifelse(dat$y[2]>0,-(dat$y[2] - dat$y[1]), -(0-dat$y[1]))
#ASH: Changed b/c the theta parameter should correspond to the speed at which the covariance decreases.
# If the second lag is negative, then just compute the slope from the first lag to 0.
sigma.start <-sqrt( dat$y[ 1 ] )
# Use 'nls' to try to fit the model to the data. Use try because often this doesn't work.
if(type=="WLS"){
f <- try(nlminb(c( sigma.start, theta.start ),
WRSS.exp, N = N, dcov = dcov, dat = dat,lower = c(0,0), upper = c(Inf,Inf)) )
} else if(type=="OLS"){
f <- try(nlminb(c( sigma.start, theta.start ),
ORSS.exp, N = N, dcov = dcov, dat = dat,lower = c(0,0), upper = c(Inf,Inf)) )
}
# If it worked, find the statistics and p-values. Otherwise, return NA's.
if( class( f ) != "try-error" ) {
xseq <- seq(0, 100, len = 1000)
co <- f$par
if( plot ) {
par( mfrow = c(3, 3) )
acf( loss1, main = "loss1", xlab = "" )
acf( loss2, main = "loss2", xlab = "", ylab = "" )
acf( d, main = "loss1 - loss2", xlab = "", ylab = "" )
pacf( loss1, main = "" )
pacf( loss2, main = "", ylab = "" )
pacf( d, main = "", ylab = "" )
plot(dat$x, dat$y, xlim = c(0, 100))
lines(xseq, (co[1]^2) * exp(-3 * xseq / co[2]))
abline(h = 0, col = 2)
} # end of if 'plot' stmt.
# Estimate the mean loss differential.
m <- mean( d, na.rm = TRUE )
# Use all lags to estimate the variance from the fitted model.
d1.var <- (co[1]^2) * exp( -3 * (0:N) / co[2] )
nd1var <- length( d1.var )
# Find lags that are small and set to zero (for d2.var).
id <- c(FALSE,abs(dat$y[2:length(dat$y)]) < 2 / sqrt(N))
  #ASH: the first lag, which is the variance, should always be included
nid <- length( id )
if( nid < nd1var ) id <- c( id, rep( TRUE, nd1var - nid ) )
d2.var <- d1.var
d2.var[ id ] <- 0
d2.var[ length(id):length(d1.var) ] <- 0
# Summing var/covs over the lags
var1 <- ( d1.var[1] + 2 * sum( d1.var[ 2:length(d1.var) ], na.rm = TRUE) ) / N
var2 <- ( d2.var[1] + 2 * sum( d2.var[ 2:length(d2.var) ], na.rm = TRUE) ) / N
# Estimate the two statistics.
S1 <- m / sqrt( var1 )
# if all autocovariances are zero, return NA for S2 and p-value 2.
# Actually, the way I'm doing it now, they will never all be zero.
if( !all( id ) ) S2 <- m / sqrt( var2 )
else S2 <- NA
# Find the associated p-values. Use a t distribution if N < 30.
if( N < 30 ) pval1 <- 2 * pt( -abs( S1 ), df = N - 1 )
else pval1 <- 2 * pnorm( -abs( S1 ) )
if( !all( id ) ) {
if( N < 30 ) pval2 <- 2 * pt( -abs( S2 ), df = N - 1 )
else pval2 <- 2 * pnorm( -abs( S2 ) )
} else S2 <- pval2 <- NA
out <- c( S1, pval1, S2, pval2 )
} else out <- rep( NA, 4 )
names( out ) <- c( "S 1", "pval 1", "S 2", "pval 2" )
return( out )
} # end of 'hg.test' function.
|
/scratch/gouwar.j/cran-all/cranData/verification/R/hgtest.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
leps <- function(x, pred, plot = TRUE, ...){
## leps function
## Corrections made to errors pointed out by Marin Mittermaier.
## 1/9/06
old.par <- par(no.readonly = TRUE) # all par settings which
# could be changed.
on.exit(par(old.par) )
Fn <- ecdf(x) ## empirical cdf
leps.0 <- mean(abs( Fn(pred) - Fn(x) ) )
leps.1 <- 2 - 3*(leps.0 + mean(Fn(pred)*(1-Fn(pred) ) )
+ mean(Fn(x)* (1-Fn(x) ) ) )
if(plot){
# if(is.null(titl)){titl<- "LEPS plot"}
plot(x, Fn(x), ylim = c(0,1),
ylab = expression(paste("Empirical CDF ", F[o](o)) ),
xlab = "Observation", ... )
}
r <- list(leps.0 = leps.0, leps.1 = leps.1)
invisible(r)
} # end of function.
|
/scratch/gouwar.j/cran-all/cranData/verification/R/leps.r
|
lines.attrib <- function(x, ...){
    lines(x$y.i, x$obar.i, ...)
}
####
lines.roc <- function(x, binormal = FALSE, ... ){
A <- roc.int(x$obs, x$pred, x$thres, binormal = binormal)
A<- as.data.frame(A)
if(binormal){
dat <- A
names(dat) <- c("thres", "proby", "probn", "zH", "zF")
dat <- dat[is.finite(dat$zH) & is.finite(dat$zF), ] ## reduce dat, get rid of nans and inf
new <- as.data.frame( matrix(qnorm(seq(0.005, 0.995, 0.005 ) ), ncol = 1) )
names(new) <- "zF"
A <- lm(zH ~ zF, data = dat)$fitted.values
B <- predict(lm(zH ~ zF, data = dat), newdata = new)
lines(pnorm(new$zF), pnorm(B), ...)
} else {lines(A$F, A$H, ...)}
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/linesADD.R
|
measurement.error <- function( obs, frcs = NULL, theta = 0.5, CI = FALSE, t = 1, u = 0, h = NULL, ...){
    ### If frcs is NULL, it is assumed that obs is a vector of cell counts
    ### entered as c(n11, n01, n10, n00), matching the code and message below.
if(is.null(frcs) & length(obs) ==4 ){
print(" Assume data entered as c(n11, n01, n10, n00)")
n11<- obs[1]
n10<- obs[3]
n01<- obs[2]
n00<- obs[4]} else{
        ### if obs is binary, convert frcs to binary using the threshold
        if( prod(unique(obs) %in% c(0,1) ) == 1 ){ ## obs is binary
if(is.null(h)){ frcs <- as.numeric(frcs > theta) } else {frcs <- as.numeric(frcs > h) }
}# close if not unique
A<- table(data.frame(obs, frcs) )
n11 <- A[2,2]
n00 <- A[1,1]
n10 <- A[2,1]
n01 <- A[1,2]
}# close is.null else
# No error checking, but either n_ij can be a vector or theta can be;
# both can NOT be vectors.
# briggs [email protected]
n<-n11+n01+n10+n00;
# to determine which of RARE of COMM(ON) is used for each set
z<-((n10+n11)/n);
z<-(z <= theta)*1;
q11<-n11/(n11+n01);
p11<-(q11-u)/(t-u);
q00<-n00/(n10+n00);
p00<-(q00-(1-t))/(t-u);
px<-(n11+n01)/n
py<-(n11+n10-n*u)/(n*(t-u))
#k_RARE <- (n11*(1-u-theta*(t-u))-n01*(u+theta*(t-u)))/((n11+n10-n*u)*(1-theta));
k_RARE <- (p11-theta)*px/((1-theta)*py);
G_RARE <- 2*n11*log(p11/theta) + 2*n01*log((1-p11)/(1-theta));
#k_COMM <- (n00*(t-(1-theta)*(t-u))-n10*(1-t+(1-theta)*(t-u)))/((n00+n01-n*(1-t))*theta);
k_COMM <- (p00-1+theta)*(1-px)/(theta*(1-py));
G_COMM <- 2*n00*log(p00/(1-theta)) +2*n10*log((1-p00)/(theta));
k<-k_RARE*z+k_COMM*(1-z);
G<-G_RARE*z+G_COMM*(1-z);
# if k<0 then G=0 and p=.5
z<-(k>0)*1;
G<-G*z;
p<-pchisq(G,1,lower.tail=FALSE)/2;
p<-p*z+0.5*(1-z);
bigP<-p11*z+p00*(1-z)
if(CI){
ciLO<-0;ciHI<-0;
rootLO<-0;rootHI<-0;
if(length(theta>=1)){
f_RARE<-function(p11,n11,n01,theta)
2*n11*log(p11/theta) + 2*n01*log((1-p11)/(1-theta))-
(2*n11*log((n11/(n11+n01))/theta) +
2*n01*log((n01/(n11+n01))/(1-theta))-4.25);
# 3.84 for climate
# 4.25 for markov
f_COMM<-function(p00,n00,n10,theta)
2*n00*log(p00/(1-theta)) + 2*n10*log((1-p00)/theta)-
(2*n00*log((n00/(n00+n10))/(1-theta)) +
2*n10*log((n10/(n00+n10))/theta)-4.25);
for (i in 1:length(theta)){
zz<-((n10[i]+n11[i])/n[i]);
zz<-(zz<=theta[i]);
tol<-0.0001
if(zz){
hi<-p11[i]-tol
lo<-p11[i]+tol;# print(c(0,hi,lo,n11[i],n01[i],theta[i]))
rootLO[i]<-uniroot(f_RARE,lower=0.001,upper=hi,n11=(n11[i]+tol),
n01=(n01[i]+tol),theta=theta[i])$root
ciLO[i] <- (rootLO[i]-theta[i])*px[i]/((1-theta[i])*py[i]);
if(n01[i]==0) {
rootHI[i]<-1
} else {
rootHI[i]<-uniroot(f_RARE,lower=lo,upper=(1-tol),n11=(n11[i]+tol),
n01=(n01[i]+tol),theta=theta[i])$root
}
ciHI[i] <- (rootHI[i]-theta[i])*px[i]/((1-theta[i])*py[i]);
} else {
hi<-p00[i]-tol
lo<-p00[i]+tol; # print(c(1,hi,lo,n00[i],n10[i],theta[i]))
rootLO[i]<-uniroot(f_COMM,lower=0.001,upper=hi,n00=(n00[i]+tol),
n10=(n10[i]+tol),theta=theta[i])$root
ciLO[i] <- (rootLO[i]-1+theta[i])*(1-px[i])/(theta[i]*(1-py[i]));
rootHI[i]<-uniroot(f_COMM,lower=lo,upper=(1-tol),n00=(n00[i]+tol),
n10=(n10[i]+tol),theta=theta[i])$root
ciHI[i] <- (rootHI[i]-1+theta[i])*(1-px[i])/(theta[i]*(1-py[i]));
}
}
}
} else {
ciLO <- CI
ciHI <- CI
}
answer<-list(z=z,n=n,k=k,G=G,p=p,theta=theta,ciLO=ciLO,ciHI=ciHI);
return(answer)
}
#skill <- function(x,y,theta=0.5,CI=FALSE,t=1,u=0) {
# # briggs [email protected]
# n<-length(theta);
# n11<-0;n01<-0;n10<-0;n00<-0;
# for (i in 1:n){
# fit<-table((x>theta[i])*1,y)
# n11[i]<-fit[2,2];
# n01[i]<-fit[2,1];
# n10[i]<-fit[1,2];
# n00[i]<-fit[1,1];
# }
# answer<-skillTABLE(n11,n01,n10,n00,theta,CI,t=1,u=0);
#return(answer)
# }
|
/scratch/gouwar.j/cran-all/cranData/verification/R/measurement.error.R
|
multi.cont <- function (DAT, baseline = NULL) {
P.DAT <- DAT/sum(DAT)
p.diag <- diag(P.DAT)
PC <- sum(diag(P.DAT))
p.forc <- apply(P.DAT, 1, sum)
p.obs <- apply(P.DAT, 2, sum)
K <- nrow(DAT)
S <- matrix(NA, ncol = nrow(DAT), nrow = nrow(DAT))
kap <- 1/(K - 1)
p.base <- p.obs
if(!is.null(baseline) ){
p.base <- baseline
                  if(nrow(DAT)!= ncol(DAT) | nrow(DAT)!= length(baseline)){ stop("Dimension of contingency table does not correspond to length of baseline probabilities") }
} ## close if null
## uses notation from Joliffe p. 89-90.
b <- 1/(K-1)
a <- numeric()
for(k in 1:K){
a[k] <- (1-sum(p.base[1:k]))/sum(p.base[1:k])
}
### fill diagonal
for(i in 1:K){
if(i ==1){
S[i,i]<- b* sum(a[1:(K-1)] )
}else if(i==K){
S[i,i]<- b*(sum(1/a[1:(i-1)] ) )} else{
S[i,i]<- b*(sum(1/a[1:(i-1)] ) + sum(a[i:(K-1)] )) } ## close else
} ## close i
### fill off diagonal
for(i in 1:(K-1)){
for(j in (i+1):K){
if(i == 1 & j == K){
S[j,i] <- S[i,j] <- b*(-1*(j-i))
}else
if(i == 1){
S[j,i] <- S[i,j] <- b*(-1* (j-i) + sum(a[j:(K-1)] ))}else
if(j == K){
S[j,i] <- S[i,j] <- b*( sum(1/a[1:(i-1)] )- (j-i) )
}else
{S[j,i] <- S[i,j] <- b*( sum(1/a[1:(i-1)] )- (j-i) + sum(a[j:(K-1)] ))}
} ## close i
} ## close j
GS <- sum(P.DAT * S)
bias <- p.forc/p.obs
pc <- diag(P.DAT)/p.obs
f <- h <- ts <- far <- bias2<- pc2<- numeric()
for(i in 1:nrow(P.DAT)) {
a <-P.DAT[i,i]
b <- P.DAT[i, -i]
c <- P.DAT[-i, i]
d <- P.DAT[-i, -i]
h[i] <- sum(a)/ (sum(a) + sum(c)) ## hit rate also pod
f[i] <- sum(b)/ (sum(b) + sum(d) ) ## false alarm rate
far[i]<- sum(b)/(sum(a) + sum(b) ) ## false alarm ratio
pc2[i]<- (sum(a) + sum(d))/ sum(P.DAT)
ts[i] <- sum(a)/(sum(a) + sum(b) + sum(c) )
# bias2[i] <- (sum(a) + sum(b))/ (sum(a) + sum(c))
}
# far <- (p.forc - p.diag)/p.forc
# d <- numeric()
# for (i in 1:nrow(P.DAT)) {
# d[i] <- sum(P.DAT[-i, -i])
# }
# ts <- p.diag/(1 - d)
hss <- (sum(p.diag) - sum(p.base * p.forc))/(1 - sum(p.base *
p.forc))
pss <- (sum(p.diag) - sum(p.base * p.forc))/(1 - sum(p.base *
p.base))
return(list(pc = PC, bias = bias, ts = ts, hss = hss, pss = pss, gs = GS, pc2 = pc2, h = h, f = f,
false.alarm.ratio = far))
} ## close function
############### example
#USFMATemp <- read.table("~/Desktop/verify/USFMA.txt",header=FALSE)
#FMA <- table(USFMATemp)
#
#JJA <- matrix(c(3,8,7,8,13,14,4,18,25), nrow = 3 )
#
#A<- multi.cont(FMA)
#B<- multi.cont(FMA, baseline = c(0.3, 0.4, 0.3) )
#
#D<- verify(FMA, baseline = c(0.3, 0.4, 0.3), frcst.type = "cat", obs.type = "cat" )
#
#summary(D)
#
#
#A$HSS
#B$HSS
#
#multi.cont.alt(JJA)
|
/scratch/gouwar.j/cran-all/cranData/verification/R/multi.cont.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/9/1 14:13:55
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
observation.error <- function(obs, gold.standard = NULL, ...){
if(is.null(gold.standard) & length(obs) ==4 ){
## assume data entered as c(n11, n10, n01, n00)
n11<- obs[1]
n10<- obs[2]
n01<- obs[3]
n00<- obs[4]
return( list( t = n11/(n11+n01), u = n10/(n10+n00) ))
} else {
A <- table(data.frame( obs, gold.standard) )
n11 <- A[2,2]
n00 <- A[1,1]
n10 <- A[2,1]
n01 <- A[1,2]
return( list(t = n11/(n11+n01), u = n10/(n10+n00) ) )
}
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/observation.error.R
|
performance.diagram <- function(...){
far <- seq(1,0, length = 501)### 1 - far
h <- seq(0,1, length = 501) ### pod
f <- function(far, h){ (1- far)*h/(1-far*(1-h) )}
g <- function(far, h){ h/(1-far)}
hh <- function(h, b) { h/b }
TS <- B <- matrix(NA, nrow = 501, ncol = 501)
for(i in 1:501){
for(j in 1:501){
TS[i,j] <- f(far[i], h[j])
}
}
contour(t(TS), xlim = c(0,1), ylim = c(0,1), xlab = "Success Ratio",
ylab = "Probability of Detection", ... )
BB <- c(0.3,0.5, 0.8, 1, 1.3, 1.5,2, 3, 5, 10)
x0 <- 0
y0 <- 0
x1 <- 1
y1 <- hh(1, 1/BB)
segments(x0, y0, x1, y1, lty = 2, col = 1)
id <- y1<= 1
mtext(side = 4, text = y1[id], at = y1[id], line = 0.3, cex = 0.7, las = 2)
id <- y1> 1
mtext(side = 3, text = y1[id], at = 1/y1[id], line = 0.3, cex = 0.7)
} ## close function
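# Usage sketch: draw the diagram (contours of threat score, dashed bias lines)
# and mark a hypothetical forecast system with POD = 0.7 and FAR = 0.3,
# i.e. success ratio 1 - 0.3 = 0.7.
# performance.diagram(main = "Performance Diagram")
# points(0.7, 0.7, pch = 19, col = 2)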
|
/scratch/gouwar.j/cran-all/cranData/verification/R/performance.diagram.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
plot.cont.cont <- function(x, ...){
    conditional.quantile(x$pred, x$obs, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/plot.cont.cont.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
plot.prob.bin <- function(x, ...){
attribute(x, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/plot.prob.bin.R
|
predcomp.test <- function(x, xhat1, xhat2, alternative = c("two.sided", "less", "greater"),
lossfun = "losserr", lossfun.args = NULL, test = c("DM", "HG"), ...) {
test <- match.arg( test )
alternative <- tolower( alternative )
alternative <- match.arg( alternative )
out <- list()
out$call <- match.call()
e1 <- do.call(lossfun, c(list(x=x, xhat=xhat1), lossfun.args))
e2 <- do.call(lossfun, c(list(x=x, xhat=xhat2), lossfun.args))
if( test == "DM" ) {
d <- e1 - e2
d.cov.obj <- acf(d, type="covariance", plot=FALSE, na.action = na.pass, ...)
d.cov <- d.cov.obj$acf[,,1]
out$method <- "Diebold-Mariano Test"
out$fitmodel <- "none"
n <- length(d)
d.var <- sum(c(d.cov[ 1 ], 2 * d.cov[-1])) / n
STATISTIC <- mean(d, na.rm=TRUE)/sqrt(d.var)
if (alternative == "two.sided") PVAL <- 2 * pnorm(-abs(STATISTIC))
else if (alternative == "less") PVAL <- pnorm(STATISTIC)
else if (alternative == "greater") PVAL <- pnorm(STATISTIC, lower.tail = FALSE)
} else {
fit <- hg.test( e1, e2, type = "OLS" )
STATISTIC <- fit[ 1 ]
PVAL <- fit[ 2 ]
out$method <- "Hering-Genton Test"
out$fitmodel <- "exponential"
}
alternative <- match.arg(alternative)
out$loss.function <- lossfun
out$loss.function.args <- lossfun.args
out$statistic <- STATISTIC
out$alternative <- alternative
out$p.value <- PVAL
out$data.name <- c(deparse(substitute(x)), deparse(substitute(xhat1)), deparse(substitute(xhat2)))
class(out) <- c("predcomp.test", "htest")
return(out)
} # end of 'predcomp.test' function.
losserr <- function(x, xhat, method=c("abserr","sqerr","simple","power","corrskill","dtw"),
scale=1, p=1, dtw.interr=c("abserr","sqerr","simple","power"), ...) {
method <- match.arg(method)
if(method=="abserr") return(abs((xhat - x)/scale))
else if(method=="sqerr") return(((xhat - x)/scale)^2)
else if(method=="simple") return((xhat - x)/scale)
else if(method=="power") return(((xhat - x)/scale)^p)
else if(method=="corrskill") return(scale*(x - mean(x,na.rm=TRUE))*(xhat - mean(xhat, na.rm=TRUE)))
else if(method=="dtw") {
dtw.interr <- match.arg(dtw.interr)
a <- dtw(xhat, x, step.pattern=asymmetric, ...)
w <- numeric(max(a$index1, a$index2)) + NA
w[a$index2] <- xhat[a$index1]
d1 <- abs(a$index1 - a$index2)
if(dtw.interr=="abserr") d2 <- abs((w - x)/scale)
else if(dtw.interr=="sqerr") d2 <- ((w - x)/scale)^2
else if(dtw.interr=="simple") d2 <- (w - x)/scale
else if(dtw.interr=="power") d2 <- ((w - x)/scale)^p
else stop("losserr: dtw.interr must be one of abserr, sqerr, simple, or power")
return(d1 + d2)
} else stop("losserr: method must be one of abserr, sqerr, simple, power, corrskill, or dtw")
} # end of 'losserr' function.
exponentialACV <- function(x, y, ...) {
args <- list(...)
if(!is.null(args$start)) res <- nls(y~sigma^2*exp(-3*x/theta), data=data.frame(x=x, y=y), ...)
else {
        if(any(y < 0.5)) theta.start <- min(x[y < 0.5], na.rm = TRUE)
else theta.start <- length(x)
res <- nls(y~sigma^2*exp(-3*x/theta), data=data.frame(x=x, y=y), start=list(sigma=sqrt(y[1]), theta=theta.start), ...)
}
return(res)
} # end of 'acvparametric' function.
summary.predcomp.test <- function(object, ...) {
a <- object
print(a$call)
cat("\n", "Loss function used is: ", a$loss.function, "\n")
if(!is.null(a$loss.function.args)) if(!is.null(a$loss.function.args$method)) {
m <- a$loss.function.args$method
if(m == "abserr") msg <- "Absolute Error Loss"
else if(m == "sqerr") msg <- "Square Error Loss"
else if(m == "simple") msg <- "Simple Error Loss"
else if(m == "power") {
if(is.null(a$loss.function.args$p)) p <- 1
else p <- a$loss.function.args$p
msg <- paste("Power Error loss with p = ", p, sep="")
} else if(m == "corrskill") msg <- "Correlation Skill"
else if(m == "dtw") {
if(is.null(a$loss.function.args$dtw.interr)) interr <- "Absolute Error Loss"
else if(a$loss.function.args$dtw.interr == "abserr") interr <- "Absolute Error Loss"
else if(a$loss.function.args$dtw.interr == "sqerr") interr <- "Square Error Loss"
else if(a$loss.function.args$dtw.interr == "simple") interr <- "Simple Error Loss"
else if(a$loss.function.args$dtw.interr == "power") {
if(is.null(a$loss.function.args$p)) p <- 1
else p <- a$loss.function.args$p
msg <- paste("Power Error loss with p = ", p, sep="")
}
msg <- paste("Discrete Time Warping Loss with Intensity Error = ", interr, sep="")
}
cat(paste("Method used: ", msg, sep=""), "\n")
a$loss.message <- msg
}
if(is.null(a$loss.function.args)) if(a$loss.function == "losserr") cat("Absolute Error Loss\n")
print(a)
invisible(a)
} # end of 'summary.predcomp.test' function.
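# Usage sketch (hypothetical series): compare a skilful and an unskilful
# forecast of the same series with the default Diebold-Mariano test and
# absolute-error loss.
# set.seed(1)
# x <- rnorm(100)
# xhat1 <- x + rnorm(100, sd = 0.5)    # informative forecast
# xhat2 <- rnorm(100)                  # unrelated forecast
# predcomp.test(x, xhat1, xhat2)$p.value    # typically very small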
|
/scratch/gouwar.j/cran-all/cranData/verification/R/predcomptests.R
|
probcont2disc <- function(x, bins = seq(0,1,0.1) ){
## converts continuous prob forecasts into a range of discrete
  ## probability forecasts assigned the value at the midpoint of their
## bin.
if(prod(x >= 0 & x <= 1) != 1)
{stop("Are you sure x is a probability? \n
Values must be between 0 and 1 \n")}
if(max(x) > max(bins) | min(x) < min(bins) ){stop("
Bins must span the interval of predictions.") }
mids <- bins[-length(bins)] + 0.5* diff(bins)
xx<- cut(x, breaks = bins, include.lowest = TRUE)
# new <- mids[xx]
return(list(new = mids[xx], mids = mids))
} ## close function
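# Usage sketch: with the default bins, each probability is mapped to the
# midpoint of the bin containing it.
# probcont2disc(c(0.12, 0.34, 0.99))$new    # -> 0.15 0.35 0.95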
|
/scratch/gouwar.j/cran-all/cranData/verification/R/probcont2disc.R
|
qrel.plot <- function(A, ...){
if(!is.element("quantile", class(A))) {
stop("qrel.plot: this function works only on quantile forecasts. \n")
} else {
qrelPlotDefault(y.i = A$y.i, obar.i = A$obar.i, prob.y = A$prob.y, ...)
}
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/qrel.plot.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
#
# changes for quantile verification S. Bentzien 2013
#
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
qrelPlotDefault <- function(y.i, obar.i, prob.y, titl = NULL, legend.names = NULL, ...){
old.par <- par(no.readonly = TRUE) # all par settings which
# could be changed.
on.exit(par(old.par))
obar.i<- as.matrix(obar.i)
if(is.null(legend.names)) legend.names<- paste("Model", seq(1,dim(obar.i)[2]))
prob.y<- as.matrix(prob.y)
plot.range <- range(obar.i,y.i)
plot(y.i, obar.i[,1], col = 2, lwd = 2, type = "n",
xlim = plot.range, ylim = plot.range,
xlab = "quantile forecast",
ylab = "conditional observed quantile",
)
if(is.null(titl)){title("Q-REL Plot")}else{
title(titl)
}
m<- dim(obar.i)[2]
for(i in 1:m){
points(y.i, obar.i[,i], type = "b", col = 1+i, lty = i, lwd = 2)
}
abline(0,1)
if(m == 1){
leg.txt<- legend.names[1]
legend("topleft", leg.txt, bty = 'n', col = 2, lwd = 2, pch = 1, lty = 1)
}
if(m >= 2){
leg.txt<- legend.names[1:m]
legend("topright", leg.txt, bty = 'n', col = c(2:(1+m) ), lwd = 2, pch = 1, lty = c(1:m) )
}
## rank histogram plot in lower corner.
pp<- par("plt")
# par("plt" = c(0.7, pp[2], pp[3], 0.3))
if(m<=2){ # if one or two forecasts are used, plot lower box plot.
par("plt" = c(pp[2] - 0.2 , pp[2], pp[3], pp[3]+ 0.2) )
par(new = TRUE)
barplot(prob.y[,1], axes = FALSE, axisnames = FALSE)
axis(4)
box() }
if(m == 2){
par("plt" = c(pp[1], pp[1]+ 0.2, pp[4] - 0.2, pp[4] ))
par(new = TRUE)
barplot(prob.y[,2], axes = FALSE, xlab = "", axisnames = FALSE)
axis(4)
box()
}# close if m = 2
invisible()
}# close function
|
/scratch/gouwar.j/cran-all/cranData/verification/R/qrelPlotDefault.R
|
`quantileScore` <-
function (obs, pred, p, breaks, ...)
{
id <- is.finite(obs) & is.finite(pred)
obs <- obs[id]
pred <- pred[id]
#pred <- round(pred, 8)
#breaks <- round(breaks, 8 )
# baseline
obar <- quantile(obs,p,type=8)
# overall quantile score original forecasts
qs.orig <- mean( check.func(obs-pred,p) )
# discretize forecast values
XX <- quantile2disc(pred, bins = breaks)
pred <- XX$new
y.i <- XX$mids
# number of forecast-observation pairs
N <- length(obs)
K <- length(y.i)
# number of forecasts within each bin
N.pred <- aggregate(pred, by = list(pred), length)$x
# conditional observed quantile
obar.i <- aggregate(obs, by = list(pred), quantile, p)$x
# overall quantile score
qs <- mean( check.func(obs-pred,p) )
qs.baseline <- mean( check.func(obs-obar,p) )
ss <- 1 - qs/qs.baseline
# decomposition
d.CQ <- rep(NA,K)
d.PQ <- rep(NA,K)
for(k in 1:K){
ind.k <- which(pred==as.factor(y.i[k]))
d.CQ[k] <- sum(check.func(obs[ind.k]-obar,p) - check.func(obs[ind.k]-obar.i[k],p))
d.PQ[k] <- sum(check.func(obs[ind.k]-y.i[k],p) - check.func(obs[ind.k]-obar.i[k],p))
}
qs.rel <- sum( d.PQ )/N
qs.res <- sum( d.CQ )/N
qs.uncert <- qs.baseline
check <- qs-(qs.rel - qs.res + qs.uncert)
prob.y <- N.pred/N
out <- list(qs.orig=qs.orig, qs = qs, qs.baseline = qs.baseline,
ss = ss, qs.reliability = qs.rel, qs.resol = qs.res,
qs.uncert = qs.uncert, y.i = y.i, obar.i = obar.i, prob.y = prob.y,
obar = obar, breaks = breaks, check = check)
class(out) <- "quantile"
return(out)
}
quantile2disc <- function(x, bins) {
## converts continuous (quantile) forecasts into a range of discrete
## (quantile) forecasts assigned to the mean value within each bin
if(max(x) > max(bins) | min(x) < min(bins) ){stop("
Bins must span the interval of predictions.") }
xx <- cut(x, breaks = bins, include.lowest = TRUE)
ind <- seq(1,length(bins)-1)[xx]
mids <- aggregate(x, by = list(ind), mean)
# in case a bin is not occupied
new.mids <- rep(NA,length(bins)-1)
new.mids[mids$Group.1] <- mids$x
return(list(new = new.mids[xx], mids = mids$x))
}
check.func <- function(u,p) {
    ## calculates the check (pinball) loss for errors u at quantile level p
    ## Yu et al. (2001)
    rho <- (abs(u) + (2*p - 1)*u)*0.5
    return(rho)
}
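## Worked example (editor's illustration): with p = 0.9 the loss for an
## error u = 1 is (|1| + 0.8 * 1)/2 = 0.9, while u = -1 gives
## (|-1| - 0.8)/2 = 0.1, so under-prediction is penalized 9 times as much.
# check.func(c(1, -1), p = 0.9)  # 0.9 0.1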
|
/scratch/gouwar.j/cran-all/cranData/verification/R/quantileScore.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
## ranked histogram plot.
ranked.hist <- function(frcst, nbins = 10, titl = NULL){
if( min(frcst) < 0 | max(frcst) > 1 ){warning("Forecast values outside of the [0,1] interval. \n")}
brks<- seq(0,1, length = nbins + 1)
hist(frcst, breaks = brks, main = "")
if(is.null(titl)){title("Ranked Histogram")}else
{title(titl)}
abline(h = length(frcst)/nbins, lty = 2)
invisible()
} # close function
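## Example (editor's sketch, simulated data): a calibrated ensemble yields a
## roughly flat histogram; the dashed line marks the expected count per bin.
# set.seed(1)
# ranked.hist(runif(500), nbins = 10)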
|
/scratch/gouwar.j/cran-all/cranData/verification/R/ranked.hist.R
|
#----------------------------------------------------------
#
# Calculate RCRV reduced centered random variable
# with bias and dispersion
# Return:
# bias: Bias
# disp: dispersion
# y : vector of y used to calculate bias and dispersion
# obsError: observation error used in the calculation
# Author: Ronald Frenette, Severe Weather Lab, Quebec region
# Jun 2009
#
#-----------------------------------------------------------
rcrv<-function(obs,epsMean,epsVariance,obsError)
{
y<-(obs-epsMean)/(sqrt(epsVariance+(obsError*obsError)))
bias<-mean(y)
disp<-sqrt(var(y))
return(list(bias=bias,disp=disp,y=y,obsError=obsError))
}
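## Example (editor's sketch, simulated data): a perfectly calibrated
## ensemble gives bias near 0 and dispersion near 1.
# set.seed(1)
# obs <- rnorm(1000)
# rcrv(obs, epsMean = 0, epsVariance = 1, obsError = 0)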
|
/scratch/gouwar.j/cran-all/cranData/verification/R/rcrv.r
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
reliability.plot<- function(x,...)
UseMethod("reliability.plot")
|
/scratch/gouwar.j/cran-all/cranData/verification/R/reliability.plot.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
reliability.plot.default<- function(x, obar.i, prob.y, titl = NULL, legend.names = NULL, ...){
## this function is similar to an attribute plot but somewhat simplified.
## The differences are as follows:
## if obar.i is a matrix, multiple lines will be plotted on a single graph.
## if obar.i is a matrix with 2 columns, or if 2 verify objects are used as
## inputs, 2 ranked histograms will be printed.
# x<- c(0,0.05, seq(0.1, 1, 0.1))
# obar.i <- c(0.006, 0.019, 0.059, 0.15, 0.277, 0.377, 0.511, 0.587, 0.723, 0.779, 0.934, 0.933)
# obar.i<- data.frame(obar.i, runif(12) )
# obar.i<- data.frame(obar.i, runif(12) )
# prob.y<- c(0.4112, 0.0671, 0.1833, 0.0986, 0.0616, 0.0366, 0.0303, 0.0275, 0.0245, 0.022, 0.017, 0.0203)
# a<- runif(12)
# prob.y<- data.frame(prob.y,a/sum(a))
# prob.y<- data.frame(prob.y,a/sum(a))
# titl <- "Sample Reliability Plot"
# legend.names<- c("Test 1", "Test 2", "Test 3")
old.par <- par(no.readonly = TRUE) # all par settings which
# could be changed.
on.exit(par(old.par))
obar.i<- as.matrix(obar.i)
if(is.null(legend.names)) legend.names<- paste("Model", seq(1,dim(obar.i)[2]))
prob.y<- as.matrix(prob.y)
plot(x, obar.i[,1], col = 2, lwd = 2, type = "n",
xlim = c(0,1), ylim = c(0,1),
xlab = expression( paste("Forecast probability, ", y[i] ) ),
ylab = expression( paste("Observed relative frequency, ", bar(o)[i] ))
)
if(is.null(titl)){title("Reliability Plot")}else{
title(titl)
}
m<- dim(obar.i)[2]
for(i in 1:m){
points(x, obar.i[,i], type = "b", col = 1+i, lty = i, lwd = 2)
}
abline(0,1)
if(m == 1){
leg.txt<- legend.names[1]
legend(0.8, 0.35, leg.txt, bty = 'n', col = 2, lwd = 2, pch = 1, lty = 1)
}
if(m >= 2){
leg.txt<- legend.names[1:m]
legend(0.8, 0.4, leg.txt, bty = 'n', col = c(2:(1+m) ), lwd = 2, pch = 1, lty = c(1:m) )
}
## rank histogram plot in lower corner.
pp<- par("plt")
# par("plt" = c(0.7, pp[2], pp[3], 0.3))
if(m<=2){ # if one or two forecasts are used, plot lower box plot.
par("plt" = c(pp[2] - 0.2 , pp[2], pp[3], pp[3]+ 0.2) )
par(new = TRUE)
barplot(prob.y[,1], axes = FALSE, axisnames = FALSE)
axis(4)
box() }
if(m == 2){
par("plt" = c(pp[1], pp[1]+ 0.2, pp[4] - 0.2, pp[4] ))
par(new = TRUE)
barplot(prob.y[,2], axes = FALSE, xlab = "", axisnames = FALSE)
axis(4)
box()
}# close if m = 2
invisible()
}# close function
|
/scratch/gouwar.j/cran-all/cranData/verification/R/reliability.plot.default.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
reliability.plot.verify<- function(x, ...){
#if(sum(class(A) == "prob.bin") < 1){
# warning("This function works only on probability forecast \n binary outcome objects. \n")}else{
assign("y.i", x$y.i)
assign("obar.i", x$obar.i)
assign("prob.y", x$prob.y)
do.call("reliability.plot.default", list(y.i, obar.i, prob.y, ...))
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/reliability.plot.verify.R
|
`roc.area` <-
function (obs, pred)
{
id <- is.finite(obs)&is.finite(pred)
obs<- obs[id]
pred <- pred[id]
n1 <- sum(obs)
n<- length(obs)
A.tilda <- (mean(rank(pred)[obs == 1]) - (n1 + 1)/2)/(n - n1)
    stats <- wilcox.test(pred[obs == 1], pred[obs == 0], alternative = "greater")
return(list( A = A.tilda, n.total = n, n.events = n1, n.noevents = sum(obs == 0),
p.value = stats$p.value))
}
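## Example (editor's sketch, simulated data): a skillful forecast separates
## events from non-events, giving an area A well above 0.5.
# set.seed(1)
# obs  <- rbinom(200, 1, 0.3)
# pred <- ifelse(obs == 1, rbeta(200, 4, 2), rbeta(200, 2, 4))
# roc.area(obs, pred)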
|
/scratch/gouwar.j/cran-all/cranData/verification/R/roc.area.r
|
####################################################### internal roc function
roc.int <- function(x, pred, thres, binormal ){ # internal function
# that returns plot points
thres <- c(thres[1] - 1, thres )
H <- numeric()
F <- numeric()
n.thres <- length(thres) # number of decision thresholds (including the added lower bound)
a <- x > 0 # event happened
a.sum <- sum(a) # n*1
a.not.sum <- sum(!a) # n*0
for(i in 1:(n.thres) ){
b <- pred > thres[i] # predict if value is greater than
# decision thresholds
# add point c(1, 1)
# browser()
H[i]<- sum( b * a )/ a.sum ## hit rate
F[i]<- sum( b * (!a) )/ a.not.sum ## False alarm rate
} ## close for loop 1:n.thres
if(binormal){
zH <- c( qnorm( H ) ) # NA are for top and bottom value
zF <- c( qnorm(F) )# NA are for top and bottom value
} else {
zH <- rep(NA, n.thres)
zF <- rep(NA, n.thres)
} ## close if binormal
return(cbind(thres, H, F, zH, zF))
} # close roc function
##############################################################
|
/scratch/gouwar.j/cran-all/cranData/verification/R/roc.int.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
"roc.plot" <- function(x, ...){
UseMethod("roc.plot")
## if a verify.prob.bin object is entered, create ROC plot or multiple ROC
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/roc.plot.R
|
`roc.plot.default` <-
function (x, pred, thresholds = NULL, binormal = FALSE, legend = FALSE,
leg.text = NULL, plot = "emp", CI = FALSE, n.boot = 1000,
alpha = 0.05, tck = 0.01, plot.thres = seq(0.1, 0.9, 0.1), show.thres = TRUE,
main = "ROC Curve", xlab = "False Alarm Rate", ylab = "Hit Rate",
extra = FALSE, ...)
{
pred <- as.matrix(pred)
f <- function(x){prod(is.finite(x))}
id <- is.finite(x) & apply(pred, 1, f )
x <- x[id]
pred <- pred[id,]
##### insert checks
if(!is.null(thresholds)){
if(min(diff(thresholds))<0){stop("Thresholds must be listed in ascending order")}
}
#####
if(!is.null(plot) && (plot == "binorm" | plot == "both") & binormal == FALSE) {
stop("binormal must be TRUE in order to create a binormal plot")
}
pred <- as.matrix(pred)
n.forc <- ncol(pred)
#### if thresholds is null
if (is.null(thresholds)) {
thresholds <- sort(as.numeric(unique(pred)))
        if(length(thresholds) > 10000 ){warning("More than 10,000 unique predictions are used as thresholds. 100 thresholds based on prediction quantiles are used instead." )
                                        thresholds <- 100}
        if(length(thresholds) > 1000 ){warning("A large number of unique predictions is being used as thresholds. Consider specifying thresholds." )}
}
#### insert check on the number of thresholds?
if (length(thresholds) == 1) {
n.thres.bins <- thresholds
t <- seq(0, 1, 1/n.thres.bins)
thresholds <- quantile(pred, t)
}
#####
orig <- as.data.frame(roc.int(x, pred, thres = plot.thres,
binormal))
A.boot <- NULL
if (CI) {
A.boot <- numeric()
D <- cbind(x, pred)
A <- matrix(NA, ncol = 3)
for (i in 1:n.boot) {
nr <- nrow(D)
ind <- sample(1:nr, size = nr, replace = TRUE)
sub <- D[ind, ]
A.boot[i] <- roc.area(D[ind, 1], D[ind, 2])$A
for (j in 1:length(plot.thres)) {
A <- rbind(A, roc.int(sub[, 1], sub[, 2], plot.thres[j],
binormal = binormal)[2, 1:3])
} ### 1:length(plot.thres)
} ## 1:n.boot
BOOT <- as.data.frame(A[-1, ])
xleft <- aggregate(BOOT$F, by = list(BOOT$thres), quantile,
alpha)$x
ybot <- aggregate(BOOT$H, by = list(BOOT$thres), quantile,
alpha)$x
xright <- aggregate(BOOT$F, by = list(BOOT$thres), quantile,
1 - alpha)$x
ytop <- aggregate(BOOT$H, by = list(BOOT$thres), quantile,
1 - alpha)$x
box.corners <- cbind(xleft, ybot, xright, ytop)
row.names(box.corners) <- plot.thres
} ### close CI
DAT <- array(NA, dim = c(length(thresholds) + 1, 5, n.forc))
VOLS <- matrix(nrow = n.forc, ncol = 3)
binormal.pltpts <- list()
for (j in 1:n.forc) {
DAT[, , j] <- roc.int(x, pred[, j], thresholds, binormal = binormal)
if (binormal) {
dat <- as.data.frame(DAT[, , j])
names(dat) <- c("thres", "proby", "probn", "zH",
"zF")
dat <- dat[is.finite(dat$zH) & is.finite(dat$zF),
]
new <- as.data.frame(matrix(qnorm(seq(0.005, 0.995,
0.005)), ncol = 1))
names(new) <- "zF"
mod <- lm(zH ~ zF, data = dat)
A <-mod$fitted.values
B <- predict(lm(zH ~ zF, data = dat), newdata = new)
binormal.pltpts[[j]] <- data.frame(t = new$zF, x = pnorm(new$zF),
y = pnorm(B))
# binormal.area <- sum(0.005 * pnorm(B), na.rm = TRUE)
aa <- mod$coefficients[1]
bb <- mod$coefficients[2]
binormal.area <- pnorm(aa/sqrt(1+bb^2) ) ## from Pepe p. 83
} ## if binormal
else {
binormal.area <- NA
}
v <- roc.area(x, pred[, j])
VOLS[j, 1] <- v$A
VOLS[j, 2] <- v$p.value
VOLS[j, 3] <- binormal.area
} ## close j in 1:n.forc
VOLS <- data.frame(paste("Model ", seq(1, n.forc)), VOLS)
names(VOLS) <- c("Model", "Area", "p.value", "binorm.area")
r <- structure(list(plot.data = DAT, roc.vol = VOLS, binormal.ptlpts = binormal.pltpts,
A.boot = A.boot), class = "roc.data")
if (!is.null(plot)) {
par(mar = c(4, 4, 4, 1))
plot(DAT[, 3, ], DAT[, 2, ], type = "n", xlim = c(0,
1), ylim = c(0, 1), main = main, xlab = xlab, ylab = ylab,
...)
abline(h = seq(0, 1, by = 0.1), v = seq(0, 1, by = 0.1),
lty = 3, lwd = 0.5, col = "grey")
abline(0, 1)
if (length(thresholds) < 16) {
L <- "b"
}
else {
L <- "l"
}
}
if(!is.null(plot) && (plot == "emp" | plot == "both")) {
for (i in 1:n.forc) {
points(DAT[, 3, i], DAT[, 2, i], col = i, lty = i,
type = "l", lwd = 2)
if (!is.null(plot.thres)) {
if (show.thres) {
a1 <- DAT[, 1, 1]
b1 <- plot.thres
a <- matrix(a1, ncol = length(b1), nrow = length(a1))
X <- abs(scale(a, center = b1, scale = FALSE))
X[X > 0.5 * max(diff(b1))] <- NA
id <- as.numeric(apply(X, 2, which.min))
id2 <- is.finite(id)
id <- id[id2]
rm(a1, b1, a)
points(DAT[id, 3, 1], DAT[id, 2, 1], col = 1,
pch = 19)
text(DAT[id, 3, i], DAT[id, 2, i], plot.thres[id2],
pos = 4, offset = 2)
}
}
}
}
if (!is.null(plot) && (plot == "binorm" || plot == "both")) {
for (i in 1:n.forc) {
dat <- binormal.pltpts[[i]]
points(dat$x, dat$y, col = 2, lty = i, type = "l",
lwd = 2)
}
}
if (!is.null(plot) && extra) {
if (plot == "both") {
text(0.6, 0.1, "Black lines are the empirical ROC")
text(0.6, 0.07, "Red lines and symbols are the bi-normal ROC")
            text(0.6, 0.04, "The area under the binormal curve is in parentheses.")
}
if (plot == "emp") {
text(0.6, 0.1, "Black lines are the empirical ROC")
}
if (plot == "binorm") {
text(0.6, 0.1, "Red lines are the bi-normal ROC")
}
}
if (!is.null(plot) && CI) {
for (i in 1:nrow(box.corners)) {
lines(box.corners[i, c(1, 3)], rep(orig$H[i + 1],
2), lwd = 1)
lines(rep(box.corners[i, 1], 2), c(orig$H[i + 1] -
tck, orig$H[i + 1] + tck), lwd = 1)
lines(rep(box.corners[i, 3], 2), c(orig$H[i + 1] -
tck, orig$H[i + 1] + tck), lwd = 1)
lines(rep(orig$F[i + 1], 2), box.corners[i, c(2,
4)], lwd = 1)
lines(c(orig$F[i + 1] - tck, orig$F[i + 1] + tck),
rep(box.corners[i, 2], 2), lwd = 1)
lines(c(orig$F[i + 1] - tck, orig$F[i + 1] + tck),
rep(box.corners[i, 4], 2), lwd = 1)
}
}
if (!is.null(plot) && legend) {
if (is.null(leg.text)) {
leg.text <- paste("Model ", LETTERS[seq(1, n.forc)])
}
if (plot == "emp") {
leg.text <- paste(leg.text, " ", formatC(VOLS$Area,
digits = 3))
}
if (plot == "binorm") {
leg.text <- paste(leg.text, " ", formatC(VOLS$binorm.area,
digits = 3))
}
if (plot == "both") {
leg.text <- paste(leg.text, " ", formatC(VOLS$Area,
digits = 3), " (", formatC(VOLS$binorm.area,
digits = 3), ")")
}
legend(list(x = 0.6, y = 0.4), legend = leg.text, bg = "white",
cex = 0.6, lty = seq(1, n.forc), col = c("black",
"red", "blue"), merge = TRUE)
}
invisible(r)
}
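## Example (editor's sketch, simulated data): empirical and binormal ROC
## curves for a single probabilistic forecast.
# set.seed(1)
# obs  <- rbinom(500, 1, 0.3)
# pred <- pnorm(rnorm(500, mean = obs))
# roc.plot(obs, pred, binormal = TRUE, plot = "both", legend = TRUE)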
|
/scratch/gouwar.j/cran-all/cranData/verification/R/roc.plot.default.r
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
roc.plot.prob.bin<- function(x, ...){
# retrieves data from a verify object.
assign("obs", x$obs)
assign("pred", x$pred)
#assign("thresholds", x$thres)
do.call("roc.plot.default", list(obs, pred, ...) )
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/roc.plot.prob.bin.R
|
## ranked probability score
rps <- function(obs, pred, baseline = NULL){
## pred is a matrix. each column the prob of a given outcome
## obs is a vector with the number of the column that occured.
################
id <- is.finite(obs) & is.finite(apply(pred, 1, sum) )
obs <- obs[id]
nr <- sum(id) # number of retained forecast-observation pairs
pred <- matrix(pred[id,], nrow = nr )
########
OBS <- matrix(0, nrow = length(obs), ncol = ncol(pred) )
## a loop, but it seems quick enough
for(i in 1:nrow(OBS) ){
OBS[i,obs[i] ] <- 1
}
OBS2 <- OBS
for(i in 1:ncol(OBS) ){
OBS2[,i] <- apply(matrix(OBS[, 1:i], nrow = nr), 1, sum)
}
PRED <- OBS
for(i in 1:ncol(pred) ){
PRED[,i] <- apply(matrix(pred[, 1:i], nrow = nr), 1, sum)
}
RPS <- mean( apply( ( PRED - OBS2)^2,1, sum) )/ ( ncol(pred) -1 )
####
if(is.null(baseline)){
xxx <- apply(OBS, 2, sum)/ nrow(OBS) ## avg occurrence of each event
pred.climo <- matrix( xxx, nrow = nrow(OBS), ncol = ncol(OBS), byrow = TRUE)
} else{ ## climo should be a vector of the climo probs for each cat.
pred.climo <- matrix( baseline, nrow = nrow(OBS), ncol = ncol(OBS), byrow = TRUE)
}
PRED.climo <- OBS
for(i in 1:ncol(pred.climo) ){
PRED.climo[,i] <- apply(matrix(pred.climo[, 1:i], nrow = nr), 1, sum)
}
RPS.climo <- mean( apply( ( PRED.climo - OBS2)^2,1, sum) )/ ( ncol(PRED.climo) -1 )
RPSS <- 1 - RPS/RPS.climo
return(list(rps = RPS, rpss = RPSS, rps.clim = RPS.climo ) )
}
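## Example (editor's sketch): three-category forecasts, where obs holds the
## index of the category that occurred for each case.
# pred <- matrix(c(0.7, 0.2, 0.1,
#                  0.2, 0.5, 0.3,
#                  0.1, 0.2, 0.7), nrow = 3, byrow = TRUE)
# obs <- c(1, 2, 3)
# rps(obs, pred)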
|
/scratch/gouwar.j/cran-all/cranData/verification/R/rps.r
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
summary.bin.bin <- function(object, ...){
## print function for binary forecast, binary outcome
cat("\nThe forecasts are binary, the observations are binary.\n")
cat("The contingency table for the forecast \n")
print(object$tab)
cat("\n")
cat(paste("PODy = ", formatC(object$POD, digits = 4), "\n"))
cat(paste("Std. Err. for POD = ", formatC(object$POD.se), "\n"))
cat(paste("TS = ", formatC(object$TS, digits = 4), "\n"))
cat(paste("Std. Err. for TS = ", formatC(object$TS.se), "\n"))
cat(paste("ETS = ", formatC(object$ETS, digits = 4), "\n"))
cat(paste("Std. Err. for ETS = ", formatC(object$ETS.se), "\n"))
cat(paste("FAR = ", formatC(object$FAR, digits = 4), "\n"))
cat(paste("Std. Err. for FAR = ", formatC(object$FAR.se), "\n"))
cat(paste("HSS = ", formatC(object$HSS, digits = 4), "\n"))
cat(paste("Std. Err. for HSS = ", formatC(object$HSS.se), "\n"))
cat(paste("PC = ", formatC(object$PC, digits = 4), "\n"))
cat(paste("Std. Err. for PC = ", formatC(object$PC.se), "\n"))
cat(paste("BIAS = ", formatC(object$BIAS, digits = 4), "\n"))
cat(paste("Odds Ratio = ", formatC(object$theta, digits = 4), "\n"))
cat(paste("Log Odds Ratio = ", formatC(object$log.theta, digits = 4), "\n"))
cat(paste("Std. Err. for log Odds Ratio = ", formatC(object$LOR.se), "\n"))
cat(paste("Odds Ratio Skill Score = ", formatC(object$orss, digits = 4), "\n"))
cat(paste("Std. Err. for Odds Ratio Skill Score = ", formatC(object$ORSS.se), "\n"))
cat(paste("Extreme Dependency Score (EDS) = ", formatC(object$eds, digits = 4), "\n"))
cat(paste("Std. Err. for EDS = ", formatC(object$eds.se, digits=4), "\n"))
cat(paste("Symmetric Extreme Dependency Score (SEDS) = ", formatC(object$seds, digits = 4), "\n"))
cat(paste("Std. Err. for SEDS = ", formatC(object$seds.se, digits=4), "\n"))
cat(paste("Extremal Dependence Index (EDI) = ", formatC(object$EDI, digits=4), "\n"))
cat(paste("Std. Err. for EDI = ", formatC(object$EDI.se, digits=4), "\n"))
cat(paste("Symmetric Extremal Dependence Index (SEDI) = ", formatC(object$SEDI, digits=4), "\n"))
cat(paste("Std. Err. for SEDI = ", formatC(object$SEDI.se, digits=4), "\n"))
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/summary.bin.bin.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
summary.cat.cat <- function(object, ...){
cat("\nThe forecasts are categorical, the observations are categorical.\n")
cat(paste("Percent Correct = ", formatC(object$pc, digits = 2), "\n"))
cat(paste("Heidke Skill Score = ", formatC(object$hss, digits = 3), "\n"))
cat(paste("Pierce Skill Score = ", formatC(object$pss, digits = 3), "\n"))
cat(paste("Gerrity Score = ", formatC(object$gs, digits = 3), "\n"))
cat("\n Statistics considering each category in turn. \n \n" )
cat(c("Threat Score ", formatC(object$ts, digits = 3), "\n"))
cat(c("Bias by cat. ", formatC(object$bias2, digits = 3), "\n"))
cat(c("Percent correct by cat. ", formatC(object$pc2, digits = 3), "\n") )
cat(c("Hit Rate (POD) by cat. ", formatC(object$h, digits = 3), "\n") )
cat(c("False Alarm Rate by cat. ", formatC(object$f, digits = 3), "\n") )
cat(c("False Alarm Ratio by cat.", formatC(object$false.alarm.ratio, digits = 3), "\n") )
#
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/summary.cat.cat.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
summary.cont.cont <- function(object, ...){
cat("\nThe forecasts are continuous, the observations are continous.\n")
if(object$baseline.tf){cat("Baseline data provided. \n")} else{
cat("Sample baseline calcluated from observations.\n")}
cat(paste("MAE = ", formatC(object$MAE, digits = 4), "\n"))
cat(paste("ME = ", formatC(object$ME, digits = 4), "\n"))
cat(paste("MSE = ", formatC(object$MSE, digits = 4), "\n"))
cat(paste("MSE - baseline = ", formatC(object$MSE.baseline, digits = 4), "\n"))
cat(paste("MSE - persistence = ", formatC(object$MSE.pers, digits = 4), "\n"))
cat(paste("SS - baseline = ", formatC(object$SS.baseline, digits = 4), "\n"))
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/summary.cont.cont.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
summary.norm.dist.cont <- function(object, ...){
cat("\nThe forecasts are a normal probability distribution. \n")
cat("The observations are continuous.\n\n")
cat(paste("Average crps score = ", formatC(object$CRPS, digits = 4), "\n"))
cat(paste("Average ignorance score = ", formatC(object$IGN, digits = 4), "\n"))
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/summary.norm.dist.cont.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
summary.prob.bin <- function(object, ...){
cat("\nThe forecasts are probabilistic, the observations are binary.\n")
if(object$baseline.tf){cat("Baseline data provided. \n")} else{
cat("Sample baseline calculated from observations.\n")}
cat(paste("Brier Score (BS) = ", formatC(object$bs, digits = 4), "\n"))
cat(paste("Brier Score - Baseline = ", formatC(object$bs.baseline, digits = 4), "\n"))
cat(paste("Skill Score = ", formatC(object$ss, digits = 4), "\n"))
cat(paste("Reliability = ", formatC(object$bs.reliability, digits = 4), "\n"))
cat(paste("Resolution = ", formatC(object$bs.resol, digits = 4), "\n"))
cat(paste("Uncertainty = ", formatC(object$bs.uncert, digits = 4), "\n"))
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/summary.prob.bin.R
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
table.stats<- function(obs, pred = NULL, fudge = 0.01, silent = FALSE) {
## internal function used in verify
## used with a binary forecast and a binary outcome.
if(is.null(pred) & length(obs) ==4 ) {
if(!silent){ print(" Assume data entered as c(n11, n01, n10, n00) Obs*Forecast") }
a <- as.numeric( obs[1] )
b <- as.numeric( obs[2] )
c <- as.numeric( obs[3] )
d <- as.numeric( obs[4] )
tab.out <- matrix(c(a,c,b,d), nrow = 2)
}
if(is.null(pred) & is.matrix(obs) & prod(dim(obs)) ==4 ) {
if(!silent) print(" Assume contingency table has observed values in columns, forecasts in rows")
obs <- as.numeric(obs)
a <- obs[1]
b <- obs[3]
c <- obs[2]
d <- obs[4]
tab.out <- matrix(c(a,c,b,d), nrow = 2)
}
if(!is.null(pred)& !is.null(obs)){
tab.out <- table(as.numeric(obs), as.numeric(pred))
a <- tryCatch(tab.out["1", "1"], error=function(e) 0)
b <- tryCatch(tab.out["0", "1"], error=function(e) 0)
c <- tryCatch(tab.out["1", "0"], error=function(e) 0)
d <- tryCatch(tab.out["0", "0"], error=function(e) 0)
# a <-tab.out["1","1"]
# b <-tab.out["0","1"]
# c <-tab.out["1","0"]
# d <-tab.out["0","0"]
} ## close else
###
n <- a + b + c + d
if(n == 0) n <- fudge
s <- (a+c)/n
TS <- a /(a+b+c + fudge)
POD<- H <- a /(a+c + fudge) ## hit rate
F <- b /(b+d + fudge) ## false alarm rate
TS.se <- sqrt((TS^2)*((1-H)/(a + fudge) + b*(1-F)/((a+b+c)^2 + fudge)))
SH2 <- H*(1-H)/(a+c + fudge)
SF2 <- F*(1-F)/(b+d + fudge)
POD.se <- sqrt(SH2)
F.se <- sqrt(SF2)
M <- c /(a+c + fudge) ## miss rate
FAR <- b/(a+b + fudge) ## false alarm ratio
FAR.se <- sqrt((FAR^4)*((1-H)/(a + fudge) + (1-F)/(b + fudge))*(a^2)/(b^2 + fudge))
HSS <- 2*(a*d - b*c)/ (1.0*(a+c)*(c+d) + 1.0*(a+b)*(b+d) + fudge)
SHSS2 <- SF2*(HSS^2)*(1/(H-F + fudge) + (1-s)*(1-2*s))^2 + SH2*(HSS^2)*(1/(H-F + fudge) - s*(1-2*s))^2
HSS.se = sqrt(SHSS2)
        PSS <- 1 - M - F ## Peirce skill score (Hanssen-Kuipers discriminant)
        PSS.se <- sqrt(SH2 + SF2)
        KSS <- (a*d - b*c)/((a+c)*(b + d) + fudge) ## Kuipers skill score, algebraically equivalent to PSS
PC <- (a+d)/(a+b+c+d + fudge)
PC.se <- sqrt(s*H*(1-H)/n + (1-s)*F*(1-F)/n)
if(a + c == 0) BIAS <- (a+b)/fudge
else BIAS <- (a + b)/(a + c)
if(b * c == 0) OR <- a*d/fudge ## odds ratio
else OR <- a * d / (b * c)
if(a * b + b * c == 0) ORSS <- (a*d - b*c)/ fudge
else ORSS <- (a*d - b*c)/ (a*d + b*c ) ## odds ratio skill score
HITSrandom <- 1.0* (a+c)*(a+b)/n
p <- (a+c)/n
if(a + b + c - HITSrandom == 0) ETS <- (a - HITSrandom)/fudge
else ETS <- (a-HITSrandom)/(a+b+c-HITSrandom)
if(2 - HSS == 0) ETS.se <- sqrt(4 * SHSS2/fudge)
else ETS.se <- sqrt(4*SHSS2/((2-HSS)^4))
if(b * c == 0) theta <- a * d / fudge
else theta <- (a*d)/(b*c)
log.theta <- log(a) + log(d) - log(b) - log(c)
if(a == 0) a.z <- fudge
else a.z <- a
if(b == 0) b.z <- fudge
else b.z <- b
if(c == 0) c.z <- fudge
else c.z <- c
if(d == 0) d.z <- fudge
else d.z <- d
if(1/a.z + 1/b.z + 1/c.z + 1/d.z == 0) n.h <- 1/fudge
else n.h <- 1/( 1/a.z + 1/b.z + 1/c.z + 1/d.z)
if(theta + 1 == 0) yules.q <- (theta - 1)/fudge
else yules.q <- (theta - 1)/(theta + 1)
if(n.h == 0) SLOR2 <- 1/fudge
else SLOR2 <- 1/n.h
LOR.se <- sqrt(SLOR2)
if(OR + 1 == 0) ORSS.se <- sqrt(SLOR2 * 4 * OR^2 / fudge)
else ORSS.se <- sqrt(SLOR2*4*OR^2/((OR+1)^4))
if(log(a/n) == 0) {
eds <- 2 * log((a + c)/n)/fudge - 1
seds <- (log((a+b)/n)+log((a+c)/n)) / fudge - 1
} else {
eds <- 2*log((a+c)/n)/log(a/n) - 1
seds <- (log((a+b)/n)+log((a+c)/n)) /log(a/n) - 1
}
eds.se <- 2*abs(log(p))/(H*(log(p) + log(H))^2)*sqrt(H*(1-H)/(p*n))
seds.se <- sqrt(H*(1-H)/(n*p)) *(-log(BIAS*p^2)/(H*log(H*p)^2))
if(log(F) + log(H) == 0) EDI <- (log(F) - log(H)) / fudge
else EDI <- (log(F) - log(H))/(log(F) + log(H))
EDI.se <- 2*abs(log(F) + H/(1-H)*log(H))/(H*(log(F) + log(H))^2)*sqrt(H*(1-H)/(p*n))
SEDI <- (log(F) - log(H) - log(1-F) + log(1-H))/(log(F) + log(H) + log(1-F) + log(1-H))
SEDI.se <- 2*abs(((1-H)*(1-F)+H*F)/((1-H)*(1-F))*log(F*(1-H)) + 2*H/(1-H)*log(H*(1-F)))/(H*(log(F*(1-H)) + log(H*(1-F)))^2)*sqrt(H*(1-H)/(p*n))
return(list(tab = tab.out, TS = TS, TS.se = TS.se,
POD = POD, POD.se = POD.se, M = M, F = F, F.se = F.se, FAR = FAR , FAR.se = FAR.se, HSS = HSS, HSS.se = HSS.se,
PSS = PSS, PSS.se = PSS.se, KSS = KSS,
PC = PC, PC.se = PC.se, BIAS = BIAS, ETS = ETS, ETS.se = ETS.se, theta = theta, log.theta = log.theta, LOR.se = LOR.se, n.h = n.h, orss = yules.q, orss.se = ORSS.se,
eds = eds, eds.se=eds.se, seds = seds, seds.se = seds.se,
EDI = EDI, EDI.se = EDI.se, SEDI = SEDI, SEDI.se = SEDI.se))
} # end of function.
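## Example (editor's illustration): the classic Finley tornado forecasts,
## entered as c(n11, n01, n10, n00) = c(hits, false alarms, misses,
## correct negatives).
# ts <- table.stats(c(28, 72, 23, 2680))
# ts$POD; ts$FAR; ts$ETS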
|
/scratch/gouwar.j/cran-all/cranData/verification/R/table.stats.R
|
### bootstrap function for 2 by 2 contingency tables.
table.stats.boot <- function(CT, R = 100, alpha = 0.05, fudge = 0.01){
OUT <- as.data.frame(matrix(NA, nrow = R, ncol = 4) )
names(OUT)<- c("pod", "far", "bias", "ets")
for(i in 1:R){
N <- sum(CT) ## number of cases
CT.prob <- CT/N
L <- prod(dim(CT)) ### length of vector
X <- sample(1:L, size = N, replace = TRUE, prob = as.numeric(CT.prob))
### beware of zero entries.
CT.sample <- matrix(tabulate(X, nbins = L),nrow = 2)
temp <- table.stats(CT.sample, silent = TRUE, fudge = fudge)
OUT$bias[i] <- temp$BIAS
OUT$pod[i] <- temp$POD
OUT$far[i] <- temp$FAR
OUT$ets[i] <- temp$ETS
}
unOUT <- c(unlist(OUT))
if(any(is.nan(unOUT)) || any(is.na(unOUT)) || any(!is.finite(unOUT))) {
wmsg <- paste("table.stats.boot: NaN, NA or non-finite numbers in one or more statistics.",
"Removing these values in calculating CIs.", sep="\n")
warning(wmsg)
}
up <- apply(OUT,2,quantile, 1-alpha/2, na.rm=TRUE)
dw <- apply(OUT,2,quantile, alpha/2, na.rm=TRUE)
return(rbind(up, dw))
}
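## Example (editor's sketch): bootstrap 90% intervals for POD, FAR, bias and
## ETS from a 2 x 2 table with forecasts in rows and observations in columns.
# CT <- matrix(c(28, 23, 72, 2680), nrow = 2)
# table.stats.boot(CT, R = 500, alpha = 0.1)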
|
/scratch/gouwar.j/cran-all/cranData/verification/R/table.stats.boot.R
|
value<- function(obs, pred = NULL, baseline = NULL,
cl = seq(0.05, 0.95, 0.05), plot = TRUE, all = FALSE,
thresholds = seq(0.05, 0.95, 0.05), ylim = c(-0.05,
1), xlim = c(0,1), ...){
####
# obs
# pred = NULL
# baseline = NULL
# cl = seq(0.05, 0.95, 0.05)
# plot = TRUE
# all = FALSE
# thresholds = seq(0.05, 0.95, 0.05), ylim = c(-0.05,
# 1), xlim = c(0,1
####
if(!is.null(pred) ){
id <- is.finite(obs) & is.finite(pred)
obs <- obs[id]
pred <- pred[id] }else{
obs<- obs[is.finite(obs)]
}
### 2 by 2 contingency table
if(is.null(pred) & length(obs) ==4 ){
print(" Assume data entered as c(n11, n01, n10, n00) Obs*Forecast")
n <- sum(obs)
a <- obs[1]
b <- obs[2]
c <- obs[3]
d <- obs[4]
F <- b/(b+d) ## CONDITIONAL FALSE ALARM RATE
H <- a/(a+c) ## CONDITIONAL HIT RATE
if(is.null(baseline)){s <- (a+c)/n; baseline.tf <- FALSE}else
{s<- baseline; baseline.tf <- TRUE}
cl <- sort(c(cl, s) )## always add s to list of cl to ensure peak is used.
V1<- (1-F) - s/(1-s)*(1-cl)/cl *(1-H)
V2 <- H - (1-s)/s*cl/(1-cl)*F
V <- numeric(length(cl) )
V[cl < s] <- V1[cl < s]
V[cl >= s] <- V2[cl >= s]
V<- matrix(V, ncol = 1)
Vmax <- H - F ## Peirce skill score
positive <- c(c/(c+d), a/(a+b) ) ## range of positive skill score
type <- "binary"
} else { ## prediction a vector of binary forecasts
#####################################################
#####################################################
if(all(unique(pred) %in% c(0,1))) { # predictions are a binary vector
if(is.null(baseline)){s <- mean(obs); baseline.tf <- FALSE}else
{s<- baseline; baseline.tf <- TRUE}
cl <- sort(c(cl, s) )## always add s to list of cl
F <- numeric()
H <- numeric()
Vmax <- numeric()
V <- matrix(nrow = length(cl), ncol = 1 )
A <- table(data.frame(obs = obs,pred = pred) )
a <- A[2,2]
b <- A[1,2]
c <- A[2,1]
d <- A[1,1]
n <- a + b + c + d
F[1] <- b/(b+d) ## FALSE ALARM RATE
H[1] <- a/(a+c) ## HIT RATE
V1<- (1-F[1]) - s/(1-s)*(1-cl)/cl *(1-H[1])
V2 <- H[1] - (1-s)/s*cl/(1-cl)*F[1]
VV <- numeric(length(cl) )
VV[cl < s] <- V1[cl < s]
VV[cl >= s]<- V2[cl >= s]
V[,1]<- VV
Vmax[1] <- H[1] - F[1] #
positive <- c(c/(c+d), a/(a+b) ) ## range of positive skill score
} else {## close binary vector, open probabilistic forecast
## check?
if(max(pred)>1 | min(pred)<0 ) {
## predictions should be a vector of probabilities
stop("Predictions outside [0,1] range. \n")
}
## make matrix with prediction for all thresholds
if(is.null(baseline)) {
s <- mean(obs)
baseline.tf <- FALSE
} else {
s<- baseline
baseline.tf <- TRUE
}
cl <- sort(c(cl, s) )## always add s to list of cl
NCOL <- length(thresholds)
PRED<- matrix(NA, nrow = length(pred), ncol = NCOL )
for(i in 1:NCOL) PRED[,i] <- pred > thresholds[i]
F <- numeric()
H <- numeric()
Vmax <- numeric()
V <- matrix(nrow = length(cl), ncol = ncol(PRED) )
n <- length(pred)
for(i in 1:ncol(PRED)){
## try inserted for when table is 2X2
A <- table(data.frame(obs, PRED[,i]) )
a <- try(A[2,2], silent = TRUE )
b <- try(A[1,2], silent = TRUE)
c <- try(A[2,1], silent = TRUE)
d <- try(A[1,1], silent = TRUE)
if(inherits(a, "try-error")) a <- NA
if(inherits(b, "try-error")) b <- NA
if(inherits(c, "try-error")) c <- NA
if(inherits(d, "try-error")) d <- NA
F[i] <- b/(b+d) ## FALSE ALARM RATE
H[i] <- a/(a+c) ## HIT RATE
V1<- (1-F[i]) - s/(1-s)*(1-cl)/cl *(1-H[i])
V2 <- H[i] - (1-s)/s*cl/(1-cl)*F[i]
VV <- numeric(length(cl) )
VV[cl < s] <- V1[cl < s]
VV[cl >= s]<- V2[cl >= s]
V[,i]<- VV
Vmax[i] <- H[i] - F[i] #
} #close for 1:ncol(PRED)
#V.ind <- V
#V <- apply(V, 1, max) ### outer envelope
} ## close probabilistic option.
} # Close second if else stmt.
if(plot){
if(!all) V <- apply(V, 1, max)
matplot(cl, V, type = "l", ylim = ylim, xlim = xlim, ... )
if(all) lines(cl,apply(V, 1, max), lwd = 2)
abline(h=0)
abline(v = s, lty = 2, lwd = 0.4)
} ## close if plot
(aa<- list(vmax = Vmax, V = V, F= F, H = H, cl = cl , s = s,
n = n) )
invisible(aa)
}## close function
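## Example (editor's sketch, simulated data): relative value of a
## probabilistic forecast across the full range of cost/loss ratios;
## all = TRUE also draws the curve for each probability threshold.
# set.seed(1)
# obs  <- rbinom(400, 1, 0.3)
# pred <- pnorm(rnorm(400, mean = obs))
# value(obs, pred, all = TRUE)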
|
/scratch/gouwar.j/cran-all/cranData/verification/R/value.r
|
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** 2004/1/7 11:29:42
#
# changes to include quantile verification by S. Bentzien 2013
#
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
verify <- function(obs, pred = NULL, p = NULL, #--(be,09.08.2013)
baseline = NULL, # sample.baseline = FALSE, ?
frcst.type = "prob", obs.type = "binary",
thresholds = seq(0,1,0.1), show = TRUE, bins = TRUE,
fudge = 0.01, ...) {
##### insert checks
if(min(diff(thresholds))<0) stop("Thresholds must be listed in ascending order")
if(length(obs) > 4 && !is.matrix(obs) ) { ## assume if length = 4, a cont. table is entered.
id <- is.finite(obs) & is.finite(pred)
obs <- obs[id]
pred <- pred[id]
}
if(frcst.type == "binary" && obs.type == "binary" && is.null(pred) ) {
A <- table.stats(obs, fudge = fudge)
class(A) <- c("verify", "bin.bin")
} else if(frcst.type == "binary" & obs.type == "binary") {
if(length(unique(obs))>2 | length(unique(pred))>2 ) {warning("Prediction or observation may not be binary \n")}
A <- table.stats(obs, pred, fudge = fudge)
class(A) <- c("verify", "bin.bin")
} else if(frcst.type == "prob" & obs.type == "binary") {
if(show){
cat("If baseline is not included, baseline values will be calculated from the sample obs. \n") }
A<- brier(obs, pred, baseline, thresholds, bins = bins )
class(A)<- c("verify", "prob.bin")
} else if(frcst.type == "quantile" & obs.type == "cont") { #--- (be, 09.08.2013)
if(is.null(p)) stop("verify: argument 'p' (quantile level) is required for quantile forecasts.")
A<- quantileScore(obs = obs, pred = pred, p = p, breaks = thresholds)
class(A)<- c("verify", "quantile")
} else if(frcst.type == "norm.dist" & obs.type == "cont") {
A <- crps(obs,pred)
class(A) <- c("verify", "norm.dist.cont")
} else if(frcst.type == "cont" & obs.type == "cont") {
A<- c()
if(is.null(baseline)){baseline <- mean(obs); A$baseline.tf <- FALSE} else {A$baseline.tf <- TRUE}
A$MAE <- mean(abs(pred - obs))
A$MSE <- mean( (pred - obs)^2 )
A$ME <- mean( (pred - obs) )
A$MSE.baseline <- mean( (mean(baseline) - obs)^2)
# MSE persistence is only valid if the data are presented in chronological order.
A$MSE.pers <- mean( (obs[-length(obs)]- obs[-1])^2)
A$SS.baseline <- 1 - (A$MSE/A$MSE.baseline)
class(A)<- c("verify", "cont.cont")
} else if(frcst.type == "cat" & obs.type == "cat") {
#### forecast summary can be listed as a contingency table.
if(is.matrix(obs) & is.null(pred)) {
print("Assuming data is summarized in a contingency table./n ")
print("Columns summarize observed values. Rows summarize predicted values /n" )
DAT <- obs
} else {
a <- sort(unique(c(obs, pred) ) )
obs.a <- c(a, obs)
pred.a <- c(a, pred)
DAT <- table(pred.a, obs.a)
diag(DAT)<- diag(DAT) - 1
}## close else
A <- multi.cont(DAT)
class(A) <- c("verify", "cat.cat")
} else cat("This combination of predictions \n and observations is not \n currently supported. \n")
## attach original data to be used in plot functions.
A$obs <- obs
A$pred <- pred
A$baseline <- baseline
return(A)
} # close function
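## Example (editor's sketch, simulated data): verify probabilistic forecasts
## of a binary event, then summarize the Brier score decomposition.
# set.seed(1)
# obs  <- rbinom(100, 1, 0.4)
# pred <- runif(100)
# A <- verify(obs, pred, frcst.type = "prob", obs.type = "binary")
# summary(A)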
|
/scratch/gouwar.j/cran-all/cranData/verification/R/verify.R
|
pop.convert<- function(){
# data(pop)
### script written by Beth Ebert to convert data into binary obs.
### to convert the pop text into binary observations ###
### Note: cat0 = rain <= 0.2 mm
### cat1 = 0.2 < rain <= 4.4 mm
### cat2 = 4.4 mm < rain
### Make observations into logical variables
d <- verification::pop
d$obs_norain <- d$obs <= 0.2
d$obs_light <- d$obs > 0.2 & d$obs <= 4.4 # light rain only
d$obs_heavy <- d$obs > 4.4 # heavy rain only
d$obs_rain <- d$obs_light | d$obs_heavy
### Rename probability variables and compute probabilities for all
### rain
d$p24_norain <- d$p24_cat0
d$p24_light <- d$p24_cat1
d$p24_heavy <- d$p24_cat2
d$p48_norain <- d$p48_cat0
d$p48_light <- d$p48_cat1
d$p48_heavy <- d$p48_cat2
d$p24_rain <- d$p24_light + d$p24_heavy
d$p48_rain <- d$p48_light + d$p48_heavy
# assign("d",d, envir = .GlobalEnv)
return(invisible(d) )
}
|
/scratch/gouwar.j/cran-all/cranData/verification/R/zzz.R
|
#' Amplification curves
#'
#' A data set containing raw fluorescence amplification data. Data is
#' obtained from Ruijter (2013), \doi{10.1016/j.ymeth.2012.08.011} but original
#' source is by Vermeulen (2009), \doi{10.1016/S1470-2045(09)70154-8}.
#'
#' The tidy version of the data is kept at the repository of the source of
#' `{vermeulen}` package. This function fetches such data and thus requires
#' internet connection. It takes a few seconds to run.
#'
#' @return A data frame with 24,576 amplification curves, 50 cycles each:
#'
#' \describe{
#' \item{`plate`}{Plate identifier. Because one plate was used per gene, the
#' name of the plate is the same as the values in `target`.}
#' \item{`well`}{Well identifier.}
#' \item{`dye`}{In all reactions the SYBR Green I master mix (Roche) was used,
#' so the value is always `"SYBR"`.}
#' \item{`cycle`}{PCR cycle.}
#' \item{`fluor`}{Raw fluorescence values.}
#' }
#'
#' @source
#' - \url{https://medischebiologie.nl/wp-content/uploads/2019/02/qpcrdatamethods.zip}
#' - \url{https://github.com/ramiromagno/vermeulen/blob/main/data-raw/amplification_curves.csv.gz}
#'
#' @noRd
amplification_curves <- function() {
url <- file.path(repo(), "main/data-raw/amplification_curves.csv.gz")
txt <- readLines(gzcon(url(url)))
df <- utils::read.csv(file = textConnection(txt), colClasses = c("factor", "factor", "factor", "integer", "double"))
# Fix the order of the levels in `well`
df$well <- factor(df$well, levels = wells())
df
}
|
/scratch/gouwar.j/cran-all/cranData/vermeulen/R/amplification_curves.R
|
#' Import the Biomarker data set
#'
#' This function retrieves the Biomarker data set, a data set containing raw
#' fluorescence amplification data: 24,576 amplification curves, of 50 cycles
#' each.
#'
#' @details
#' Data was gathered from Ruijter et al.
#' (2013), \doi{10.1016/j.ymeth.2012.08.011} but original source is by Vermeulen
#' et al. (2009), \doi{10.1016/S1470-2045(09)70154-8}.
#' The tidy version of the data is kept at the repository of the source of
#' `{vermeulen}` package. This function fetches such data and thus requires
#' internet connection. It takes a few seconds to run.
#'
#' The Biomarker data set comprises a set of 59 targets previously identified as
#' a 59-mRNA gene expression signature, that has been developed and validated for
#' improved outcome prediction of children with neuroblastoma. In short, 59
#' biomarkers and 5 reference genes were measured in 8 µl reactions in a
#' 384-well plate using the LightCycler480 SYBR Green Master (Roche) in a sample
#' maximization experiment design. The 59 genes were carefully selected as being
#' previously reported as prognostic genes in neuroblastoma in at least two
#' independent studies. Each plate contained 366 cDNA samples (n = 1) from
#' primary tumor biopsies, a 5-point 10-fold serial dilution series based on an
#' external oligonucleotide standard (n = 3, from 150,000 to 15 copies), and a
#' no template control (NTC, n = 3). Raw (baseline uncorrected) fluorescent data
#' were exported from the LightCycler480 instrument software.
#'
#' @return A data frame with 24,576 amplification curves, of 50 cycles each:
#'
#' \describe{
#' \item{`plate`}{Plate identifier. Because one plate was used per gene, the
#' name of the plate is the same as the values in `target`.}
#' \item{`well`}{Well identifier.}
#' \item{`dye`}{In all reactions the SYBR Green I master mix (Roche) was used,
#' so the value is always `"SYBR"`.}
#' \item{`target`}{Target identifier, in almost all cases the name of a gene.}
#' \item{`target_type`}{Target type: either target of interest (`"toi"`) or
#' reference target (`"ref"`).}
#' \item{`sample`}{Sample identifier.}
#' \item{`sample_type`}{Sample type.}
#' \item{`copies`}{Standard copy number.}
#' \item{`dilution`}{Dilution factor. Higher number means greater dilution.}
#' \item{`cycle`}{PCR cycle.}
#' \item{`fluor`}{Raw fluorescence values.}
#' }
#'
#' @source
#' - \url{https://medischebiologie.nl/wp-content/uploads/2019/02/qpcrdatamethods.zip}
#' - \url{https://github.com/ramiromagno/vermeulen/tree/main/data-raw}
#'
#' @examples
#' \donttest{
#' # Takes ~ 10-30 sec
#' head(get_biomarker_dataset())
#' }
#'
#' @export
get_biomarker_dataset <- function() {
amplification_curves <- amplification_curves()
samples <- samples()
reactions <- reactions()
targets <- targets()
df01 <- merge(x = amplification_curves, y = reactions, by = c("plate", "well", "dye"), all.x = TRUE)
df02 <- merge(x = df01, y = samples, by = "sample", all.x = TRUE)
df03 <- merge(x = df02, y = targets, by = "target", all.x = TRUE)
# Arrange rows and columns order.
cols_order <- c("plate", "well", "dye", "target", "target_type", "sample", "sample_type", "copies", "dilution", "cycle", "fluor")
ordering <- order(df03$plate, df03$well, df03$cycle)
ds_biomarker <- df03[ordering, cols_order]
# Remove row names.
rownames(ds_biomarker) <- NULL
ds_biomarker
}
|
/scratch/gouwar.j/cran-all/cranData/vermeulen/R/get_biomarker_dataset.R
|
#' Reactions
#'
#' A data set containing the metadata associated with each qPCR well and
#' respective reaction mix. Data is obtained from Ruijter (2013),
#' \doi{10.1016/j.ymeth.2012.08.011} but original source is by Vermeulen (2009),
#' \doi{10.1016/S1470-2045(09)70154-8}.
#'
#' The tidy version of the data is kept at the repository of the source of
#' `{vermeulen}` package. This function fetches such data and thus requires
#' internet connection.
#'
#' @return A data frame with 24,576 reactions and 5 variables:
#'
#' \describe{
#' \item{`plate`}{Plate identifier. Because one plate was used per gene, the
#' name of the plate is the same as the values in `target`.}
#' \item{`well`}{Well identifier.}
#' \item{`dye`}{In all reactions the SYBR Green I master mix (Roche) was used,
#' so the value is always `"SYBR"`.}
#' \item{`sample`}{Sample identifier.}
#' \item{`target`}{Target.}
#' }
#'
#' @source
#' - \url{https://medischebiologie.nl/wp-content/uploads/2019/02/qpcrdatamethods.zip}
#' - \url{https://github.com/ramiromagno/vermeulen/blob/main/data-raw/reactions.csv.gz}
#'
#' @noRd
reactions <- function() {
url <- file.path(repo(), "main/data-raw/reactions.csv.gz")
txt <- readLines(gzcon(url(url)))
df <- utils::read.csv(file = textConnection(txt), colClasses = c("factor", "factor", "factor", "character", "factor"))
# Fix the order of the levels in `well`
df$well <- factor(df$well, levels = wells())
df
}
|
/scratch/gouwar.j/cran-all/cranData/vermeulen/R/reactions.R
|
repo <- function() "https://raw.githubusercontent.com/ramiromagno/vermeulen"
|
/scratch/gouwar.j/cran-all/cranData/vermeulen/R/repo.R
|
#' Samples
#'
#' A data set containing the metadata associated with each sample. Data is
#' obtained from Ruijter (2013), \doi{10.1016/j.ymeth.2012.08.011} but original
#' source is by Vermeulen (2009), \doi{10.1016/S1470-2045(09)70154-8}.
#'
#' The tidy version of the data is kept at the repository of the source of
#' `{vermeulen}` package. This function fetches such data and thus requires
#' internet connection.
#'
#' @return A data frame with 372 samples and 4 variables:
#'
#' \describe{
#' \item{`sample`}{Sample identifier.}
#' \item{`sample_type`}{Sample type.}
#' \item{`copies`}{Standard copy number.}
#' \item{`dilution`}{Dilution factor. Higher number means greater dilution.}
#' }
#'
#' @source
#' - \url{https://medischebiologie.nl/wp-content/uploads/2019/02/qpcrdatamethods.zip}
#' - \url{https://github.com/ramiromagno/vermeulen/blob/main/data-raw/samples.csv.gz}
#'
#' @noRd
samples <- function() {
url <- file.path(repo(), "main/data-raw/samples.csv.gz")
txt <- readLines(gzcon(url(url)))
utils::read.csv(file = textConnection(txt), colClasses = c("factor", "factor", "integer", "double"))
}
|
/scratch/gouwar.j/cran-all/cranData/vermeulen/R/samples.R
|
#' Targets
#'
#' A data set containing the metadata associated with each target. Data is
#' obtained from Ruijter (2013), \doi{10.1016/j.ymeth.2012.08.011} but original
#' source is by Vermeulen (2009), \doi{10.1016/S1470-2045(09)70154-8}.
#'
#' The tidy version of the data is kept at the repository of the source of
#' `{vermeulen}` package. This function fetches such data and thus requires
#' internet connection.
#'
#' @return A data frame with 64 targets and two variables:
#'
#' \describe{
#' \item{`target`}{Target identifier, in almost all cases the name of a gene.}
#' \item{`target_type`}{Target type: either target of interest (`"toi"`) or
#' reference target (`"ref"`).}
#' }
#'
#' @source
#' - \url{https://medischebiologie.nl/wp-content/uploads/2019/02/qpcrdatamethods.zip}
#' - \url{https://github.com/ramiromagno/vermeulen/blob/main/data-raw/targets.csv.gz}
#'
#' @noRd
targets <- function() {
url <- file.path(repo(), "main/data-raw/targets.csv.gz")
txt <- readLines(gzcon(url(url)))
utils::read.csv(file = textConnection(txt), colClasses = c("factor", "factor"))
}
|
/scratch/gouwar.j/cran-all/cranData/vermeulen/R/targets.R
|
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/vermeulen/R/vermeulen-package.R
|
wells <- function(layout = "16x24") {
# If layout is a 384 well plate
# 16 rows: A..P
# 24 cols: 1--24
# The well identifiers are generated in row-major order.
if (identical(layout, "16x24")) {
as.character(matrix(
outer(LETTERS[1:16], 1:24, paste0),
byrow = TRUE,
nrow = 24
))
}
}
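## Example (editor's illustration): identifiers are ordered along plate rows,
## so the sequence starts A1, A2, ... and ends ..., P23, P24.
# head(wells())    # "A1" "A2" "A3" "A4" "A5" "A6"
# tail(wells(), 2) # "P23" "P24"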
|
/scratch/gouwar.j/cran-all/cranData/vermeulen/R/wells.R
|
# It is recommended that functions in a package are not memoised at build-time,
# but when the package is loaded. The simplest way to do this is within
# .onLoad().
# Here we perform memoisation for the set of helpers provided by this package.
.onLoad <- function(libname, pkgname) {
amplification_curves <<- memoise::memoise(amplification_curves)
samples <<- memoise::memoise(samples)
reactions <<- memoise::memoise(reactions)
targets <<- memoise::memoise(targets)
ds_biomarker <<- memoise::memoise(get_biomarker_dataset)
}
|
/scratch/gouwar.j/cran-all/cranData/vermeulen/R/zzz.R
|
#' R6 Class representing a configuration object
#'
#' @details
#' The special sublist `directories` is structured to contain three items for each
#' directory name:
#' - `versioned`: a T/F value specifying whether the directory is versioned
#' - `path`: the full path to the top level of that directory.
#' - `files`: A named list referencing file paths within that directory.
#'
#' If the directory is versioned, a version must be set in the `versions` sublist of the
#' config list. `versions` is itself a named list where each key corresponds to a
#' versioned folder in `directories` and the value gives the particular folder version
#' (for example, a timestamp) that corresponds to the particular run.
#'
#' @importFrom assertthat assert_that
#' @importFrom R6 R6Class
#' @importFrom utils str
#' @export
Config <- R6::R6Class(
"Config",
public = list(
#' @field config_list The list representation of the Config object
config_list = NULL,
#' @description Create a new Config object
#'
#' @param config_list either a list or a filepath to a YAML file containing that list
#' @param versions (default NULL) A named list containing versions for versioned
#' directories. If passed, used to define or update items in `config_list$versions`.
initialize = function(config_list, versions = NULL){
# If `config_list` is a character vector, assume it is a filepath and read
if(inherits(config_list, 'character')) config_list <- autoread(config_list)
# Check that the config list is a list
assertthat::assert_that(inherits(config_list, 'list'))
# If custom versions have been passed, add them to config_list$versions
if(length(versions) > 0){
assertthat::assert_that(inherits(versions, 'list'))
update_versions <- names(versions)
assertthat::assert_that(
!is.null(update_versions),
msg = 'If passed, `versions` must be a named list.'
)
# Make sure that config_list$versions exists and is a list
if(!inherits(config_list$versions, 'list')) config_list$versions <- list()
for(update_v in update_versions){
config_list$versions[[update_v]] <- versions[[update_v]]
}
}
self$config_list <- config_list
invisible(self)
},
#' @description Print the list representation of the Config object
print = function(){
utils::str(self$config_list)
invisible(self)
},
#' @description Get a subset of the `config_list`
#'
#' @details If no parameters are passed, returns the entire config_list
#'
#' @param ... Nested indices (character or numeric) down the config list
#'
#' @seealso [pull_from_list()]
#'
#' @return A subset of the list. If the item is NULL or missing, returns an error
get = function(...){
return(pull_from_list(self$config_list, ...))
},
#' @description Construct a directory path from the config object
#'
#' @details
#' Works differently for versioned and non-versioned directories. See the class
#' description for more information.
#'
#' @param dir_name Directory name
#' @param custom_version (character, default NULL) A custom version that will be
#' applied to this folder, rather than pulling from `config_list$versions[[dir]]`.
#' Only applies to versioned folders.
#' @param fail_if_does_not_exist (logical, default FALSE) should this method return an
#' error if the directory in question does not already exist?
#'
#' @return The full path to the directory
get_dir_path = function(
dir_name, custom_version = NULL, fail_if_does_not_exist = FALSE
){
dir_info <- self$get('directories', dir_name)
versioned <- pull_from_list(dir_info, 'versioned')
dir_base_path <- pull_from_list(dir_info, 'path')
# Check that the version is a boolean value
assertthat::assert_that(is.logical(versioned))
assertthat::assert_that(length(versioned) == 1)
# Get the directory path
if(versioned){
# If this directory is versioned, the full path is ({base path}/{version})
if(!is.null(custom_version)){
assertthat::assert_that(length(custom_version) == 1)
dir_version <- custom_version
} else {
dir_version <- self$get('versions', dir_name)
}
dir_path <- file.path(dir_base_path, dir_version)
} else {
# If this directory is NOT versioned, the full path == the base path
dir_path <- dir_base_path
}
# Optionally check if the directory exists
if(fail_if_does_not_exist) assertthat::assert_that(dir.exists(dir_path))
return(dir_path)
},
#' @description Construct a file path from the config object
#'
#' @details
#' Looks for the file path under:
#' `config_list$directories[[dir_name]]$files[[file_name]]`
#'
#' @param dir_name Directory name
#' @param file_name File name within that directory
#' @param custom_version (character, default NULL) A custom version that will be
#' applied to this folder, rather than pulling from `config_list$versions[[dir]]`.
#' Only applies to versioned folders.
#' @param fail_if_does_not_exist (logical, default FALSE) should this method return an
#' error if the directory in question does not already exist?
#'
#' @return The full path to the file
get_file_path = function(
dir_name, file_name, custom_version = NULL, fail_if_does_not_exist = FALSE
){
dir_path <- self$get_dir_path(
dir_name = dir_name,
custom_version = custom_version,
fail_if_does_not_exist = fail_if_does_not_exist
)
file_stub <- self$get('directories', dir_name, 'files', file_name)
file_path <- file.path(dir_path, file_stub)
if(fail_if_does_not_exist) assertthat::assert_that(file.exists(file_path))
return(file_path)
},
    #' @description Read a file based on the config
#'
#' @param dir_name Directory name
#' @param file_name File name within that directory
#' @param ... Optional file reading arguments to pass to [autoread()]
#' @param custom_version (character, default NULL) A custom version that will be
#' applied to this folder, rather than pulling from `config_list$versions[[dir]]`.
#' Only applies to versioned folders. If passed, this argument must always be
#' explicitly named.
#'
#' @return The object loaded by [autoread()]
read = function(dir_name, file_name, ..., custom_version = NULL){
# Get the file path
file_path <- self$get_file_path(
dir_name = dir_name,
file_name = file_name,
custom_version = custom_version,
fail_if_does_not_exist = TRUE
)
# Automatically read it based on the extension
return(autoread(file_path, ...))
},
    #' @description Write an object to file based on the config
#'
#' @param x Object to write
#' @param dir_name Directory name
#' @param file_name File name within that directory
#' @param ... Optional file writing arguments to pass to [autowrite()]
#' @param custom_version (character, default NULL) A custom version that will be
#' applied to this folder, rather than pulling from `config_list$versions[[dir]]`.
#' Only applies to versioned folders. If passed, this argument must always be
#' explicitly named.
#'
#' @return Invisibly passes TRUE if successful
write = function(x, dir_name, file_name, ..., custom_version = NULL){
# Get the file path to write to
file_path <- self$get_file_path(
dir_name = dir_name,
file_name = file_name,
custom_version = custom_version,
fail_if_does_not_exist = FALSE
)
# Automatically write to file based on extension
return(autowrite(x = x, file = file_path, ...))
},
    #' @description Convenience function: write the config list to a folder as 'config.yaml'
#'
#' @param dir_name Directory name
#' @param ... Optional file writing arguments to pass to [autowrite()]
#' @param custom_version (character, default NULL) A custom version that will be
#' applied to this folder, rather than pulling from `config_list$versions[[dir]]`.
#' Only applies to versioned folders. If passed, this argument must always be
#' explicitly named.
#'
#' @return Invisibly passes TRUE if successful
write_self = function(dir_name, ..., custom_version = NULL){
# Get the file path to write to
dir_path <- self$get_dir_path(
dir_name = dir_name,
custom_version = custom_version,
fail_if_does_not_exist = TRUE
)
file_path <- file.path(dir_path, 'config.yaml')
# Automatically write to file based on extension
return(autowrite(x = self$config_list, file = file_path, ...))
}
),
private = list()
)
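## Example (editor's sketch; the paths, versions, and file names below are
## hypothetical): one versioned directory holding a single CSV file.
# config <- Config$new(list(
#   directories = list(
#     raw_data = list(
#       versioned = TRUE,
#       path = '~/raw_data',
#       files = list(input = 'input.csv')
#     )
#   ),
#   versions = list(raw_data = '2024_01_01')
# ))
# config$get_dir_path('raw_data')  # "~/raw_data/2024_01_01"
# # config$read('raw_data', 'input')  # would autoread() the CSV, if it exists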
|
/scratch/gouwar.j/cran-all/cranData/versioning/R/Config.R
|
#' Get the list of file reading functions
#'
#' @description Constructs a list of all file-reading functions based on extension
#'
#' @return Named list where the names are file extensions, and the values are functions
#' that read a file. All functions have ... arguments that can be used to extend the
#' basic function.
#'
#' @seealso [autoread()] [get_file_writing_functions()]
#'
#' @importFrom data.table fread
#' @importFrom foreign read.dbf
#' @importFrom haven read_dta
#' @importFrom sf st_read
#' @importFrom terra rast
#' @importFrom yaml read_yaml
#' @export
get_file_reading_functions <- function(){
# Base list
funs <- list(
csv = function(file, ...) data.table::fread(file = file, ...),
dbf = function(file, ...) foreign::read.dbf(file = file, ...),
dta = function(file, ...) haven::read_dta(file = file, ...),
rda = function(file, ...) get(load(file = file, ...)),
rds = function(file, ...) readRDS(file = file, ...),
shp = function(file, ...) sf::st_read(dsn = file, ...),
tif = function(file, ...) terra::rast(x = file, ...),
txt = function(file, ...) readLines(con = file, ...),
yaml = function(file, ...) yaml::read_yaml(file = file, ...)
)
# Duplicates
funs$geojson <- funs$shp
funs$geotiff <- funs$tif
funs$rdata <- funs$rda
funs$yml <- funs$yaml
# Return
return(funs)
}
#' Auto-read from file
#'
#' @description Automatically read a file based on extension
#'
#' @param file Full path to be read
#' @param ... Other arguments to be passed to the particular loading function
#'
#' @seealso [get_file_reading_functions()] [autowrite()]
#'
#' @return The object loaded by the file
#'
#' @importFrom tools file_ext
#' @importFrom assertthat assert_that
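#'
#' @examples
#' # A minimal sketch: round-trip a small table through a temporary CSV
#' tmp <- tempfile(fileext = '.csv')
#' write.csv(data.frame(x = 1:3), tmp, row.names = FALSE)
#' autoread(tmp)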
#' @export
autoread <- function(file, ...){
# Check file extension and whether file exists
assertthat::assert_that(
file.exists(file),
msg = paste("Input file", file, "does not exist.")
)
assertthat::assert_that(
!dir.exists(file),
msg = paste("Input file", file, "must not be a directory.")
)
assertthat::assert_that(
length(file) == 1,
msg = "autoread takes one 'file' argument at a time."
)
# Check that extension is valid
ext <- tolower(tools::file_ext(file))
assertthat::assert_that(ext != "", msg = paste("File", file, "has no extension."))
# Get the file-reading function, failing if there's no match for the extension
file_reading_functions <- get_file_reading_functions()
read_fun <- pull_from_list(x = file_reading_functions, ext)
# Read the file and return
output <- read_fun(file = file, ...)
return(output)
}
|
/scratch/gouwar.j/cran-all/cranData/versioning/R/autoread.R
|
#' Get the list of file writing functions
#'
#' @description Constructs a list of all file-writing functions based on extension
#'
#' @return Named list where the names are file extensions, and the values are functions
#' that write an object to a file. All functions have ... arguments that can be used to
#' extend the basic function.
#'
#' @seealso [autoread()] [get_file_reading_functions()]
#'
#' @importFrom data.table fwrite
#' @importFrom sf st_write
#' @importFrom terra writeRaster
#' @importFrom yaml write_yaml
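#'
#' @examples
#' # A minimal sketch: list the extensions this lookup supports
#' writers <- get_file_writing_functions()
#' sort(names(writers))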
#' @export
get_file_writing_functions <- function(){
# Base list
funs <- list(
csv = function(x, file, ...) data.table::fwrite(x = x, file = file, ...),
rda = function(x, file, ...) save(x, file = file, ...),
rds = function(x, file, ...) saveRDS(object = x, file = file, ...),
shp = function(x, file, ...) sf::st_write(obj = x, dsn = file, ..., append = FALSE),
tif = function(x, file, ...){
terra::writeRaster(x = x, filename = file, ..., overwrite = TRUE)
},
txt = function(x, file, ...) writeLines(text = x, con = file, ...),
yaml = function(x, file, ...) yaml::write_yaml(x = x, file = file, ...)
)
# Duplicates
funs$geojson <- funs$shp
funs$geotiff <- funs$tif
funs$rdata <- funs$rda
funs$yml <- funs$yaml
# Return
return(funs)
}
#' Auto-write to file
#'
#' @description Automatically write an object to a file based on extension
#'
#' @param x Object to be saved
#' @param file Full path to save the object to
#' @param ... Other arguments to be passed to the particular saving function
#'
#' @seealso [get_file_writing_functions()] [autoread()]
#'
#' @return Invisibly passes TRUE if the file saves successfully
#'
#' @importFrom tools file_ext
#' @importFrom assertthat assert_that
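#'
#' @examples
#' # A minimal sketch: write an object to a temporary RDS file and read it back
#' tmp <- tempfile(fileext = '.rds')
#' autowrite(mtcars, tmp)
#' identical(autoread(tmp), mtcars)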
#' @export
autowrite <- function(x, file, ...){
# Check file extension and whether the save directory exists
save_dir <- dirname(file)
assertthat::assert_that(
dir.exists(save_dir),
msg = paste("Save directory", save_dir, "does not exist.")
)
assertthat::assert_that(
length(file) == 1,
msg = "autowrite takes one 'file' argument at a time."
)
# Check that extension is valid
ext <- tolower(tools::file_ext(file))
assertthat::assert_that(ext != "", msg = paste("Output file", file, "has no extension."))
    # Get the file-writing function, failing if there is no match for the extension
file_writing_functions <- get_file_writing_functions()
write_fun <- pull_from_list(x = file_writing_functions, ext)
# Save the file
write_fun(x = x, file = file, ...)
# If file saves successfully, invisibly return TRUE
invisible(TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/versioning/R/autowrite.R
|
#' Safely pull an item from a list
#'
#' @description Indexing function for a list
#'
#' @details Use the `...` arguments to index the list. Not passing any `...` arguments
#' will return the entire list. The indexing will fail if either of two conditions are
#' met:
#' 1. The index (which can be numeric or a key) does not exist in the list
#' 2. If the index exists but the value of the item is NULL, and `fail_if_null` is TRUE
#'
#' @param x List to pull items from
#' @param ... List indices to pull. Can be either an integer or (preferably) a character.
#' @param fail_if_null (logical, default TRUE). Throws an informative error if the
#' indexed value is NULL. If passed, this argument must always be explicitly named.
#'
#' @importFrom assertthat assert_that
#' @importFrom glue glue
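#'
#' @examples
#' # A minimal sketch with a hypothetical settings list (not from the package):
#' settings <- list(model = list(type = 'linear', alpha = 0.5))
#' pull_from_list(settings, 'model', 'type')   # "linear"
#' pull_from_list(settings, 'model', 'alpha')  # 0.5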
#' @export
pull_from_list <- function(x, ..., fail_if_null = TRUE){
indices <- list(...)
# Get original name of `x` for more informative error messages
list_name <- deparse(substitute(x))
# Iteratively subset list using indices
working_list <- x
for(index_i in seq_along(indices)){
index <- indices[[index_i]]
# Check that the subset will work
issue_prefix <- glue::glue("Issue with subset #{index_i} for list '{list_name}':")
assertthat::assert_that(
length(index) == 1,
msg = paste(issue_prefix, 'All list indices should have length 1.')
)
assertthat::assert_that(
is.character(index) | is.integer(index),
msg = paste(issue_prefix, "Indices should be either characters or integers.")
)
if(is.character(index)){
assertthat::assert_that(
index %in% names(working_list),
msg = glue::glue("{issue_prefix} '{index}' is not a name in the sub-list.")
)
}
if(is.integer(index)){
assertthat::assert_that(
                index <= length(working_list),
msg = glue::glue(
"{issue_prefix} numeric index {index} greater than the length of sub-list."
)
)
}
# Get the subset
working_list <- working_list[[index]]
# Optionally check that the subset is not NULL
if(fail_if_null){
assertthat::assert_that(
!is.null(working_list),
msg = paste(issue_prefix, 'Sub-list is NULL.')
)
}
}
return(working_list)
}
|
/scratch/gouwar.j/cran-all/cranData/versioning/R/utilities.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
# install.packages('versioning')
library(versioning)
## ----show-config--------------------------------------------------------------
example_config_fp <- system.file('extdata', 'example_config.yaml', package = 'versioning')
# Print the contents of the input YAML file
file_contents <- system(paste('cat', example_config_fp), intern = T)
message(paste(file_contents, collapse ='\n'))
## ----load-config--------------------------------------------------------------
# Load YAML file as a Config object
config <- versioning::Config$new(config_list = example_config_fp)
# Print the config file contents
print(config)
## ----retrieve-settings--------------------------------------------------------
# Retrieve some example settings from the config file
message("config$get('a') yields: ", config$get('a'))
message("config$get('b') yields: ", config$get('b'))
message("config$get('group_c', 'd') yields: ", config$get('group_c', 'd'))
# Update a setting
config$config_list$a <- 12345
message("config$get('a') has been updated and now yields: ", config$get('a'))
## ----get-directories----------------------------------------------------------
# Update the raw_data and prepared_data directories to temporary directories for this
# example
config$config_list$directories$raw_data$path <- tempdir(check = T)
config$config_list$directories$prepared_data$path <- tempdir(check = T)
# Create directories
message(
"Creating raw_data directory, which is not versioned: ",
config$get_dir_path('raw_data')
)
dir.create(config$get_dir_path('raw_data'), showWarnings = FALSE)
message(
"Creating prepared_data directory, which is versioned: ",
config$get_dir_path('prepared_data')
)
dir.create(config$get_dir_path('prepared_data'), showWarnings = FALSE)
# Copy the example input file to the raw data folder
file.copy(
from = system.file('extdata', 'example_input_file.csv', package = 'versioning'),
to = config$get_file_path(dir_name = 'raw_data', file_name = 'a')
)
## ----read-write-files---------------------------------------------------------
# Read that same table from file
df <- config$read(dir_name = 'raw_data', file_name = 'a')
# Write a prepared table and a summary to file
config$write(df, dir_name = 'prepared_data', file_name = 'prepared_table')
config$write(
paste("The prepared table has", nrow(df), "rows and", ncol(df), "columns."),
dir_name = 'prepared_data',
file_name = 'summary_text'
)
# Both files should now appear in the "prepared_data" directory
list.files(config$get_dir_path('prepared_data'))
## ----get-supported-extensions-------------------------------------------------
message(
"Supported file types for reading: ",
paste(sort(names(versioning::get_file_reading_functions())), collapse = ', ')
)
message(
"Supported file types for writing: ",
paste(sort(names(versioning::get_file_writing_functions())), collapse = ', ')
)
## ----write-self---------------------------------------------------------------
# Write the config object to the "prepared_data" directory
config$write_self(dir_name = 'prepared_data')
# The "prepared_data" directory should now include "config.yaml"
list.files(config$get_dir_path('prepared_data'))
## ----update-versions----------------------------------------------------------
# Load a new custom config where the "prepared_data" version has been updated to "v2"
custom_versions <- list(prepared_data = 'v2')
config_v2 <- versioning::Config$new(
config_list = example_config_fp,
versions = custom_versions
)
print(config_v2$get_dir_path('prepared_data')) # Should now end in ".../v2"
|
/scratch/gouwar.j/cran-all/cranData/versioning/inst/doc/versioning.R
|
---
title: "Using the versioning package"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using the versioning package}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
This vignette introduces the **versioning** package, which aims to simplify management of
project settings and file input/output by combining them in a single R object.
R data pipelines commonly require reading and writing data to versioned directories. Each
directory might correspond to one step of a multi-step process, where that version
corresponds to particular settings for that step and a chain of previous steps that each
have their own respective versions. This package describes a `Config` (configuration)
object that makes it easy to read and write versioned data, based on YAML configuration
files loaded and saved to each versioned folder.
To get started, install and load the **versioning** package.
```{r setup}
# install.packages('versioning')
library(versioning)
```
YAML is a natural format for storing project settings, since it can represent numeric,
character, and logical settings as well as hierarchically-nested settings. We will use the
'example_config.yaml' file that comes with the **versioning** package for this example. The
following code block prints the contents of the YAML file to screen:
```{r show-config}
example_config_fp <- system.file('extdata', 'example_config.yaml', package = 'versioning')
# Print the contents of the input YAML file
file_contents <- system(paste('cat', example_config_fp), intern = T)
message(paste(file_contents, collapse ='\n'))
```
We can load this YAML file by creating a new `Config` object. The only required argument
when creating a Config object is `config_list`, which is either a nested R list of
settings or (in our case) a filepath to a YAML file containing those settings.
The Config object stores all those settings internally in the `config$config_list`
attribute. The full list of settings can always be viewed using `print(config)` or
`str(config$config_list)`.
```{r load-config}
# Load YAML file as a Config object
config <- versioning::Config$new(config_list = example_config_fp)
# Print the config file contents
print(config)
```
You can always access the list of settings directly by subsetting `config$config_list`
like a normal list, but the `Config$get()` method is sometimes preferable. For example, if
you want to retrieve the setting listed under "a", `config$get('a')` is equivalent to
`config$config_list[['a']]`, but will throw an error if the setting "a" does not exist.
You can also use the `config$get()` method for nested settings, as shown below:
```{r retrieve-settings}
# Retrieve some example settings from the config file
message("config$get('a') yields: ", config$get('a'))
message("config$get('b') yields: ", config$get('b'))
message("config$get('group_c', 'd') yields: ", config$get('group_c', 'd'))
# Update a setting
config$config_list$a <- 12345
message("config$get('a') has been updated and now yields: ", config$get('a'))
```
There are two special sub-lists of the `config_list`, titled `directories` and `versions`,
that can be handy for versioned R workflows with multiple steps. Each item in `directories`
is structured with the following information:
1. Name of the sublist: how the directory is accessed from the config (in our example, "raw_data" or "prepared_data")
2. `versioned` (logical): Does the directory have versioned sub-directories?
3. `path` (character): Path to the directory
4. `files` (list): Named list of files within the directory
In the example below, we'll show a very simple workflow where data is originally placed in
a "raw_data" directory, which is not versioned, and then some summaries are written to a
"prepared_data" directory, which is versioned. This mimics some data science workflows
where differences between data preparation methods and model results need to be tracked
over time. For this example, we will use temporary directories for both:
```{r get-directories}
# Update the raw_data and prepared_data directories to temporary directories for this
# example
config$config_list$directories$raw_data$path <- tempdir(check = T)
config$config_list$directories$prepared_data$path <- tempdir(check = T)
# Create directories
message(
"Creating raw_data directory, which is not versioned: ",
config$get_dir_path('raw_data')
)
dir.create(config$get_dir_path('raw_data'), showWarnings = FALSE)
message(
"Creating prepared_data directory, which is versioned: ",
config$get_dir_path('prepared_data')
)
dir.create(config$get_dir_path('prepared_data'), showWarnings = FALSE)
# Copy the example input file to the raw data folder
file.copy(
from = system.file('extdata', 'example_input_file.csv', package = 'versioning'),
to = config$get_file_path(dir_name = 'raw_data', file_name = 'a')
)
```
As seen above, we can use the `config$get_dir_path()` to access directory paths and
`config$get_file_path()` to access files within a directory. Note also that
the path for the "prepared_data" folder ends with "v1": this is because
`config$config_list$versions$prepared_data` is currently set to "v1". In a future run of this
workflow, we could change the folder version by updating this setting.
We can also use the `config$read()` and `config$write()` functions to read and write files
within these directories.
```{r read-write-files}
# Read that same table from file
df <- config$read(dir_name = 'raw_data', file_name = 'a')
# Write a prepared table and a summary to file
config$write(df, dir_name = 'prepared_data', file_name = 'prepared_table')
config$write(
paste("The prepared table has", nrow(df), "rows and", ncol(df), "columns."),
dir_name = 'prepared_data',
file_name = 'summary_text'
)
# Both files should now appear in the "prepared_data" directory
list.files(config$get_dir_path('prepared_data'))
```
These use the `autoread()` and `autowrite()` functions behind the scenes, and support any
file extensions listed in `get_file_reading_functions()`/`get_file_writing_functions()`.
```{r get-supported-extensions}
message(
"Supported file types for reading: ",
paste(sort(names(versioning::get_file_reading_functions())), collapse = ', ')
)
message(
"Supported file types for writing: ",
paste(sort(names(versioning::get_file_writing_functions())), collapse = ', ')
)
```
There is also a helper function, `config$write_self()`, that will write the current config
to a specified directory as a `config.yaml` file. For example, the following code block
writes the current config to the versioned "prepared_data" directory:
```{r write-self}
# Write the config object to the "prepared_data" directory
config$write_self(dir_name = 'prepared_data')
# The "prepared_data" directory should now include "config.yaml"
list.files(config$get_dir_path('prepared_data'))
```
While you can always update settings, versions, and file paths by changing the input YAML
file, it is sometimes more convenient to update versions in code or through command line
arguments passed to a script. In these cases, you can specify the `versions` argument when
creating a new Config object. This argument will set or overwrite the particular versions
listed, while keeping other versions unchanged. For example, the following code block
loads the config, but changes (only) the "prepared_data" version to "v2".
```{r update-versions}
# Load a new custom config where the "prepared_data" version has been updated to "v2"
custom_versions <- list(prepared_data = 'v2')
config_v2 <- versioning::Config$new(
config_list = example_config_fp,
versions = custom_versions
)
print(config_v2$get_dir_path('prepared_data')) # Should now end in ".../v2"
```
For more information about using this package, see the documentation on the `Config`
object: `help(Config, package = 'versioning')`.
|
/scratch/gouwar.j/cran-all/cranData/versioning/inst/doc/versioning.Rmd
|
---
title: "Using the versioning package"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using the versioning package}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
This vignette introduces the **versioning** package, which aims to simplify management of
project settings and file input/output by combining them in a single R object.
R data pipelines commonly require reading and writing data to versioned directories. Each
directory might correspond to one step of a multi-step process, where that version
corresponds to particular settings for that step and a chain of previous steps that each
have their own respective versions. This package describes a `Config` (configuration)
object that makes it easy to read and write versioned data, based on YAML configuration
files loaded and saved to each versioned folder.
To get started, install and load the **versioning** package.
```{r setup}
# install.packages('versioning')
library(versioning)
```
YAML is a natural format for storing project settings, since it can represent numeric,
character, and logical settings as well as hierarchically-nested settings. We will use the
'example_config.yaml' file that comes with the **versioning** package for this example. The
following code block prints the contents of the YAML file to screen:
```{r show-config}
example_config_fp <- system.file('extdata', 'example_config.yaml', package = 'versioning')
# Print the contents of the input YAML file
file_contents <- system(paste('cat', example_config_fp), intern = T)
message(paste(file_contents, collapse ='\n'))
```
We can load this YAML file by creating a new `Config` object. The only required argument
when creating a Config object is `config_list`, which is either a nested R list of
settings or (in our case) a filepath to a YAML file containing those settings.
The Config object stores all those settings internally in the `config$config_list`
attribute. The full list of settings can always be viewed using `print(config)` or
`str(config$config_list)`.
```{r load-config}
# Load YAML file as a Config object
config <- versioning::Config$new(config_list = example_config_fp)
# Print the config file contents
print(config)
```
You can always access the list of settings directly by subsetting `config$config_list`
like a normal list, but the `Config$get()` method is sometimes preferable. For example, if
you want to retrieve the setting listed under "a", `config$get('a')` is equivalent to
`config$config_list[['a']]`, but will throw an error if the setting "a" does not exist.
You can also use the `config$get()` method for nested settings, as shown below:
```{r retrieve-settings}
# Retrieve some example settings from the config file
message("config$get('a') yields: ", config$get('a'))
message("config$get('b') yields: ", config$get('b'))
message("config$get('group_c', 'd') yields: ", config$get('group_c', 'd'))
# Update a setting
config$config_list$a <- 12345
message("config$get('a') has been updated and now yields: ", config$get('a'))
```
There are two special sub-lists of the `config_list`, titled `directories` and `versions`,
that can be handy for versioned R workflows with multiple steps. Each item in `directories`
is structured with the following information:
1. Name of the sublist: how the directory is accessed from the config (in our example, "raw_data" or "prepared_data")
2. `versioned` (logical): Does the directory have versioned sub-directories?
3. `path` (character): Path to the directory
4. `files` (list): Named list of files within the directory
In the example below, we'll show a very simple workflow where data is originally placed in
a "raw_data" directory, which is not versioned, and then some summaries are written to a
"prepared_data" directory, which is versioned. This mimics some data science workflows
where differences between data preparation methods and model results need to be tracked
over time. For this example, we will use temporary directories for both:
```{r get-directories}
# Update the raw_data and prepared_data directories to temporary directories for this
# example
config$config_list$directories$raw_data$path <- tempdir(check = T)
config$config_list$directories$prepared_data$path <- tempdir(check = T)
# Create directories
message(
"Creating raw_data directory, which is not versioned: ",
config$get_dir_path('raw_data')
)
dir.create(config$get_dir_path('raw_data'), showWarnings = FALSE)
message(
"Creating prepared_data directory, which is versioned: ",
config$get_dir_path('prepared_data')
)
dir.create(config$get_dir_path('prepared_data'), showWarnings = FALSE)
# Copy the example input file to the raw data folder
file.copy(
from = system.file('extdata', 'example_input_file.csv', package = 'versioning'),
to = config$get_file_path(dir_name = 'raw_data', file_name = 'a')
)
```
As seen above, we can use the `config$get_dir_path()` to access directory paths and
`config$get_file_path()` to access files within a directory. Note also that
the path for the "prepared_data" folder ends with "v1": this is because
`config$config_list$versions$prepared_data` is currently set to "v1". In a future run of this
workflow, we could change the folder version by updating this setting.
We can also use the `config$read()` and `config$write()` functions to read and write files
within these directories.
```{r read-write-files}
# Read that same table from file
df <- config$read(dir_name = 'raw_data', file_name = 'a')
# Write a prepared table and a summary to file
config$write(df, dir_name = 'prepared_data', file_name = 'prepared_table')
config$write(
paste("The prepared table has", nrow(df), "rows and", ncol(df), "columns."),
dir_name = 'prepared_data',
file_name = 'summary_text'
)
# Both files should now appear in the "prepared_data" directory
list.files(config$get_dir_path('prepared_data'))
```
These use the `autoread()` and `autowrite()` functions behind the scenes, and support any
file extensions listed in `get_file_reading_functions()`/`get_file_writing_functions()`.
```{r get-supported-extensions}
message(
"Supported file types for reading: ",
paste(sort(names(versioning::get_file_reading_functions())), collapse = ', ')
)
message(
"Supported file types for writing: ",
paste(sort(names(versioning::get_file_writing_functions())), collapse = ', ')
)
```
There is also a helper function, `config$write_self()`, that will write the current config
to a specified directory as a `config.yaml` file. For example, the following code block
writes the current config to the versioned "prepared_data" directory:
```{r write-self}
# Write the config object to the "prepared_data" directory
config$write_self(dir_name = 'prepared_data')
# The "prepared_data" directory should now include "config.yaml"
list.files(config$get_dir_path('prepared_data'))
```
While you can always update settings, versions, and file paths by changing the input YAML
file, it is sometimes more convenient to update versions in code or through command line
arguments passed to a script. In these cases, you can specify the `versions` argument when
creating a new Config object. This argument will set or overwrite the particular versions
listed, while keeping other versions unchanged. For example, the following code block
loads the config, but changes (only) the "prepared_data" version to "v2".
```{r update-versions}
# Load a new custom config where the "prepared_data" version has been updated to "v2"
custom_versions <- list(prepared_data = 'v2')
config_v2 <- versioning::Config$new(
config_list = example_config_fp,
versions = custom_versions
)
print(config_v2$get_dir_path('prepared_data')) # Should now end in ".../v2"
```
For more information about using this package, see the documentation on the `Config`
object: `help(Config, package = 'versioning')`.
|
/scratch/gouwar.j/cran-all/cranData/versioning/vignettes/versioning.Rmd
|
#' available.versions
#'
#' @description List all of the past versions of the named packages ever
#' uploaded to CRAN (and therefore in the CRAN source archives), their
#' publication dates and whether they can be installed from MRAN via
#' \code{\link{install.versions}} or \code{\link{install.dates}}.
#'
#' @param pkgs character vector of the names of packages for which to query
#' available versions
#'
#' @return a list of dataframes, each giving the versions and publication dates
#' for the corresponding elements of \code{pkgs} as well as whether they can be
#' installed from MRAN
#'
#' @export
#' @name available.versions
#' @examples
#'
#' \dontrun{
#'
#' # available versions of checkpoint
#' available.versions('checkpoint')
#'
#' # available versions of checkpoint and devtools
#' available.versions(c('checkpoint', 'devtools'))
#'
#' }
#'
available.versions <- function (pkgs) {
# vectorise by recursion
if (length(pkgs) > 1) {
ans <- lapply(pkgs,
available.versions)
# remove a level of listing
ans <- lapply(ans, '[[', 1)
names(ans) <- pkgs
return (ans)
}
# get the current version
current_df <- current.version(pkgs)
# see if the package has been archived
# get most recent MRAN image URL, Archive directory
archive_url <- sprintf('%s/src/contrib/Archive',
latest.MRAN())
# check for the package
archived <- pkg.in.archive(archive_url, pkgs)
# if it is archived, get the previous versions
if (archived) {
# get most recent MRAN image URL for the package archive
# (inside the Archive directory)
pkg_archive_url <- sprintf('%s/src/contrib/Archive/%s',
latest.MRAN(),
pkgs)
# scrape the versions therein
previous_df <- scrape.index.versions(pkg_archive_url,
pkgs)
} else {
# otherwise, make it a blank row
previous_df <- current_df[0, ]
}
# append previous versions to the current version
df <- rbind(current_df,
previous_df)
# add whether they were posted since the start of MRAN
df$available <- as.Date(df$date) >= as.Date('2014-09-17')
# also find the most recent version before the start of MRAN
if (!all(df$available)) {
first_available <- min(which(as.Date(df$date) <= as.Date('2014-09-17')))
df$available[first_available] <- TRUE
}
# wrap into a list
ans <- list()
ans[[pkgs]] <- df
return(ans)
}
|
/scratch/gouwar.j/cran-all/cranData/versions/R/available.versions.R
|
#' install.dates
#'
#' @description Download and install the latest versions of packages hosted on
#' CRAN as of a specific date from the MRAN server.
#'
#' @param pkgs character vector of the names of packages that should be
#' downloaded and installed
#'
#' @param dates character or Date vector of the dates for which to install the
#' latest versions of \code{pkgs}. If a character vector, it must be in the format
#' 'yyyy-mm-dd', e.g. '2014-09-17'. If this has the same length as \code{pkgs}
#' versions will correspond to those packages. If this has length one
#' the same version will be used for all packages. If it has any other
#' length an error will be thrown. Dates before 2014-09-17 will cause an error
#' as MRAN does not archive before that date.
#'
#' @param lib character vector giving the library directories where to
#' install the packages. Recycled as needed. If missing, defaults to the
#' first element of \code{\link{.libPaths}()}.
#'
#' @param \dots other arguments to be passed to \code{\link{install.packages}}.
#' The arguments \code{repos} and \code{contriburl} (at least) will
#' be ignored as the function uses the MRAN server to retrieve package versions.
#'
#' @export
#' @name install.dates
#' @examples
#'
#' \dontrun{
#'
#' # install yesterday's version of checkpoint
#' install.dates('checkpoint', Sys.Date() - 1)
#'
#' # install yesterday's versions of checkpoint and devtools
#' install.dates(c('checkpoint', 'devtools'), Sys.Date() - 1)
#'
#' # install yesterday's version of checkpoint and the day before's devtools
#' install.dates(c('checkpoint', 'devtools'), Sys.Date() - 1:2)
#'
#' }
install.dates <- function (pkgs,
dates,
lib,
...) {
# number of packages to install
n_pkgs <- length(pkgs)
if (!inherits(dates, c('character', 'Date'))) {
stop ('dates must be a vector of class character or Date')
}
if (length(dates) == 1) {
dates <- rep(dates, n_pkgs)
}
if (length(dates) != n_pkgs) {
    stop ('dates must have either length one, or the same length as pkgs')
}
# coerce dates to character
dates <- as.character(dates)
# check none of the dates are before the first MRAN date
if (any(as.Date(dates) < as.Date('2014-09-17'))) {
stop (sprintf('cannot install packages before 2014-09-17 as this is the
earliest date archived on MRAN.
Found date: %s',
min(as.Date(dates))))
}
# loop through packages installing them
for (i in 1:n_pkgs) {
# get package and date
pkg <- pkgs[i]
date <- dates[i]
# define repository
repos <- paste0('https://MRAN.revolutionanalytics.com/snapshot/', date)
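    # e.g. the date '2015-01-30' yields the snapshot repository
    # 'https://MRAN.revolutionanalytics.com/snapshot/2015-01-30'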
install.packages(pkgs = pkg,
lib = lib,
repos = repos,
...)
}
}
|
/scratch/gouwar.j/cran-all/cranData/versions/R/install.dates.R
|
#' install.versions
#'
#' @description Download and install named versions of packages hosted on
#' CRAN from the MRAN server.
#'
#' @param pkgs character vector of the names of packages that should be
#' downloaded and installed
#' @param versions character vector of the versions of packages to be
#' downloaded and installed. If this has the same length as \code{pkgs}
#' versions will correspond to those packages. If this has length one
#' the same version will be used for all packages. If it has any other
#' length an error will be thrown.
#'
#' @param lib character vector giving the library directories where to
#' install the packages. Recycled as needed. If missing, defaults to the
#' first element of \code{\link{.libPaths}()}.
#'
#' @param \dots other arguments to be passed to \code{\link{install.packages}}.
#' The arguments \code{repos} and \code{contriburl} (at least) will
#' be ignored as the function uses the MRAN server to retrieve package versions.
#'
#' @export
#' @name install.versions
#'
#' @examples
#'
#'\dontrun{
#'
#' # install an earlier version of checkpoint
#' install.versions('checkpoint', '0.3.3')
#'
#' # install earlier versions of checkpoint and devtools
#' install.versions(c('checkpoint', 'devtools'), c('0.3.3', '1.6.1'))
#'
#'}
install.versions <- function (pkgs,
versions,
lib,
...) {
# number of packages to install
n_pkgs <- length(pkgs)
if (!inherits(versions, 'character')) {
stop ('versions must be a character vector')
}
if (length(versions) == 1) {
versions <- rep(versions, n_pkgs)
}
if (length(versions) != n_pkgs) {
    stop ('versions must have either length one, or the same length as pkgs')
}
# get date corresponding to version for this package
date <- version2date(pkgs = pkgs,
versions = versions)
# install by date
install.dates(pkgs = pkgs,
dates = date,
lib = lib,
...)
}
|
/scratch/gouwar.j/cran-all/cranData/versions/R/install.versions.R
|
#' installed.versions
#'
#' @description List the installed versions of packages in a library directory
#'
#' @param pkgs character vector of the names of packages for which to query the
#' installed versions
#'
#' @param lib character vector of length one giving the library directory
#' containing the packages to query. If missing, defaults to the
#' first element of \code{\link{.libPaths}()}.
#'
#' @return a named character vector of version numbers corresponding to
#' \code{pkgs}, with names giving the package names. If a package could not be
#' found in \code{lib}, an NA will be returned.
#'
#' @export
#' @name installed.versions
#'
#' @examples
#'
#' # the versions of versions
#' installed.versions('versions')
#'
#' # apply to multiple packages
#' installed.versions(c('stats', 'versions'))
#'
#' # add a package that doesn't exist or isn't installed
#' # (returns NA for that one)
#' installed.versions(c('stats', 'versions', 'notapackage'))
#'
installed.versions <- function (pkgs,
lib) {
if (missing(lib) || is.null(lib)) {
lib <- .libPaths()[1L]
if (length(.libPaths()) > 1L)
message(sprintf(ngettext(length(pkgs), "Checking package in %s\n(as %s is unspecified)",
"Checking packages in %s\n(as %s is unspecified)"),
sQuote(lib), sQuote("lib")), domain = NA)
}
# vectorise by recursion
if (length(pkgs) > 1) {
ans <- sapply(pkgs,
installed.versions,
lib)
return (ans)
}
# get path to package description
desc_path <- sprintf('%s/%s/DESCRIPTION',
lib,
pkgs)
# check it exists
if (!file.exists(desc_path)) {
return (NA)
} else {
lines <- readLines(desc_path)
vers_line <- lines[grep('^Version: *', lines)]
vers <- gsub('Version: ', '', vers_line)
return (vers)
}
}
|
/scratch/gouwar.j/cran-all/cranData/versions/R/installed.versions.R
|
# utility functions for versions package
# read lines from a url more quickly and with a clearer error
# message on failure than readLines
url.lines <- function (url) {
# create a tempfile
file <- tempfile()
# stick the html in there
suppressWarnings(success <- download.file(url, file,
quiet = TRUE))
# if it failed, issue a nice error
if (success != 0) {
stop(sprintf('URL does not appear to exist: %s',
url))
}
# get the lines, delete the file and return
lines <- readLines(file, encoding = "UTF-8")
file.remove(file)
return (lines)
}
# return the url for the latest date on an index page of dates
# (by default the MRAN snapshot index page)
latest.MRAN <- function(url = 'https://mran.revolutionanalytics.com/snapshot') {
# get all full dates
dates <- scrape.index.dates(url)
# get latest
max <- as.character(max(as.Date(dates)))
# form the url and return
ans <- paste(url, max, sep = '/')
return (ans)
}
# list the dates in an index page that lists dates as subdirectories
scrape.index.dates <- function (url) {
# get the lines
lines <- url.lines(url)
# keep only lines starting with hrefs
lines <- lines[grep('^<a href="*', lines)]
# take the sequence after the href that is between the quotes
lines <- gsub('.*href=\"([^\"]+)\".*', '\\1', lines)
# remove the trailing slash
lines <- gsub('/$', '', lines)
# remove any lines that aren't 10 characters long (a date only)
lines <- lines[nchar(lines) == 10]
# return list in reverse
return (rev(lines))
}
# list the package versions in an index page
scrape.index.versions <- function (url, pkgs) {
# get the lines
lines <- url.lines(url)
# keep only lines starting with hrefs
lines <- lines[grep('^<a href="*', lines)]
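  # each remaining line resembles (assumed Apache-style index format):
  # <a href="pkg_1.2.tar.gz">pkg_1.2.tar.gz</a>  17-Sep-2014 10:00  100K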
# take the sequence after the href that is between the quotes
versions <- gsub('.*href=\"([^\"]+)\".*', '\\1', lines)
# remove the leading package name
versions <- gsub(sprintf('^%s_', pkgs),
'', versions)
# remove the trailing tarball extension
  versions <- gsub('\\.tar\\.gz$', '', versions)
# match the sequence in number-letter-number format
dates <- gsub('.* ([0-9]+-[a-zA-Z]+-[0-9]+) .*', '\\1', lines)
# convert dates to standard format
dates <- as.Date(dates, format = '%d-%b-%Y')
# get them in date order
o <- order(dates, decreasing = TRUE)
# create dataframe, reversing both
df <- data.frame(version = versions[o],
date = as.character(dates[o]),
stringsAsFactors = FALSE)
return (df)
}
# given the url to an archive ('.../src/contrib/Archive'), a package name
# and version, see if the package is present and return a scalar logical
pkg.in.archive <- function (url, pkg) {
# get the lines
lines <- url.lines(url)
# keep only lines starting with hrefs
lines <- lines[grep('^<a href="*', lines)]
# take the sequence after the href that is between the quotes
items <- gsub('.*href=\"([^\"]+)\".*', '\\1', lines)
# expected directory name
dir <- paste0(pkg, '/')
# search for the expected package directory
archived <- dir %in% items
return (archived)
}
# given packages name and required versions,
# return a date when it was live on CRAN
version2date <- function (pkgs, versions) {
# vectorise by recursion
if (length(pkgs) > 1) {
ans <- mapply(version2date,
pkgs,
versions)
return (ans)
}
# get available versions for the package
df <- available.versions(pkgs)[[1]]
# error if the version is not recognised
if (!(versions %in% df$version)) {
stop (sprintf('%s does not appear to be a valid version of %s.
Use available.versions("%s") to get valid versions\n\n',
versions,
pkgs,
pkgs))
}
# find the row corresponding to the version
idx <- match(versions, df$version)
# error if the version is recognised, but not available on MRAN
if (!df$available[idx]) {
stop (sprintf("%s is a valid version of %s, but was published before
2014-09-17 and can therefore not be downloaded from MRAN.
Try using devtools::install_version to install the package
from its source in the CRAN archives\n\n",
versions,
pkgs))
}
# get a middling date
# append today's date (note idx is one off now)
dates <- c(Sys.Date(),
as.Date(df$date))
# get the mean of the publication date and subsequent publication date
# (or today) as the target date for version installation
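  # e.g. a version published 2015-01-01 whose successor appeared 2015-03-01
  # maps to their midpoint, 2015-01-30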
date <- as.character(mean(dates[idx + 0:1]))
# return this
return (date)
}
# get current version of package
current.version <- function (pkg) {
# get all current contributed packages in latest MRAN
current_url <- sprintf('%s/src/contrib',
latest.MRAN())
# get the lines
lines <- url.lines(current_url)
# keep only lines starting with hrefs
lines <- lines[grep('^<a href="*', lines)]
# take the sequence after the href that is between the quotes
tarballs <- gsub('.*href=\"([^\"]+)\".*', '\\1', lines)
# match the sequence in number-letter-number format
dates <- gsub('.* ([0-9]+-[a-zA-Z]+-[0-9]+) .*', '\\1', lines)
# convert dates to standard format
dates <- as.Date(dates, format = '%d-%b-%Y')
# get the ones matching the package
  idx <- grep(sprintf('^%s_.*\\.tar\\.gz$', pkg),
tarballs)
if (length(idx) == 1) {
# if this provided exactly one match, it's the current package
# so scrape the version and get the date
versions <- tarballs[idx]
# remove the leading package name
versions <- gsub(sprintf('^%s_', pkg),
'', versions)
# remove the trailing tarball extension
    versions <- gsub('\\.tar\\.gz$', '', versions)
dates <- dates[idx]
} else {
# otherwise warn and return NAs
warning (sprintf('The current version and publication date of %s could not
be detected',
pkg))
versions <- dates <- NA
}
  # create a one-row dataframe of the version and its date
df <- data.frame(version = versions,
date = as.character(dates),
stringsAsFactors = FALSE)
return (df)
}
|
/scratch/gouwar.j/cran-all/cranData/versions/R/utilities.R
|
#' @title versions: Query and Install Specific Versions of Packages on CRAN
#'
#' @name versions-package
#' @description Installs specified versions of R packages
#' hosted on CRAN and provides functions to list available versions and the
#' versions of currently installed packages. These tools can be used to help
#' make R projects and packages more reproducible.
#' \code{versions} fits in the narrow gap between the devtools
#' \code{install_version} function and the \code{checkpoint} package.
#'
#' \code{devtools::install_version} installs a stated package version from
#' source files stored on the CRAN archives. However CRAN does not store
#' binary versions of packages so Windows users need to have RTools installed
#' and Windows and OSX users get longer installation times.
#'
#' \code{checkpoint} uses the Revolution Analytics MRAN server to
#' install packages (from source or binary) as they were available on
#' a given date. It also provides a helpful interface to detect the packages
#' in use in a directory and install all of those packages for a given date.
#' \code{checkpoint} doesn't provide \code{install.packages}-like functionality
#' however, and that's what \code{versions} aims to do, by querying MRAN.
#'
#' As MRAN only goes back to 2014-09-17, \code{versions} can't install packages
#' from before this date.
#'
#' The available functions are:
#' \itemize{
#' \item \code{\link{available.versions}}
#' \item \code{\link{install.versions}}
#' \item \code{\link{install.dates}}
#' \item \code{\link{installed.versions}}
#' }
#'
#' @docType package
#'
#' @importFrom utils install.packages download.file
#'
#' @examples
#'
#' \dontrun{
#'
#' # list the available versions of checkpoint
#' available.versions('checkpoint')
#'
#' # install a specific version
#' install.versions('checkpoint', '0.3.9')
#'
#' # check the installed version
#' installed.versions('versions')
#'
#' # install checkpoint as of a specific date
#' install.dates('checkpoint', '2014-12-25')
#'
#' }
#'
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/versions/R/versions-package.R
|
#' Find the latest version code
#'
#' @description `ver_latest()` returns the latest of the version codes.
#'
#' @inheritParams ver_order
#'
#' @return A character vector with one element.
#'
#' @examples
#' ver_latest(c("1.3-0", "1.4-1", "0.0.0.9000", "1.4-0a", "1.4-0"))
#'
#' @seealso \code{\link{ver_oldest}()}
#' @export
ver_latest <- function(x) {
if (length(x) == 0)
stop("cannot select latest version when given empty input")
version_components <- strsplit(x, "[^\\w]+", perl = TRUE)
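  # e.g. "1.4-0a" splits into c("1", "4", "0a"); the Reduce() below narrows
  # the candidate indices one component position at a time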
ret <- Reduce(
function(indices, position) {
# Early return if the solution was found
if (length(indices) == 1) return(indices)
# Extract n-th components of version code
# Empty components are extracted as NA (e.g. 4-th component of 1.5.0)
nth_components <- vapply(
version_components[indices], `[`, FUN.VALUE = character(1), position
)
if (all(is.na(nth_components))) return(indices)
# Omit NA components (it is guaranteed that there is at least one non-NA component)
indices <- indices[which(!is.na(nth_components))]
nth_components <- vapply(
version_components[indices], `[`, FUN.VALUE = character(1), position
)
# Split components into initial numeric value and additional code
# e.g. "0b8a" => 0 + "b8a"
initial_number_match <- regexpr("^\\d+", nth_components)
initial_number <- as.numeric(
substring(nth_components, 1, attr(
initial_number_match, "match.length", exact = TRUE)
)
)
# Compare numeric values first
if (any(is.na(initial_number))) {
# Codes are not empty, so initial_number is NA only when starting with a letter
# That means NA initial numbers are later than anything else
indices <- indices[which(is.na(initial_number))]
} else {
# Else simply choose maximum of the numbers
indices <- indices[which(initial_number == max(initial_number))]
}
# Compare codes only if necessary
if (length(indices) > 1) {
nth_components <- vapply(
version_components[indices], `[`, FUN.VALUE = character(1), position
)
initial_number_match <- regexpr("^\\d+", nth_components)
additional_code <- substring(nth_components, 1 + attr(
initial_number_match, "match.length", exact = TRUE)
)
if (any(additional_code != "")) {
indices <- indices[which(additional_code == max(additional_code))]
}
}
indices
},
seq_len(max(lengths(version_components))),
init = seq_along(version_components)
)
x[ret[1]]
}
|
/scratch/gouwar.j/cran-all/cranData/versionsort/R/latest.R
|
#' Find the oldest version code
#'
#' @description `ver_oldest()` returns the oldest of the version codes.
#'
#' @inheritParams ver_order
#'
#' @return A character vector with one element.
#'
#' @examples
#' ver_oldest(c("1.3-0", "1.4-1", "0.0.0.9000", "1.4-0a", "1.4-0"))
#'
#' @seealso \code{\link{ver_latest}()}
#' @export
ver_oldest <- function(x) {
if (length(x) == 0)
stop("cannot select oldest version when given empty input")
version_components <- strsplit(x, "[^\\w]+", perl = TRUE)
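  # mirrors ver_latest(), except that missing components sort first, so e.g.
  # "1.5" is considered older than "1.5.1"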
ret <- Reduce(
function(indices, position) {
# Early return if the solution was found
if (length(indices) == 1) return(indices)
# Extract n-th components of version code
# Empty components are extracted as NA (e.g. 4-th component of 1.5.0)
nth_components <- vapply(
version_components[indices], `[`, FUN.VALUE = character(1), position
)
# NA components take precedence over non-NA components
if (any(is.na(nth_components))) {
return(indices[which(is.na(nth_components))])
}
# Split components into initial numeric value and additional code
# e.g. "0b8a" => 0 + "b8a"
initial_number_match <- regexpr("^\\d+", nth_components)
initial_number <- as.numeric(
substring(nth_components, 1, attr(
initial_number_match, "match.length", exact = TRUE)
)
)
# Compare numeric values first
if (any(!is.na(initial_number))) {
# Codes are not empty, so initial_number is NA only when starting with a letter
# That means NA initial numbers are later than anything else
indices <- indices[which(initial_number == min(initial_number, na.rm = TRUE))]
nth_components <- vapply(
version_components[indices], `[`, FUN.VALUE = character(1), position
)
initial_number_match <- regexpr("^\\d+", nth_components)
}
# Compare codes only if necessary
if (length(indices) > 1) {
additional_code <- substring(nth_components, 1 + attr(
initial_number_match, "match.length", exact = TRUE)
)
if (any(additional_code != "")) {
indices <- indices[which(additional_code == min(additional_code))]
}
}
indices
},
seq_len(max(lengths(version_components))),
init = seq_along(version_components)
)
x[ret[1]]
}
|
/scratch/gouwar.j/cran-all/cranData/versionsort/R/oldest.R
|
#' Order version codes
#'
#' @description `ver_order()` returns the permutation that rearranges a vector
#' of version codes alphanumerically.
#'
#' @param x [`character()`]\cr
#' A vector of version codes that start with a number (as in "1.5.0"), i.e.
#' without initial "v" (as in "v1.5.0").
#'
#' @return An integer vector (for details see \code{\link[base]{order}}).
#'
#' @examples
#' version_codes <- c("1.5-0", "1.4-1", "0.0.0.9000", "1.4-0a", "1.4-0")
#' ver_order(version_codes)
#' # The line below is the same as ver_sort(version_codes)
#' version_codes[ver_order(version_codes)]
#'
#' @seealso \code{\link{ver_sort}()}
#' @export
ver_order <- function(x) {
# Early return if input is empty
if (length(x) == 0) return(integer())
version_components <- strsplit(x, "[^\\w]+", perl = TRUE)
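  # components are compared from the last position to the first (one
  # radix-style pass per component), e.g. "1.4-1" splits into c("1", "4", "1")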
Reduce(
function(list_order, position) {
# Extract n-th components of version code
# Empty components are extracted as NA (e.g. 4-th component of 1.5.0)
nth_components <- vapply(
version_components[list_order], `[`, FUN.VALUE = character(1), position
)
# Separate NA components
empty_components_indices <- which(is.na(nth_components))
other_components_indices <- which(!is.na(nth_components))
nth_components <- nth_components[other_components_indices]
# Split components into initial numeric value and additional code
# e.g. "0b8a" => 0 + "b8a"
initial_number_match <- regexpr("^\\d+", nth_components)
initial_number <- as.numeric(
substring(nth_components, 1, attr(
initial_number_match, "match.length", exact = TRUE)
)
)
additional_code <- substring(nth_components, 1 + attr(
initial_number_match, "match.length", exact = TRUE)
)
# Order code alphabetically, then number numerically
# Append empty components' indices at the beginning
code_order <- order(additional_code, na.last = TRUE)
list_order[
c(empty_components_indices,
other_components_indices[code_order][order(initial_number[code_order], na.last = TRUE)])
]
},
rev(seq_len(max(lengths(version_components)))),
init = seq_along(version_components)
)
}
#' Sort version codes
#'
#' @description `ver_sort()` returns a sorted vector of version codes, where
#' sorting is done alphanumerically.
#'
#' @inheritParams ver_order
#'
#' @return A character vector containing the same elements as input, but
#' reordered.
#'
#' @examples
#' ver_sort(c("1.5-0", "1.4-1", "0.0.0.9000", "1.4-0a", "1.4-0"))
#'
#' @seealso \code{\link{ver_order}()}
#' @export
ver_sort <- function(x) {
x[ver_order(x)]
}
|
/scratch/gouwar.j/cran-all/cranData/versionsort/R/versionsort.R
|
#' Compare two data frames
#'
#' @description
#' `compare()` creates a representation of the differences between two tables,
#' along with a shallow copy of the tables. This output is used
#' as the `comparison` argument when exploring the differences further with other
#' versus functions e.g. `slice_*()` and `weave_*()`.
#'
#' @param table_a A data frame
#' @param table_b A data frame
#' @param by <[`tidy-select`][versus_tidy_select]>. Selection of columns to use when matching rows between
#' \code{table_a} and \code{table_b}. Both data frames must be unique on \code{by}.
#' @param allow_both_NA Logical. If \code{TRUE} a missing value in both data frames is
#' considered as equal
#' @param coerce Logical. If \code{FALSE} and columns from the input tables have
#' differing classes, the function throws an error.
#'
#' @return
#' \describe{
#' \item{\code{compare()}}{A list of data frames having the following elements:
#' \describe{
#' \item{tables}{
#' A data frame with one row per input table showing the number of rows
#' and columns in each.
#' }
#' \item{by}{
#' A data frame with one row per \code{by} column showing the class
#' of the column in each of the input tables.
#' }
#' \item{intersection}{
#' A data frame with one row per column common to \code{table_a} and
#' \code{table_b} and columns "n_diffs" showing the number of values which
#' are different between the two tables, "class_a"/"class_b" the class of the
#' column in each table, and "value_diffs" a (nested) data frame showing
#' the the values in each table which are unequal and the \code{by} columns
#' }
#' \item{unmatched_cols}{
#' A data frame with one row per column which is in one input table but
#' not the other and columns "table": which table the column appears in,
#' "column": the name of the column, and "class": the class of the
#' column.
#' }
#' \item{unmatched_rows}{
#' A data frame which, for each row present in one input table but not
#' the other, contains the column "table" showing which table the row appears
#' in and the \code{by} columns for that row.
#' }
#' }
#' }
#' }
#' @examples
#' compare(example_df_a, example_df_b, by = car)
#'
#' @section data.table inputs:
#' If the input is a data.table, you may want `compare()` to make a deep copy instead
#' of a shallow copy so that future changes to the table don't affect the comparison.
#' To achieve this, you can set `options(versus.copy_data_table = TRUE)`.
#' @rdname compare
#' @export
compare <- function(table_a, table_b, by, allow_both_NA = TRUE, coerce = TRUE) {
check_required(by)
by <- enquo(by)
table_chr <- names(enquos(table_a, table_b, .named = TRUE))
validate_tables(table_a, table_b, coerce = coerce)
by_names <- get_by_names(table_a, table_b, by = by)
table_summ <- tibble(
table = c("table_a", "table_b"),
expr = table_chr,
nrow = c(nrow(table_a), nrow(table_b)),
ncol = c(ncol(table_a), ncol(table_b))
)
tbl_contents <- get_contents(table_a, table_b, by = by_names)
matches <- withCallingHandlers(
locate_matches(table_a, table_b, by = by_names),
vctrs_error_matches_relationship_one_to_one =
rethrow_match_relationship(table_a, table_b, by = by_names),
vctrs_error_ptype2 =
rethrow_incompatible_by_vars(table_a, table_b, by = by_names)
)
unmatched_rows <- get_unmatched_rows(
table_a,
table_b,
by = by_names,
matches = matches
)
tbl_contents$compare$diff_rows <- tbl_contents$compare$column %>%
lapply(get_diff_rows,
table_a = table_a,
table_b = table_b,
matches = matches,
allow_both_NA = allow_both_NA
)
tbl_contents$compare <- tbl_contents$compare %>%
mutate(n_diffs = map_int(diff_rows, nrow), .after = column)
out <- list(
tables = table_summ,
by = tbl_contents$by,
intersection = tbl_contents$compare,
unmatched_cols = tbl_contents$unmatched_cols,
unmatched_rows = unmatched_rows,
input = store_tables(table_a, table_b)
)
structure(out, class = "vs_comparison")
}
# Methods -----------
#' @export
print.vs_comparison <- function(x, ...) {
local({ # need local() for Rmd
class(x) <- "list"
print(x[setdiff(names(x), "input")])
})
invisible(x)
}
#' @export
summary.vs_comparison <- function(object, ...) {
out_vec <- c(
value_diffs = sum(object$intersection$n_diffs) > 0,
unmatched_cols = nrow(object$unmatched_cols) > 0,
unmatched_rows = nrow(object$unmatched_rows) > 0,
class_diffs = object$input$value %>%
lapply(fsubset, j = object$intersection$column) %>%
lapply(lapply, class) %>%
unname() %>%
pmap_lgl(Negate(identical)) %>%
any()
)
enframe(out_vec, name = "difference", value = "found")
}
# Helpers ---------
locate_matches <- function(table_a, table_b, by) {
matches <- vec_locate_matches(
fsubset(table_a, j = by),
fsubset(table_b, j = by),
relationship = "one-to-one",
remaining = NA_integer_
)
split_matches(matches)
}
split_matches <- function(matches) {
# split matches into
# common: rows in both tables
# a: rows only in table_a
# b: rows only in table_b
which_a <- whichNA(matches$haystack)
which_b <- whichNA(matches$needles)
unmatched <- c(which_a, which_b)
if (is_empty(unmatched)) {
common <- matches
} else {
common <- fsubset(matches, -unmatched, check = TRUE)
}
common <- common %>%
frename(c("a", "b")) %>%
as_tibble()
list(
common = common,
a = fsubset(matches, which_a, "needles")[[1]],
b = fsubset(matches, which_b, "haystack")[[1]]
)
}
get_unmatched_rows <- function(table_a, table_b, by, matches) {
unmatched <- list(
a = fsubset(table_a, matches$a, by),
b = fsubset(table_b, matches$b, by)
)
unmatched %>%
bind_rows(.id = "table") %>%
mutate(row = with(matches, c(a, b))) %>%
as_tibble()
}
converge <- function(table_a, table_b, by, matches) {
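  # lay the matched rows side by side: the `by` columns once, then each shared
  # column from table_a suffixed "_a" and from table_b suffixed "_b"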
common_cols <- setdiff(intersect(names(table_a), names(table_b)), by)
by_a <- fsubset(table_a, matches$common$a, by)
common_a <- fsubset(table_a, matches$common$a, common_cols)
common_b <- fsubset(table_b, matches$common$b, common_cols)
add_vars(
by_a,
frename(common_a, \(nm) paste0(nm, "_a")),
frename(common_b, \(nm) paste0(nm, "_b"))
)
}
join_split <- function(table_a, table_b, by) {
matches <- locate_matches(table_a, table_b, by)
intersection <- converge(table_a, table_b, by, matches)
unmatched_rows <- get_unmatched_rows(table_a, table_b, by, matches)
list(intersection = intersection, unmatched_rows = unmatched_rows)
}
get_contents <- function(table_a, table_b, by) {
tbl_contents <- join_split(contents(table_a), contents(table_b), by = "column")
out <- list()
out$by <- tbl_contents$intersection %>%
filter(column %in% by)
out$compare <- tbl_contents$intersection %>%
filter(!column %in% by)
out$unmatched_cols <- tbl_contents$unmatched_rows %>%
select(-row)
out
}
store_tables <- function(table_a, table_b) {
env <- new_environment()
env$value <- list(a = table_a, b = table_b)
dt_copy <- getOption("versus.copy_data_table", default = FALSE)
if (dt_copy) {
env$value <- env$value %>%
map_if(\(x) inherits(x, "data.table"), compose(as_tibble, copy))
}
lockEnvironment(env, bindings = TRUE)
env
}
# Error handling -------------
rethrow_match_relationship <- function(table_a, table_b, by) {
call <- caller_env()
function(e) {
tbl <- if_else(e$which == "haystack", "table_a", "table_b")
top_msg <- "`by` variables must uniquely identify rows"
if (tbl == "table_a") {
tbl_row <- fsubset(table_b, e$i, by)
row_num <- vec_locate_matches(tbl_row, fsubset(table_a, j = by))$haystack
} else {
tbl_row <- fsubset(table_a, e$i, by)
row_num <- vec_locate_matches(tbl_row, fsubset(table_b, j = by))$haystack
}
n_rows <- length(row_num)
info <- c(i = "`{tbl}` has {n_rows} rows with the same `by` values as row {row_num[1]}")
cli_abort(c(top_msg, info, itemize_row(tbl_row)), call = call)
}
}
validate_tables <- function(table_a, table_b, coerce, call = caller_env()) {
assert_data_frame(table_a, call = call)
assert_data_frame(table_b, call = call)
assert_unique_names(table_a, call = call)
assert_unique_names(table_b, call = call)
if (!coerce) {
assert_same_class(table_a, table_b, call = call)
}
}
assert_unique_names <- function(table, call = caller_env()) {
arg_name <- deparse(substitute(table))
withCallingHandlers(
vec_as_names(names(table), repair = "check_unique"),
error = function(e) {
message <- c(glue("Problem with `{arg_name}`:"), cnd_message(e))
abort(message, call = call)
}
)
}
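# Sketch (not run): duplicated names fail with the offending argument named
# up front, e.g.
#   df <- data.frame(x = 1, x = 2, check.names = FALSE)
#   assert_unique_names(df)
#   #> Error: Problem with `df`:
#   #> Names must be unique.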
assert_data_frame <- function(table, call = caller_env()) {
arg_name <- deparse(substitute(table))
if (is.data.frame(table)) {
return(invisible())
}
message <- c(
"`{arg_name}` must be a data frame",
i = "class({arg_name}): {.cls {class(table)}}"
)
cli_abort(message, call = call)
}
assert_same_class <- function(table_a, table_b, call = caller_env()) {
common_cols <- intersect(names(table_a), names(table_b))
for (col in common_cols) {
a <- table_a[[col]][0]
b <- table_b[[col]][0]
if (identical(a, b)) {
next
}
message <- c(
"`coerce = FALSE` but some column classes do not match",
i = "table_a: {col} {.cls {class(a)}}",
i = "table_b: {col} {.cls {class(b)}}"
)
cli_abort(message, call = call)
}
}
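# Sketch (not run): comparing zero-length prototypes (`[0]`) checks class
# and attributes without touching the data:
#   a <- data.frame(x = 1L)
#   b <- data.frame(x = "1")
#   assert_same_class(a, b)
#   #> Error: `coerce = FALSE` but some column classes do not match
#   #> i table_a: x <integer>
#   #> i table_b: x <character>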
rethrow_incompatible_by_vars <- function(table_a, table_b, by) {
call <- caller_env()
function(e) {
compatible <- is_ptype_compatible(
fsubset(table_a, j = by),
fsubset(table_b, j = by)
)
bad_column <- by[which.max(!compatible)]
class_a <- class(table_a[[bad_column]])
class_b <- class(table_b[[bad_column]])
message <- c(
"`by` columns must be compatible",
"`table_a${bad_column}` {.cls {class_a}}",
"`table_b${bad_column}` {.cls {class_b}}"
)
cli_abort(message, call = call)
}
}
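# Usage sketch (not run), mirroring rethrow_match_relationship() above;
# `is_ptype_compatible()` is assumed to be a package helper returning one
# logical per `by` column, and the condition class is an assumption:
#   tryCatch(
#     locate_matches(table_a, table_b, by),
#     vctrs_error_incompatible_type =
#       rethrow_incompatible_by_vars(table_a, table_b, by)
#   )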
/scratch/gouwar.j/cran-all/cranData/versus/R/compare.R
#' Modified version of \code{datasets::mtcars} - version a
#'
#' A version of mtcars with some values altered and some rows/columns removed.
#' It is not intended for analysis; it exists only to demonstrate the
#' comparison of two slightly different data frames. Because some values were
#' altered at random, they do not necessarily reflect the true original
#' values. The variables are as follows:
#'
#' @format A data frame with 9 rows and 9 variables:
#' \describe{
#' \item{car}{The rowname in the corresponding \code{datasets::mtcars} row}
#' \item{mpg}{Miles/(US) gallon}
#' \item{cyl}{Number of cylinders}
#' \item{disp}{Displacement (cu.in.)}
#' \item{hp}{Gross horsepower}
#' \item{drat}{Rear axle ratio}
#' \item{wt}{Weight (1000 lbs)}
#' \item{vs}{Engine (0 = V-shaped, 1 = straight)}
#' \item{am}{Transmission (0 = automatic, 1 = manual)}
#' }
#' @source
#' Sourced from the CRAN datasets package, with modified values. Originally from
#' Henderson and Velleman (1981), Building multiple regression models interactively. \emph{Biometrics}, \strong{37}, 391–411.
"example_df_a"
#' Modified version of \code{datasets::mtcars} - version b
#'
#' A version of mtcars with some values altered and some rows/columns removed.
#' It is not intended for analysis; it exists only to demonstrate the
#' comparison of two slightly different data frames. Because some values were
#' altered at random, they do not necessarily reflect the true original
#' values. The variables are as follows:
#'
#' @format A data frame with 9 rows and 9 variables:
#' \describe{
#' \item{car}{The rowname in the corresponding \code{datasets::mtcars} row}
#' \item{wt}{Weight (1000 lbs)}
#' \item{mpg}{Miles/(US) gallon}
#' \item{hp}{Gross horsepower}
#' \item{cyl}{Number of cylinders}
#' \item{disp}{Displacement (cu.in.)}
#' \item{carb}{Number of carburetors}
#' \item{drat}{Rear axle ratio}
#' \item{vs}{Engine (0 = V-shaped, 1 = straight)}
#' }
#' @source
#' Sourced from the CRAN datasets package, with modified values. Originally from
#' Henderson and Velleman (1981), Building multiple regression models interactively. \emph{Biometrics}, \strong{37}, 391–411.
"example_df_b"
/scratch/gouwar.j/cran-all/cranData/versus/R/data.R