#' @export prunikus
#'
prunikus <- function(x, y){
# intersection of the line through (x[1], y[1]), (x[2], y[2]) with the
# line through (x[3], y[3]), (x[4], y[4]), in closed determinant form
a1 = x[1]*y[2] - x[2]*y[1]
a2 = x[3] - x[4]
a3 = x[1] - x[2]
a4 = x[3]*y[4] - x[4]*y[3]
hp = a1*a2 - a3*a4
a5 = y[3] - y[4]
a6 = y[1] - y[2]
dp = a3*a5 - a6*a2
px = hp/dp
#=========
hd = a1*a5 - a6*a4
dd = a3*a5 - a6*a2
py = hd/dd
return(c(px, py))
}
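# Illustrative check (not part of the package source): the line through
# (0, 0) and (2, 2) meets the line through (0, 2) and (2, 0) at (1, 1).
# prunikus(x = c(0, 2, 0, 2), y = c(0, 2, 2, 0))  # c(1, 1)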
|
/scratch/gouwar.j/cran-all/cranData/vfcp/R/prunikus.R
|
#' @export trimeze
#'
trimeze <- function(C1, C23){
as123 = data.frame(s1 = 1 - C1, s2 = 1 - C23)
ac123 = data.frame(c1 = C1, c2 = C23)
# nas = length(as123$s2)
# if (as123$s2[nas] > ac123$c2[1]) ac123$c2[1] = as123$s2[nas]
inx = max(which(as123$s1 <= min(ac123$c1))) # x1, x2 and y1, y2 for tlc
xnx = max(which(ac123$c1 <= max(as123$s1))) # x1, x2 and y1, y2 for brc
iny = max(which((ac123$c2) >= max(as123$s2))) # x4, x3 and y4, y3 for tlc
xny = min(which((as123$s2) >= min(ac123$c2))) # x3, x4 and y3, y4 for brc
#== top left corner =====================
ixt = c(inx, inx - 1, iny+1, iny)
xt = c(as123$s1[ixt[1:2]], ac123$c1[ixt[3:4]])
yt = c(as123$s2[ixt[1:2]], ac123$c2[ixt[3:4]])
tlc = prunikus(xt, yt)
# print(tlc)
#== bottom right corner ========================
iyt = c(xnx, xnx + 1, xny-1, xny)
xb = c(ac123$c1[iyt[1:2]], as123$s1[iyt[3:4]])
yb = c(ac123$c2[iyt[1:2]], as123$s2[iyt[3:4]])
brc = prunikus(xb, yb)
# print(brc)
gss = which(as123[, 1] <= tlc[2] & as123[, 1] >= brc[2])
gcc = which(ac123[, 1] >= tlc[1] & ac123[, 1] <= brc[1])
sp = rbind(tlc, as123[gss, 2:1], brc)
# print("sp"); print(sp)
cp = rbind(tlc, ac123[gcc, ], brc)
return(list(tlc = tlc, brc = brc, sp = sp, cp = cp))
}
|
/scratch/gouwar.j/cran-all/cranData/vfcp/R/trimeze.R
|
#' @export vfalihaq
#'
vfalihaq <- function(C, u, tht){
# Ali–Mikhail–Haq
r = (C*(1 - tht*(1 - u))/(u - tht*C*(1 - u)))
return(ifelse(r > 1, NaN, ifelse(r < 0, NaN, r)))
}
#' @export vfjoe
#'
vfjoe <- function(C, u, tht){
# tht >= 1
r = (1 - (((1 - C)^tht - (1 - u)^tht)/(1 - (1 - u)^tht))^(1/tht))
return(ifelse(r > 1, NaN, ifelse(r < 0, NaN, r)))
}
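# Illustrative round-trip check (not part of the package source): for the
# AMH copula C(u, v) = u*v/(1 - tht*(1 - u)*(1 - v)), solving for v given
# C and u recovers the original value.
# C <- 0.6*0.7/(1 - 0.5*(1 - 0.6)*(1 - 0.7))  # 0.4468085
# vfalihaq(C, u = 0.6, tht = 0.5)             # 0.7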
|
/scratch/gouwar.j/cran-all/cranData/vfcp/R/vfcfaj.R
|
#' @export vfgumbel
#'
vfgumbel <- function(C, u, tht) {
r = exp(-((-log(C))^tht - (-log(u))^tht)^(1/tht))
return(ifelse(r > 1, NaN, ifelse(r < 0, NaN, r)))
}
#' @export vfclayton
#'
vfclayton <- function(C, u, tht) {
r = (C^(-tht) - u^(-tht) + 1)^(-1/tht)
return(ifelse(r > 1, NaN, ifelse(r < 0, NaN, r)))
}
#' @export vffrank
#'
vffrank <- function(C, u, tht) {
r = -log(((exp(-C * tht) - 1) * (exp(-tht) - 1))/
(exp(-u * tht) - 1) + 1)/tht
return(ifelse(r > 1, NaN, ifelse(r < 0, NaN, r)))
}
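# Illustrative round-trip check (not part of the package source): for the
# Clayton copula C(u, v) = (u^-tht + v^-tht - 1)^(-1/tht), vfclayton()
# inverts C in its second argument.
# C <- (0.6^-2 + 0.7^-2 - 1)^(-1/2)  # 0.5117...
# vfclayton(C, u = 0.6, tht = 2)     # 0.7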
|
/scratch/gouwar.j/cran-all/cranData/vfcp/R/vfcfg.R
|
#' @export vfenuo
#'
vfenuo <- function(marg, xo)
{
nuo = length(marg)
k = 0
e = numeric(nuo)
for(i in 1:nuo){
if (marg[i] == "weibull"){
e[i] = xo[i + k] * gamma(1 + 1/xo[i + k + 1])
} else {
if (marg[i] == "gamma"){
e[i] = xo[i + k] * xo[i + k + 1]
} else {
if (marg[i] == "lnorm"){
e[i] = exp(xo[i + k] + xo[i + k + 1]*xo[i + k + 1]/2)
} else {
if (marg[i] == "norm"){
e[i] = xo[i + k]
} else {
if (marg[i] == "betapr"){
e[i] = xo[i + k]/(xo[i + k + 1] - 1)
} else {
if (marg[i] == "beta"){
e[i] = xo[i + k]/(xo[i + k] + xo[i + k + 1])
}
}
}
}
}
}
k = k + 1
}
return(e)
}
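# Illustrative check (not part of the package source): the expected values
# of a standard normal marginal and a gamma marginal with scale 2 and
# shape 3 (parameters in the order consumed by vfmrg) are 0 and 2*3 = 6.
# vfenuo(marg = c("norm", "gamma"), xo = c(0, 1, 2, 3))  # c(0, 6)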
|
/scratch/gouwar.j/cran-all/cranData/vfcp/R/vfenuo.R
|
#' @export vfex
#'
vfex <- function(C, u, th, fm)
{
if (fm == "clayton"){
v = vfclayton(C, u, th)
} else {
if (fm == "frank"){
v = vffrank(C, u, th)
} else {
if (fm == "gumbel"){
v = vfgumbel(C, u, th)
} else {
if (fm == "fgm"){
v = vffgm(C, u, th)
} else {
if (fm == "amh"){
v = vfalihaq(C, u, th)
} else {
if (fm == "joe"){
v = vfjoe(C, u, th)
}
}
}
}
}
}
return(v)
}
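# Illustrative check (not part of the package source): vfex() merely
# dispatches on 'fm', so the following two calls agree.
# vfex(C = 0.3, u = 0.6, th = 2, fm = "clayton")
# vfclayton(C = 0.3, u = 0.6, tht = 2)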
|
/scratch/gouwar.j/cran-all/cranData/vfcp/R/vfex.R
|
#' @export vffgm
#'
vffgm <- function(C, u, tht){
a = -(1 - u)*tht
b = 1 + (1 - u)*tht
c = -C/u
r = ((-b + sqrt(b*b - 4*a*c))/(2*a))
return(ifelse(r > 1, NaN, ifelse(r < 0, NaN, r)))
}
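# Illustrative round-trip check (not part of the package source): for the
# FGM copula C(u, v) = u*v*(1 + tht*(1 - u)*(1 - v)), vffgm() returns the
# admissible root of the quadratic in v.
# C <- 0.6*0.7*(1 + 0.5*(1 - 0.6)*(1 - 0.7))  # 0.4452
# vffgm(C, u = 0.6, tht = 0.5)                # 0.7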
|
/scratch/gouwar.j/cran-all/cranData/vfcp/R/vffgm.R
|
#' @export vfmrg
#' @importFrom extraDistr qbetapr
#' @importFrom stats qbeta qweibull qgamma qlnorm qnorm
#'
vfmrg <- function(rdj, i, cosi, yo, cdf)
{
if (!cdf) posi = 1 - cosi else posi = cosi
if (rdj[i] == "weibull"){
ha = qweibull(posi, scale = yo[2*i - 1], shape = yo[2*i])
} else {
if (rdj[i] == "gamma"){
ha = qgamma(posi, scale = yo[2*i - 1], shape = yo[2*i])
} else {
if (rdj[i] == "lnorm"){
ha = qlnorm(posi, meanlog = yo[2*i - 1], sdlog = yo[2*i])
} else {
if (rdj[i] == "norm"){
ha = qnorm(posi, mean = yo[2*i - 1], sd = yo[2*i])
} else {
if (rdj[i] == "betapr"){
ha = qbetapr(posi, shape1 = yo[2*i - 1], shape2 = yo[2*i])
} else {
ha = qbeta(posi, shape1 = yo[2*i - 1], shape2 = yo[2*i])
}
}
}
}
}
return(ha)
}
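# Illustrative check (not part of the package source): for the first
# marginal, "norm" with mean yo[1] and sd yo[2], and cdf = TRUE, vfmrg()
# reduces to the normal quantile function.
# vfmrg(rdj = c("norm", "norm"), i = 1, cosi = 0.5,
#       yo = c(0, 1, 0, 1), cdf = TRUE)  # qnorm(0.5) = 0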
|
/scratch/gouwar.j/cran-all/cranData/vfcp/R/vfmrg.R
|
#' @export vfploto
#' @importFrom graphics plot lines grid legend points
#'
vfploto <- function(cx, pro, fam, marg, xo, tht,
cdf=TRUE, plt=TRUE, rtn=FALSE, ped = TRUE)
{
if (!plt & !rtn) stop(paste("plt =", plt, "and", "rtn =", rtn))
ncx = length(cx)
tp = ifelse(cdf == TRUE, "CDF", "Survival")
if (any(pro[2:length(pro)] < 1) & any(pro >= 1)) stop("error: wrong 'pro'")
if (pro[1] >= 1) stop("pro[1] >= 1")
if (all(pro < 1)) pda = FALSE else pda = TRUE
cz = list()
if (plt | rtn){
if (plt){
if (pda){
up = vfpripo(cx[1], pro)
uk = vfpripo(cx[ncx], pro)
# print(up); print(uk)
} else {
up = vfprifo(cx[1], pro)
uk = vfprifo(cx[ncx], pro)
}
# if (!cdf){up = 1 -up; uk = 1 - uk}
vp = vfex(cx[1], up, th = tht, fm = fam)
vk = vfex(cx[ncx], uk, th = tht, fm = fam)
# print(paste(vp, vk))
xyxk = vfmrg(rdj = marg, i = 1, cosi = uk, yo = xo, cdf)
xyxp = vfmrg(rdj = marg, i = 1, cosi = up, yo = xo, cdf)
xyyk = vfmrg(rdj = marg, i = 2, cosi = vk, yo = xo, cdf)
xyyp = vfmrg(rdj = marg, i = 2, cosi = vp, yo = xo, cdf)
e = vfenuo(marg, xo)
if (cdf){
limx = c(min(0, xyxp), min(max(0, xyxk), 2.5*e[1]))
limy = c(min(0, xyyp), min(max(0, xyyk), 3*e[2]))
} else {
limx = c(min(0, xyxk), max(0, xyxp))
limy = c(min(0, xyyk), max(0, xyyp))
}
# print(limx); print(limy)
e = c(NA, NA)
if (ped){
e = vfenuo(marg, xo)
# points(e[1], e[2], pch = 19, col = 1)
m2 = paste(" E[x, y] = {", round(e[1], 4), ", ",
round(e[2], 4), "}")
} else { m2 = ""}
plot(x=NULL, y=NULL, xlim=limx, ylim=limy,
xlab = paste(marg[1], " (", xo[1], ", ", xo[2], ")", sep = ""),
ylab = paste(marg[2], " (", xo[3], ", ", xo[4], ")", sep = ""),
main = paste(tp, fam, "tht =", tht, m2))
legend("topright", legend = cx, text.col = c(1:length(cx)), bty = "n")
grid(col = 2)
}
if (pda) slp = length(vfpripo(cx[1], pro)) else slp = length(vfprifo(cx[1], pro))
u = array(NA, c(ncx, slp))
v = array(NA, c(ncx, slp))
for (k in 1:ncx){
if (pda) u[k, ] = vfpripo(cx[k], pro) else u[k, ] = vfprifo(cx[k], pro)
v[k, ] = vfex(cx[k], u[k, ], th = tht, fm = fam)
xyx = vfmrg(rdj = marg, i = 1, cosi = u[k, ], yo = xo, cdf)
xyy = vfmrg(rdj = marg, i = 2, cosi = v[k, ], yo = xo, cdf)
if (plt){
lines(xyx, xyy, col = k, type = "l")
}
# new ==
# e = c(NA, NA)
#========
if (rtn){
e = vfenuo(marg, xo)
cz[[k]] = list(Type = tp, P = cx[k], x = xyx, y = xyy,
u = u[k, ], v = v[k, ], e = e)
}
}
if (ped & plt){
# e = vfenuo(marg, xo)
points(e[1], e[2], pch = 19, col = 1)
}
}
return(cz)
}
|
/scratch/gouwar.j/cran-all/cranData/vfcp/R/vfploto.R
|
#' @export vfprifo
#'
vfprifo <- function(ck, pro) return(c(ck + pro))  # additive grid ck + pro (used when all elements of 'pro' are < 1)
|
/scratch/gouwar.j/cran-all/cranData/vfcp/R/vfprifo.R
|
#' @export vfpripo
#'
vfpripo <- function(ck, pro){
if (is.finite(ck) & ck >= 0 & ck <= 1) {
npro = length(pro)
k = min(which(pro > 1)) - 1
# b = numeric(1)
b = NULL
b[k] = pro[k] - ck
for(i in (k + 1):npro)
{
b[i] = b[i - 1]/pro[i]
}
bb = 0
a = list()
for(j in npro:(k+1))
{
if (j == npro) a[[j]] = seq(b[j], b[j - 1], b[j]) else {
a[[j]] = seq(2*b[j], b[j-1], b[j] )
}
}
h = c(1e-14, 1e-09)
for(i in npro:(k+1))
{
h = c(h, as.numeric(a[[i]]))
}
return(c(ck + h, pro[(k-1):1]))
} else {return(NaN)}
}
|
/scratch/gouwar.j/cran-all/cranData/vfcp/R/vfpripo.R
|
#' Create a numeric legend
#'
#' Create a color legend based on given data and palette or colors. Also passes on data- attributes for optional JS interaction.
#'
#' @param inputId The `input` slot that will be used to access the value.
#' @param label Display label for the control, or `NULL` for no label.
#' @param class The CSS class of the input div element to match with any brush-defining functions. Default classes for brushes are either `"continuous-color-filter"` or `"discrete-color-filter"`.
#' @param n Number of color strips in the legend. Default is `100`.
#' @param minValue Minimum numeric value in the legend (can be higher than the maximum for an inverted scale).
#' @param maxValue Maximum numeric value in the legend (can be lower than the minimum for an inverted scale).
#' @param data Alternative vector to extract numeric minimum and maximum values.
#' @param colors Colours to interpolate; must be a valid argument to
#' [grDevices::col2rgb()]. This can be a character vector of
#' `"#RRGGBB"` or `"#RRGGBBAA"`, colour names from
#' [grDevices::colors()], or a positive integer that indexes into
#' [grDevices::palette()].
#' @param palette A function that outputs a list of colors; for a `"discrete-color-filter"`, a list or vector of colors is also accepted.
#' @param options Configuration options for brush and scale. Use `ticks` to specify the number of ticks or a list of specific tick values,
#' `format` for a d3-format-compatible formatting string (see \url{https://github.com/d3/d3-format} for valid formats), and
#' `hide_brush_labels` as `TRUE` to hide the brush interval.
#' @param orient Orientation of the legend. Can be `"bottom"` (default, horizontal with labels below), `"top"` (horizontal with labels above), `"left"` (vertical with labels on the left)
#' and `"right"` (vertical with labels on the right).
#' @param size Absolute length in pixels of the color bar; becomes width or height depending on value of `orient`. Default is `200`.
#' @param thickness Absolute thickness in pixels of the color bar; opposite of size depending on value of `orient`. Default is `20`.
#' @param offset Left offset for scale to allow long labels. Default is `0`.
#'
#' @return A numeric color legend control that can be added to a UI definition
#'
#' @family base legend
#' @seealso [discreteColorFilter()] [continuousColorFilter()] [categoricalColorFilter()]
#'
#' @import shiny
#' @export
numericLegend <- function(inputId, label = NULL, class = "", n = 100, minValue = NULL, maxValue = NULL, data = NULL, colors = NULL,
palette = NULL, options = NULL, orient = "bottom", size = 200, thickness = 20, offset = 0) {
# Data options are passed to JavaScript to set up the brush
data_options <- list()
if (!validateOrient(orient))
stop(paste0(inputId, ": orient must be 'bottom', 'top', 'right' or 'left'"))
# For convenience the function can receive a column of a dataframe/vector/list as an argument
# Min and max values are then automatically calculated
data_neutral <- NULL
if (!is.null(data)) {
data_min <- min(data)
data_max <- max(data)
}
else if (((is.null(minValue)) || is.null(maxValue)) ||
(!is.numeric(minValue) || !is.numeric(maxValue))) {
stop(paste0(inputId, ": Either a vector/list or minimum and maximum numeric data values must be provided."))
}
else {
if (is.numeric(minValue) & is.numeric(maxValue)) {
data_min <- minValue
data_max <- maxValue
}
else stop(paste0(inputId, ": minValue or maxValue must be numeric"))
}
data_options[["numcolors"]] <- n
numcolors <- n
# Color validation
if (!is.null(palette)) {
if (!is.null(colors)) stop(paste0(inputId,": Only one of colors or palette should be passed."))
if (is.function(palette)) {
color_map <- palette(numcolors)
}
else if (class == "discrete-color-filter") {
if (is.list(palette) | is.vector(palette)) {
numcolors <- length(palette)
data_options[["numcolors"]] <- length(palette)
color_map <- palette
}
else stop(paste0(inputId,": Palette must be either a function that receives a number of colors or a list of colors"))
}
else {
stop(paste0(inputId,": Palette must be a function that receives a number of colors"))
}
}
else if (!is.null(colors)) {
if (length(colors) > 1) {
color_map <- scales::colour_ramp(colors)(seq(0, 1, length = numcolors))
}
else {
stop(paste0(inputId,": At least two colors must be provided for simple linear interpolation if a palette is not provided"))
}
}
else {
stop(paste0(inputId,": Either a palette or two colors for interpolation must be defined"))
}
# div_container_size <- paste0("width: ", htmltools::validateCssUnit(containerWidth), "; height: ", htmltools::validateCssUnit(containerHeight))
div_container_size <- paste0("width: 100% ; height: 100%")
svg_width <- NULL
svg_height <- NULL
if ((orient == "bottom") | (orient == "top")) {
svg_width <- size + offset + 40
svg_height <- thickness + 80
stylesize0 <-
paste0("width: ", htmltools::validateCssUnit(svg_width), ";")
stylesize <-
paste0(stylesize0, "height: ", htmltools::validateCssUnit(svg_height), "px")
}
else {
svg_width <- offset + thickness + 120
svg_height <- size + 20
stylesize0 <-
paste0("height: ", htmltools::validateCssUnit(svg_height), ";")
stylesize <-
paste0(
stylesize0,
"width: ",
htmltools::validateCssUnit(svg_width),
"px"
)
}
svg_size <- list("width" = svg_width, "height" = svg_height)
if (!missing(options)) {
if (!is.null(options$ticks)) {
if (!is.numeric(options$ticks) & (length(options$ticks) < 1)) {
stop(paste0(inputId,"Ticks must be a number or a list of numbers"))
}
else {
data_options[["ticks"]] <- options$ticks
}
}
# Format is not validated when running the Shiny app
if (!is.null(options$format)) {
data_options[["format"]] <- options$format
}
if (!is.null(options$hide_brush_labels)) {
data_options[["hide_brush_labels"]] <- options$hide_brush_labels
}
}
inner_width <- ifelse((orient == "bottom" | orient == "top"), size, thickness) + offset # svg_size$width - 40 + offset;
inner_height <- ifelse((orient == "bottom" | orient == "top"), thickness, size) # svg_size$height - 10
legend_scale <- scale_to(0, numcolors, 0, ifelse((orient == "bottom" | orient == "top"), inner_width, inner_height))
# colormap <- lapply(1:numcolors, FUN = function(x,cmap) { return(cmap[x]) }, cmap = color_map)
# Create a group of svg rects based on the color map above
strips <-
tag("g", tagList(
addColorStrips(
numcolors,
color_map,
orient,
legend_scale,
ifelse((orient == "bottom" | orient == "top"), inner_width, inner_height),
thickness
)
))
# Positioning the group element that defines where the gradient, axis and brush are drawn
# For the right-oriented filters, we need to position them based on the width of the drawing area
left_offset <- list(
"bottom" = 20 + offset,
"top" = 20 + offset,
"right" = inner_width - thickness - 25,
"left" = 30 + offset
)
g <- tag("g", list(
"id" = paste0("g-filter-", inputId),
"transform" = paste0("translate(", left_offset[[orient]], ",5)"),
strips
))
# clipRect <- tag("rect", list("width" = svg_size$width, "height" = svg_size$height))
# defs <- tag("defs", tagList(tag("clipPath", list("id" = paste0("clip-", inputId), clipRect))))
svg <- tag("svg", list(
"xmlns" = "http://www.w3.org/2000/svg",
"viewBox" = paste("0 0", svg_size$width, svg_size$height),
"class" = "filter-svg",
"id" = paste0("svg-filter-", inputId),
"width" = svg_size$width,
"height" = svg_size$height,
# "clip-path" = paste0("url(#clip-",inputId,")"),
# defs,
g
))
div(
class = "form-group shiny-input-container",
style = div_container_size,
tags$label(
label,
class = "control-label",
class = if (is.null(label)) "shiny-label-null",
`for` = inputId
), tags$div(
id = inputId,
class = class,
style = stylesize,
"data-size" = jsonlite::toJSON(list("width" = inner_width, "height" = inner_height), auto_unbox = TRUE),
"data-orient" = orient,
"data-min" = data_min,
"data-max" = data_max,
"data-thickness" = thickness,
"data-options" = jsonlite::toJSON(data_options, auto_unbox = TRUE),
tagList(svg)
)
)
}
#' Create a categorical legend
#'
#' Create a color legend based on given data and palette or colors. Also passes on data- attributes for optional JS interaction.
#'
#' @param inputId The `input` slot that will be used to access the value.
#' @param label Display label for the control, or `NULL` for no label.
#' @param class The CSS class of the input div element to match with any filter toggling functions. Default class is `"categorical-color-filter"`.
#' @param values List of character vectors that will match with the colors or palette in the order provided by both.
#' @param data Alternative vector from which values are extracted with the `unique()` function.
#' @param colors Colours to match with values; must be a valid argument to
#' [grDevices::col2rgb()]. This can be a character vector of
#' `"#RRGGBB"` or `"#RRGGBBAA"`, colour names from
#' [grDevices::colors()], or a positive integer that indexes into
#' [grDevices::palette()].
#' @param palette A function that outputs a list of colors, or a list/vector of colors whose length matches the number of values.
#' @param orient Orientation of the legend. Can be `"bottom"` (default, horizontal with labels below), `"top"` (horizontal with labels above), `"left"` (vertical with labels on the left)
#' and `"right"` (vertical with labels on the right).
#' @param size Absolute length in pixels of the color bar; becomes width or height depending on value of `orient`. Default is `220`.
#' @param multiple Is selection of multiple items allowed? Default is `TRUE`. With `FALSE`, selecting one item will de-select the others.
#' @return A categorical color legend control that can be added to a UI definition
#'
#' @family base legend
#' @seealso [discreteColorFilter()] [continuousColorFilter()] [categoricalColorFilter()]
#'
#' @import shiny
#' @export
categoricalLegend <- function(inputId, label = NULL, class = "", values = NULL, data = NULL, colors = NULL,
palette = NULL, orient = "bottom", size = 220, multiple = TRUE) {
if (!validateOrient(orient))
stop(paste0(inputId, ": orient must be 'bottom', 'top', 'right' or 'left'"))
if (!is.null(values)) {
if (!is.null(data)) stop(paste0(inputId,": Only one of values or data should be passed."))
if (!is.list(values) & !is.vector(values)) {
stop(paste0(inputId,": Values must be provided in a list."))
}
}
if (!is.null(data)) {
# if (typeof(data) == "character") stop("Invalid data type")
if (mode(data) == "numeric") {
message(paste0(inputId,": Numeric data type might yield unexpected results."))
}
values <- unique(data)
}
if (is.null(data) & is.null(values)) {
stop(paste0(inputId,": At least one of data or values must be provided."))
}
numcolors <- length(values)
# Color validation
if (!is.null(palette)) {
# If palette is a function, that's all good
if (is.function(palette)) {
color_map <- palette(numcolors)
}
else if (is.vector(palette) | is.list(palette)) {
# If palette is either a list or a vector, we check if the numbers match
if (length(palette) != numcolors)
stop(paste0(inputId,": The number of values must match the number of colors."))
else
color_map <- palette
}
else {
stop(paste0(inputId,": Palette must either be a function that receives a number of colors or a list of colors."))
}
}
else if (!is.null(colors)) {
# If colors are passed, we need at least two because we will generate a discrete palette through interpolation
if (length(colors) > 1) {
color_map <- scales::colour_ramp(colors)(seq(0, 1, length = numcolors))
}
else {
stop(paste0(inputId,": At least two colors must be provided for simple linear interpolation if a palette is not provided."))
}
}
else {
stop(paste0(inputId,": Either a palette or two colors for interpolation must be provided"))
}
if ((orient == "bottom") | (orient == "top")) {
stylesize0 <- paste0("width: ", htmltools::validateCssUnit(size), ";")
stylesize <- paste0(stylesize0, "height: 90px")
svg_size <- list("width" = size, "height" = 90)
}
else {
stylesize0 <- paste0("height: ", htmltools::validateCssUnit(size), ";")
stylesize <- paste0(stylesize0, "width: 90px")
svg_size <- list("width" = 90, "height" = size)
}
tag_head <- {
if (orient == "left") {
tags$head(tags$style(
paste0(".", inputId, "{
display: inline-flex;
align-items: center;
margin-right: 1em;
} .", inputId, "::after {
content: \"\";
width: 20px;
height: 15px;
margin-left: 0.5em;
margin-right: 0.25em;
background: var(--color);
}
.selected::after { border: solid 1.5px black; }
.selected { font-weight: bold; }")
))
}
else {
tags$head(tags$style(
paste0(".", inputId, "{
display: inline-flex;
align-items: center;
margin-right: 1em;
} .", inputId, "::before {
content: \"\";
width: 20px;
height: 15px;
margin-left: 0.5em;
margin-right: 0.25em;
background: var(--color);
}
.selected::before { border: solid 1.5px black; }
.selected { font-weight: bold; }")
))
}
}
category_blocks <-
tag("g", tagList(
addCategoryBlocks(
orient = "orient",
input_id = inputId,
color_map = color_map,
values = values
)
))
div(
class = "form-group shiny-input-container",
style = stylesize0,
tags$label(
label,
class = "control-label",
class = if (is.null(label)) "shiny-label-null",
`for` = inputId
), tags$div(
id = inputId,
class = class,
style = stylesize,
"data-multiple" = multiple,
tagList(tag_head, category_blocks)
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/vfinputs/R/base-legends.R
|
#' Add a visual filter input for continuous values
#'
#' @description The brush used in this filter allows a free selection over the whole input range.
#'
#' @inheritDotParams numericLegend
#' @param inputId The `input` slot that will be used to access the value.
#' @return A visual filter input control that can be added to a UI definition.
#'
#' @family visual filters
#' @seealso [discreteColorFilter()] [categoricalColorFilter()]
#'
#' @examples
#' ## Only run examples in interactive R sessions
#' if (interactive()) {
#'
#' ui <- fluidPage(
#' continuousColorFilter("filter", minValue = 0, maxValue = 200, palette = scales::viridis_pal()),
#' verbatimTextOutput("value")
#' )
#' server <- function(input, output) {
#' output$value <- output$selection <- renderPrint({
#' if (!is.null(input$filter)) {
#' paste0(input$filter$start, ",", input$filter$end)
#' }
#' })
#' }
#' shinyApp(ui, server)
#'
#' ui <- fluidPage(
#' continuousColorFilter("filter", data = mtcars$mpg, colors = c("#FF0000", "#0000FF")),
#' verbatimTextOutput("value")
#' )
#' server <- function(input, output) {
#' output$value <- output$selection <- renderPrint({
#' if (!is.null(input$filter)) {
#' paste0(input$filter$start, ",", input$filter$end)
#' }
#' })
#' }
#' shinyApp(ui, server)
#'
#' }
#'
#' @section Server value:
#' `start` and `end` bounds of a selection. The input value is `NULL` for empty selections.
#'
#' @import shiny
#' @export
continuousColorFilter <- function(inputId, ...) {
aux_deps <- singleton(
tags$head(
tags$script(src = "wwwvfinputs/js/axisfilter.js"),
tags$link(rel = "stylesheet", type = "text/css", href = "wwwvfinputs/css/style.css")
)
)
tagList(
numericLegend(inputId = inputId, class = "continuous-color-filter", ...),
singleton(tags$head(
tags$script(src = "wwwvfinputs/js/continuousLegendFilterBinding.js")
)),
d3_dependency(),
aux_deps
)
}
#' Add a visual filter input for discrete values
#'
#' @description The brush used in this filter snaps to evenly divided steps based on the number of colors passed as argument.
#' With minValue = 0, maxValue = 100 and n = 5, it will snap at the edges (0 and 100) and 20, 40, 60, and 80.
#'
#' @inheritDotParams numericLegend
#' @param inputId The `input` slot that will be used to access the value.
#' @return A visual filter input control that can be added to a UI definition.
#'
#' @family visual filters
#' @seealso [numericLegend()]
#'
#' @section Server value:
#' `start` and `end` bounds of a selection. The input value is `NULL` for empty selections.
#'
#' @import shiny
#' @examples
#' ## Only run examples in interactive R sessions
#' if (interactive()) {
#'
#' ui <- fluidPage(
#' discreteColorFilter("filter", minValue = 0, maxValue = 200, n = 5,
#' palette = scales::viridis_pal()),
#' verbatimTextOutput("value")
#' )
#' server <- function(input, output) {
#' output$value <- output$selection <- renderPrint({
#' if (!is.null(input$filter)) {
#' paste0(input$filter$start, ",", input$filter$end)
#' }
#' })
#' }
#' shinyApp(ui, server)
#' }
#'
#' @export
discreteColorFilter <- function(inputId, ...) {
aux_deps <- singleton(
tags$head(
tags$script(src="wwwvfinputs/js/axisfilter.js"),
tags$link(rel="stylesheet", type="text/css", href="wwwvfinputs/css/style.css")
)
)
tagList(
numericLegend(inputId = inputId, class = "discrete-color-filter", ...),
singleton(tags$head(
tags$script(src="wwwvfinputs/js/discreteLegendFilterBinding.js"))),
d3_dependency(),
aux_deps
)
}
#' Add a visual filter input for categorical data
#'
#' @inheritDotParams categoricalLegend
#' @param inputId The `input` slot that will be used to access the value.
#'
#' @return A visual filter input control that can be added to a UI definition
#'
#' @family visual filters
#' @seealso [categoricalLegend()]
#'
#' @section Server value:
#' `start` and `end` bounds of a selection. The default value (or empty selection) is `NULL`.
#'
#' @examples
#' ## Only run examples in interactive R sessions
#' if (interactive()) {
#'
#' ui <- fluidPage(
#' categoricalColorFilter("filter", data = sort(mtcars$gear), orient = "right",
#' palette = RColorBrewer::brewer.pal(8, "Dark2")),
#' verbatimTextOutput("value")
#' )
#' server <- function(input, output) {
#' output$value <- output$selection <- renderPrint({
#' if (!is.null(input$filter)) {
#' format(input$filter)
#' }
#' })
#' }
#'
#' shinyApp(ui, server)
#'
#' ui <- fluidPage(
#' categoricalColorFilter("filter", label = p("Categorical filter:"),
#' palette = RColorBrewer::brewer.pal(3, "Accent"),
#' values = list("a","b","c")),
#' verbatimTextOutput("values")
#' )
#' server <- function(input, output) {
#' output$value <- output$selection <- renderPrint({
#' if (!is.null(input$filter)) {
#' format(input$filter)
#' }
#' })
#' }
#' shinyApp(ui, server)
#'
#' }
#'
#' @import shiny
#' @export
categoricalColorFilter <- function(inputId, ...) {
aux_deps <- singleton(
tags$head(
tags$script(src = "wwwvfinputs/js/visualscales.js"),
tags$script(src = "wwwvfinputs/js/axisfilter.js"),
tags$script(src = "wwwvfinputs/js/colorutils.js"),
tags$link(rel = "stylesheet", type = "text/css", href = "wwwvfinputs/css/style.css")
)
)
tagList(
categoricalLegend(inputId = inputId, class = "categorical-color-filter", ...),
singleton(tags$head(
tags$script(src = "wwwvfinputs/js/categoricalFilterBinding.js")
)),
d3_dependency(),
aux_deps,
)
}
|
/scratch/gouwar.j/cran-all/cranData/vfinputs/R/input-colorfilters.R
|
/scratch/gouwar.j/cran-all/cranData/vfinputs/R/server-input-handlers.R
|
#' Add color strip
#'
#' @param color A valid CSS color name
#' @param x The x position of the `rect` shape relative to a container
#' @param y The y position of the `rect` shape relative to a container
#' @param width The width of the `rect`
#' @param height The height of the `rect`
#'
#' @return A `rect` element with the `color` argument as fill and stroke
#'
#' @import shiny
colorStrip <- function(color, x = 0, y = 0, width = 1, height = 30) {
tag("rect", list ("x" = x,
"y" = y,
"width" = width,
"height" = height,
"style" = paste0("fill: ", color,"; stroke:", color)))
}
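# Illustrative check (not part of the package source): rendering the tag
# yields an SVG <rect> whose style carries the colour in both fill and
# stroke, e.g. style="fill: red; stroke:red".
# as.character(colorStrip("red"))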
#' Add a color-label block
#'
#' @param i The index of the item to be created
#' @param tag_name An HTML element tag
#' @param class The HTML element class that will enable interaction
#' @param color_map A list of colors from where the corresponding item color will be retrieved
#' @param values A list of values from which the corresponding item label will be retrieved
#'
#' @return An HTML element with pointer cursor, a colored square and a label
#'
#' @import shiny
categoryBlock <- function(i, values, tag_name, class, color_map) {
tag(tag_name,
list(
values[i],
"class" = class,
"data-value" = values[i],
"style" = paste0("cursor: pointer; --color: ", color_map[i])
))
}
#' Add list of category items
#'
#' @param orient Orientation of the legend. Can be `"bottom"` (default, horizontal with labels below), `"top"` (horizontal with labels above), `"left"` (vertical with labels on the left)
#' and `"right"` (vertical with labels on the right).
#' @param input_id The CSS class used to trigger interactions
#' @param color_map A list of colors from where the corresponding item color will be retrieved
#' @param values A list of values from which the corresponding item label will be retrieved
#'
#' @return A list of the same length as values, containing either `"span"` or `"div"` elements depending on the chosen orientation.
#'
#' @seealso [categoricalLegend()]
#'
addCategoryBlocks <- function(orient, input_id, color_map, values) {
tag_name <- ifelse((orient == "bottom") | (orient == "top"), "span", "div")
lapply(1:length(values), categoryBlock, tag_name = tag_name,
class = input_id, color_map = color_map, values = values)
}
#' Add list of colored strips
#'
#' @param n_strips Number of strips to be added
#' @param color_map A list of colors corresponding to the number of strips
#' @param orient Orientation of the legend. Can be `"bottom"` (default, horizontal with labels below), `"top"` (horizontal with labels above), `"left"` (vertical with labels on the left)
#' and `"right"` (vertical with labels on the right).
#' @param pos_function A function to convert from index number to pixels
#' @param size The length of the list in pixels
#' @param thickness The height or width of the list in pixels
#'
#' @return A list of SVG `rect` shapes.
#'
#' @seealso [numericLegend()]
#'
addColorStrips <- function(n_strips, color_map, orient, pos_function, size, thickness = 20) {
return (switch(orient,
"bottom" = lapply(1:n_strips, function(i) {
colorStrip(color = color_map[i],
x = pos_function(i-1),
y = 20,
width = size/n_strips,
height= thickness) }),
"top" = lapply(1:n_strips, function(i) {
colorStrip(color = color_map[i],
x = pos_function(i-1),
y = 25,
width = size/n_strips,
height= thickness) }),
"right" = lapply(1:n_strips, function(i) {
colorStrip(color = color_map[i],
y = pos_function(i-1),
x = 30,
height = size/n_strips,
width= thickness) }),
"left" = lapply(1:n_strips, function(i) {
colorStrip(color = color_map[i],
y = pos_function(i-1),
x = 15,
height = size/n_strips,
width = thickness) }),
)
) #return
}
|
/scratch/gouwar.j/cran-all/cranData/vfinputs/R/shapes.R
|
#' @title Change a numeric legend filter in the client
#'
#' @description This function does not validate if a brush is already defined; updating only one of start or end with an empty brush will assign the other to NaN.
#'
#' @details This function only affects the label and JavaScript-implemented axis and brush values and selection.
#' Re-creating the color strips and changing the ticks and format of values requires deleting and re-creating the legend using `shinyjs`, for example.
#'
#' @param session The `session` object passed to function given to
#' `shinyServer`.
#' @param inputId The id of the input object.
#' @param label The label to set for the input object.
#' @param start Beginning of selection interval.
#' @param end End of selection interval.
#' @param minValue Minimum numeric value in the legend (can be higher than the maximum for an inverted scale).
#' @param maxValue Maximum numeric value in the legend (can be lower than the minimum for an inverted scale).
#'
#' @family update functions
#' @seealso [continuousColorFilter()] [discreteColorFilter()]
#'
#' @export
updateNumericFilter <- function(session, inputId, label = NULL, start = NULL, end = NULL, minValue = NULL, maxValue = NULL) {
message <- dropNulls(list(label = label, start = start, end = end, min = minValue, max = maxValue))
session$sendInputMessage(inputId, message)
}
#' Change a categorical legend in the client
#'
#' @details This function only affects the label and the selection. Re-creating the items requires deleting and re-creating the legend using `shinyjs`, for example.
#'
#' @param session The `session` object passed to function given to
#' `shinyServer`.
#' @param inputId The id of the input object.
#' @param label The label to set for the input object.
#' @param select Items to be selected.
#' @param deselect Items to be deselected.
#'
#' @family update functions
#' @seealso [categoricalColorFilter()]
#'
#' @export
updateCategoricalFilter <- function(session, inputId, label = NULL, select = NULL, deselect = NULL) {
message <- dropNulls(list(label = label, select = select, deselect = deselect))
session$sendInputMessage(inputId, message)
}
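# Minimal server-side sketch (not part of the package source; the ids
# "filter", "catfilter" and "reset" are hypothetical):
# server <- function(input, output, session) {
#   observeEvent(input$reset, {
#     updateNumericFilter(session, "filter", start = 0, end = 100)
#     updateCategoricalFilter(session, "catfilter", deselect = input$catfilter)
#   })
# }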
|
/scratch/gouwar.j/cran-all/cranData/vfinputs/R/update-input.R
|
d3_dependency <- function() {
htmltools::htmlDependency("d3", "5.15.0", c(href = "wwwvfinputs/js/"),
script = "d3.min.js"
)
}
normalize_value <- function(value, data) {
return((value - min(data))/(max(data) - min(data)))
}
normalize_whole <- function(data) {
return ((data - min(data)) / (max(data) - min(data)))
}
dropNulls <- function(x) {
x[!vapply(x, is.null, FUN.VALUE=logical(1))]
}
scale_to <- function(in.min, in.max, out.min, out.max, discrete = F) {
if (discrete) {
return(function(value) {
return( trunc((value - in.min) / (in.max - in.min) * (out.max-0.1 - out.min) + out.min) )
} )
}
else {
return(function(value) {
return( (value - in.min) / (in.max - in.min) * (out.max - out.min) + out.min)
} )
}
}
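# Illustrative check (not part of the package source): scale_to() returns a
# linear map between the two ranges, e.g. scale_to(0, 100, 0, 200)(25) is 50;
# with discrete = TRUE the result is truncated to a whole unit, so
# scale_to(0, 100, 0, 200, discrete = TRUE)(25) is 49.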
validateOrient <- function(orient) {
return(orient == "bottom" | orient == "top" | orient == "left" | orient == "right")
}
validateScheme <- function(scheme) {
validSchemes = list("category10" = "Category10",
"accent" = "Accent",
"dark2" = "Dark2",
"paired" = "Paired",
"pastel1" = "Pastel1",
"pastel2" = "Pastel2",
"set1" = "Set1",
"set2" = "Set2",
"set3" = "Set3",
"tableau10" = "Tableau10",
"brbg" = "BrBG",
"prgn" = "PRGn",
"piyg" = "PiYG",
"puor" = "PuOr",
"rdbu" = "RdBu",
"rdgy" = "RdGy",
"rdylbu" ="RdYlBu",
"rdyglgn"= "RdYlGn",
"spectral" = "spectral",
"blues" = "blues",
"greens" = "greens",
"greys" = "greys",
"oranges" = "oranges",
"purples" = "purples",
"reds" = "reds",
"turbo" = "turbo",
"viridis" = "viridis",
"inferno" = "inferno",
"magma" = "magma",
"plasma" = "plasma",
"cividis" = "cividis",
"warm" = "warm",
"cool" = "cool",
"cubehelixdefault" = "CubehelixDefault",
"bugn" = "BuGn",
"bupu" = "BuPu",
"gnbu" = "GnBu",
"orrd" = "OrRd",
"pubugn" = "PuBuGn",
"pubu" = "PuBu",
"purd" = "PuRd",
"rdpu" = "RdPu",
"ylgnbu" = "YlGnBu",
"ylgn" = "YlGn",
"ylorbr" = "YlOrBr",
"ylorrd" = "YlOrRd",
"rainbow" = "Rainbow",
"sinebow" = "Sinebow")
if (tolower(scheme) %in% names(validSchemes))
return(validSchemes[[tolower(scheme)]])
else stop("Invalid colour scheme or palette chosen. Please use a valid colour scheme name from https://github.com/d3/d3-scale-chromatic.")
}
|
/scratch/gouwar.j/cran-all/cranData/vfinputs/R/utils.R
|
#' Sets up the package when it's loaded
#'
#' Adds the content of www to wwwvfinputs/, and registers input handlers to massage the output from
#' JavaScript into R structures.
#'
#' @importFrom shiny addResourcePath registerInputHandler
#'
#' @noRd
#'
.onLoad <- function(libname, pkgname) {
# Add directory for static resources
addResourcePath(
prefix = 'wwwvfinputs',
directoryPath = system.file('www', package = 'vfinputs')
)
registerInputHandler("vfinputs.continuousLegendFilter", function(data, ...) {
if (is.null(data))
NULL
else
list(start=data[1],end=data[2])
}, force = TRUE)
registerInputHandler("vfinputs.discreteLegendFilter", function(data, ...) {
if (is.null(data))
NULL
else
list(start=data[1],end=data[2])
}, force = TRUE)
registerInputHandler("vfinputs.categoricalColorFilter", function(data, ...) {
if (is.null(data))
NULL
else
data
}, force = TRUE)
}
#' Cleans up when package is unloaded
#'
#' Reverses the effects from .onLoad
#'
#' @importFrom shiny removeInputHandler
#'
#' @noRd
#'
.onUnload <- function(libpath) {
removeInputHandler('vfinputs.continuousLegendFilter')
removeInputHandler('vfinputs.discreteLegendFilter')
removeInputHandler('vfinputs.categoricalColorFilter')
}
|
/scratch/gouwar.j/cran-all/cranData/vfinputs/R/zzz.R
|
#' Combined Visual Field Series for General Progression Method
#'
#' Data
#'
#' @docType data
#'
#' @usage data(vf.cigts)
#'
#' @format A data frame sample for the CIGTS progression method, which includes visual field-related measurements for two eyes, each with 10 follow-ups. Rows represent single measurements.
#'
#' @keywords datasets
#'
#' @source
#' \describe{
#' \item{eyeid}{eyeid, labeled as 1,2... for different eyes.}
#' \item{yearsfollowed}{follow-up years. The minimum number of measurements, or rows, for one eye is 5.}
#' \item{tdp1-tdp54}{52 total deviation probability, or 'tdp', measurements. The minimum number of measurements, or rows, for one eye is 5.}
#' ...
#' }
#'
#' @examples
#' data(vf.cigts)
#' colnames(vf.cigts)
#' progression.cigts(vf.cigts)
#' progression.cigts(vf.cigts[vf.cigts$eyeid == 1,])
#' progression.cigts(vf.cigts[vf.cigts$eyeid == 2,])
"vf.cigts"
|
/scratch/gouwar.j/cran-all/cranData/vfprogression/R/data-vfseries-cigits.R
|
#' Combined Visual Field Series for General Progression Method
#'
#' Data
#'
#' @docType data
#'
#' @usage data(vf.plr.nouri.2012)
#'
#' @format A data frame sample for the Pointwise Linear Regression (PLR) progression method according to Nouri-Mahdavi 2012, which includes visual field-related measurements for two eyes, each with 10 follow-ups. Rows represent single measurements.
#'
#' @keywords datasets
#'
#' @source
#' \describe{
#' \item{eyeid}{eyeid, labeled as 1,2... for different eyes}
#' \item{yearsfollowed}{follow-up years. The minimum measurements, or rows, for one eye is 3}
#' \item{td1-td54}{52 total deviation, or 'td' measurements. The minimum measurements, or rows, for one eye is 3}
#' ...
#' }
#'
#' @examples
#' data(vf.plr.nouri.2012)
#' colnames(vf.plr.nouri.2012)
#' progression.plr.nouri.2012(vf.plr.nouri.2012)
#' progression.plr.nouri.2012(vf.plr.nouri.2012[vf.plr.nouri.2012$eyeid == 1,])
#' progression.plr.nouri.2012(vf.plr.nouri.2012[vf.plr.nouri.2012$eyeid == 2,])
"vf.plr.nouri.2012"
|
/scratch/gouwar.j/cran-all/cranData/vfprogression/R/data-vfseries-plr-nouri-2012.R
|
#' Combined Visual Field Series for General Progression Method
#'
#' Data
#'
#' @docType data
#'
#' @usage data(vf.schell2014)
#'
#' @format A data frame sample for the progression method by Schell et al. 2014, which includes visual field-related measurements for two eyes, each with 10 follow-ups. Rows represent single measurements.
#'
#' @keywords datasets
#'
#' @source
#' \describe{
#' \item{eyeid}{eyeid, labeled as 1,2... for different eyes.}
#' \item{md}{mean deviation measurements. The minimum measurements, or rows, for one eye is 4.}
#' ...
#' }
#'
#' @examples
#' data(vf.schell2014)
#' colnames(vf.schell2014)
#' progression.schell2014(vf.schell2014)
#' progression.schell2014(vf.schell2014[vf.schell2014$eyeid == 1,])
#' progression.schell2014(vf.schell2014[vf.schell2014$eyeid == 2,])
"vf.schell2014"
|
/scratch/gouwar.j/cran-all/cranData/vfprogression/R/data-vfseries-schell2014.R
|
#' Combined Visual Field Series for General Progression Method
#'
#' Data
#'
#' @docType data
#'
#' @usage data(vf.vfi)
#'
#' @format A data frame sample for the VFI progression method, which includes visual field-related measurements for two eyes, each with 10 follow-ups.
#'
#' @keywords datasets
#'
#' @source
#' \describe{
#' \item{eyeid}{eyeid, labeled as 1,2... for different eye groups.}
#' \item{yearsfollowed}{follow-up years. The minimum measurements, or rows, for one eye is 3.}
#' \item{vfi}{visual field index. The minimum measurements, or rows, for one eye is 3.}
#' ...
#' }
#'
#' @examples
#' data(vf.vfi)
#' colnames(vf.vfi)
#' progression.vfi(vf.vfi)
#' progression.vfi(vf.vfi[vf.vfi$eyeid == 1,])
#' progression.vfi(vf.vfi[vf.vfi$eyeid == 2,])
"vf.vfi"
|
/scratch/gouwar.j/cran-all/cranData/vfprogression/R/data-vfseries-vfi.R
|
#' Combined Visual Field Series for General Progression Method
#'
#' Data
#'
#' @docType data
#'
#' @usage data(vfseries)
#'
#' @format A data frame sample including the following visual field-related measurements for two eyes, each with 10 follow-ups.
#'
#' @keywords datasets
#'
#' @source
#' \describe{
#' \item{eyeid}{eyeid, labeled as 1,2... for different eyes.}
#' \item{nvisit}{number of visits.}
#' \item{yearsfollowed}{follow-up years.}
#' \item{distprev}{to be updated.}
#' \item{age}{in years.}
#' \item{righteye}{1 as right eye, 0 as left eye.}
#' \item{malfixrate}{VF test malfixation rate.}
#' \item{ght}{glaucoma hemifield test result.}
#' \item{vfi}{visual field index.}
#' \item{md}{mean deviation.}
#' \item{mdprob}{mean deviation probability.}
#' \item{psd}{pattern standard deviation.}
#' \item{psdprob}{pattern standard deviation probability.}
#' \item{s1-s54}{52 sensitivity measurements.}
#' \item{td1-td54 }{52 total deviation measurements.}
#' \item{tdp1-tdp54}{52 total deviation probability measurements.}
#' \item{pdp1-pdp54}{52 pattern deviation probability measurements.}
#' ...
#' }
#' @examples
#' data(vfseries)
#' progression(vfseries)
#' progression(vfseries[vfseries$eyeid == 1,])
#' progression(vfseries[vfseries$eyeid == 2,])
#' progression(vfseries, method=c("cigts"))
#' progression.cigts(vfseries)
#' progression(vfseries, method=c('plr.nouri.2012', 'schell2014', 'vfi'))
"vfseries"
|
/scratch/gouwar.j/cran-all/cranData/vfprogression/R/data-vfseries.R
|
### convert field locations to matrix indices (24-2 patterns):
indices = rbind(
cbind(1, 4:7),
cbind(2, 3:8),
cbind(3, 2:9),
cbind(4, 1:9),
cbind(5, 1:9),
cbind(6, 2:9),
cbind(7, 3:8),
cbind(8, 4:7))
### same for 30-2 patterns:
indices30 = rbind(
cbind(1, 4:7),
cbind(2, 3:8),
cbind(3, 2:9),
cbind(4, 1:10),
cbind(5, 1:10),
cbind(6, 1:10),
cbind(7, 1:10),
cbind(8, 2:9),
cbind(9, 3:8),
cbind(10, 4:7))
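# Sanity check (illustrative, not part of the package source): the index
# tables cover 54 grid positions for 24-2 and 76 for 30-2, i.e. the 52 and
# 74 tested locations plus the two blind-spot positions.
# nrow(indices)    # 54
# nrow(indices30)  # 76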
#===========================================================================#
### Value plotting function:
#' Value plotting function for 24-2 or 30-2 visual field measurement:
#'
#' \code{plotTDvalues} plots the following 24-2 or 30-2 visual field measurements: sensitivity, TD, and PD.
#' @param tds a vector containing sensitivity/TD/PD measurements. For 24-2 VF, \code{tds} should have 52 or 54 elements. For 30-2 VF, \code{tds} should have 74 or 76 elements.
#' @param cex.tds a numeric variable for label size (default: 1).
#' @param textcolor a function that defines the label color.
#' @param show.lines a logical variable indicating whether to show the horizontal and vertical lines.
#' @param ... other variables to be added.
#' @return value plot for sensitivity, TD and PD input.
#' @examples
#' data(vfseries)
#' tds = t(vfseries[1, grepl('^s[0-9]+', colnames(vfseries))])
#' plotTDvalues(tds)
#' title(main = "Sensitivity", line = 3)
#' tds = t(vfseries[1, grepl('^td[0-9]+', colnames(vfseries))])
#' plotTDvalues(tds)
#' title(main = "Total Dviation", line = 3)
#' tds = t(vfseries[1, grepl('^pd[0-9]+', colnames(vfseries))])
#' plotTDvalues(tds)
#' title(main = "Pattern Dviation", line = 3)
#' @importFrom grDevices colorRampPalette
#' @importFrom graphics abline axis image layout par plot points rect text
#' @export
plotTDvalues <- function(tds, cex.tds=1, textcolor=function(x) "black", show.lines=T, ...)
# plot TDs as numeric values;
# works for both 24-2 and 30-2
{
tp = switch(paste(length(tds)),
"52" = c(tds[1:25], NA, tds[26:33], NA, tds[34:52]),
"74" = c(tds[1:35], NA, tds[36:44], NA, tds[45:74]),
tds)
tp[ifelse(length(tp)==54, 26, 36)] = NA
# tp[ifelse(length(tp)==54, 35, 46)] = NA
tp[ifelse(length(tp)==76, 35, 46)] = NA # edited by Dian, 2019-5-21
inds = if(length(tp)== 54) indices else indices30
op = par(mar=c(2, 1, 2, 2)+0.1)
plot(
inds[,2],
-inds[,1],
xlim=c(1, ifelse(length(tp)==54, 9.2, 10.2)),
pch='.',
type='n',
xaxt='n', yaxt='n',
xlab="", ylab="",
bty='n',
...)
if(show.lines)
{
abline(h=ifelse(length(tp)==54, -4.5, -5.5))
abline(v=5.5)
}
for(i in 1:length(tp))
{
x = inds[i,2]
y = -inds[i ,1]
if(!is.na(tp[i]))
text(x, y, tp[i], cex=cex.tds, col=textcolor(tp[i]))
}
par(op)
}
#===========================================================================#
### Value plotting function:
#' Value plotting function for 24-2 or 30-2 visual field measurement:
#'
#' \code{plotTdProbabilities} plots the following 24-2 or 30-2 visual field measurements: TD probs and PD probs.
#' @param tdprob a vector containing TD prob/PD prob measurements. For 24-2 VF, \code{tdprob} should have 52 or 54 elements. For 30-2 VF, \code{tdprob} should have 74 or 76 elements.
#' @param cex a numeric variable for label size (default: 2).
#' @param rectangle.color a string variable that defines the label color (default: "black").
#' @param rectangle.width a numeric variable that defines the label width (default: 0.16).
#' @param margins a vector that defines the plot margins (default: c(2, 1, 2, 2)+0.1).
#' @param ... other variables to be added.
#' @return value plot for TD prob and PD prob input.
#' @examples
#' data(vfseries)
#' tdprob = t(vfseries[1, grepl('^tdp[0-9]+', colnames(vfseries))])
#' plotTdProbabilities(tdprob)
#' title(main = "Total Deviation Probability", line = 3)
#' tdprob = t(vfseries[1, grepl('^pdp[0-9]+', colnames(vfseries))])
#' plotTdProbabilities(tdprob)
#' title(main = "Pattern Deviation Probability", line = 3)
#' @importFrom grDevices colorRampPalette
#' @importFrom graphics abline axis image layout par plot points rect text
#' @export
#'
plotTdProbabilities <- function(
tdprob,
cex=2,
rectangle.color = "black",
rectangle.width = 0.16,
margins = c(2, 1, 2, 2)+0.1,
...)
# works for both 24-2 and 30-2
{
tp = switch(paste(length(tdprob)),
"52" = c(tdprob[1:25], NA, tdprob[26:33], NA, tdprob[34:52]),
"74" = c(tdprob[1:35], NA, tdprob[36:44], NA, tdprob[45:74]),
tdprob)
tp[ifelse(length(tp)==54, 26, 36)] = NA
tp[ifelse(length(tp)==54, 35, 46)] = NA
inds = if(length(tp)== 54) indices else indices30
op = par(mar=margins)
plot(
inds[,2],
-inds[,1],
pch='.',
type='n',
xaxt='n', yaxt='n',
xlab="", ylab="",
bty='n',
...)
abline(h=ifelse(length(tp)==54, -4.5, -5.5))
abline(v=5.5)
plot005 <- function(x, y)
{
d = 0.08
points(c(x-d, x-d, x+d, x+d), c(y-d, y+d, y-d, y+d), pch='.', cex=cex)
}
plot002 <- function(x, y)
{
d=0.04
points(
x + d*c(-2, 0, 2, -3, -1, -2, 0, -3, 1, -2, 2, -3, -1, 1, 0, 2, 1),
y + d*c(-4, -4, -4, -3, -3, -2, -2, -1, -1, 0, 0, 1, 1, 1, 2, 2, 3),
pch='.',
cex=cex)
}
plot001 <- function(x, y)
{
d=0.04
points(
x + d*c(-3, -2, 0, 1, 2, -3, -2, -1, 1, 3, -1:3, -1, 0, 2, 3, -3, -2, 0, 1, -2:3, -3:-1, 1, -2, 1),
y + d*c(-4, -4, -4, -4, -4, -3, -3, -3, -3, -3, rep(-2, 5), rep(-1, 4), rep(0, 4), rep(1,6), rep(2, 4), 3, 3),
pch='.',
cex=1.5*cex)
}
plot0005 <- function(x, y)
{
d = rectangle.width
rect(x-d, y-d, x+d, y+d, col=rectangle.color, border=NA)
}
for(i in 1:length(tp))
{
x = inds[i,2]
y = -inds[i ,1]
if(!is.na(tp[i]))
switch(paste(tp[i]),
"0.05" = plot005(x, y),
"0.02" = plot002(x, y),
"0.01" = plot001(x, y),
"0.005" = plot0005(x, y),
points(x, y, pch='.', cex=cex))
}
par(op)
}
#===========================================================================#
### Single plotting function:
#' Single plotting function for one 24-2 or 30-2 visual field measurement:
#'
#' \code{plotfield.normalized} plots the following 24-2 or 30-2 visual field measurements: sensitivity, TD, TD prob, PD, and PD prob.
#' @param eigenfields a matrix whose columns contain sensitivity/TD/PD measurements. For 24-2 VF, a column should have 52 or 54 elements; for 30-2 VF, 74 or 76 elements.
#' @param component Number of components to be plotted (default: 1).
#' @param zmin minimum value of the color scale (default: auto defined).
#' @param zmax maximum value of the color scale (default: auto defined).
#' @param color.pal an object that defines color scale theme (default: colorRampPalette(c("red", "white", "blue"), space = "Lab")(256)).
#' @param show.colorbar a logic value to show colorbar (default: TRUE).
#' @param topleftannotation a string annotation shown on the top left side of the plot (default: NULL).
#' @param bottomleftannotation a string annotation shown on the bottom left side of the plot (default: NULL).
#' @param labelcex a numeric variable for label size (default: 2).
#' @param ... other variables to be added.
#' @return heatmap for sensitivity, TD and PD input
#' @examples
#' data(vfseries)
#' eigenfields = t(vfseries[1, grepl('^s[0-9]+', colnames(vfseries))])
#' plotfield.normalized(eigenfields)
#' title(main = "Sensitivity", line = 3)
#' eigenfields = t(vfseries[1, grepl('^td[0-9]+', colnames(vfseries))])
#' plotfield.normalized(eigenfields)
#' title(main = "Total Deviation", line = 3)
#' eigenfields = t(vfseries[1, grepl('^pd[0-9]+', colnames(vfseries))])
#' plotfield.normalized(eigenfields)
#' title(main = "Pattern Deviation", line = 3)
#' @importFrom grDevices colorRampPalette
#' @importFrom graphics abline axis image layout par plot points rect text
#' @export
plotfield.normalized <- function(
eigenfields,
component = 1,
zmin=-max(abs(c(min(eigenfields), max(eigenfields)))),
zmax=max(abs(c(min(eigenfields), max(eigenfields)))),
color.pal=colorRampPalette(c("red", "white", "blue"), space = "Lab")(256),
show.colorbar = TRUE,
topleftannotation = NULL,
bottomleftannotation = NULL,
labelcex = 2,
...)
{
v = eigenfields[, component]
include.blindspot = length(v) == 54 # if locations of blind spot are included
m = matrix(rep(NA, 8*9), ncol=8)
i.v = 1:length(v)
i.mat = if(include.blindspot) i.v else c(i.v[c(-26, -35)], 53, 54)
# rotate the matrix 90 degree counter-clockwise (for function image()):
for(i in i.v)
m[indices[i.mat[i], 2], 9-indices[i.mat[i], 1]] = v[i]
op = par(mar=c(2, 1, 2, 2)+0.1)
image(
y = 1:8,
z = m,
zlim=c(zmin, zmax),
ylim=c(ifelse(show.colorbar, -0.5, 0), 8.5),
col=color.pal,
axes=FALSE,
xlab="", ylab="",
...)
# mark blind spot:
if(!include.blindspot)
rect(7.3/9, 3.5, 8.4/9, 5.48, col='gray')
# annotate:
if(!is.null(topleftannotation))
text(-0.05, 7.9, labels=topleftannotation, cex=labelcex, pos=4)
if(!is.null(bottomleftannotation))
text(-0.05, 0.9, labels=bottomleftannotation, cex=labelcex, pos=4)
# color bar:
if(show.colorbar)
{
for(k in 1:256)
{
xleft = k/256-0.5/256
rect(xleft, -0.25, xleft+1/256, 0.25, col=color.pal[k], border=NA)
}
axis(1, at=c(0, 0.5, 1), labels=round(c(zmin, (zmin+zmax)/2, zmax), 3))
}
par(op)
}
#===========================================================================#
### General plotting function:
#' General plotting function for multiple 24-2 or 30-2 visual field measurements together:
#'
#' \code{plotComponentMatrix} plots the following 24-2 or 30-2 visual field measurements: sensitivity, TD, TD prob, PD, and PD prob.
#' @param componentmatrix a matrix or data frame whose columns represent different eyes and whose rows are the VF measurements of the same type (sensitivity, TD, TD prob, PD, or PD prob).
#' @param ncomp a numeric variable defines the number of components to be plotted (default: all).
#' @param plot.ncols a numeric variable defines the number of columns to be plotted (default: 5).
#' @param plot.nrows a numeric variable defines the number of rows to be plotted (default: NULL (automatically calculated)).
#' @param plot.annot.topleft.function a function(i) that is given to any subplot i to create its top left annotation.
#' @param plot.annot.bottomleft.function a function(i) that is given to any subplot i to create its bottom left annotation (default: returns NULL).
#' @param globaltitle a string for global title (default: k = ncomp; set to NULL to suppress global title).
#' @param globalannotright a string annotation to the right of the global title (default: NULL).
#' @param zmin minimum value of the color scale (default: auto defined).
#' @param zmax maximum value of the color scale (default: auto defined).
#' @param color.pal an object that defines color scale theme (default: colorRampPalette(c("red", "white", "blue"), space = "Lab")(256)).
#' @param td.probabilities a logic variable indicates whether to plot TD probability symbols instead of TD colors (default: FALSE).
#' @param show.colorbar a logic variable indicates whether to show a global colorbar (default: !td.probabilities).
#' @param titleheight a numeric variable defines the height of the title relative to height of row one.
#' @param ... other variables to be added.
#' @return heatmap for sensitivity, TD and PD input. Value plot for TD prob and PD prob input.
#' @examples
#' data(vfseries)
#' componentmatrix = t(vfseries[1:10, grepl('^s[0-9]+', colnames(vfseries))])
#' globaltitle = paste("Sensitivities, k = ", ncol(componentmatrix), sep = '')
#' plotComponentMatrix(componentmatrix, globaltitle = globaltitle)
#' componentmatrix = t(vfseries[1:10, grepl('^td[0-9]+', colnames(vfseries))])
#' globaltitle = paste("TDs, k = ", ncol(componentmatrix), sep = '')
#' plotComponentMatrix(componentmatrix, globaltitle = globaltitle)
#' componentmatrix = t(vfseries[1:10, grepl('^pd[0-9]+', colnames(vfseries))])
#' globaltitle = paste("PDs, k = ", ncol(componentmatrix), sep = '')
#' plotComponentMatrix(componentmatrix, globaltitle = globaltitle)
#' componentmatrix = t(vfseries[1:10, grepl('^tdp[0-9]+', colnames(vfseries))])
#' globaltitle = paste("TD Probs, k = ", ncol(componentmatrix), sep = '')
#' plotComponentMatrix(componentmatrix, globaltitle = globaltitle, td.probabilities = TRUE)
#' componentmatrix = t(vfseries[1:10, grepl('^pdp[0-9]+', colnames(vfseries))])
#' globaltitle = paste("PD Probs, k = ", ncol(componentmatrix), sep = '')
#' plotComponentMatrix(componentmatrix, globaltitle = globaltitle, td.probabilities = TRUE)
#' @importFrom grDevices colorRampPalette
#' @importFrom graphics abline axis image layout par plot points rect text
#' @export
plotComponentMatrix <- function(
componentmatrix,
ncomp = ncol(componentmatrix),
plot.ncols = 5,
plot.nrows = NULL,
plot.annot.topleft.function = toString,
plot.annot.bottomleft.function = function(i) NULL,
globaltitle = sprintf("k = %i", ncol(componentmatrix)),
globalannotright = NULL,
zmin=-ceiling(max(abs(c(min(componentmatrix), max(componentmatrix))))),
zmax=-zmin,
color.pal=colorRampPalette(c("red", "white", "blue"), space = "Lab")(256),
td.probabilities = FALSE,
show.colorbar = !td.probabilities,
titleheight = 0.2,
...)
# plot all components (e.g. eigenfields, archetypes, etc.) together
# componentmatrix: matrix the columns of which contain the components
# ncomp: plot ncomp of these components (default: all)
# plot.ncols: plot components in matrix with plot.ncols columns
# plot.nrows: plot components in matrix with plot.ncols rows
# default: NULL (automatically calculated)
# plot.annot.topleft.function: function(i) that is given to any subplot i to create its
# top left annotation
# plot.annot.bottomleft.function: function(i) that is given to any subplot i to create its
# bottom left annotation (default: returns NULL)
# globaltitle: default: k = ncomp; set to NULL to suppress global title
# globalannotright: annotation to the right of the global title (default: NULL)
# zmin, zmax, color.pal: see plotfield.normalized
# td.probabilities: plot TD probability symbols instead of TD colors
# show.colorbar: show a global colorbar (default: !td.probabilities)
# titleheight: height of the title relative to height of one row
{
#op <- par(mfrow=c(ceiling(ncomp/plot.ncols), plot.ncols))
plotindex = ifelse(is.null(globaltitle), 1, 2)
nrows = ifelse(is.null(plot.nrows), ceiling(ncomp/plot.ncols), plot.nrows)
layoutmatrix = rbind(
rep(1, ifelse(is.null(globaltitle), 0, plot.ncols)),
matrix(
c(plotindex:(ncomp+plotindex-1), numeric(nrows * plot.ncols - ncomp)),
ncol=plot.ncols,
byrow=TRUE),
rep(ncomp + plotindex, ifelse(show.colorbar, plot.ncols, 0)))
layout(
layoutmatrix,
heights=c(
rep(titleheight, ifelse(is.null(globaltitle), 0, 1)),
rep(1, nrows),
rep(titleheight, ifelse(show.colorbar, 1, 0))))
if(!is.null(globaltitle))
{
op <- par(mar=numeric(4))
plot(c(0,1), c(0,1), type="n", axes=FALSE, ylab="", xlab="", xaxs="i")
text(0.5, 1, globaltitle, font=2, cex=3, pos=1)
if(!is.null(globalannotright))
text(0.95, 0.55, globalannotright, cex=1.8, pos=2)
par(op)
}
for(i in 1:ncomp)
{
if(td.probabilities)
plotTdProbabilities(componentmatrix[, i], ...)
else
plotfield.normalized(
componentmatrix,
i,
topleftannotation=plot.annot.topleft.function(i),
bottomleftannotation=plot.annot.bottomleft.function(i),
show.colorbar = FALSE,
zmin, zmax,
color.pal,
...)
}
if(show.colorbar)
{
op <- par(mar=c(2, 0, 0, 0))
plot(c(0,1), c(0,1), type="n", axes=FALSE, ylab="", xlab="")
for(k in 1:256)
{
xleft = k/256-1/256
rect(xleft, 0, xleft+1/256, 1, col=color.pal[k], border=NA)
}
axis(1, at=c(0, 0.5, 1), labels=round(c(zmin, (zmin+zmax)/2, zmax), 3), cex.axis=1.5)
par(op)
}
}
|
/scratch/gouwar.j/cran-all/cranData/vfprogression/R/fields_plot.R
|
######## Progression source code
#Dian 6/16/2017
#### AGIS VF scoring
# according to Fig. 1 in AGIS 2 (Gaasterland et al., 1994)
agis.vf.sectors <- c(
rep("upper", 10),
"nasal", rep("upper", 7),
"nasal", "nasal", rep("upper", 5), NA, "upper",
"nasal", "nasal", rep("lower", 5), NA, "lower",
"nasal", rep("lower", 7),
rep("lower", 10))
# neighboring VF locations within each of the three VF sectors,
# according to Fig. 1 in AGIS 2 (Gaasterland et al., 1994)
agis.neighbors <- list(
c(2, 5, 6, 7), c(1, 3, 6:8), c(2, 4, 7:9), c(3, 8:10),
c(1, 6, 12, 13), c(1, 2, 5, 7, 12:14), c(1:3, 6, 8, 13:15), c(2:4, 7, 9, 14:16), c(3, 4, 8, 10, 15:17), c(4, 9, 16:18),
c(19, 20), c(5, 6, 13, 21, 22), c(5:7, 12, 14, 21:23), c(6:8, 13, 15, 22:24), c(7:9, 14, 16, 23:25), c(8:10, 15, 17, 24, 25), c(9, 10, 16, 18, 25, 27), c(10, 17, 27),
c(11, 20, 28, 29), c(11, 19, 28, 29), c(12, 13, 22), c(12:14, 21, 23), c(13:15, 22, 24), c(14:16, 23, 25), c(15:17, 24), NA, c(17, 18),
c(19, 20, 29, 37), c(19, 20, 28, 37), c(31, 38, 39), c(30, 32, 38:40), c(31, 33, 39:41), c(32, 34, 40:42), c(33, 41:43), NA, c(43, 44),
c(28, 29), c(30, 31, 39, 45, 46), c(30:32, 38, 40, 45:47), c(31:33, 39, 41, 46:48), c(32:34, 40, 42, 47:49), c(33, 34, 41, 43, 48:50), c(34, 36, 42, 44, 49, 50), c(36, 43, 50),
c(38, 39, 46, 51), c(38:40, 45, 47, 51, 52), c(39:41, 46, 48, 51:53), c(40:42, 47, 49, 52:54), c(41:43, 48, 50, 53, 54), c(42:44, 49, 54),
c(45:47, 52), c(46:48, 51, 53), c(47:49, 52, 54), c(48:50, 53))
agis.is.abnormal <- function(vf)
# returns boolean vector of VF locations;
# TRUE: abnormal according to AGIS 2 (Gaasterland et al., 1994)
{
x = if(length(vf)<54) c(vf[1:25], NA, vf[26:33], NA, vf[34:52]) else vf
# according to Fig. 1 in AGIS 2 (Gaasterland et al., 1994):
criteria <- -c(
rep(9, 4),
rep(8, 8), rep(6, 4), 8, 8,
9, 8, rep(6, 5), NA, 8,
9, 7, rep(5, 5), NA, 7,
7, 7, rep(5, 4), rep(7, 12))
x <= criteria  # compare the padded 54-location vector, not the raw input
}
agis.clusters <- function(vf)
# return indices of clusters separately for upper and lower hemifields and nasal sector
{
abn <- agis.is.abnormal(vf)
clusterize <- function(clusterlist, unassigned)
{
if(length(unassigned) == 0)
clusterlist
else
{
if(length(clusterlist)==0)
clusterize(list(unassigned[1]), unassigned[-1])
else
{
cc = clusterlist[[length(clusterlist)]]
neighbors = unique(do.call(c, agis.neighbors[cc]))
newindices = unassigned %in% neighbors
newmembers = unassigned[newindices]
if(length(newmembers) == 0)
{
clusterlist[[length(clusterlist)+1]] = unassigned[1]
clusterize(clusterlist, unassigned[-1])
}
else
{
newunassigned = unassigned[!newindices]
clusterlist[[length(clusterlist)]] = sort(c(cc, newmembers))
clusterize(clusterlist, newunassigned)
}
}
}
}
upperind = which(agis.vf.sectors == "upper" & abn)
clusters.upper = clusterize(list(), upperind)
lowerind = which(agis.vf.sectors == "lower" & abn)
clusters.lower = clusterize(list(), lowerind)
nasalind = which(agis.vf.sectors == "nasal" & abn)
clusters.nasal = clusterize(list(), nasalind)
list(upper = clusters.upper, lower = clusters.lower, nasal = clusters.nasal)
}
agis.score <- function(tds)
# according to AGIS 2 (Gaasterland et al., 1994), p. 1448, and
# Katz (1999), p. 392
{
n = length(tds)
if(n<52)
stop("agis.score: too few elements in TD vector (MUST be 52 or 54)")
if(n>54)
{
# try to extract TDs from the data structure:
if("td1" %in% names(tds))
tds <- tds[grep("^td[0-9]+", names(tds))]
else
stop("agis.score: too many elements in TD vector (MUST be 52 or 54, or a data structure that contains names td1, ..., td54)")
}
vf <- if(length(tds)<54) c(tds[1:25], NA, tds[26:33], NA, tds[34:52]) else tds
cl <- agis.clusters(vf)
score = 0
# nasal:
if(length(cl$nasal) > 0)
{
if(length(cl$nasal) == 1 && length(cl$nasal[[1]]) < 3) # nasal step if it's restricted to one hemifield:
{
if(all(cl$nasal[[1]] %in% c(11, 19, 20)) || all(cl$nasal[[1]] %in% c(28, 29, 37)))
score = score+1
} else # "nasal defect"
score = score+ifelse(any(sapply(cl$nasal, length) > 2), 1, 0)
lessequal12 <- which(vf[which(agis.vf.sectors == "nasal")] <= -12)
if(length(lessequal12)>=4) score = score+1
}
# hemifields:
score.locations <- function(clusterlist)
{
score = 0
number.per.cluster <- sapply(clusterlist, length)
# only clusters >= 3:
greatereq3 <- which(number.per.cluster>=3)
if(length(greatereq3) > 0)
{
s = sum(number.per.cluster[greatereq3])
if(s>=3) score = score+1
if(s>=6) score = score+1
if(s>=13) score = score+1
if(s>=20) score = score+1
# add even more if half of the locations exceed a certain value:
loc3 <- vf[ do.call(c, clusterlist[greatereq3]) ]
l3h <- length(loc3)/2
addone <- function(criterion)
ifelse(length(which(loc3<=-criterion)) >= l3h, 1, 0)
score = score+addone(12)
score = score+addone(16)
score = score+addone(20)
score = score+addone(24)
score = score+addone(28)
}
score
}
score <- score + score.locations(cl$upper)
score <- score + score.locations(cl$lower)
score
}
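# Hedged illustration (added; not in the original source): agis.score()
# expects a 52- or 54-element TD vector. A field with no deviation scores 0;
# a connected cluster of deep upper-hemifield defects raises the score.
if (FALSE) {
  tds <- rep(0, 52)
  agis.score(tds)             # 0: no abnormal locations
  tds[c(1, 2, 5, 6, 7)] <- -15
  agis.score(tds)             # 2: one 5-point cluster, all at or below -12 dB
}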
#===========================================================================#
#### CIGTS VF scoring (Musch et al., 1999; Gillespie et al., 2003)
# neighboring VF locations within each hemifield,
# according to Gillespie et al. (2003), with 52 indices
cigts.neighbors <- list(
c(2, 5, 6, 7), c(1, 3, 6:8), c(2, 4, 7:9), c(3, 8:10),
c(1, 6, 11:13), c(1, 2, 5, 7, 12:14), c(1:3, 6, 8, 13:15), c(2:4, 7, 9, 14:16), c(3, 4, 8, 10, 15:17), c(4, 9, 16:18),
c(5, 12, 19:21), c(5, 6, 11, 13, 20:22), c(5:7, 12, 14, 21:23), c(6:8, 13, 15, 22:24), c(7:9, 14, 16, 23:25), c(8:10, 15, 17, 24, 25), c(9, 10, 16, 18, 25, 26), c(10, 17, 26),
c(11, 20), c(11, 12, 19, 21), c(11:13, 20, 22), c(12:14, 21, 23), c(13:15, 22, 24), c(14:16, 23, 25), c(15:17, 24), c(17, 18),
c(28, 35), c(27, 29, 35, 36), c(28, 30, 35:37), c(29, 31, 36:38), c(30, 32, 37:39), c(31, 33, 38:40), c(32, 39:41), c(41, 42),
c(27:29, 36, 43), c(28:30, 35, 37, 43, 44), c(29:31, 36, 38, 43:45), c(30:32, 37, 39, 44:46), c(31:33, 38, 40, 45:47), c(32, 33, 39, 41, 46:48), c(33, 34, 40, 42, 47, 48), c(34, 41, 48),
c(35:37, 44, 49), c(36:38, 43, 45, 49, 50), c(37:39, 44, 46, 49:51), c(38:40, 45, 47, 50:52), c(39:41, 46, 48, 51, 52), c(40:42, 47, 52),
c(43:45, 50), c(44:46, 49, 51), c(45:47, 50, 52), c(46:48, 51))
cigts.score <- function(tdprobs)
# CIGTS VF scoring (Gillespie et al., 2003)
# tdprobs: vector of length 52 representing the TD probabilities (between 0.005 and 1)
# example from Gillespie:
# tdprobs = c(0.05, 0.02, 0.05, rep(1,8), 0.05, 0.02, rep(1,7), 0.01, 0.005, rep(1,6), 0.02, rep(1,17), 0.01, rep(1,5))
# cigts.score(tdprobs) -> 0.7692308
{
n = length(tdprobs)
if(n<52)
stop("cigts.score: too few elements in TD prob vector (MUST be 52 or 54)")
if(n>54)
{
# try to extract TDs from the data structure:
if("tdp1" %in% names(tdprobs))
tdprobs <- tdprobs[grep("^tdp[0-9]+", names(tdprobs))]
else
stop("cigts.score: too many elements in TD prob vector (MUST be 52 or 54, or a data structure that contains names td1, ..., td54)")
}
if(n==54)
tdprobs <- tdprobs[-c(26,35)]
pweights <- ifelse(tdprobs==0.005, 4, ifelse(tdprobs==0.01, 3, ifelse(tdprobs==0.02, 2, ifelse(tdprobs==0.05, 1, 0))))
# calculate the weights relevant for scoring:
get.effective.weight <- function(weight, neighbors)
{
neighborweights = sort(pweights[neighbors], decreasing=T)
min(neighborweights[2], weight)
}
effective.weights <- mapply(get.effective.weight, pweights, cigts.neighbors)
sum(effective.weights)/10.4
}
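# Runnable version of the worked example quoted in the comments above
# (Gillespie et al., 2003); the expected score is about 0.769.
if (FALSE) {
  tdprobs <- c(0.05, 0.02, 0.05, rep(1, 8), 0.05, 0.02, rep(1, 7),
               0.01, 0.005, rep(1, 6), 0.02, rep(1, 17), 0.01, rep(1, 5))
  cigts.score(tdprobs)  # -> 0.7692308
}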
##### input quality check, cigts:
input.check.cigts <- function (measmatrix)
{
n = nrow(measmatrix)
if(n < 5)
stop("progression.cigts: at least 5 VFs required")
n.tdprobs = length(grep("^tdp[0-9]+", colnames(measmatrix)))
if(n.tdprobs < 52)
{stop("progression.cigts: too few elements in TD probability (tdp) vector (MUST be 52 or 54. The data structure should contain names 'tdp1', ..., 'tdp52' or until 'tdp54')")}
if(!("tdp1" %in% colnames(measmatrix)))
{stop("progression.cigts: TD probability (tdp) columns are misnamed (the data structure should contain names 'tdp1', ..., 'tdp52' or until 'tdp54')")}
tdprobs <- measmatrix[ , grep("^tdp[0-9]+", colnames(measmatrix))]
if(n.tdprobs == 54)
tdprobs <- tdprobs[ , -c(26, 35)]  # drop the two blind-spot locations
if(n.tdprobs > 54)
{stop("progression.cigts: too many elements in TD probability (tdp) vector (MUST be 52 or 54. The data structure should contain names 'tdp1', ..., 'tdp54')")}
return (as.matrix(tdprobs))
}
#===================================#
#base function, cigts
progression.cigts.base <- function(measmatrix)
# CIGTS VF progression (Musch et al., 1999)
# measmatrix: columns MUST contain the 52 TD probs and yearsfollowed,
# rows represent the single measurements
# returns "stable", "worsening", or "improving"
# note: If a VF series is temporarily improving and
# temporarily worsening, it is assumed to be "stable" overall
{
tdprobs = input.check.cigts(measmatrix)
# tdprobs = measmatrix[, grep("^tdp[0-9]+", colnames(measmatrix))]
cigts.scores = apply(tdprobs, 1, cigts.score)
baseline = mean(cigts.scores[1:2])
tl = rev(cigts.scores[-(1:2)] - baseline)
results = ifelse(tl >= 3, "worsening", ifelse(tl <= -3, "improving", "stable"))
final = unique(results[1:3])
if(length(final) == 1)
{
inter = results[-(1:3)]
# if ever a VF series is both "improving" and "worsening" for single VFs, we assume it to be stable overall
ifelse(any(inter != "stable" & inter != final), "stable", final)
} else "stable"
}
#' CIGTS VF progression
#'
#' \code{progression.cigts} returns the progression of visual field test based on 52 or 54 total deviation probabilities (tdp). CIGTS VF progression (Musch et al., 1999).
#' @param measmatrix is a data frame. MUST contain the following columns: 52/54 TD probs (column names MUST be 'tdp1' ~ 'tdp52' or 'tdp1' ~ 'tdp54'), 'yearsfollowed', and 'eyeid'. Rows represent the single measurements. The minimum measurements (rows) is 5.
#' @return "stable", "worsening", or "improving" of measurements in \code{measmatrix}. Note: If a VF series is temporarily improving and temporarily worsening, it is assumed to be "stable" overall
#' @references \url{http://www.aaojournal.org/article/S0161-6420(99)90147-1/abstract}
#' @examples
#' data(vf.cigts)
#' colnames(vf.cigts)
#' progression.cigts(vf.cigts)
#' progression.cigts(vf.cigts[vf.cigts$eyeid == 1,])
#' progression.cigts(vf.cigts[vf.cigts$eyeid == 2,])
#' @importFrom stats lm
#' @export
#'
progression.cigts <- function(measmatrix)
{
if(!("eyeid" %in% colnames(measmatrix)))
{
warning("progression.cigts: input does not contain column named 'eyeid'. Assuming that all measurements are from the same eye.")
measmatrix$eyeid <- 1
}
method = 'cigts'
do.call(
"rbind",
by(
measmatrix,
measmatrix$eyeid,
function(eye)
sapply(
method,
function(meth) do.call(paste("progression", meth, 'base', sep="."), list(eye))), #Dian, added 'base' 5.16.2019
simplify=F))
# progression.cigts.base (measmatrix)
}
#===========================================================================#
##### input quality check, schell2014:
input.check.schell2014 <- function (measmatrix)
{
if(nrow(measmatrix)<4)
stop("progression.schell2014: at least 4 VFs required")
if (!('md' %in% colnames(measmatrix))) stop("progression.schell2014: Column with name 'md' missing")
}
#===================================#
#base function, schell2014
progression.schell2014.base <- function(measmatrix)
# progression criterion after Schell et al. 2014
# which is essentially like CIGTS but with MD, and only
# one follow-up is enough to confirm progression.
# note: If a VF series is temporarily improving and
# temporarily worsening, it is assumed to be "stable" overall
{
input.check.schell2014(measmatrix)
mds = measmatrix[, "md"]
baseline = mean(mds[1:2])
tl = rev(mds[-(1:2)] - baseline)
results = ifelse(tl <= -3, "worsening", ifelse(tl >= 3, "improving", "stable"))
final = unique(results[1:2])
if(length(final) == 1)
{
inter = results[-(1:2)]
# if ever a VF series is both "improving" and "worsening" for single VFs, we assume it to be stable overall
ifelse(any(inter != "stable" & inter != final), "stable", final)
} else "stable"
}
##### Progression detection methods:
#' Schell 2014 VF progression
#'
#' \code{progression.schell2014} returns the progression criterion after Schell et al. 2014, which is essentially like CIGTS but with MD, and only one follow-up is enough to confirm progression.
#' @param measmatrix is a data frame. MUST contain the following columns: 'md' (mean deviation) and 'eyeid'. Rows represent the single measurements. The minimum measurements (rows) is 4.
#' @return "stable", "worsening", or "improving" of measurements in \code{measmatrix}. Note: If a VF series is temporarily improving and temporarily worsening, it is assumed to be "stable" overall
#' @seealso \url{https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4495761/}
#' @examples
#' data(vf.schell2014)
#' colnames(vf.schell2014)
#' progression.schell2014(vf.schell2014)
#' progression.schell2014(vf.schell2014[vf.schell2014$eyeid == 1,])
#' progression.schell2014(vf.schell2014[vf.schell2014$eyeid == 2,])
#' @importFrom stats lm
#' @export
#'
progression.schell2014 <- function(measmatrix)
{
if(!("eyeid" %in% colnames(measmatrix)))
{
warning("progression.schell2014: input does not contain column named 'eyeid'. Assuming that all measurements are from the same eye.")
measmatrix$eyeid <- 1
}
method = 'schell2014'
do.call(
"rbind",
by(
measmatrix,
measmatrix$eyeid,
function(eye)
sapply(
method,
function(meth) do.call(paste("progression", meth, 'base', sep="."), list(eye))), #Dian, added 'base' 5.16.2019
simplify=F))
}
#===========================================================================#
### Pointwise Linear Regression (PLR) progression detection methods:
plr.two.omit <- function(timepoints, tds)
# Pointwise Linear Regression (PLR) progression detection method
# according to Gardiner & Crabb (2002);
# timepoints: measurement time, in years (either age or yearsfollowed)
# tds: vector of TDs at a single location over time
# returns "stable", "worsening", or "improving"
{
n = length(timepoints)
if(n<3) stop("plr.two.omit: at least 3 measurements required")
standard.crit <- function(tpts, tdvals)
{
m = lm(tdvals ~ tpts)
slope = m$coefficients[2]
p = (summary(m))$coefficients[2,4]
ifelse(abs(slope)>=1 && p <= 0.01, ifelse(slope<0, "worsening", "improving"), "stable")
}
#print(tds[-(n-1)])
r1 = standard.crit(timepoints[-n], tds[-n])
r2 = standard.crit(timepoints[-(n-1)], tds[-(n-1)])
#cat("r1 =",r1, ", r2 =", r2, " \n")
ifelse(r1==r2, r1, "stable")
}
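# Hedged illustration (added; not in the original source): a clearly worsening
# series at a single location. The two-omit criterion fits the regression
# twice, once dropping the last visit and once the second-to-last, and only
# flags progression when both fits agree.
if (FALSE) {
  yrs <- 0:7
  tds <- -2 * yrs + rnorm(8, sd = 0.2)  # slope of about -2 dB/year
  plr.two.omit(yrs, tds)                # usually "worsening"
}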
##### input quality check, plr.nouri.2012:
input.check.plr.nouri.2012 <- function (measmatrix)
{
if (!('yearsfollowed' %in% colnames(measmatrix))) stop("progression.plr.nouri.2012: Column with name 'yearsfollowed' missing")
else {
n = length(measmatrix[, "yearsfollowed"])
if(n < 3)
stop("progression.plr.two.omit: at least 3 measurements required")
}
n.tds = length(grep("^td[0-9]+", colnames(measmatrix)))
if(n.tds < 52)
{stop("progression.plr.nouri.2012: too few elements in TD vector (MUST be 52 or 54. The data structure should contain names 'td1', ..., 'td52' or until 'td54')")}
if(!("td1" %in% colnames(measmatrix)))
{stop("progression.plr.nouri.2012: TD columns are misnamed (the data structure should contain names 'td1', ..., 'td52' or until 'td54')")}
tds <- measmatrix[ , grep("^td[0-9]+", colnames(measmatrix))]
if(n.tds == 54)
tds <- tds[ , -c(26, 35)]  # drop the two blind-spot locations
if(n.tds > 54)
{stop("progression.plr.nouri.2012: too many elements in TD vector (MUST be 52 or 54. The data structure should contain names 'td1', ..., 'td54')")}
return (as.matrix(tds))
}
#===================================#
#base function, plr.nouri.2012
progression.plr.nouri.2012.base <- function(measmatrix)
# Pointwise Linear Regression (PLR) progression detection method
# according to Nouri-Mahdavi et al. (2012);
# measmatrix: columns MUST contain the 52 TDs and yearsfollowed,
# rows represent the single measurements
# returns "stable", "worsening", or "improving"
{
# tds = measmatrix[, grep("^td[0-9]+", colnames(measmatrix))]
tds = input.check.plr.nouri.2012(measmatrix)
results = apply(
tds,
2,
function(v) plr.two.omit(measmatrix[, "yearsfollowed"], v))
worsening = sum(results=="worsening")
improving = sum(results=="improving")
#cat(improving, ",", worsening, "\n")
ifelse(
worsening >= improving + 3,
"worsening",
ifelse(improving >= worsening + 3, "improving", "stable"))
}
##### Progression detection methods:
#' Nouri-Mahdavi 2012 VF progression
#'
#' \code{progression.plr.nouri.2012} returns the progression criterion, using Pointwise Linear Regression (PLR) progression detection method according to Nouri-Mahdavi et al. (2012).
#' @param measmatrix is a data frame. MUST contain the following columns: 52/54 TD (column names MUST be 'td1' ~ 'td52' or 'td1' ~ 'td54'), 'yearsfollowed', and 'eyeid'. Rows represent the single measurements. The minimum measurements (rows) is 3.
#' @return "stable", "worsening", or "improving" of measurements in \code{measmatrix}
#' @seealso \url{https://www.ncbi.nlm.nih.gov/pubmed/22427560/}
#' @examples
#' data(vf.plr.nouri.2012)
#' colnames(vf.plr.nouri.2012)
#' progression.plr.nouri.2012(vf.plr.nouri.2012)
#' progression.plr.nouri.2012(vf.plr.nouri.2012[vf.plr.nouri.2012$eyeid == 1,])
#' progression.plr.nouri.2012(vf.plr.nouri.2012[vf.plr.nouri.2012$eyeid == 2,])
#' @importFrom stats lm
#' @export
progression.plr.nouri.2012 <- function(measmatrix)
{
if(!("eyeid" %in% colnames(measmatrix)))
{
warning("progression.vfi: input does not contain column named 'eyeid'. Assuming that all measurements are from the same eye.")
measmatrix$eyeid <- 1
}
method = 'plr.nouri.2012'
do.call(
"rbind",
by(
measmatrix,
measmatrix$eyeid,
function(eye)
sapply(
method,
function(meth) do.call(paste("progression", meth, 'base', sep="."), list(eye))), #Dian, added 'base' 5.16.2019
simplify=F))
}
#===========================================================================#
##### input quality check, vfi:
input.check.vfi <- function (measmatrix)
{
if (!('yearsfollowed' %in% colnames(measmatrix))) stop("progression.vfi: Column with name 'yearsfollowed' missing")
if (!('vfi' %in% colnames(measmatrix))) stop("progression.vfi: Column with name 'vfi' missing")
n = length(measmatrix[, "yearsfollowed"])
if(n < 3)
stop("progression.vfi: at least 3 measurements required")
}
#===================================#
#base function, vfi
progression.vfi.base <- function(measmatrix)
# progression according to VFI (significant slope, p<=0.05)
# used in Aptel et al. (2015);
# measmatrix: rows contain the single measurements; the vfi and
# yearsfollowed columns are extracted from it
# returns "stable", "worsening", or "improving"
{
input.check.vfi(measmatrix)
yearsfollowed = measmatrix[, "yearsfollowed"]
vfis = measmatrix[, "vfi"]
m = lm(vfis ~ yearsfollowed)
slope = m$coefficients[2]
pval = (summary(m))$coefficients[2,4]
ifelse(!is.na(pval) && pval <= 0.05, ifelse(slope<0, "worsening", "improving"), "stable")
}
##### Progression detection methods:
#' progression according to VFI (significant slope, p<=0.05)
#'
#' \code{progression.vfi} returns the progression criterion used in Aptel et al. (2015).
#' @param measmatrix is a data frame. MUST contain the following columns: 'vfi' (visual field index), 'yearsfollowed', and 'eyeid'. Rows represent the single measurements. The minimum measurements (rows) is 3.
#' @return "stable", "worsening", or "improving" of measurements in \code{measmatrix}
#' @seealso \url{https://www.ncbi.nlm.nih.gov/pubmed/26095771/}
#' @examples
#' data(vf.vfi)
#' colnames(vf.vfi)
#' progression.vfi(vf.vfi)
#' progression.vfi(vf.vfi[vf.vfi$eyeid == 1,])
#' progression.vfi(vf.vfi[vf.vfi$eyeid == 2,])
#' @importFrom stats lm
#' @export
progression.vfi <- function(measmatrix)
{
if(!("eyeid" %in% colnames(measmatrix)))
{
warning("progression.vfi: input does not contain column named 'eyeid'. Assuming that all measurements are from the same eye.")
measmatrix$eyeid <- 1
}
method = 'vfi'
do.call(
"rbind",
by(
measmatrix,
measmatrix$eyeid,
function(eye)
sapply(
method,
function(meth) do.call(paste("progression", meth, 'base', sep="."), list(eye))), #Dian, added 'base' 5.16.2019
simplify=F))
}
#===========================================================================#
### general progression function:
#' general progression function
#'
#' \code{progression} returns the progression criterion with four methods. plr.nouri.2012, vfi, schell2014, cigts
#' @param vfseries is a data frame. MUST contain the following columns: 'yearsfollowed' and 'eyeid'. Rows represent the single measurements. Other requirements, such as the minimum number of measurements (rows) and the necessary VF measurements, can be found in each progression method's documentation.
#' @param method one or more of: plr.nouri.2012, vfi, schell2014, cigts. Default is all four methods.
#' @return "stable", "worsening", or "improving" of measurements in \code{measmatrix}
#' @seealso \url{https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4495761/}
#' @examples
#' data(vfseries)
#' progression(vfseries)
#' progression(vfseries[vfseries$eyeid == 1,])
#' progression(vfseries[vfseries$eyeid == 2,])
#' progression(vfseries, method=c("cigts"))
#' @export
progression <- function(vfseries, method=c("plr.nouri.2012", "vfi", "schell2014", "cigts"))
{
if(is.matrix(vfseries))
vfseries <- as.data.frame(vfseries)
if(!is.data.frame(vfseries))
stop("progression: first argument MUST be matrix or data frame.")
if(!("yearsfollowed" %in% colnames(vfseries)))
{
if("age" %in% colnames(vfseries))
vfseries$yearsfollowed <- vfseries$age - vfseries$age[1]
else
stop("progression: first argument does not contain columns named yearsfollowed or age.")
}
if(!("eyeid" %in% colnames(vfseries)))
{
warning("progression: first argument does not contain column named eyeid. Assuming that all measurements are from the same eye.")
vfseries$eyeid <- 1
}
do.call(
"rbind",
by(
vfseries,
vfseries$eyeid,
function(eye)
sapply(
method,
function(meth) do.call(paste("progression", meth, 'base', sep="."), list(eye))), #Dian, added 'base' 5.16.2019
simplify=F))
}
linear.md <- function(tdvec, capped=FALSE)
# calculate a "more linear" version of MD from TDs
# by linearizing TDs, as described in Gardiner et al. (2014, p. 545)
# capped: all TDs are capped at zero
{
if(capped)
tdvec <- sapply(tdvec, function(x) min(x, 0))
mean(10**(tdvec/10))
}
#===========================================================================#
# source file: /scratch/gouwar.j/cran-all/cranData/vfprogression/R/progression_sources.r
#===========================================================================#
#' Perform MAVB after fitting vglmer
#'
#' @description Given a model estimated using \code{vglmer}, this function
#' performs marginally augmented variational Bayes (MAVB) to improve the
#' approximation quality.
#'
#' @details This function returns the improved estimates of the
#' \emph{parameters}. To use MAVB when generating predictions, one should use
#' \link{predict_MAVB}. At present, MAVB is only enabled for binomial models.
#'
#' @return This function returns a matrix with \code{samples} rows and columns
#' for each fixed and random effect.
#'
#' @param object Model fit using \code{vglmer}.
#' @param samples Number of samples to draw.
#' @param var_px Variance of working prior for marginal augmentation. Default
#' (\code{Inf}) is a flat, improper, prior.
#' @param verbose Show progress in drawing samples.
#' @import CholWishart
#' @importFrom mvtnorm rmvnorm
#'
#' @references
#' Goplerud, Max. 2022a. "Fast and Accurate Estimation of Non-Nested Binomial
#' Hierarchical Models Using Variational Inference." \emph{Bayesian Analysis}. 17(2):
#' 623-650.
#' @export
MAVB <- function(object, samples, verbose = FALSE, var_px = Inf) {
if (!inherits(object, "vglmer")) {
stop("Must provide object from vglmer")
}
if (object$family != "binomial") {
stop("MAVB only implemented for binomial at present.")
}
M_prime <- object$internal_parameters$MAVB_parameters$M_prime
M_prime_one <- object$internal_parameters$MAVB_parameters$M_prime_one
M_mu_to_beta <- object$internal_parameters$MAVB_parameters$M_mu_to_beta
B_j <- object$internal_parameters$MAVB_parameters$B_j
if (!isDiagonal(B_j)){
stop('MAVB not set up for non-diagonal mean expansion; do all REs have a corresponding FE?')
}else{
if (!isTRUE(all.equal(B_j@x, rep(1, nrow(B_j))))){
stop('B_j is diagonal but not identity matrix; do all REs have a corresponding FE?')
}
}
d_j <- object$internal_parameters$MAVB_parameters$d_j
g_j <- object$internal_parameters$MAVB_parameters$g_j
outer_alpha_RE_positions <- object$internal_parameters$MAVB_parameters$outer_alpha_RE_positions
factorization_method <- object$control$factorization_method
if (factorization_method == "weak") {
decomp_joint <- object$joint$decomp_var
joint_mean <- rbind(object$beta$mean, object$alpha$mean)
p.XZ <- ncol(decomp_joint)
p.X <- nrow(object$beta$mean)
} else {
decomp_varA <- object$alpha$decomp_var
decomp_varB <- object$beta$decomp_var
p.XZ <- ncol(decomp_varA) + ncol(decomp_varB)
p.X <- nrow(object$beta$mean)
p.Z <- nrow(object$alpha$mean)
}
MAVB_sims <- matrix(NA, nrow = samples, ncol = p.XZ)
regen_store <- MAVB_diff <- matrix(NA, nrow = samples, ncol = sum(d_j))
n_MAVB <- samples
alpha_mean <- object$alpha$mean
beta_mean <- object$beta$mean
sigma_df <- object$sigma$df
sigma_cov <- object$sigma$cov
all_sigma <- mapply(sigma_df, sigma_cov, SIMPLIFY = FALSE, FUN = function(i, j) {
rInvWishart(n = n_MAVB, df = i, Sigma = j)
})
for (it in 1:n_MAVB) {
if (it %% 1000 == 0 & verbose) {
message(".", appendLF = F)
}
# Sim from VARIATIONAL approximation to posterior.
if (factorization_method == "weak") {
sim_joint <- joint_mean + t(decomp_joint) %*% rnorm(p.XZ)
sim_beta <- sim_joint[1:p.X, , drop = F]
sim_a <- sim_joint[-1:-p.X, , drop = F]
} else {
sim_a <- alpha_mean + t(decomp_varA) %*% rnorm(p.Z)
sim_beta <- beta_mean + t(decomp_varB) %*% rnorm(p.X)
}
sim_sigma <- lapply(all_sigma, FUN = function(i) {
i[, , it]
})
# Working Prior
if (var_px == Inf) {
sim_px <- rep(0, sum(d_j))
} else {
sim_px <- rnorm(sum(d_j), 0, sd = sqrt(var_px))
}
# Transform t^{-1}_a(z) = w
sim_atilde <- sim_a + M_prime_one %*% sim_px
sim_btilde <- sim_beta - t(M_mu_to_beta) %*% sim_px
# Draw sim_px AGAIN from its full conditional
if (var_px == Inf) {
# Use the LIMITING transition.
var_redux <- as.matrix(bdiag(mapply(sim_sigma, g_j, SIMPLIFY = FALSE, FUN = function(S, g) {
S / g
})))
# Get the MEAN not the SUM
mean_redux <- t(M_prime) %*% sim_atilde
} else {
var_redux <- solve(diag(x = 1 / var_px, ncol = sum(d_j), nrow = sum(d_j)) + as.matrix(bdiag(mapply(sim_sigma, g_j, SIMPLIFY = FALSE, FUN = function(S, g) {
solve(S) * g
}))))
# Use the SUM
mean_redux <- var_redux %*% solve(bdiag(sim_sigma)) %*% t(M_prime_one) %*% sim_atilde
}
regen_px <- t(rmvnorm(1, mean_redux, var_redux))
regen_store[it, ] <- regen_px
MAVB_diff[it, ] <- regen_px - sim_px
final_atilde <- sim_atilde - M_prime_one %*% regen_px
final_btilde <- sim_btilde + t(M_mu_to_beta) %*% regen_px
MAVB_sims[it, ] <- c(as.vector(final_btilde), as.vector(final_atilde))
}
colnames(MAVB_sims) <- c(rownames(object$beta$mean),
rownames(object$alpha$mean))
return(MAVB_sims)
}
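# Hedged usage sketch (added; assumes a binomial model already fit with
# vglmer(); the data frame `df` with outcome y, covariate x, and grouping
# factor g is hypothetical). MAVB() returns one row per sample with a column
# for every fixed and random effect, so summaries are column operations.
if (FALSE) {
  fit <- vglmer(y ~ x + (1 | g), data = df, family = "binomial")
  draws <- MAVB(fit, samples = 1000)
  colMeans(draws)  # MAVB-improved posterior means
}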
#' @import lme4
get_RE_groups <- function(formula, data) {
if (inherits(formula, 'formula')){
bars <- findbars(formula)
}else{
bars <- formula
}
if (is.null(bars)){# Usually if only splines used, then NA.
return(list(factor = NA, design = NA))
}
barnames <- utils::getFromNamespace('barnames', 'lme4')
names(bars) <- barnames(bars)
fr <- data
blist <- lapply(bars, simple_blist, fr, drop.unused.levels = F, reorder.vars = FALSE)
blist <- lapply(blist, FUN=function(i){i[c('ff', 'mm')]})
ff <- lapply(blist, FUN=function(i){i$ff})
ff <- lapply(ff, FUN=function(i){match(i, levels(i))})
mm <- lapply(blist, FUN=function(i){i$mm})
return(list(factor = ff, design = mm))
}
#' @import lme4
#' @importFrom utils getFromNamespace
simple_blist <- function(x, frloc, drop.unused.levels = TRUE, reorder.vars = FALSE) {
frloc <- factorize(x, frloc)
makeFac <- utils::getFromNamespace('makeFac', 'lme4')
if (is.null(ff <- tryCatch(eval(substitute(makeFac(fac),
list(fac = x[[3]])), frloc), error = function(e) NULL)))
stop("couldn't evaluate grouping factor ", deparse(x[[3]]),
" within model frame:", " try adding grouping factor to data ",
"frame explicitly if possible", call. = FALSE)
if (all(is.na(ff)))
stop("Invalid grouping factor specification, ", deparse(x[[3]]),
call. = FALSE)
if (drop.unused.levels)
ff <- factor(ff, exclude = NA)
nl <- length(levels(ff))
mm <- model.matrix(eval(substitute(~foo, list(foo = x[[2]]))),
frloc)
if (reorder.vars) {
colSort <- utils::getFromNamespace("colSort", "lme4")
mm <- mm[colSort(colnames(mm)), ]
}
list(ff = ff, nl = nl, mm = mm, cnms = colnames(mm))
}
#' Variance of Rows or Columns of Matrices
#'
#' Base R implementation for variance. Analogue of rowMeans.
#' @name var_mat
#' @keywords internal
#' @param matrix Matrix of numeric inputs.
rowVar <- function(matrix) {
apply(matrix, MARGIN = 1, var)
}
#' @importFrom stats var
#' @rdname var_mat
colVar <- function(matrix) {
apply(matrix, MARGIN = 2, var)
}
#' Get samples from GLMER
#'
#' Order samples from glmer to match names from vglmer.
#'
#' @param glmer object fitted using glmer
#' @param samples number of samples to draw
#' @param ordering order of output
#' @keywords internal
#' @importFrom stats rnorm
custom_glmer_samples <- function(glmer, samples, ordering) {
fmt_glmer <- format_glmer(glmer)
glmer_samples <- mapply(fmt_glmer$mean, fmt_glmer$var, FUN = function(m, v) {
rnorm(samples, mean = m, sd = sqrt(v))
})
colnames(glmer_samples) <- fmt_glmer$name
glmer_samples <- glmer_samples[, match(ordering, colnames(glmer_samples))]
return(glmer_samples)
}
#' Draw samples from the variational distribution
#'
#' @description This function draws samples from the estimated variational
#' distributions. If using \code{MAVB} to improve the quality of the
#' approximating distribution, please use \link{MAVB} or \link{predict_MAVB}.
#' @param object Model fit using \code{vglmer}.
#' @param samples Number of samples to draw.
#' @param verbose Show progress in drawing samples.
#'
#' @return This function returns a matrix with \code{samples} rows and columns
#' for each fixed and random effect.
#'
#' @export
posterior_samples.vglmer <- function(object, samples, verbose = FALSE) {
if (!inherits(object, "vglmer")) {
stop("Must provide object from vglmer")
}
M_prime <- object$internal_parameters$MAVB_parameters$M_prime
M_prime_one <- object$internal_parameters$MAVB_parameters$M_prime_one
M_mu_to_beta <- object$internal_parameters$MAVB_parameters$M_mu_to_beta
d_j <- object$internal_parameters$MAVB_parameters$d_j
g_j <- object$internal_parameters$MAVB_parameters$g_j
outer_alpha_RE_positions <- object$internal_parameters$MAVB_parameters$outer_alpha_RE_positions
factorization_method <- object$control$factorization_method
if (factorization_method == "weak") {
decomp_joint <- object$joint$decomp_var
joint_mean <- rbind(object$beta$mean, object$alpha$mean)
p.XZ <- ncol(decomp_joint)
p.X <- nrow(object$beta$mean)
}
else {
decomp_varA <- object$alpha$decomp_var
decomp_varB <- object$beta$decomp_var
p.XZ <- ncol(decomp_varA) + ncol(decomp_varB)
p.X <- nrow(object$beta$mean)
p.Z <- nrow(object$alpha$mean)
}
post_sims <- matrix(NA, nrow = samples, ncol = p.XZ)
alpha_mean <- object$alpha$mean
beta_mean <- object$beta$mean
sigma_df <- object$sigma$df
sigma_cov <- object$sigma$cov
for (it in 1:samples) {
if (it%%1000 == 0 & verbose) {
message(".", appendLF = F)
}
if (factorization_method == "weak") {
sim_joint <- joint_mean + t(decomp_joint) %*% rnorm(p.XZ)
sim_beta <- sim_joint[1:p.X, , drop = F]
sim_a <- sim_joint[-1:-p.X, , drop = F]
}
else {
sim_a <- alpha_mean + t(decomp_varA) %*% rnorm(p.Z)
sim_beta <- beta_mean + t(decomp_varB) %*% rnorm(p.X)
}
post_sims[it, ] <- c(as.vector(sim_beta), as.vector(sim_a))
}
colnames(post_sims) <- c(rownames(object$beta$mean),
rownames(object$alpha$mean))
return(post_sims)
}
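# Hedged usage sketch (added; `df`, y, x, and g are hypothetical as above):
# unlike MAVB(), this draws directly from the fitted variational distribution
# with no marginal augmentation step.
if (FALSE) {
  fit <- vglmer(y ~ x + (1 | g), data = df, family = "binomial")
  sims <- posterior_samples.vglmer(fit, samples = 1000)
  apply(sims, 2, quantile, probs = c(0.025, 0.975))  # credible intervals
}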
#===========================================================================#
# source file: /scratch/gouwar.j/cran-all/cranData/vglmer/R/MAVB_functions.R
#===========================================================================#
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Linear Regression by Cholesky
#'
#' Do linear regression of form (X^T O X + P)^{-1} X^T y where O is omega, P is
#' precision.
#'
#' @keywords internal
#'
#' @param X Design Matrix
#' @param omega Polya-Gamma weights
#' @param prior_precision Prior Precision for Regression
#' @param y Outcome
#' @param save_chol Save cholesky factor
LinRegChol <- function(X, omega, prior_precision, y, save_chol = TRUE) {
.Call('_vglmer_LinRegChol', PACKAGE = 'vglmer', X, omega, prior_precision, y, save_chol)
}
calculate_expected_outer_alpha <- function(L, alpha_mu, re_position_list) {
.Call('_vglmer_calculate_expected_outer_alpha', PACKAGE = 'vglmer', L, alpha_mu, re_position_list)
}
unique_rows <- function(m) {
.Call('_vglmer_unique_rows', PACKAGE = 'vglmer', m)
}
prepare_Z_for_px <- function(Mmap) {
.Call('_vglmer_prepare_Z_for_px', PACKAGE = 'vglmer', Mmap)
}
chol_sparse <- function(X, omega, precision) {
.Call('_vglmer_chol_sparse', PACKAGE = 'vglmer', X, omega, precision)
}
cpp_zVz <- function(Z, V) {
.Call('_vglmer_cpp_zVz', PACKAGE = 'vglmer', Z, V)
}
vecR_ridge_general <- function(L, pg_mean, Z, M, mapping_J, d, start_z, diag_only) {
.Call('_vglmer_vecR_ridge_general', PACKAGE = 'vglmer', L, pg_mean, Z, M, mapping_J, d, start_z, diag_only)
}
vecR_design <- function(alpha_mu, Z, M, mapping_J, d, start_z) {
.Call('_vglmer_vecR_design', PACKAGE = 'vglmer', alpha_mu, Z, M, mapping_J, d, start_z)
}
vecR_fast_ridge <- function(X, omega, prior_precision, y, adjust_y) {
.Call('_vglmer_vecR_fast_ridge', PACKAGE = 'vglmer', X, omega, prior_precision, y, adjust_y)
}
vecR_ridge_new <- function(L, pg_mean, mapping_J, d, store_id, store_re_id, store_design, diag_only) {
.Call('_vglmer_vecR_ridge_new', PACKAGE = 'vglmer', L, pg_mean, mapping_J, d, store_id, store_re_id, store_design, diag_only)
}
#===========================================================================#
# source file: /scratch/gouwar.j/cran-all/cranData/vglmer/R/RcppExports.R
#===========================================================================#
update_rho <- function(XR, y, omega, prior_precision,
moments_sigma_alpha,
prior_sigma_alpha_nu, prior_sigma_alpha_phi,
vi_a_a_jp, vi_a_b_jp, vi_a_nu_jp,
vi_a_APRIOR_jp,
spline_REs, vi_beta_mean,
p.X, d_j, stationary_rho,
do_huangwand, offset,
px_it = NULL, init_rho = NULL,
method){
if (do_huangwand){
prior_weight <- vi_a_nu_jp + d_j - 1
diag_weight <- mapply(vi_a_a_jp, vi_a_b_jp, vi_a_nu_jp, SIMPLIFY = FALSE,
FUN = function(tilde.a, tilde.b, nu) {
Diagonal(x = tilde.a/tilde.b) * 2 * nu
})
}else{
diag_weight <- prior_sigma_alpha_phi
prior_weight <- prior_sigma_alpha_nu
}
ESigma <- lapply(moments_sigma_alpha[c(which(spline_REs), which(!spline_REs))], FUN=function(i){i$sigma.inv})
Phi <- diag_weight[c(which(spline_REs), which(!spline_REs))]
nu <- prior_weight[c(which(spline_REs), which(!spline_REs))]
y <- as.vector(y)
sum_ysq <- sum(omega %*% y)
tXy <- t(XR) %*% y
tXX <- t(XR) %*% omega %*% XR
if (offset != 0){ # Negative Binomial Offset
tXy <- tXy + offset * matrix(colSums(omega %*% XR))
}
prior_precision <- prior_precision  # force evaluation of the lazily supplied argument
rho_idx <- d_j[spline_REs]
rho_idx <- rho_idx * seq_len(length(rho_idx))
rho_idx <- c(rho_idx, rep(seq_len(sum(!spline_REs)), times = d_j[!spline_REs]^2) + sum(spline_REs))
if (method == 'numerical'){
null_rho <- c(rep(1, sum(spline_REs)), stationary_rho)
null_rho <- c(as.vector(vi_beta_mean), null_rho)
dim_rho <- c(rep(1, sum(spline_REs)), d_j[!spline_REs])
ctrl_opt <- list(fnscale = -1)
if (!is.null(px_it)){
ctrl_opt$maxit <- px_it
}
if (is.null(init_rho)){
init_rho <- null_rho
}
opt_rho <- optim(par = init_rho, fn = eval_rho,
gr = eval_grad_rho,
method = 'L-BFGS-B', control = ctrl_opt,
tXy = tXy, tXX = tXX,
ridge = prior_precision, rho_idx = rho_idx,
nu = nu, Phi = Phi, ESigma = ESigma, dim_rho = dim_rho, p.X = p.X
)
null_eval <- eval_rho(null_rho, tXy = tXy, tXX = tXX, ridge = prior_precision, rho_idx = rho_idx,
nu = nu, Phi = Phi, ESigma = ESigma, dim_rho = dim_rho, p.X = p.X)
if (opt_rho$value < null_eval){
warning('Optimization failed in parameter expansion.')
opt_rho$par <- null_rho
}
improvement <- opt_rho$value - null_eval
opt_rho <- opt_rho$par
names(opt_rho) <- NULL
opt_rho <- list(rho = opt_rho, improvement = improvement)
}else if (method == 'dynamic'){
vec_OSL_prior <- mapply(moments_sigma_alpha[!spline_REs],
diag_weight[!spline_REs],
prior_weight[!spline_REs],
SIMPLIFY = FALSE, FUN=function(moment_j, phi_j, nu_j){
as.vector(moment_j$sigma.inv %*% phi_j - nu_j * Diagonal(n = nrow(phi_j)))
})
vec_OSL_prior <- do.call('c', vec_OSL_prior)
if (sum(spline_REs)){
OSL_spline_prior <- unlist(mapply(moments_sigma_alpha[spline_REs],
diag_weight[spline_REs],
prior_weight[spline_REs],
SIMPLIFY = FALSE, FUN=function(moment_j, phi_j, nu_j){
as.vector(moment_j$sigma.inv %*% phi_j - nu_j * Diagonal(n = nrow(phi_j)))
}))
vec_OSL_prior <- matrix(c(rep(0, p.X), OSL_spline_prior, vec_OSL_prior))
}else{
vec_OSL_prior <- matrix(c(rep(0, p.X), vec_OSL_prior))
}
hw_a <- vi_a_a_jp[c(which(spline_REs), which(!spline_REs))]
A_prior <- vi_a_APRIOR_jp[c(which(spline_REs), which(!spline_REs))]
nu_prior <- vi_a_nu_jp[c(which(spline_REs), which(!spline_REs))]
sum_d <- sum(d_j)
null_rho <- c(rep(1, sum(spline_REs)), stationary_rho)
null_rho <- c(as.vector(vi_beta_mean), null_rho)
dim_rho <- c(rep(1, sum(spline_REs)), d_j[!spline_REs])
if (is.null(init_rho)){
init_rho <- null_rho
}
ctrl_opt <- list(fnscale = -1)
if (!is.null(px_it)){
ctrl_opt$maxit <- px_it
}
null_eval <- eval_profiled_rho(null_rho, tXy = tXy, tXX = tXX, ridge = prior_precision, rho_idx = rho_idx,
nu = nu, Phi = Phi, ESigma = ESigma, dim_rho = dim_rho, p.X = p.X,
sum_d = sum_d, hw_a = hw_a, A_prior = A_prior,
nu_prior = nu_prior)
OSL_rho <- vecR_fast_ridge(X = XR,
omega = omega, prior_precision = prior_precision, y = y,
adjust_y = as.vector(vec_OSL_prior))
OSL_eval <- eval_profiled_rho(rho = OSL_rho, tXy = tXy, tXX = tXX, ridge = prior_precision, rho_idx = rho_idx,
nu = nu, Phi = Phi, ESigma = ESigma, dim_rho = dim_rho, p.X = p.X, hw_a, sum_d = sum_d,
A_prior = A_prior, nu_prior = nu_prior)
OSL_improvement <- OSL_eval - null_eval
if (OSL_improvement > 0){
opt_rho <- OSL_rho
improvement <- OSL_improvement
# print('OSL')
# print(c(NA, improvement))
}else{
# print('max')
opt_rho <- tryCatch(optim(par = null_rho, fn = eval_profiled_rho,
gr = eval_grad_profiled_rho,
method = 'L-BFGS-B', control = ctrl_opt,
tXy = tXy, tXX = tXX,
ridge = prior_precision, rho_idx = rho_idx,
nu = nu, Phi = Phi, ESigma = ESigma, dim_rho = dim_rho, p.X = p.X,
sum_d = sum_d, hw_a = hw_a, A_prior = A_prior,
nu_prior = nu_prior
), error = function(e){NULL})
if (is.null(opt_rho)){
message('optimization failed; trying with BFGS instead of L-BFGS-B')
warning('optimization failed; trying with BFGS instead of L-BFGS-B')
opt_rho <- optim(par = null_rho, fn = eval_profiled_rho,
gr = eval_grad_profiled_rho,
method = 'BFGS', control = ctrl_opt,
tXy = tXy, tXX = tXX,
ridge = prior_precision, rho_idx = rho_idx,
nu = nu, Phi = Phi, ESigma = ESigma, dim_rho = dim_rho, p.X = p.X,
sum_d = sum_d, hw_a = hw_a, A_prior = A_prior,
nu_prior = nu_prior
)
}
improvement <- opt_rho$value - null_eval
# compare_improvement <- c(improvement, OSL_improvement)
# names(compare_improvement) <- c('max', 'OSL')
# print(compare_improvement)
# print(compare_improvement/compare_improvement['max'])
opt_rho <- opt_rho$par
}
if (improvement < 0){
warning('Optimization of parameter expansion failed')
opt_rho <- null_rho
}
raw_opt_rho <- opt_rho
if (p.X > 0){nonfe_rho <- opt_rho[-seq_len(p.X)]}else{nonfe_rho <- opt_rho}
Rmatrix <- mapply(split(nonfe_rho, rho_idx), dim_rho, SIMPLIFY = FALSE, FUN=function(i,d){matrix(i, nrow = d, ncol = d)})
opt_rho_hw <- mapply(Rmatrix, nu, hw_a, A_prior, ESigma, nu_prior, SIMPLIFY = FALSE,
FUN=function(R_j, nu_j, hw_a_j, A_j, ESigma.inv.j, nu_prior_j){
inv_R_j <- solve(R_j)
diag_meat <- diag(t(inv_R_j) %*% ESigma.inv.j %*% inv_R_j)
rho_hw_j <- nu_prior_j * diag_meat + 1/A_j^2
return(rho_hw_j)
})
names(opt_rho_hw) <- names(d_j)[c(which(spline_REs), which(!spline_REs))]
names(opt_rho) <- NULL
opt_rho <- list(hw = opt_rho_hw,
rho = opt_rho, improvement = improvement,
opt_par = raw_opt_rho)
}else if (method == 'profiled'){
ctrl_opt <- list(fnscale = -1)
if (!is.null(px_it)){
ctrl_opt$maxit <- px_it
}
hw_a <- vi_a_a_jp[c(which(spline_REs), which(!spline_REs))]
A_prior <- vi_a_APRIOR_jp[c(which(spline_REs), which(!spline_REs))]
nu_prior <- vi_a_nu_jp[c(which(spline_REs), which(!spline_REs))]
sum_d <- sum(d_j)
null_rho <- c(rep(1, sum(spline_REs)), stationary_rho)
null_rho <- c(as.vector(vi_beta_mean), null_rho)
dim_rho <- c(rep(1, sum(spline_REs)), d_j[!spline_REs])
if (is.null(init_rho)){
init_rho <- null_rho
}
opt_rho <- optim(par = null_rho, fn = eval_profiled_rho,
gr = eval_grad_profiled_rho,
method = 'L-BFGS-B', control = ctrl_opt,
tXy = tXy, tXX = tXX,
ridge = prior_precision, rho_idx = rho_idx,
nu = nu, Phi = Phi, ESigma = ESigma, dim_rho = dim_rho, p.X = p.X,
sum_d = sum_d, hw_a = hw_a, A_prior = A_prior,
nu_prior = nu_prior
)
null_eval <- eval_profiled_rho(null_rho, tXy = tXy, tXX = tXX, ridge = prior_precision, rho_idx = rho_idx,
nu = nu, Phi = Phi, ESigma = ESigma, dim_rho = dim_rho, p.X = p.X,
sum_d = sum_d, hw_a = hw_a, A_prior = A_prior,
nu_prior = nu_prior)
improvement <- opt_rho$value - null_eval
if (opt_rho$value < null_eval){
warning('Optimization of parameter expansion failed')
opt_rho$par <- null_rho
}
opt_rho <- raw_opt_rho <- opt_rho$par
if (p.X > 0){nonfe_rho <- opt_rho[-seq_len(p.X)]}else{nonfe_rho <- opt_rho}
Rmatrix <- mapply(split(nonfe_rho, rho_idx), dim_rho, SIMPLIFY = FALSE, FUN=function(i,d){matrix(i, nrow = d, ncol = d)})
opt_rho_hw <- mapply(Rmatrix, nu, hw_a, A_prior, ESigma, nu_prior, SIMPLIFY = FALSE,
FUN=function(R_j, nu_j, hw_a_j, A_j, ESigma.inv.j, nu_prior_j){
inv_R_j <- solve(R_j)
diag_meat <- diag(t(inv_R_j) %*% ESigma.inv.j %*% inv_R_j)
rho_hw_j <- nu_prior_j * diag_meat + 1/A_j^2
return(rho_hw_j)
})
names(opt_rho_hw) <- names(d_j)[c(which(spline_REs), which(!spline_REs))]
names(opt_rho) <- NULL
opt_rho <- list(hw = opt_rho_hw,
rho = opt_rho, improvement = improvement,
opt_par = raw_opt_rho)
}else if (method == 'OSL'){
null_rho <- c(rep(1, sum(spline_REs)), stationary_rho)
null_rho <- c(as.vector(vi_beta_mean), null_rho)
dim_rho <- c(rep(1, sum(spline_REs)), d_j[!spline_REs])
vec_OSL_prior <- mapply(moments_sigma_alpha[!spline_REs],
diag_weight[!spline_REs],
prior_weight[!spline_REs],
SIMPLIFY = FALSE, FUN=function(moment_j, phi_j, nu_j){
as.vector(moment_j$sigma.inv %*% phi_j - nu_j * Diagonal(n = nrow(phi_j)))
})
vec_OSL_prior <- do.call('c', vec_OSL_prior)
if (sum(spline_REs)){
OSL_spline_prior <- unlist(mapply(moments_sigma_alpha[spline_REs],
diag_weight[spline_REs],
prior_weight[spline_REs],
SIMPLIFY = FALSE, FUN=function(moment_j, phi_j, nu_j){
as.vector(moment_j$sigma.inv %*% phi_j - nu_j * Diagonal(n = nrow(phi_j)))
}))
vec_OSL_prior <- matrix(c(rep(0, p.X), OSL_spline_prior, vec_OSL_prior))
}else{
vec_OSL_prior <- matrix(c(rep(0, p.X), vec_OSL_prior))
}
OSL_rho <- vecR_fast_ridge(X = XR,
omega = omega, prior_precision = prior_precision, y = y,
adjust_y = as.vector(vec_OSL_prior))
if (do_huangwand){
sum_d <- sum(d_j)
hw_a <- vi_a_a_jp[c(which(spline_REs), which(!spline_REs))]
A_prior <- vi_a_APRIOR_jp[c(which(spline_REs), which(!spline_REs))]
nu_prior <- vi_a_nu_jp[c(which(spline_REs), which(!spline_REs))]
null_eval <- eval_profiled_rho(rho = null_rho, tXy = tXy, tXX = tXX, ridge = prior_precision, rho_idx = rho_idx,
nu = nu, Phi = Phi, ESigma = ESigma, dim_rho = dim_rho, p.X = p.X, hw_a, sum_d = sum_d,
A_prior = A_prior, nu_prior = nu_prior)
OSL_eval <- eval_profiled_rho(rho = OSL_rho, tXy = tXy, tXX = tXX, ridge = prior_precision, rho_idx = rho_idx,
nu = nu, Phi = Phi, ESigma = ESigma, dim_rho = dim_rho, p.X = p.X, hw_a, sum_d = sum_d,
A_prior = A_prior, nu_prior = nu_prior)
}else{
null_eval <- eval_rho(null_rho, tXy = tXy, tXX = tXX, ridge = prior_precision, rho_idx = rho_idx,
nu = nu, Phi = Phi, ESigma = ESigma, dim_rho = dim_rho, p.X = p.X)
OSL_eval <- eval_rho(OSL_rho, tXy = tXy, tXX = tXX, ridge = prior_precision, rho_idx = rho_idx,
nu = nu, Phi = Phi, ESigma = ESigma, dim_rho = dim_rho, p.X = p.X)
}
improvement <- OSL_eval - null_eval
if (improvement < 0){
OSL_rho <- null_rho
improvement <- NA
}
opt_rho <- OSL_rho
opt_rho <- list(rho = opt_rho, improvement = improvement)
}else{stop(sprintf("update_rho: unknown method '%s'", method))}
return(opt_rho)
}
eval_rho <- function(rho, tXy, tXX, ridge, rho_idx, nu, Phi, ESigma, dim_rho, p.X){
ssr <- t(rho) %*% tXy - 1/2 * t(rho) %*% tXX %*% rho
ridge <- -1/2 * as.numeric(t(rho) %*% ridge %*% rho)
if (p.X > 0){nonfe_rho <- rho[-seq_len(p.X)]}else{nonfe_rho <- rho}
Rmatrix <- mapply(split(nonfe_rho, rho_idx), dim_rho, SIMPLIFY = FALSE, FUN=function(i,d){matrix(i, nrow = d, ncol = d)})
prior <- sum(mapply(Rmatrix, nu, Phi, ESigma, FUN=function(R_j, nu_j, Phi_j, ESigma.inv.j){
inv_R_j <- solve(R_j)
out <- - nu_j * determinant(R_j)$modulus - 1/2 * sum(Matrix::diag(inv_R_j %*% Phi_j %*% t(inv_R_j) %*% ESigma.inv.j))
return(out)
}))
return(as.numeric( ssr + ridge + prior ) )
}
eval_grad_rho <- function(rho, tXy, tXX, ridge, rho_idx, nu, Phi, ESigma, dim_rho, p.X){
ssr <- tXy - tXX %*% rho
ridge <- - ridge %*% rho
if (p.X > 0){nonfe_rho <- rho[-seq_len(p.X)]}else{nonfe_rho <- rho}
Rmatrix <- mapply(split(nonfe_rho, rho_idx), dim_rho, SIMPLIFY = FALSE, FUN=function(i,d){matrix(i, nrow = d, ncol = d)})
prior <- mapply(Rmatrix, nu, Phi, ESigma, SIMPLIFY = FALSE, FUN=function(R_j, nu_j, Phi_j, ESigma.inv.j){
inv_R_j <- solve(R_j)
inv_Phi_j <- solve(Phi_j)
# meat <- inv_R_j %*% Phi_j %*% t(inv_R_j)
meat <- solve(t(R_j) %*% inv_Phi_j %*% R_j)
out <- as.vector(- nu_j * t(inv_R_j) + inv_Phi_j %*% R_j %*% meat %*% ESigma.inv.j %*% meat)
return(out)
})
prior <- c(rep(0, p.X), unlist(prior))
return(as.vector( ssr + ridge + prior ))
}
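# Minimal finite-difference checker (added for illustration): eval_grad_rho()
# should agree with a central-difference gradient of eval_rho() at any test
# point, given the same extra arguments (tXy, tXX, ridge, rho_idx, nu, Phi,
# ESigma, dim_rho, p.X).
numeric_grad <- function(fn, x, eps = 1e-6, ...) {
  sapply(seq_along(x), function(k) {
    e <- replace(numeric(length(x)), k, eps)
    (fn(x + e, ...) - fn(x - e, ...)) / (2 * eps)
  })
}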
eval_rho_hw <- function(rho, tXy, tXX, ridge, rho_idx, nu, Phi,
ESigma, dim_rho, p.X, dlist, sum_d, hw_a, A_prior, nu_prior){
rho_hw <- exp(rho[seq_len(sum_d)])
rho_hw <- split(rho_hw, dlist)
rho <- rho[-seq_len(sum_d)]
ssr <- t(rho) %*% tXy - 1/2 * t(rho) %*% tXX %*% rho
ridge <- -1/2 * as.numeric(t(rho) %*% ridge %*% rho)
if (p.X > 0){nonfe_rho <- rho[-seq_len(p.X)]}else{nonfe_rho <- rho}
Rmatrix <- mapply(split(nonfe_rho, rho_idx), dim_rho, SIMPLIFY = FALSE, FUN=function(i,d){matrix(i, nrow = d, ncol = d)})
prior_variance <- sum(mapply(Rmatrix, nu, hw_a, rho_hw, A_prior, ESigma, nu_prior,
FUN=function(R_j, nu_j, hw_a_j, rho_hw_j, A_j, ESigma.inv.j, nu_prior_j){
inv_R_j <- solve(R_j)
# Prior from Wishart
out <- - nu_j * determinant(R_j)$modulus - nu_j/2 * sum(log(rho_hw_j)) +
-1/2 * sum(Matrix::diag(inv_R_j %*% Diagonal(x = 2 * nu_prior_j * hw_a_j/rho_hw_j) %*% t(inv_R_j) %*% ESigma.inv.j))
# Prior from Inverse-Gamma and Entropy
out <- out + sum(-1/2 * log(rho_hw_j) - 1/A_j^2 * hw_a_j/rho_hw_j)
return(out)
}))
return(as.numeric( ssr + ridge + prior_variance ) )
}
eval_profiled_rho <- function(rho, tXy, tXX, ridge, rho_idx, nu, Phi,
ESigma, dim_rho, p.X, sum_d, hw_a, A_prior, nu_prior){
ssr <- t(rho) %*% tXy - 1/2 * t(rho) %*% tXX %*% rho
ridge <- -1/2 * as.numeric(t(rho) %*% ridge %*% rho)
if (p.X > 0){nonfe_rho <- rho[-seq_len(p.X)]}else{nonfe_rho <- rho}
Rmatrix <- mapply(split(nonfe_rho, rho_idx), dim_rho, SIMPLIFY = FALSE, FUN=function(i,d){matrix(i, nrow = d, ncol = d)})
prior_variance <- sum(mapply(Rmatrix, nu, hw_a, A_prior, ESigma, nu_prior,
FUN=function(R_j, nu_j, hw_a_j, A_j, ESigma.inv.j, nu_prior_j){
inv_R_j <- solve(R_j)
diag_meat <- diag(t(inv_R_j) %*% ESigma.inv.j %*% inv_R_j)
rho_hw_j <- nu_prior_j * diag_meat + 1/A_j^2
out <- -(nu_j + 1)/2 * sum(log(rho_hw_j)) +
- nu_j * determinant(R_j)$modulus +
sum(hw_a_j)
return(out)
}))
return(as.numeric( ssr + ridge + prior_variance ) )
}
eval_grad_profiled_rho <- function(rho, tXy, tXX, ridge, rho_idx, nu, Phi,
ESigma, dim_rho, p.X, sum_d, hw_a, A_prior, nu_prior){
ssr <- tXy - tXX %*% rho
ridge <- - ridge %*% rho
if (p.X > 0){nonfe_rho <- rho[-seq_len(p.X)]}else{nonfe_rho <- rho}
Rmatrix <- mapply(split(nonfe_rho, rho_idx), dim_rho, SIMPLIFY = FALSE, FUN=function(i,d){matrix(i, nrow = d, ncol = d)})
prior <- mapply(Rmatrix, nu, hw_a, A_prior, ESigma, nu_prior, SIMPLIFY = FALSE,
FUN=function(R_j, nu_j, hw_a_j, A_j, ESigma.inv.j, nu_prior_j){
inv_R_j <- solve(R_j)
d_j <- ncol(R_j)
diag_meat <- diag(t(inv_R_j) %*% ESigma.inv.j %*% inv_R_j)
rho_hw_j <- diag_meat * nu_prior_j + 1/A_j^2
term_profiled <- -(nu_j + 1)/2 * nu_prior_j * 1/rho_hw_j
zeromat <- matrix(0, nrow = d_j, ncol = d_j)
invRE <- t(inv_R_j) %*% ESigma.inv.j
term_profiled <- mapply(seq_len(d_j), term_profiled, SIMPLIFY = FALSE, FUN=function(p, w){
zeromat[,p] <- 2 * invRE[p,]
as.vector(- t(inv_R_j) %*% zeromat %*% t(inv_R_j)) * w
})
term_profiled <- Reduce('+', term_profiled)
out <- as.vector(- nu_j * t(inv_R_j) + term_profiled)
return(out)
})
prior <- c(rep(0, p.X), unlist(prior))
deriv_rho <- as.vector( ssr + ridge + prior )
return(deriv_rho)
}
#===========================================================================#
# source file: /scratch/gouwar.j/cran-all/cranData/vglmer/R/aux_functions.R
# source file: /scratch/gouwar.j/cran-all/cranData/vglmer/R/format_functions.R (no content)
#===========================================================================#
safe_convert <- function(x){
if (isDiagonal(x)){
out <- diag(x)
lout <- seq_len(length(out)) - 1
out <- cbind(lout, lout, out)
colnames(out) <- c('i', 'j', 'x')
}else{
if (inherits(x, 'matrix')){
x <- drop0(x)
}
out <- with(attributes(as(as(x, "generalMatrix"), "TsparseMatrix")), cbind(i, j, x))
}
return(out)
}
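# Hedged illustration (added; not in the original source): safe_convert()
# returns zero-based (i, j, x) triplets; diagonal matrices take the fast
# path, everything else goes through Matrix's TsparseMatrix representation.
if (FALSE) {
  library(Matrix)
  safe_convert(Diagonal(x = c(2, 3)))                        # two diagonal triplets
  safe_convert(sparseMatrix(i = 1:2, j = 2:1, x = c(5, 7)))  # off-diagonal triplets
}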
#' @import Matrix
#' @importFrom methods as
make_mapping_alpha <- function(sigma, px.R = FALSE) {
if (!px.R) {
lapply(sigma, FUN = function(i) {
sparse_i <- safe_convert(i)
sparse_i <- sparse_i[sparse_i[, 1] >= sparse_i[, 2], , drop = F]
return(sparse_i)
})
} else {
lapply(sigma, FUN = function(i) {
sparse_i <- safe_convert(i)
return(sparse_i)
})
}
}
prepare_T <- function(mapping, levels_per_RE, variables_per_RE, running_per_RE, num_REs, cyclical = FALSE, px.R = FALSE) {
if (!cyclical) {
RE_T <- matrix(nrow = 0, ncol = 3)
} else {
RE_T <- as.list(rep(NA, num_REs))
}
for (v in 1:num_REs) {
mapping_v <- mapping[[v]]
if (cyclical) {
mapping_id_i <- rep(mapping_v[, 1], levels_per_RE[v]) +
rep(seq(1, 1 + (levels_per_RE[v] - 1) * variables_per_RE[v], by = variables_per_RE[v]), each = nrow(mapping_v))
mapping_id_j <- rep(mapping_v[, 2], levels_per_RE[v]) +
rep(seq(1, 1 + (levels_per_RE[v] - 1) * variables_per_RE[v], by = variables_per_RE[v]), each = nrow(mapping_v))
mapping_id_x <- rep(mapping_v[, 3], levels_per_RE[v])
RE_T[[v]] <- sparseMatrix(i = mapping_id_i, j = mapping_id_j, x = mapping_id_x, symmetric = T)
} else {
mapping_id_i <- rep(mapping_v[, 1], levels_per_RE[v]) +
rep(seq(1, 1 + (levels_per_RE[v] - 1) * variables_per_RE[v], by = variables_per_RE[v]), each = nrow(mapping_v))
mapping_id_j <- rep(mapping_v[, 2], levels_per_RE[v]) +
rep(seq(1, 1 + (levels_per_RE[v] - 1) * variables_per_RE[v], by = variables_per_RE[v]), each = nrow(mapping_v))
mapping_id_x <- rep(mapping_v[, 3], levels_per_RE[v])
mapping_id_i <- running_per_RE[v] + mapping_id_i
mapping_id_j <- running_per_RE[v] + mapping_id_j
RE_T <- rbind(RE_T, cbind(mapping_id_i, mapping_id_j, mapping_id_x))
}
}
if (!cyclical) {
if (px.R) {
RE_T <- sparseMatrix(i = RE_T[, 1], j = RE_T[, 2], x = RE_T[, 3], symmetric = F)
} else {
RE_T <- sparseMatrix(i = RE_T[, 1], j = RE_T[, 2], x = RE_T[, 3], symmetric = T)
}
}
return(RE_T)
}
multi_lgamma <- function(a, p) {
return(lmvgamma(x = a, p = p))
# if (length(p) != 1){stop('P must have length 1')}
# #if (any(a + 1 < p)){stop('Undefined for a < p - 1')}
# term.1 <- log(pi) * (p) * (p - 1) / 4
# term.2 <- mapply(1:p, SIMPLIFY = FALSE, FUN=function(i){
# matrix(lgamma(a + (1 - i) / 2))
# })
# term.2 <- as.vector(Reduce('+', term.2))
# return(term.1 + term.2)
}
multi_digamma <- function(a, p) {
return(mvdigamma(x = a, p = p))
# if (length(p) != 1){stop('P must have length 1')}
# #if (any(a + 1 < p)){stop('Undefined for a < p - 1')}
# term.1 <- mapply(1:p, SIMPLIFY = FALSE, FUN=function(i){
# digamma(a + (1 - i) / 2)
# })
# term.1 <- as.vector(Reduce('+', term.1))
# return(term.1)
}
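# Hedged sanity check (added), mirroring the commented-out manual formulas
# above: the CholWishart wrappers should match a direct evaluation of the
# multivariate log-gamma, log Gamma_p(a) = p(p-1)/4 * log(pi) +
# sum_{i=1}^{p} lgamma(a + (1 - i)/2).
if (FALSE) {
  a <- 4.5; p <- 3
  manual <- p * (p - 1) / 4 * log(pi) + sum(lgamma(a + (1 - 1:p) / 2))
  all.equal(as.numeric(multi_lgamma(a, p)), manual)  # TRUE
}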
#' Simple EM algorithm for starting values.
#'
#' Use ridge penalty to prevent separation. Not be called by user!
#'
#' @param X Design matrix
#' @param Z RE design matrix
#' @param s (y_i - n_i)/2 for polya-gamma input
#' @param y Raw observed y_i
#' @param est_r Initial r value (not updated!)
#' @param pg_b n_i as vector input
#' @param iter iterations
#' @param ridge variance of ridge prior
#' @name simple_EM
#' @keywords internal
#' @importFrom stats runif
EM_prelim_logit <- function(X, Z, s, pg_b, iter, ridge = 2) {
jointXZ <- cbind(X, Z)
N <- nrow(X)
EM_beta <- rep(0, ncol(jointXZ))
if (all(jointXZ[, 1] == 1)) {
EM_beta[1] <- qlogis(sum(s + pg_b / 2) / sum(pg_b))
}
if (EM_beta[1] == 0) {
EM_beta[1] <- runif(1, -.1, .1)
}
EM_variance <- sparseMatrix(i = 1:ncol(jointXZ), j = 1:ncol(jointXZ), x = 1 / ridge)
for (it in 1:iter) {
EM_pg_c <- jointXZ %*% EM_beta
EM_pg_mean <- as.vector(pg_b / (2 * EM_pg_c) * tanh(EM_pg_c / 2))
if (any(abs(EM_pg_c) < 1e-10)) {
tiny_c <- which(abs(EM_pg_c) < 1e-10)
EM_pg_mean[tiny_c] <- pg_b[tiny_c] / 4
}
EM_pg_diag_sqrt <- sparseMatrix(i = 1:N, j = 1:N, x = sqrt(EM_pg_mean))
EM_beta <- solve(Matrix::Cholesky( crossprod(EM_pg_diag_sqrt %*% jointXZ) + EM_variance),
t(jointXZ) %*% (s) )
# EM_beta <- LinRegChol(X = jointXZ, omega = EM_pg_diag, y = s, prior_precision = EM_variance)$mean
}
output <- list(beta = EM_beta[1:ncol(X)], alpha = EM_beta[-1:-ncol(X)])
return(output)
}
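# Hedged usage sketch (added; the simulated data are illustrative only): for
# a Bernoulli model, s = (y_i - n_i)/2 and pg_b = n_i = 1 are the Polya-Gamma
# inputs, and a few EM iterations give ridge-regularized starting values.
if (FALSE) {
  library(Matrix)
  n <- 200
  X <- cbind(1, rnorm(n))
  Z <- sparse.model.matrix(~ 0 + factor(sample(1:5, n, replace = TRUE)))
  y <- rbinom(n, 1, plogis(X %*% c(-0.5, 1)))
  EM_prelim_logit(X = X, Z = Z, s = (y - 1) / 2, pg_b = rep(1, n), iter = 10)
}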
#' @rdname simple_EM
#' @importFrom stats runif
EM_prelim_nb <- function(X, Z, y, est_r, iter, ridge = 2) {
if (is.null(Z)) {
jointXZ <- drop0(X)
} else {
jointXZ <- cbind(X, Z)
}
N <- nrow(jointXZ)
EM_beta <- rep(0, ncol(jointXZ))
if (all(jointXZ[, 1] == 1)) {
EM_beta[1] <- log(mean(y))
}
if (EM_beta[1] == 0) {
EM_beta[1] <- runif(1, -.1, .1)
}
EM_variance <- sparseMatrix(i = 1:ncol(jointXZ), j = 1:ncol(jointXZ), x = 1 / ridge)
for (it in 1:iter) {
pg_c <- as.vector(jointXZ %*% EM_beta - log(est_r))
pg_b <- y + est_r
pg_mean <- as.vector(pg_b / (2 * pg_c) * tanh(pg_c / 2))
if (any(abs(pg_c) < 1e-10)) {
tiny_c <- which(abs(pg_c) < 1e-10)
pg_mean[tiny_c] <- pg_b[tiny_c] / 4
}
adj_out <- (y - est_r) / 2 + pg_mean * log(est_r)
# EM_beta <- LinRegChol(X = jointXZ, omega = sparseMatrix(i = 1:N, j = 1:N, x = pg_mean), y = adj_out, prior_precision = EM_variance)$mean
EM_beta <- solve(Matrix::Cholesky( t(jointXZ) %*% sparseMatrix(i = 1:N, j = 1:N, x = pg_mean) %*% jointXZ + EM_variance),
t(jointXZ) %*% (adj_out) )
}
output <- list(beta = EM_beta[1:ncol(X)], alpha = EM_beta[-1:-ncol(X)])
return(output)
}
make_log_invwishart_constant <- function(nu, Phi) {
p <- ncol(Phi)
output <- nu / 2 * log(det(Phi)) - (nu * p) / 2 * log(2) - multi_lgamma(a = nu / 2, p = p)
return(output)
}
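# Hedged sanity check (added; not in the original source): for p = 1 the
# inverse-Wishart reduces to an inverse-gamma with shape nu/2 and rate Phi/2,
# so the log normalizing constants should coincide.
if (FALSE) {
  nu <- 7; Phi <- matrix(3)
  iw <- make_log_invwishart_constant(nu, Phi)
  ig <- (nu / 2) * log(Phi[1] / 2) - lgamma(nu / 2)
  all.equal(as.numeric(iw), ig)  # TRUE
}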
calculate_ELBO <- function(family, ELBO_type, factorization_method,
# Fixed constants or priors
d_j, g_j, prior_sigma_alpha_phi, prior_sigma_alpha_nu,
iw_prior_constant, choose_term,
# Data
X, Z, s, y,
# PolyaGamma Parameters
vi_pg_b, vi_pg_mean, vi_pg_c,
# Sigma Parameters
vi_sigma_alpha, vi_sigma_alpha_nu, vi_sigma_outer_alpha,
# Beta Parameters / Alpha Parameters
vi_beta_mean, vi_beta_decomp,
vi_alpha_mean, vi_alpha_decomp,
log_det_beta_var, log_det_alpha_var,
log_det_joint_var = NULL,
vi_joint_decomp = NULL,
# r Parameters
vi_r_mu = NULL, vi_r_mean = NULL,
vi_r_sigma = NULL,
#linear parameters
vi_sigmasq_a = NULL, vi_sigmasq_b = NULL,
vi_sigmasq_prior_a = NULL, vi_sigmasq_prior_b = NULL,
# huang_wand parameters
do_huangwand = NULL, vi_a_a_jp = NULL, vi_a_b_jp = NULL,
vi_a_nu_jp = NULL, vi_a_APRIOR_jp = NULL
) {
####
## PREPARE INTERMEDIATE QUANTITES
###
N <- nrow(X)
# linear predictor: E[XB + ZA - log(r)]
if (family == 'negbin'){
ex_XBZA <- (X %*% vi_beta_mean + Z %*% vi_alpha_mean) - vi_r_mu
}else{
ex_XBZA <- (X %*% vi_beta_mean + Z %*% vi_alpha_mean)
}
# quadratic var, i.e. Var(x_i^T beta + z_i^T alpha)
if (factorization_method %in% c("weak", "collapsed")) {
if (is.null(vi_joint_decomp)) {
stop("Need to provide joint decomposition for ELBO weak")
}
var_XBZA <- rowSums((cbind(X, Z) %*% t(vi_joint_decomp))^2)
if (family == 'negbin'){
var_XBZA <- var_XBZA + vi_r_sigma
}
} else {
beta_quad <- rowSums((X %*% t(vi_beta_decomp))^2)
alpha_quad <- rowSums((Z %*% t(vi_alpha_decomp))^2)
var_XBZA <- beta_quad + alpha_quad
if (family == 'negbin'){
var_XBZA <- var_XBZA + vi_r_sigma
}
}
# Prepare vi_sigma_alpha
moments_sigma_alpha <- mapply(vi_sigma_alpha, vi_sigma_alpha_nu, d_j, SIMPLIFY = FALSE, FUN = function(phi, nu, d) {
inv_phi <- solve(phi)
sigma.inv <- nu * inv_phi
# ln.det <- - (multi_digamma(a = nu/2, p = d) + d * log(2) + log(det(inv_phi)) )
ln.det <- log(det(phi)) - sum(digamma((nu - 1:d + 1) / 2)) - d * log(2)
return(list(sigma.inv = sigma.inv, ln.det = ln.det))
})
ln_det_sigma_alpha <- sapply(moments_sigma_alpha, FUN = function(i) {
i$ln.det
})
inv_sigma_alpha <- lapply(moments_sigma_alpha, FUN = function(i) {
i$sigma.inv
})
## GET the terms for the expectation
## of the log-complete data given the variational distribution.
if (ELBO_type == "augmented") {
if (family == "linear") {
e_ln_sigmasq <- log(vi_sigmasq_b) - digamma(vi_sigmasq_a)
e_inv_sigmasq <- vi_sigmasq_a/vi_sigmasq_b
logcomplete_1 <- sum(-1/2 * ((y - ex_XBZA)^2 + var_XBZA) * e_inv_sigmasq) +
-1/2 * length(y) * (log(2 * pi) + e_ln_sigmasq)
#Add log prior
logcomplete_1 <- logcomplete_1 +
(-vi_sigmasq_prior_a - 1) * e_ln_sigmasq +
-vi_sigmasq_prior_b * e_inv_sigmasq
} else {
# Get the terms for the p(y, w | alpha, beta, Sigma) EXCLUDING the intractable PG.
logcomplete_1 <- -sum(vi_pg_b) * log(2) +
as.vector(t(s) %*% ex_XBZA - 1 / 2 * t(ex_XBZA) %*% Diagonal(x = vi_pg_mean) %*% ex_XBZA) +
-1 / 2 * sum(var_XBZA * vi_pg_mean)
}
# Get the terms for p(alpha | Sigma)
if (family == 'linear'){
e_ln_sigmasq <- log(vi_sigmasq_b) - digamma(vi_sigmasq_a)
logcomplete_2 <- sum(-d_j * g_j / 2 * log(2 * pi) - g_j / 2 * ln_det_sigma_alpha) +
-e_inv_sigmasq * 1 / 2 * sum(mapply(inv_sigma_alpha, vi_sigma_outer_alpha, FUN = function(a, b) {
sum(diag(a %*% b))
}))
logcomplete_2 <- logcomplete_2 +
-1/2 * sum(d_j * g_j) * (e_ln_sigmasq)
}else{
logcomplete_2 <- sum(-d_j * g_j / 2 * log(2 * pi) - g_j / 2 * ln_det_sigma_alpha) +
-1 / 2 * sum(mapply(inv_sigma_alpha, vi_sigma_outer_alpha, FUN = function(a, b) {
sum(diag(a %*% b))
}))
}
## GET THE ENTROPY
# Entropy for p(beta,alpha)
if (factorization_method == "weak") {
entropy_1 <- ncol(vi_joint_decomp) / 2 * log(2 * pi * exp(1)) +
1 / 2 * log_det_joint_var
} else {
entropy_1 <- nrow(vi_beta_mean) / 2 * log(2 * pi * exp(1)) + 1 / 2 * log_det_beta_var +
ncol(vi_alpha_decomp) / 2 * log(2 * pi * exp(1)) + 1 / 2 * log_det_alpha_var
}
#ENTROPY FOR LINK SPECIFIC PARAMETERS
if (family == 'linear'){
entropy_2 <- vi_sigmasq_a + log(vi_sigmasq_b) + lgamma(vi_sigmasq_a) +
-(vi_sigmasq_a + 1) * digamma(vi_sigmasq_a)
}else{
# Entropy for Polya-Gamma EXCLUDING intractable term that cancels
entropy_2 <- sum(vi_pg_b * vi_pg_c / 4 * tanh(vi_pg_c / 2) - vi_pg_b * log(cosh(vi_pg_c / 2)))
}
# Entropy Wisharts
entropy_3 <- -mapply(vi_sigma_alpha_nu, vi_sigma_alpha, FUN = function(nu, Phi) {
make_log_invwishart_constant(nu = nu, Phi = Phi)
}) +
(vi_sigma_alpha_nu + d_j + 1) / 2 * ln_det_sigma_alpha +
1 / 2 * mapply(vi_sigma_alpha, inv_sigma_alpha, FUN = function(a, b) {
sum(diag(a %*% b))
})
entropy_3 <- sum(entropy_3)
} else if (ELBO_type == "profiled") {
vi_r_var <- (exp(vi_r_sigma) - 1) * vi_r_mean^2
psi <- ex_XBZA + vi_r_mu
zVz <- var_XBZA - vi_r_sigma
logcomplete_1 <- VEM.PELBO.r(ln_r = vi_r_mu, y, psi, zVz) +
1 / 2 * VEM.PELBO.r_hessian(ln_r = vi_r_mu, y, psi, zVz) * vi_r_sigma
# logcomplete_1a <- sum(lgamma(y + vi_r_hat)) +
# - N * lgamma(vi_r_hat) - (vi_r_hat) * N * log(2) +
# as.vector(t((y - exp(vi_r_hat))/2) %*% ex_XBZA)
# logcomplete_1b <- sum(-(y + vi_r_mean) * log(cosh(1/2 * sqrt(ex_XBZA^2 + vi_r_sigma + var_XBZA))))
# logcomplete_1c <- N/2 * vi_r_mean * vi_r_sigma
# logcomplete_1 <- logcomplete_1a + logcomplete_1b + logcomplete_1c + choose_term
logcomplete_2 <- sum(-d_j * g_j / 2 * log(2 * pi) - g_j / 2 * ln_det_sigma_alpha) +
-1 / 2 * sum(mapply(inv_sigma_alpha, vi_sigma_outer_alpha, FUN = function(a, b) {
sum(diag(a %*% b))
}))
if (factorization_method == "weak") {
entropy_1 <- ncol(vi_joint_decomp) / 2 * log(2 * pi * exp(1)) +
1 / 2 * log_det_joint_var
} else {
entropy_1 <- ncol(vi_beta_decomp) / 2 * log(2 * pi * exp(1)) + 1 / 2 * log_det_beta_var +
ncol(vi_alpha_decomp) / 2 * log(2 * pi * exp(1)) + 1 / 2 * log_det_alpha_var
}
# Entropy of q(ln r)
if (vi_r_sigma == 0) {
entropy_2 <- 0
} else {
entropy_2 <- 1 / 2 * log(2 * pi * exp(1) * vi_r_sigma)
}
} else {
stop("ELBO must be profiled or augmented")
}
###############
# Log Complete and Entropy for p(Sigma_j) or similar
###############
if (do_huangwand){
E_ln_vi_a <- mapply(vi_a_a_jp, vi_a_b_jp, FUN=function(tilde.a, tilde.b){
sum(log(tilde.b) - digamma(tilde.a))
})
E_inv_v_a <- mapply(vi_a_a_jp, vi_a_b_jp, vi_a_nu_jp, SIMPLIFY = FALSE, FUN=function(tilde.a, tilde.b, nu){
2 * nu * Diagonal(x = tilde.a/tilde.b)
})
logcomplete_3 <- 0 + # flat prior on beta
sum(
iw_prior_constant +
- (vi_a_nu_jp + d_j - 1)/2 * (d_j * log(2 * vi_a_nu_jp) + E_ln_vi_a) +
-(2 * d_j + vi_a_nu_jp) / 2 * ln_det_sigma_alpha +
-1 / 2 * mapply(E_inv_v_a, inv_sigma_alpha, FUN = function(a, b) {
sum(diag(a %*% b))
})
)
logcomplete_3_a <- mapply(d_j, vi_a_a_jp, vi_a_b_jp, E_ln_vi_a,
vi_a_APRIOR_jp,
FUN=function(d, tilde.a, tilde.b, E_ln_vi_a.j, APRIOR.j){
1/2 * sum(log(1/APRIOR.j^2)) - d * lgamma(1/2) - 3/2 * E_ln_vi_a.j +
sum(-1/APRIOR.j^2 * tilde.a/tilde.b)
})
logcomplete_3 <- logcomplete_3 + sum(logcomplete_3_a)
}else{
logcomplete_3 <- 0 + # flat prior on beta
sum(
iw_prior_constant +
-(prior_sigma_alpha_nu + d_j + 1) / 2 * ln_det_sigma_alpha +
-1 / 2 * mapply(prior_sigma_alpha_phi, inv_sigma_alpha, FUN = function(a, b) {
sum(diag(a %*% b))
})
)
}
entropy_3 <- -mapply(vi_sigma_alpha_nu, vi_sigma_alpha, FUN = function(nu, Phi) {
make_log_invwishart_constant(nu = nu, Phi = Phi)
}) +
(vi_sigma_alpha_nu + d_j + 1) / 2 * ln_det_sigma_alpha +
1 / 2 * mapply(vi_sigma_alpha, inv_sigma_alpha, FUN = function(a, b) {
sum(diag(a %*% b))
})
entropy_3 <- sum(entropy_3)
#########
# Optional Entropy if using Huang and Wand (2013) prior
#########
if (do_huangwand){
entropy_4 <- sum(mapply(vi_a_a_jp, vi_a_b_jp, FUN=function(tilde.a, tilde.b){
sum(tilde.a + log(tilde.b) + lgamma(tilde.a) - (1 + tilde.a) * digamma(tilde.a))
}))
}else{
entropy_4 <- 0
}
###Combine all of the terms together
logcomplete <- logcomplete_1 + logcomplete_2 + logcomplete_3 +
choose_term
entropy <- entropy_1 + entropy_2 + entropy_3 + entropy_4
ELBO <- entropy + logcomplete
return(data.frame(
ELBO, logcomplete, entropy, logcomplete_1,
logcomplete_2, logcomplete_3, entropy_1, entropy_2, entropy_3, entropy_4
))
}
update_r <- function(vi_r_mu, vi_r_sigma, y, X, Z, factorization_method,
vi_beta_mean, vi_alpha_mean,
vi_joint_decomp, vi_beta_decomp, vi_alpha_decomp,
vi_r_method) {
if (vi_r_method == "fixed") {
return(c(vi_r_mu, vi_r_sigma))
}
# Get intermediate quantities
ex_XBZA <- (X %*% vi_beta_mean + Z %*% vi_alpha_mean)
# quadratic var, i.e. Var(x_i^T beta + z_i^T alpha)
if (factorization_method == "weak") {
if (is.null(vi_joint_decomp)) {
stop("Need to provide joint decomposition for ELBO weak")
}
var_XBZA <- rowSums((cbind(X, Z) %*% t(vi_joint_decomp))^2)
} else {
beta_quad <- rowSums((X %*% t(vi_beta_decomp))^2)
alpha_quad <- rowSums((Z %*% t(vi_alpha_decomp))^2)
var_XBZA <- beta_quad + alpha_quad
}
N <- length(y)
# vi_r_mu <<- vi_r_mu
# vi_r_sigma <<- vi_r_sigma
# ex_XBZA <<- ex_XBZA
# var_XBZA <<- var_XBZA
# y <<- y
# N <<- N
if (vi_r_method == "delta") {
opt_vi_r <- optim(
par = c(vi_r_mu, log(vi_r_sigma)), fn = VEM.delta_method,
y = y, psi = ex_XBZA, zVz = var_XBZA,
control = list(fnscale = -1), method = "L-BFGS"
)
prior_vi_r <- VEM.delta_method(
par = c(vi_r_mu, log(vi_r_sigma)), y = y,
psi = ex_XBZA, zVz = var_XBZA
)
if (opt_vi_r$value < prior_vi_r) {
warning("Optim for r decreased objective.")
out_par <- c(vi_r_mu, vi_r_sigma)
} else {
out_par <- c(opt_vi_r$par[1], exp(opt_vi_r$par[2]))
}
} else if (vi_r_method %in% c("VEM", "Laplace")) {
opt_vi_r <- optim(
par = vi_r_mu, fn = VEM.PELBO.r, gr = VEM.PELBO.r_deriv,
y = y, psi = ex_XBZA, zVz = var_XBZA,
control = list(fnscale = -1), method = "L-BFGS"
)
if (vi_r_method == "Laplace") {
proposed_vi_r_sigma <- -1 / VEM.PELBO.r_hessian(
ln_r = opt_vi_r$par,
y = y, psi = ex_XBZA, zVz = var_XBZA
)
# proposed_vi_r_sigma <- #as.numeric(-1/opt_vi_r$hessian)
} else {
proposed_vi_r_sigma <- 0
}
prior_vi_r <- VEM.PELBO.r(
ln_r = vi_r_mu, y = y,
psi = ex_XBZA, zVz = var_XBZA
)
if (opt_vi_r$value < prior_vi_r) {
warning("Optim for r decreased objective.")
out_par <- c(vi_r_mu, vi_r_sigma)
} else {
out_par <- c(opt_vi_r$par, proposed_vi_r_sigma)
}
} else {
stop("vi_r method must be VI or VEM or fixed")
}
return(out_par)
}
VEM.PELBO.r <- function(ln_r, y, psi, zVz) {
t1 <- -(y + exp(ln_r)) * log(2) + (y - exp(ln_r)) / 2 * (psi - ln_r) +
-(exp(ln_r) + y) * log(cosh(1 / 2 * sqrt(zVz + (psi - ln_r)^2)))
t2 <- lgamma(y + exp(ln_r)) - lgamma(exp(ln_r)) - lgamma(y + 1)
return(sum(t1 + t2))
}
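# Sketch of a sanity check (not run): with zero linear-predictor variance
# (zVz = 0), the profiled objective above equals the exact negative binomial
# log-likelihood with size r = exp(ln_r) and mean mu = exp(psi), via the
# identity log(1 + exp(x)) = log(2) + x / 2 + log(cosh(x / 2)).
if (FALSE) {
  set.seed(1)
  y <- rnbinom(50, size = 2, mu = 3)
  psi <- rnorm(50)
  ln_r <- log(2)
  profiled <- VEM.PELBO.r(ln_r = ln_r, y = y, psi = psi, zVz = rep(0, 50))
  exact <- sum(dnbinom(y, size = exp(ln_r), mu = exp(psi), log = TRUE))
  stopifnot(isTRUE(all.equal(profiled, exact)))
}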
approx.lgamma <- function(x, mean_r, var_r) {
input <- x + mean_r
output <- lgamma(input) + 1 / 2 * psigamma(x = input, deriv = 1) * var_r
return(output)
}
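# Sketch (not run): approx.lgamma is a second-order delta-method approximation
# of E[lgamma(x + R)] given the mean and variance of R. A quick Monte Carlo
# comparison; the lognormal choice for R here is purely illustrative.
if (FALSE) {
  set.seed(2)
  R <- rlnorm(1e5, meanlog = 0, sdlog = 0.1)
  mc <- mean(lgamma(5 + R))
  delta <- approx.lgamma(x = 5, mean_r = mean(R), var_r = var(R))
  print(c(monte_carlo = mc, delta = delta))
}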
VEM.delta_method <- function(par, y, psi, zVz) {
mu <- par[1]
sigma <- exp(par[2])
obj <- VEM.PELBO.r(ln_r = mu, y, psi, zVz) +
1 / 2 * VEM.PELBO.r_hessian(ln_r = mu, y, psi, zVz) * sigma +
1 / 2 * log(sigma)
return(obj)
}
sech <- function(x) {
1 / cosh(x)
}
VEM.PELBO.r_deriv <- function(ln_r, y, psi, zVz) {
N <- length(y)
r <- exp(ln_r)
meat <- sqrt(zVz + (psi - ln_r)^2)
# -E^lnr PolyGamma[0, E^lnr] + E^lnr PolyGamma[0, E^lnr + y]
deriv_normcon <- -N * r * psigamma(r) + r * sum(psigamma(y + r))
# Mathematica Syntax for Derivative Ln[Cosh[1/2 * Sqrt[zVz + (psi - ln_r)^2]]]
# -E^lnr Log[Cosh[1/2 Sqrt[(lnr - psi)^2 + v]]] - ((lnr - psi) (E^lnr + y) Tanh[
# 1/2 Sqrt[(lnr - psi)^2 + v]])/(2 Sqrt[(lnr - psi)^2 + v])
deriv_lncosh <- -r * log(cosh(1 / 2 * meat)) - (ln_r - psi) * (y + r) * tanh(1 / 2 * meat) / (2 * meat)
deriv_lncosh <- sum(deriv_lncosh)
  # Mathematica syntax for
# 1/2 (-y + E^lnr (1 + lnr - psi - Log[4]))
deriv_prelim <- 1 / 2 * (-y + r * (1 + ln_r - psi - log(4)))
deriv_prelim <- sum(deriv_prelim)
return(deriv_normcon + deriv_lncosh + deriv_prelim)
}
VEM.PELBO.r_hessian <- function(ln_r, y, psi, zVz) {
N <- length(y)
r <- exp(ln_r)
meat <- sqrt(zVz + (psi - ln_r)^2)
# -E^lnr PolyGamma[0, E^lnr] + E^lnr PolyGamma[0, E^lnr + y] -
# E^(2 lnr) PolyGamma[1, E^lnr] + E^(2 lnr) PolyGamma[1, E^lnr + y]
deriv_normcon <- -N * r * psigamma(r) + r * sum(psigamma(y + r)) +
-N * r^2 * psigamma(r, deriv = 1) + r^2 * sum(psigamma(r + y, deriv = 1))
# Mathematica Code
# E^r Log[Cosh[1/2 Sqrt[(psi - r)^2 + z]]] + (
# E^r (psi - r) Tanh[1/2 Sqrt[(psi - r)^2 + z]])/Sqrt[(psi - r)^2 +
# z] + (-E^r - y) (((psi - r)^2 Sech[1/2 Sqrt[(psi - r)^2 + z]]^2)/(
# 4 ((psi - r)^2 + z)) - ((psi - r)^2 Tanh[
# 1/2 Sqrt[(psi - r)^2 + z]])/(2 ((psi - r)^2 + z)^(3/2)) +
# Tanh[1/2 Sqrt[(psi - r)^2 + z]]/(2 Sqrt[(psi - r)^2 + z]))
deriv_lncosh <- -r * log(cosh(1 / 2 * meat)) + r * (psi - ln_r) * tanh(1 / 2 * meat) / meat +
-(y + r) * ((psi - ln_r)^2 * sech(1 / 2 * meat)^2 / (4 * meat^2) - (psi - ln_r)^2 * tanh(1 / 2 * meat) / (2 * meat^3) +
tanh(1 / 2 * meat) / (2 * meat))
deriv_lncosh <- sum(deriv_lncosh)
# deriv_lncosh <- -(psi - ln_r)^2 * (r + y)/(2 * meat^2 * (1 + cosh(meat))) +
# -r * log(cosh(1/2 * meat)) +
# (zVz * (y + r) + r * 2 * (ln_r - psi) * meat^2) * tanh(1/2 * meat) / (2 * meat^3)
# Mathematica
# E^lnr - 1/2 E^lnr (-lnr + psi) - E^lnr Log[2]
deriv_prelim <- N * r - 1 / 2 * r * sum(psi - ln_r) - N * r * log(2)
return(deriv_normcon + deriv_lncosh + deriv_prelim)
}
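# Sketch (not run): central finite-difference check that the closed-form
# gradient and Hessian above match numerical derivatives of VEM.PELBO.r.
if (FALSE) {
  set.seed(3)
  y <- rnbinom(25, size = 1.5, mu = 2)
  psi <- rnorm(25)
  zVz <- rexp(25, rate = 10)
  lr <- 0.3
  h <- 1e-6
  num_grad <- (VEM.PELBO.r(lr + h, y, psi, zVz) -
    VEM.PELBO.r(lr - h, y, psi, zVz)) / (2 * h)
  print(c(numeric = num_grad, analytic = VEM.PELBO.r_deriv(lr, y, psi, zVz)))
  num_hess <- (VEM.PELBO.r_deriv(lr + h, y, psi, zVz) -
    VEM.PELBO.r_deriv(lr - h, y, psi, zVz)) / (2 * h)
  print(c(numeric = num_hess, analytic = VEM.PELBO.r_hessian(lr, y, psi, zVz)))
}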
#\sum_{j,g} E[tr(\alpha_{j,g}^T \Sigma_j^{-1} \alpha_{j,g})]
expect_alpha_prior_kernel <- function(vi_sigma_alpha, vi_sigma_alpha_nu, vi_sigma_outer_alpha, d_j){
moments_sigma_alpha <- mapply(vi_sigma_alpha, vi_sigma_alpha_nu,
d_j, SIMPLIFY = FALSE, FUN = function(phi, nu, d) {
inv_phi <- solve(phi)
sigma.inv <- nu * inv_phi
return(list(sigma.inv = sigma.inv))
})
inv_sigma_alpha <- lapply(moments_sigma_alpha, FUN = function(i) {i$sigma.inv})
out <- sum(mapply(inv_sigma_alpha, vi_sigma_outer_alpha, FUN = function(a, b) {sum(diag(a %*% b))}))
return(out)
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vglmer/R/helper_functions.R ----
#' Predict after vglmer
#'
#' @description These functions calculate the estimated linear predictor using
#' the variational distributions. \code{predict.vglmer} draws predictions
#' using the estimated variational distributions; \code{predict_MAVB} does so
#' using the MAVB procedure described in Goplerud (2022a).
#' @name vglmer_predict
#' @param object Model fit using \code{vglmer}.
#' @param newdata Dataset to use for predictions. It cannot be missing.
#' @param samples Number of samples to draw. Using \code{0} (default) gives the
#' expectation of the linear predictor. A positive integer draws
#' \code{samples} samples from the variational distributions and calculates
#' the linear predictor.
#' @param samples_only Default (\code{FALSE}) returns the prediction. If
#' \code{TRUE}, returns the samples drawn from the variational distributions,
#' \bold{not} the prediction. Each row is a sample and each column is a
#' parameter.
#' @param summary Default (\code{TRUE}) returns the mean and variance of the
#' samples for each observation. \code{FALSE} returns a matrix of the sampled
#' linear predictor for each observation. Each row is a sample and each column
#' is an observation.
#' @param allow_missing_levels Default (\code{FALSE}) does not allow prediction
#' for levels not observed in the original data. \code{TRUE} allows for
#' prediction on unseen levels; the value of \code{0} (with no uncertainty) is
#' used for the corresponding random effect.
#' @param ... Not used; included to maintain compatibility with existing
#' methods.
#'
#' @examples
#'
#' set.seed(123)
#' sim_data <- data.frame(
#' x = rnorm(100),
#' y = rbinom(100, 1, 0.5),
#' g = sample(letters, 100, replace = TRUE)
#' )
#'
#' # Run with defaults
#' est_vglmer <- vglmer(y ~ x + (x | g), data = sim_data, family = "binomial")
#'
#' # Simple prediction
#' predict(est_vglmer, newdata = sim_data)
#' # Return 10 posterior draws of the linear predictor for each observation.
#' predict_MAVB(est_vglmer, newdata = sim_data, summary = FALSE, samples = 10)
#' # Predict with a new level; note this would fail if
#' # allow_missing_levels = FALSE (the default)
#' predict(est_vglmer,
#' newdata = data.frame(g = "AB", x = 0),
#' allow_missing_levels = TRUE
#' )
#' @return This function returns an estimate of the linear predictor. The
#' default returns the expected mean, i.e. \eqn{E_{q(\alpha,\beta)}[x_i^T
#' \beta + z_i^T\alpha]}. If \code{samples > 0}, these functions return a
#' summary of the prediction for each observation, i.e. the estimated mean and
#' variance. If \code{summary = FALSE}, the sampled values of the linear
#' predictor are returned as a matrix. \code{predict_MAVB} performs MAVB as
#' described in Goplerud (2022a) before returning the linear predictor.
#'
#' If \code{allow_missing_levels = TRUE}, then observations with a new
#' (unseen) level for the random effect are given a value of zero for that
#' term of the prediction.
#' @importFrom stats delete.response terms na.pass
#' @export
predict.vglmer <- function(object, newdata,
samples = 0, samples_only = FALSE,
summary = TRUE, allow_missing_levels = FALSE, ...) {
if (length(list(...)) > 0) {
stop("... not used for predict.vglmer")
}
newdata <- as.data.frame(newdata)
rownames(newdata) <- as.character(1:nrow(newdata))
parse_formula <- object$formula$interpret_gam
if (!all(parse_formula$pred.names %in% colnames(newdata))){
missing_columns <- setdiff(parse_formula$pred.names, colnames(newdata))
stop(
paste0('The following columns are missing from "newdata": ',
paste(missing_columns, collapse =', '))
)
}
fmla <- formula(object, form = 'original')
newdata_FE <- model.frame(delete.response(object$formula$fe_terms),
data = newdata, xlev = object$formula$fe_Xlevels, na.action = na.pass)
X <- model.matrix(
delete.response(object$formula$fe_terms), newdata_FE,
contrasts.arg = object$formula$fe_contrasts)
orig_X_names <- rownames(object$beta$mean)
if (!identical(colnames(X), orig_X_names)) {
print(all.equal(colnames(X), orig_X_names))
stop("Misaligned Fixed Effects")
}
mk_Z <- model.frame(delete.response(terms(object$formula$interpret_gam$fake.formula)),
data = newdata, drop.unused.levels = TRUE)
rownames_Z <- rownames(mk_Z)
if (!is.null(object$formula$re) & (length(object$formula$re) > 0) ){
# Extract the Z (Random Effect) design matrix.
mk_Z <- mkReTrms(formula(object, form = 're'), mk_Z, reorder.terms = FALSE, reorder.vars = FALSE)
Z <- t(mk_Z$Zt)
# RE names and names of variables included for each.
names_of_RE <- mk_Z$cnms
if (anyDuplicated(names(names_of_RE)) > 0){
warning('Some random effects names are duplicated. Re-naming for stability by adding "-[0-9]" at end.')
nre <- names(names_of_RE)
unre <- unique(nre)
for (u in unre){
nre_u <- which(nre == u)
if (length(nre_u) > 1){
nre[nre_u] <- paste0(nre[nre_u], '-', seq_len(length(nre_u)))
}
}
names(names_of_RE) <- nre
if (anyDuplicated(names(names_of_RE)) > 0){
stop('Renaming duplicates failed. Please rename random effects to proceed.')
}
}
number_of_RE <- length(mk_Z$Gp) - 1
# The position that demarcates each random effect.
# That is, breaks_for_RE[2] means at that position + 1 does RE2 start.
breaks_for_RE <- c(0, cumsum(diff(mk_Z$Gp)))
# Dimensionality of \alpha_{j,g}, i.e. 1 if random intercept
# 2 if random intercept + random slope
d_j <- lengths(names_of_RE)
# Number of GROUPs for each random effect.
g_j <- diff(mk_Z$Gp) / d_j
# Empty vector to build the formatted names for each random effect.
fmt_names_Z <- c()
init_Z_names <- colnames(Z)
for (v in 1:number_of_RE) {
name_of_effects_v <- names_of_RE[[v]]
mod_name <- rep(name_of_effects_v, g_j[v])
levels_of_re <- init_Z_names[(1 + breaks_for_RE[v]):breaks_for_RE[v + 1]]
fmt_names_Z <- c(fmt_names_Z, paste0(names(names_of_RE)[v], " @ ", mod_name, " @ ", levels_of_re))
}
colnames(Z) <- fmt_names_Z
}else{
Z <- drop0(Matrix(nrow = nrow(X), ncol = 0))
p.X <- ncol(X)
p.Z <- 0
names_of_RE <- c()
number_of_RE <- 0
breaks_for_RE <- c(0)
d_j <- c()
g_j <- c()
fmt_names_Z <- c()
cyclical_pos <- list()
}
# Extract the Specials
if (length(parse_formula$smooth.spec) > 0){
base_specials <- length(parse_formula$smooth.spec)
# Number of splines + one for each "by"...
n.specials <- base_specials +
sum(sapply(parse_formula$smooth.spec, FUN=function(i){i$by}) != "NA")
Z.spline <- as.list(rep(NA, n.specials))
Z.spline.size <- rep(NA, n.specials)
Z.spline.attr <- object$internal_parameters$spline$attr
special_counter <- 1
store_spline_type <- rep(NA, n.specials)
for (i in 1:base_specials){
special_i <- parse_formula$smooth.spec[[i]]
all_splines_i <- vglmer_build_spline(x = newdata[[special_i$term]],
knots = Z.spline.attr[[i]]$knots,
Boundary.knots = Z.spline.attr[[i]]$Boundary.knots,
by = newdata[[Z.spline.attr[[i]]$by]], outer_okay = TRUE,
type = Z.spline.attr[[i]]$type, override_warn = TRUE,
force_vector = TRUE)
spline_counter <- 1
for (spline_i in all_splines_i){
stopifnot(spline_counter %in% 1:2)
colnames(spline_i$x) <- paste0('spline @ ', special_i$term, ' @ ', colnames(spline_i$x))
if (spline_counter > 1){
spline_name <- paste0('spline-',special_i$term,'-', i, '-int')
}else{
spline_name <- paste0('spline-', special_i$term, '-', i, '-base')
}
Z.spline[[special_counter]] <- spline_i$x
Z.spline.size[special_counter] <- ncol(spline_i$x)
names_of_RE[[spline_name]] <- spline_name
number_of_RE <- number_of_RE + 1
d_j <- setNames(c(d_j, 1), c(names(d_j), spline_name))
g_j <- setNames(c(g_j, ncol(spline_i$x)), c(names(g_j), spline_name))
breaks_for_RE <- c(breaks_for_RE, max(breaks_for_RE) + ncol(spline_i$x))
fmt_names_Z <- c(fmt_names_Z, colnames(spline_i$x))
store_spline_type[special_counter] <- spline_counter
spline_counter <- spline_counter + 1
special_counter <- special_counter + 1
}
}
Z.spline <- drop0(do.call('cbind', Z.spline))
rownames(Z.spline) <- rownames(newdata)
if (ncol(Z) > 0){
Z.spline <- Z.spline[match(rownames(Z), rownames(Z.spline)),]
Z <- drop0(cbind(Z, Z.spline))
}else{
Z <- Z.spline
}
if (!isTRUE(all.equal(names_of_RE, object$internal_parameters$names_of_RE))){
stop('Names of REs do not match estimation data. This may occur when REs have to be re-named.')
}
if (!isTRUE(identical(object$internal_parameters$spline$size[store_spline_type %in% 1],
Z.spline.size[store_spline_type %in% 1]))){
stop('Misalignment of splines in prediction.')
}
if (!isTRUE(identical(names_of_RE, object$internal_parameters$names_of_RE))){
stop('Misalignment of spline names in prediction.')
}
}else{
n.specials <- 0
Z.spline.attr <- NULL
Z.spline <- NULL
Z.spline.size <- NULL
}
#####
### Confirm Alignment of the Z
#####
orig_Z_names <- rownames(object$alpha$mean)
not_in_original_Z <- setdiff(fmt_names_Z, orig_Z_names)
not_in_new_Z <- setdiff(orig_Z_names, fmt_names_Z)
if (length(not_in_original_Z) > 0) {
if (!allow_missing_levels) {
stop("New levels not allowed unless allow_missing_levels = TRUE")
}
}
in_both <- intersect(fmt_names_Z, orig_Z_names)
recons_Z <- drop0(sparseMatrix(i = 1, j = 1, x = 0, dims = c(nrow(Z), length(orig_Z_names))))
colnames(recons_Z) <- orig_Z_names
rownames(recons_Z) <- rownames_Z
recons_Z[, match(in_both, orig_Z_names)] <- Z[, match(in_both, fmt_names_Z)]
# Check that the entirely missing columns match those not in the original
checksum_align <- setdiff(not_in_new_Z, sort(names(which(colSums(recons_Z != 0) == 0))))
if (length(checksum_align) > 0) {
stop("Alignment Error")
}
Z <- recons_Z
rm(recons_Z)
####
total_obs <- rownames(newdata)
obs_in_both <- intersect(rownames(X), rownames(Z))
XZ <- cbind(
X[match(obs_in_both, rownames(X)), , drop = F],
Z[match(obs_in_both, rownames(Z)), , drop = F]
)
factorization_method <- object$control$factorization_method
if (is.matrix(samples)) {
if (ncol(samples) != ncol(XZ)) {
stop("Samples must be {m, ncol(Z) + ncol(X)}")
}
samples <- t(samples)
only.lp <- FALSE
} else {
if (samples == 0) {
only.lp <- TRUE
} else {
only.lp <- FALSE
}
if (factorization_method %in% c("strong", "partial")) {
vi_alpha_mean <- object$alpha$mean
vi_alpha_decomp <- object$alpha$decomp_var
p.Z <- nrow(vi_alpha_mean)
vi_beta_mean <- object$beta$mean
vi_beta_decomp <- object$beta$decomp_var
p.X <- nrow(vi_beta_mean)
if (!only.lp) {
sim_init_alpha <- matrix(rnorm(samples * p.Z), ncol = samples)
sim_init_alpha <- t(vi_alpha_decomp) %*% sim_init_alpha
sim_init_alpha <- sim_init_alpha + kronecker(vi_alpha_mean, t(matrix(1, samples)))
sim_init_beta <- matrix(rnorm(samples * p.X), ncol = samples)
sim_init_beta <- t(vi_beta_decomp) %*% sim_init_beta
sim_init_beta <- sim_init_beta + kronecker(vi_beta_mean, t(matrix(1, samples)))
} else {
sim_init_alpha <- vi_alpha_mean
sim_init_beta <- vi_beta_mean
}
} else if (factorization_method == "weak") {
vi_alpha_mean <- object$alpha$mean
p.Z <- nrow(vi_alpha_mean)
vi_beta_mean <- object$beta$mean
p.X <- nrow(vi_beta_mean)
if (!only.lp) {
vi_joint_decomp <- object$joint$decomp_var
sim_init_joint <- matrix(rnorm(samples * (p.X + p.Z)), ncol = samples)
sim_init_joint <- t(vi_joint_decomp) %*% sim_init_joint
sim_init_beta <- sim_init_joint[1:p.X, , drop = F]
sim_init_alpha <- sim_init_joint[-1:-p.X, , drop = F]
rm(sim_init_joint)
sim_init_alpha <- sim_init_alpha + kronecker(vi_alpha_mean, t(matrix(1, samples)))
sim_init_beta <- sim_init_beta + kronecker(vi_beta_mean, t(matrix(1, samples)))
} else {
sim_init_alpha <- vi_alpha_mean
sim_init_beta <- vi_beta_mean
}
} else {
stop("")
}
samples <- rbind(sim_init_beta, sim_init_alpha)
rm(sim_init_beta, sim_init_alpha)
}
if (samples_only) {
return(t(samples))
}
lp <- XZ %*% samples
if (summary) {
if (!only.lp) {
lp <- t(apply(lp, MARGIN = 1, FUN = function(i) {
c(mean(i), var(i))
}))
lp <- data.frame(mean = lp[, 1], var = lp[, 2])
lp <- lp[match(total_obs, obs_in_both), ]
rownames(lp) <- NULL
} else {
lp <- as.vector(t(apply(lp, MARGIN = 1, FUN = function(i) {
mean(i)
})))
lp <- lp[match(total_obs, obs_in_both)]
rownames(lp) <- NULL
}
return(lp)
} else {
lp <- lp[match(total_obs, obs_in_both), , drop = F]
rownames(lp) <- NULL
return(t(lp))
}
}
#' @inheritParams MAVB
#' @inheritParams vglmer_predict
#' @rdname vglmer_predict
#' @export
predict_MAVB <- function(object, newdata, samples = 0, samples_only = FALSE,
var_px = Inf, summary = TRUE, allow_missing_levels = FALSE) {
pxSamples <- MAVB(object = object, samples = samples, var_px = var_px)
lp <- predict.vglmer(object,
newdata = newdata, samples = pxSamples, samples_only = samples_only,
summary = summary, allow_missing_levels = allow_missing_levels
)
return(lp)
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vglmer/R/predict_functions.R ----
#' Generic Functions after Running vglmer
#'
#' \code{vglmer} uses many standard methods from \code{lm} and \code{lme4} with
#' limited changes. These provide summaries of the estimated variational
#' distributions.
#'
#' @details The accompanying functions are briefly described below.
#'
#' \code{coef} and \code{vcov} return the mean and variance of the fixed effects
#' (\eqn{\beta}). \code{fixef} returns the mean of the fixed effects.
#'
#' \code{ranef} extracts the random effects (\eqn{\alpha}) in a similar,
#' although slightly different format, to \code{lme4}. It includes the estimated
#' posterior mean and variance in a list of data.frames with one entry per
#' random effect \eqn{j}.
#'
#' \code{fitted} extracts the estimated expected \emph{linear predictor}, i.e.
#' \eqn{E_{q(\theta)}[x_i^T \beta + z_i^T \alpha]} at convergence.
#'
#' \code{summary} reports the estimates for all fixed effects as in \code{lm} as
#' well as some summaries of the random effects (if \code{display_re=TRUE}).
#'
#' \code{format_vglmer} collects the mean and variance of the fixed and random
#' effects into a single data.frame. This is useful for examining all of the
#' posterior estimates simultaneously. \code{format_glmer} converts an object
#' estimated with \code{[g]lmer} into a comparable format.
#'
#' \code{ELBO} extracts the ELBO from the estimated model. \code{type} can be
#' set equal to \code{"trajectory"} to get the estimated ELBO at each iteration
#' and assess convergence.
#'
#' \code{sigma} extracts the square root of the posterior mode of
#' \eqn{q(\sigma^2)} if a linear model is used.
#'
#' \code{formula} extracts the formula associated with the \code{vglmer} object.
#' By default, it returns the formula provided. The fixed and random effects
#' portions can be extracted separately using the \code{form} argument.
#'
#' @name vglmer-class
#' @param object Model fit using vglmer
#'
#' @return The functions here return a variety of different objects depending on
#' the specific function. "Details" describes the behavior of each one. Their
#' output is similar to the typical behavior for the corresponding generic
#' functions.
#' @rdname vglmer-class
#' @export
fixef.vglmer <- function(object, ...) {
out <- object$beta$mean
rn <- rownames(out)
out <- as.vector(out)
names(out) <- rn
return(out)
}
# Load fixef, ranef, sigma from lme4
#' @export
lme4::fixef
#' @export
lme4::ranef
#' @importFrom stats sigma
#' @rdname vglmer-class
#' @export
sigma.vglmer <- function(object, ...){
  # sqrt of the posterior mode of q(sigma^2); for IG(a, b) the mode is b / (a + 1)
if (object$family != 'linear'){
stop('sigma from vglmer is only defined for linear models')
}
if (length(list(...)) > 0){
stop('... not used for sigma.vglmer')
}
naive_sigma <- with(object$sigmasq, sqrt(b/(a+1)))
return(naive_sigma)
}
#' @rdname vglmer-class
#' @export
ranef.vglmer <- function(object, ...) {
if (length(list(...)) > 0) {
stop("... not used for ranef.vglmer")
}
d_j <- object$internal_parameters$d_j
g_j <- object$internal_parameters$g_j
J <- length(d_j)
vi_alpha_mean <- as.vector(object$alpha$mean)
vi_alpha_var <- as.vector(object$alpha$dia.var)
re_pos <- rep(1:J, d_j * g_j)
vi_id <- gsub(rownames(object$alpha$mean), pattern = "^.* @ .* @ ", replacement = "")
vi_id <- split(vi_id, re_pos)
vi_alpha_mean <- split(vi_alpha_mean, re_pos)
vi_alpha_var <- split(vi_alpha_var, re_pos)
vi_parsed <- mapply(d_j, g_j, vi_alpha_mean, vi_alpha_var, vi_id, object$internal_parameters$names_of_RE,
SIMPLIFY = F,
FUN = function(d, g, mean_j, var_j, id_j, name_j) {
mat_id <- matrix(id_j, byrow = TRUE, nrow = g, ncol = d)
mat_mean <- matrix(mean_j, byrow = TRUE, nrow = g, ncol = d)
mat_var <- matrix(var_j, byrow = TRUE, nrow = g, ncol = d)
colnames(mat_mean) <- colnames(mat_var) <- name_j
id <- mat_id[, 1]
mat_mean <- data.frame(id, mat_mean, check.names = FALSE, stringsAsFactors = F)
mat_var <- data.frame(id, mat_var, check.names = FALSE, stringsAsFactors = F)
attributes(mat_mean)$"variance" <- mat_var
return(mat_mean)
}
)
return(vi_parsed)
}
#' @rdname vglmer-class
#' @method coef vglmer
#' @export
coef.vglmer <- function(object, ...) {
if (length(list(...)) > 0) {
stop("... not used for coef.vglmer")
}
out <- as.vector(object$beta$mean)
names(out) <- rownames(object$beta$mean)
return(out)
}
#' @rdname vglmer-class
#' @export
vcov.vglmer <- function(object, ...) {
if (length(list(...)) > 0) {
stop("... not used for vcov.vglmer")
}
return(as.matrix(object$beta$var))
}
#' @rdname vglmer-class
#' @method fitted vglmer
#' @export
fitted.vglmer <- function(object, ...){
if (length(list(...)) > 0) {
stop("... not used for vcov.vglmer")
}
return(object$internal_parameters$lp)
}
#' @rdname vglmer-class
#' @param x Model fit using \code{vglmer}.
#' @param ... Not used; included to maintain compatibility with existing
#' methods.
#' @method print vglmer
#' @export
print.vglmer <- function(x, ...) {
if (length(list(...)) > 0) {
"print.vglmer does not use ..."
}
N_obs <- x$internal_parameters$N
missing_obs <- x$internal_parameters$missing_obs
it_used <- x$internal_parameters$it_used
it_max <- x$internal_parameters$it_max
final_param_change <- round(max(x$internal_parameters$parameter.change), 6)
final_ELBO_change <- round(tail(diff(x$ELBO_trajectory$ELBO), 1), 8)
converged <- it_max != it_used
p.X <- nrow(x$beta$mean)
p.Z <- nrow(x$alpha$mean)
J <- length(x$sigma$cov)
cat(paste0("Formula: J = ", J, ", |Z| = ", p.Z, ", |X| = ", p.X, "\n\n"))
cat(paste(format(formula(x, form = 'original')), collapse = "\n\n"))
cat("\n\n")
if (missing_obs > 0) {
missing_info <- paste0("after ", missing_obs, " deleted because of missing data and")
} else {
missing_info <- " and"
}
cat(paste0("Model fit with ", N_obs, " observations", missing_info))
if (converged) {
cat(paste0(" converged after ", it_used, " iterations."))
} else {
cat(paste0(" *failed* to converge after ", it_max, " iterations."))
}
cat("\n\n")
cat(paste0("ELBO: ", round(x$ELBO[1], 2), "\n\n"))
cat(paste0("Factorization Method: ", x$control$factorization_method, "\n"))
cat(paste0("Parameter Expansion: ", x$control$parameter_expansion, "\n\n"))
cat(paste0("Largest Parameter Change at Convergence: ", formatC(final_param_change, format = "e", digits = 2), "\n"))
cat(paste0("ELBO Change at Convergence: ", formatC(final_ELBO_change, format = "e", digits = 2), "\n"))
  invisible(list(parameter = final_param_change, ELBO = final_ELBO_change))
}
#' @rdname vglmer-class
#' @param display_re Default (\code{TRUE}) prints a summary of the
#' random effects alongside the fixed effects.
#' @importFrom lmtest coeftest
#' @method summary vglmer
#' @export
summary.vglmer <- function(object, display_re = TRUE, ...) {
sum_obj <- coeftest(x = object)
sum_sigma <- mapply(object$sigma$cov, object$sigma$df, SIMPLIFY = FALSE, FUN = function(a, b) {
fmt_IW_mean(a, b)
})
sum_sigma <- mapply(sum_sigma, object$internal_parameters$names_of_RE, SIMPLIFY = FALSE, FUN = function(i, j) {
rownames(i) <- colnames(i) <- j
return(i)
})
re_names <- names(object$internal_parameters$names_of_RE)
cat(paste0("Output from vglmer using '", object$control$factorization_method, "' factorization.\n"))
cat("\nSummary of Fixed Effects\n")
print(sum_obj)
cat("\n")
if (display_re) {
cat("Summary of Random Effects: Mean of Sigma_j (Variance)")
for (v in seq_len(length(re_names))) {
cat("\n")
cat(re_names[v])
cat("\n")
print(sum_sigma[[v]], quote = FALSE)
}
cat("\n")
}
if (object$family == "negbin") {
r_output <- object$r
# fmt_r <- function(x){formatC(x, format = 'e', digits = 2)}
fmt_r <- function(x) {
round(x, digits = 2)
}
    # 5% and 95% lognormal quantiles; qnorm(p) = sqrt(2) * erfinv(2 * p - 1)
    r_ci <- exp(r_output$mu + sqrt(2 * r_output$sigma) * erfinv(c(-0.90, 0.90)))
r_ci <- paste0("[", paste(fmt_r(r_ci), collapse = ", "), "]")
r_mean <- fmt_r(exp(r_output$mu + r_output$sigma / 2))
cat("Summary of Auxiliary Parameters:\n")
cat("Dispersion Parameter r:\n")
if (object$r$method == "VI") {
cat(paste0("Mean (90% Interval): ", r_mean, " ", r_ci))
} else {
cat(paste0("Mean: ", r_mean))
}
cat("\n")
cat("\n")
}
invisible()
}
#' @rdname vglmer-class
#' @param form Describes the type of formula to report:
#' \code{"original"} returns the user input, \code{"fe"} returns the fixed
#' effects only, \code{"re"} returns the random effects only.
#' @export
formula.vglmer <- function(x, form = "original", ...) {
if (form == 'original'){
x$formula$formula
}else if (form == 'fe'){
x$formula$fe
}else if (form == 're'){
x$formula$re
}else{stop('form must be "original", "fe", or "re".')}
}
#' @importFrom stats qnorm
erfinv <- function(x) {
qnorm((1 + x) / 2) / sqrt(2)
}
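# Sketch (not run): with this definition, qnorm(p) == sqrt(2) * erfinv(2p - 1),
# the identity used when forming the 90% interval for r in summary.vglmer.
if (FALSE) {
  p <- c(0.05, 0.95)
  stopifnot(isTRUE(all.equal(qnorm(p), sqrt(2) * erfinv(2 * p - 1))))
}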
# Internal function to tidy-up
# inverse Wishart to extract mean
fmt_IW_mean <- function(Phi, nu, digits = 2) {
  if (nu - nrow(Phi) - 1 <= 0) {
    return(matrix(NA, nrow = nrow(Phi), ncol = ncol(Phi)))
  }
  mean <- solve(as.matrix(Phi)) / (nu - nrow(Phi) - 1)
  return(formatC(mean, format = "e", digits = digits))
}
#' @rdname vglmer-class
#' @export
format_vglmer <- function(object) {
beta.output <- data.frame(name = rownames(object$beta$mean), mean = as.vector(object$beta$mean), var = diag(object$beta$var), stringsAsFactors = F)
alpha.output <- data.frame(name = rownames(object$alpha$mean), mean = as.vector(object$alpha$mean), var = as.vector(object$alpha$dia.var), stringsAsFactors = F)
output <- rbind(beta.output, alpha.output)
return(output)
}
#' @rdname vglmer-class
#' @importFrom stats vcov
#' @export
format_glmer <- function(object) {
output <- do.call('rbind', mapply(ranef(object), names(ranef(object)), SIMPLIFY = FALSE, FUN = function(i,j) {
obj <- data.frame(
var = as.vector(apply(attributes(i)$postVar, MARGIN = 3, FUN = function(i) {
diag(i)
})),
mean = as.vector(t(as.matrix(i))),
name = paste0(rep(colnames(i), nrow(i)), " @ ", rep(rownames(i), each = ncol(i))), stringsAsFactors = F
)
obj[[".re"]] <- j
return(obj)
}))
output$name <- paste0(output[[".re"]], ' @ ', output[["name"]])
output_fe <- data.frame(mean = fixef(object), var = diag(stats::vcov(object)))
output_fe$name <- rownames(output_fe)
output_fe[[".re"]] <- NA
output <- rbind(output, output_fe)
output <- output[, (names(output) != ".re")]
rownames(output) <- NULL
return(output)
}
#' @rdname vglmer-class
#' @param object Model fit using \code{vglmer}.
#' @param type Default (\code{"final"}) gives the ELBO at convergence.
#' \code{"trajectory"} gives the ELBO estimated at each iteration. This is
#' used to assess model convergence.
#' @export
ELBO <- function(object, type = c('final', 'trajectory')){
type <- match.arg(type)
if (type == 'final'){
object$ELBO$ELBO
}else{
object$ELBO_trajectory$ELBO
}
}
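# Sketch of typical usage (not run; `est_vglmer` stands in for any fitted
# vglmer object): plotting the trajectory is a quick check that the ELBO
# increased monotonically across iterations.
if (FALSE) {
  ELBO(est_vglmer) # final ELBO
  plot(ELBO(est_vglmer, type = "trajectory"),
    type = "l",
    xlab = "Iteration", ylab = "ELBO"
  )
}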
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vglmer/R/print_functions.R ----
#' Code from Wand and Ormerod (2008)
#' Found at doi: 10.1111/j.1467-842X.2008.00507.x
#' @param a lower boundary
#' @param b upper boundary
#' @param intKnots internal knots
#' @keywords internal
formOmega <- function(a,b,intKnots){
allKnots <- c(rep(a,4),intKnots,rep(b,4))
K <- length(intKnots) ; L <- 3 * (K+8)
xtilde <- (rep(allKnots,each=3)[-c(1,(L-1),L)]+
rep(allKnots,each=3)[-c(1,2,L)])/2
wts <- rep(diff(allKnots),each=3) * rep(c(1,4,1)/6,K+7)
Bdd <- spline.des(allKnots,xtilde,derivs=rep(2,length(xtilde)),
outer.ok=TRUE)$design
Omega <- drop0(t(Bdd) %*% Diagonal(x = wts) %*% Bdd)
return(Omega)
}
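# Sketch of a sanity check (not run): the penalty matrix from formOmega is a
# Gram matrix of second derivatives, so it should be symmetric and positive
# semi-definite.
if (FALSE) {
  Om <- as.matrix(formOmega(a = 0, b = 1, intKnots = seq(0.1, 0.9, by = 0.1)))
  stopifnot(isTRUE(all.equal(Om, t(Om))))
  stopifnot(min(eigen(Om, only.values = TRUE)$values) > -1e-8)
}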
#' Create splines for use in vglmer
#'
#' This function estimates splines in \code{vglmer}, similar to \code{s(...)} in
#' \code{mgcv} albeit with many fewer options than \code{mgcv}. It allows for
#' truncated (linear) splines or O'Sullivan splines. Please see \link{vglmer}
#' for more discussion and examples.
#'
#' @param ... Variable name, e.g. \code{v_s(x)}
#' @param type Default (\code{"tpf"}) uses truncated linear splines for the
#' basis. The other option (\code{"o"}) uses O'Sullivan splines (Wand and
#' Ormerod 2008).
#' @param knots Default (\code{NULL}) uses \eqn{K=min(N/4,35)} knots evenly
#' spaced at quantiles of the covariate \code{x}. A single number specifies a
#' specific number of knots; a vector can set custom locations for knots.
#' @param by A categorical or factor covariate to interact the spline with; for
#' example, \code{v_s(x, by = g)}.
#' @param by_re Default (\code{TRUE}) regularizes the interactions between the
#' categorical factor and the covariate. See "Details" in \link{vglmer} for
#' more discussion.
#' @param force_vector Force that argument to \code{knots} is treated as vector.
#' This is usually not needed unless \code{knots} is a single integer that
#' should be treated as a single knot (vs. the number of knots).
#' @param outer_okay Default (\code{FALSE}) does not permit values in \code{x}
#' to exceed the outer knots.
#' @importFrom splines bs
#'
#' @return This function returns a list of class of \code{vglmer_spline} that is
#' passed to unexported functions. It contains the arguments noted above where
#' \code{...} is parsed into an argument called \code{term}.
#'
#' @references
#' Wand, Matt P. and Ormerod, John T. 2008. "On Semiparametric Regression with
#' O'Sullivan Penalized Splines". \emph{Australian & New Zealand Journal of
#' Statistics}. 50(2): 179-198.
#'
#' Wood, Simon N. 2017. \emph{Generalized Additive Models: An Introduction with
#' R}. Chapman and Hall/CRC.
#' @export
v_s <- function(..., type = 'tpf', knots = NULL, by = NA,
by_re = TRUE, force_vector = FALSE,
outer_okay = FALSE){
  if (!(type %in% c('tpf', 'o'))){stop('type must be "tpf" or "o"')}
# Using mgcv's syntax for "s" to make it work with "interpret.gam"
vars <- as.list(substitute(list(...)))[-1]
d <- length(vars)
if (d > 1){stop('Unlike mgcv, only provide a single variable')}
by.var <- deparse(substitute(by), backtick = TRUE, width.cutoff = 500)
if (by.var == "."){
stop("by=. not allowed")
}
term <- deparse(vars[[1]], backtick = TRUE, width.cutoff = 500)
if (term[1] == "."){
stop("s(.) not supported.")
}
term[1] <- attr(terms(reformulate(term[1])), "term.labels")
label <- paste0("v_s(", term[1], ")")
ret <- list(term = term, outer_okay = outer_okay, force_vector = force_vector,
by = by.var, type = type, knots = knots,
by_re = by_re)
class(ret) <- 'vglmer_spline'
return(ret)
}
#' @importFrom splines spline.des
vglmer_build_spline <- function(x, knots = NULL, Boundary.knots = NULL,
by, type, override_warn = FALSE,
outer_okay = FALSE, by_re = NULL, force_vector = FALSE){
if (is.null(knots)){
ux <- length(unique(x))
if (ux < 4){stop('Cannot fit spline with fewer than 4 unique values.')}
# Use the knot heuristic in Ruppert by default.
# Keeps the size of the problem feasible.
numIntKnots <- floor(c(min(ux/4, 35)))
intKnots <- quantile(unique(x),
seq(0,1,length=(numIntKnots+2)
)[-c(1,(numIntKnots+2))])
names(intKnots) <- NULL
}else if (length(knots) == 1 & !force_vector){
if (knots < 1){
stop('If an integer, at least one knot must be provided. force_vector=TRUE may be useful here.')
}
if (as.integer(knots) != knots){
      warning('knots does not appear to be an integer. Coercing with "as.integer".')
knots <- as.integer(knots)
message(paste0('knots argument turned into ', knots, ' by coercion.'))
}
numIntKnots <- knots
intKnots <- quantile(unique(x),seq(0,1,length=
(numIntKnots+2))[-c(1,(numIntKnots+2))])
names(intKnots) <- NULL
}else{
# Sort user provided knots
knots <- sort(knots)
if (any(knots > max(x, na.rm=T)) | any(knots < min(x, na.rm=T))){
if (!override_warn){
        warning('user-provided knots are outside the range of the observed data.')
}
}
intKnots <- knots
}
if (is.null(Boundary.knots)){
Boundary.knots <- range(x, na.rm=T)
}else{
stopifnot(length(Boundary.knots) == 2)
}
if (type == 'tpf'){
aug_knots <- c(Boundary.knots[1], intKnots, Boundary.knots[2])
x <- outer(x, aug_knots[-c(1,length(aug_knots))], '-')
x <- drop0(x * (x > 0))
spline_attr <- list(D = Diagonal(n = ncol(x)), Boundary.knots = Boundary.knots,
knots = intKnots)
}else if (type == 'o'){
# Form Omega from Wand and Ormerod (2008)
D <- formOmega(a = Boundary.knots[1], b = Boundary.knots[2], intKnots = intKnots)
# eigen decompose
eD <- eigen(D)
# transform spline design
if (override_warn){
wrapper_bs <- function(x){suppressWarnings(x)}
}else{
wrapper_bs <- function(x){x}
}
x <- wrapper_bs(splines::bs(x = x, knots = intKnots,
degree = 3, intercept = TRUE,
Boundary.knots = Boundary.knots))
x <- x %*% eD$vectors[,seq_len(ncol(D)-2)] %*%
Diagonal(x = 1/sqrt(eD$values[seq_len(ncol(D) - 2)]))
spline_attr <- list(D = Diagonal(n = ncol(x)),
Boundary.knots = Boundary.knots,
knots = intKnots, eigen_D = eD)
}else{stop('splines only set up for tpf and o')}
spline_attr$by_re <- by_re
if (!is.null(by)){
base_x <- x
u_by <- sort(unique(by))
x_by <- sparseMatrix(i = 1:length(by), j = match(by, u_by), x = 1)
names_x <- as.vector(outer(1:ncol(x), u_by, FUN=function(x,y){paste(y,x, sep = ' @ ')}))
x <- t(KhatriRao(t(x_by), t(x)))
colnames(x) <- names_x
colnames(base_x) <- paste0('base @ ', 1:ncol(base_x))
out <- list(x = x, attr = spline_attr)
class(out) <- c('spline_sparse')
base_out <- list(x = base_x, attr = spline_attr)
class(base_out) <- c('spline_sparse')
return(
list(base_out, out)
)
}else{
colnames(x) <- paste0('base @ ', 1:ncol(x))
out <- list(x = x, attr = spline_attr)
class(out) <- c('spline_sparse')
return(list(out))
}
}
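# Sketch (not run) of the truncated power ("tpf") basis construction used
# above: each column is pmax(x - knot, 0), built via outer() and an indicator.
if (FALSE) {
  x <- seq(0, 1, by = 0.25)
  knots <- c(0.25, 0.5, 0.75)
  tpf <- outer(x, knots, "-")
  tpf <- tpf * (tpf > 0) # column-wise pmax(x - knot, 0)
  print(tpf)
}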
print.spline_sparse <- function(x, ...){
  print(x$x)
}
image.spline_sparse <- function(x, ...){image(x$x)}
#' Interpret a vglmer formula for splines
#' @description A modified version of interpret.gam0 from mgcv. Used when mgcv's
#' interpret.gam fails; usually when some environment object is passed to v_s.
#' @param gf A vglmer formula
#' @param textra Unused internal argument
#' @param extra.special Allow extra special terms to be passed
#' @importFrom stats reformulate terms.formula as.formula formula update.formula
#' quantile
#' @keywords internal
fallback_interpret.gam0 <- function(gf, textra = NULL, extra.special = NULL){
p.env <- environment(gf)
tf <- terms.formula(gf, specials = c("s", "te",
"ti", "t2", extra.special))
terms <- attr(tf, "term.labels")
nt <- length(terms)
if (attr(tf, "response") > 0) {
response <- as.character(attr(tf, "variables")[2])
}
else {
response <- NULL
}
sp <- attr(tf, "specials")$s
tp <- attr(tf, "specials")$te
tip <- attr(tf, "specials")$ti
t2p <- attr(tf, "specials")$t2
zp <- if (is.null(extra.special))
NULL
else attr(tf, "specials")[[extra.special]]
off <- attr(tf, "offset")
vtab <- attr(tf, "factors")
if (length(sp) > 0)
for (i in 1:length(sp)) {
ind <- (1:nt)[as.logical(vtab[sp[i], ])]
sp[i] <- ind
}
if (length(tp) > 0)
for (i in 1:length(tp)) {
ind <- (1:nt)[as.logical(vtab[tp[i], ])]
tp[i] <- ind
}
if (length(tip) > 0)
for (i in 1:length(tip)) {
ind <- (1:nt)[as.logical(vtab[tip[i], ])]
tip[i] <- ind
}
if (length(t2p) > 0)
for (i in 1:length(t2p)) {
ind <- (1:nt)[as.logical(vtab[t2p[i], ])]
t2p[i] <- ind
}
if (length(zp) > 0)
for (i in 1:length(zp)) {
ind <- (1:nt)[as.logical(vtab[zp[i], ])]
zp[i] <- ind
}
k <- kt <- kti <- kt2 <- ks <- kz <- kp <- 1
len.sp <- length(sp)
len.tp <- length(tp)
len.tip <- length(tip)
len.t2p <- length(t2p)
len.zp <- length(zp)
ns <- len.sp + len.tp + len.tip + len.t2p + len.zp
pav <- av <- rep("", 0)
smooth.spec <- list()
###################
# Modified from "mgcv"
####################
mgcvns <- loadNamespace("vglmer")
if (nt)
for (i in 1:nt) {
if (k <= ns && ((ks <= len.sp && sp[ks] == i) ||
(kt <= len.tp && tp[kt] == i) || (kz <= len.zp &&
zp[kz] == i) || (kti <= len.tip && tip[kti] ==
i) || (kt2 <= len.t2p && t2p[kt2] == i))) {
################
# Modified from "mgcv::"
#################
st <- try(eval(parse(text = paste("vglmer::",
terms[i], sep = "")), envir = p.env),
silent = TRUE)
if (inherits(st, "try-error")) {
st <- eval(parse(text = terms[i]), enclos = p.env,
envir = mgcvns)
}
if (!is.null(textra)) {
pos <- regexpr("(", st$lab, fixed = TRUE)[1]
st$label <- paste(substr(st$label, start = 1,
stop = pos - 1), textra, substr(st$label,
start = pos, stop = nchar(st$label)), sep = "")
}
smooth.spec[[k]] <- st
if (ks <= len.sp && sp[ks] == i)
ks <- ks + 1
else if (kt <= len.tp && tp[kt] == i)
kt <- kt + 1
else if (kti <= len.tip && tip[kti] == i)
kti <- kti + 1
else if (kt2 <= len.t2p && t2p[kt2] == i)
kt2 <- kt2 + 1
else kz <- kz + 1
k <- k + 1
}
else {
av[kp] <- terms[i]
kp <- kp + 1
}
}
if (!is.null(off)) {
av[kp] <- as.character(attr(tf, "variables")[1 +
off])
kp <- kp + 1
}
pf <- paste(response, "~", paste(av, collapse = " + "))
if (attr(tf, "intercept") == 0) {
pf <- paste(pf, "-1", sep = "")
if (kp > 1)
pfok <- 1
else pfok <- 0
}
else {
pfok <- 1
if (kp == 1) {
pf <- paste(pf, "1")
}
}
fake.formula <- pf
if (length(smooth.spec) > 0)
for (i in 1:length(smooth.spec)) {
nt <- length(smooth.spec[[i]]$term)
ff1 <- paste(smooth.spec[[i]]$term[1:nt], collapse = "+")
fake.formula <- paste(fake.formula, "+", ff1)
if (smooth.spec[[i]]$by != "NA") {
fake.formula <- paste(fake.formula, "+",
smooth.spec[[i]]$by)
av <- c(av, smooth.spec[[i]]$term, smooth.spec[[i]]$by)
}
else av <- c(av, smooth.spec[[i]]$term)
}
fake.formula <- as.formula(fake.formula, p.env)
if (length(av)) {
pred.formula <- as.formula(paste("~", paste(av,
collapse = "+")))
pav <- all.vars(pred.formula)
pred.formula <- stats::reformulate(pav)
}
else pred.formula <- ~1
ret <- list(pf = as.formula(pf, p.env), pfok = pfok, smooth.spec = smooth.spec,
fake.formula = fake.formula, response = response, fake.names = av,
pred.names = pav, pred.formula = pred.formula)
class(ret) <- "split.gam.formula"
ret
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vglmer/R/spline_functions.R ----
prep_lu <- function(M){
fact_lu <- expand(Matrix::lu(M))
if (is.null(fact_lu$Q)){
fact_lu$Q <- Diagonal(n = ncol(fact_lu$U))
}
fact_lu$L <- drop0(fact_lu$L)
fact_lu$U <- drop0(fact_lu$U)
return(fact_lu)
}
unprep_lu <- function(M){
recons_M <- t(M$P) %*% M$L %*% M$U %*% M$Q
# recons_M <- t(M$P) %*% drop0(zapsmall(M$L %*% M$U, 15)) %*% M$Q
logdet_M <- 2 * sum(log(abs(diag(M$U))))
return(list(M = recons_M, logdet_M = logdet_M, diag_U = diag(M$U)))
}
prep_cholesky <- function(L){
diag(L) <- log(diag(L))
return(L)
}
unprep_cholesky <- function(L){
diag(L) <- exp(diag(L))
return(L)
}
prep_matrix <- function(M){drop0(chol(as.matrix(M)))}
unprep_matrix <- function(M){t(M) %*% M}
prep_positive <- function(x){log(x)}
unprep_positive <- function(x){exp(x)}
squarem_prep_function <- function(x, type){
if (type == 'real'){
x
}else if (type == 'lu'){
prep_lu(x)
}else if (type == 'cholesky'){
prep_cholesky(x)
}else if (type == 'matrix'){
prep_matrix(x)
}else if (type == 'positive'){
prep_positive(x)
}else{stop('Invalid type')}
}
squarem_unprep_function <- function(x, type){
if (type == 'real'){
x
}else if (type == 'lu'){
unprep_lu(x)
}else if (type == 'cholesky'){
unprep_cholesky(x)
}else if (type == 'matrix'){
unprep_matrix(x)
}else if (type == 'positive'){
unprep_positive(x)
}else{stop('Invalid type')}
}
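# Sketch of a round-trip check (not run): each prep/unprep pair moves a
# parameter to an unconstrained scale (for SQUAREM-style acceleration) and
# back, so unprep(prep(x)) should recover x.
if (FALSE) {
  M <- crossprod(matrix(rnorm(9), 3)) + diag(3)
  back <- squarem_unprep_function(squarem_prep_function(M, "matrix"), "matrix")
  stopifnot(isTRUE(all.equal(as.matrix(back), M)))
  stopifnot(isTRUE(all.equal(
    squarem_unprep_function(squarem_prep_function(0.5, "positive"), "positive"),
    0.5
  )))
}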
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vglmer/R/squarem_functions.R ----
#' SuperLearner with (Variational) Hierarchical Models
#'
#' These functions integrate \code{vglmer} (or \code{glmer}) into
#' \code{SuperLearner}. Most of the arguments are standard for
#' \code{SuperLearner} functions.
#'
#' @param Y From \code{SuperLearner}: The outcome in the training data set.
#' @param X From \code{SuperLearner}: The predictor variables in the training
#' data.
#' @param newX From \code{SuperLearner}: The predictor variables in validation
#' data.
#' @param formula The formula used for estimation.
#' @param family From \code{SuperLearner}: Currently allows \code{gaussian} or
#' \code{binomial}.
#' @param id From \code{SuperLearner}: Optional cluster identification variable.
#' See \code{SuperLearner} for more details.
#' @param obsWeights From \code{SuperLearner}: Weights for each observation. Not
#' permitted for \code{SL.vglmer}.
#' @param control Control object for estimating \code{vglmer} (e.g.,
#' \link{vglmer_control}) or \code{[g]lmer}.
#' @param object Used in \code{predict} for \code{SL.glmer} and
#' \code{SL.vglmer}. A model estimated using either \code{SL.vglmer} or
#' \code{SL.glmer}.
#' @param ... Not used; included to maintain compatibility with existing
#' methods.
#' @param learner Character name of model from \code{SuperLearner}. See
#' "Details" for how this is used.
#' @param env Environment to assign model. See "Details" for how this is used.
#' @name sl_vglmer
#'
#' @details This documentation describes two types of function.
#'
#' \bold{Estimating Hierarchical Models in SuperLearner}: Two methods for
#' estimating hierarchical models are provided one for variational methods
#' (\code{SL.vglmer}) and one for non-variational methods (\code{SL.glmer}).
#' The accompanying prediction functions are also provided.
#'
#' \bold{Formula with SuperLearner}: The \code{vglmer} package provides a way
#' to estimate models that require or use a formula with \code{SuperLearner}.
#' This allows for a design to be passed that contains variables that are
#' \emph{not} used in estimation. This can be used as follows (see
#' "Examples"). One calls the function \code{add_formula_SL} around the quoted
#' name of a \code{SuperLearner} model, e.g. \code{add_formula_SL(learner =
#' "SL.knn")}. This creates a new model and predict function with the suffix
#' \code{"_f"}. This \bold{requires} a formula to be provided for estimation.
#'
#' With this in hand, \code{"SL.knn_f"} can be passed to \code{SuperLearner} with the
#' accompanying formula argument and thus one can compare models with
#' different formula or design on the same ensemble. The \code{env} argument
#' may need to be manually specified to ensure the created functions can be
#' called by \code{SuperLearner}.
#'
#' @return The functions here return different types of output. \code{SL.vglmer}
#' and \code{SL.glmer} return fitted models with the in-sample predictions as
#' standard for \code{SuperLearner}. The \code{predict} methods return vectors
#' of predicted values. \code{add_formula_SL} creates two objects in the
#' environment (one for estimation \code{model_f} and one for prediction
#' \code{predict.model_f}) used for \code{SuperLearner}.
#' @examples
#'
#' set.seed(456)
#'
#' if (requireNamespace('SuperLearner', quietly = TRUE)){
#' require(SuperLearner)
#' sim_data <- data.frame(
#' x = rnorm(100),
#' g = sample(letters, 100, replace = TRUE)
#' )
#' sim_data$y <- rbinom(nrow(sim_data),
#' 1, plogis(runif(26)[match(sim_data$g, letters)]))
#' sim_data$g <- factor(sim_data$g)
#' sl_vglmer <- function(...){SL.vglmer(..., formula = y ~ x + (1 | g))}
#' SL.glm <- SuperLearner::SL.glm
#' add_formula_SL('SL.glm')
#' sl_glm_form <- function(...){SL.glm_f(..., formula = ~ x)}
#'
#' \donttest{
#' SuperLearner::SuperLearner(
#' Y = sim_data$y, family = 'binomial',
#' X = sim_data[, c('x', 'g')],
#' cvControl = list(V = 2),
#' SL.library = c('sl_vglmer', 'sl_glm_form')
#' )
#' }
#' }
#' @export
SL.vglmer <- function(Y, X, newX, formula, family, id, obsWeights, control = vglmer_control()) {
if(!requireNamespace('vglmer', quietly = FALSE)) {stop("SL.vglmer requires the vglmer package, but it isn't available")}
if (is.character(formula)){
formula <- as.formula(formula)
}
# https://stackoverflow.com/questions/13217322/how-to-reliably-get-dependent-variable-name-from-formula-object
getResponseFromFormula = function(formula) {
if (attr(terms(as.formula(formula)) , which = 'response'))
all.vars(formula)[1]
else
NULL
}
rformula <- getResponseFromFormula(formula)
if (!is.null(rformula)){
if (rformula %in% names(X)){
warning(paste0('Outcome "', rformula, '" seems to be in "X". This is likely ill-advised'))
}
}
if ('...Y' %in% names(X)){
stop('SL.vglmer cannot accept a column in "X" called "...Y". Please rename.')
}
if (!all(obsWeights == 1)){
warning('SL.vglmer does not use weights')
}
if (family$family == 'binomial'){
family <- 'binomial'
}else if (family$family == 'gaussian'){
family <- 'linear'
}else{stop('Family must be binomial or Gaussian for SL.vglmer.')}
X[['...Y']] <- Y
formula <- update.formula(formula, '`...Y` ~ .')
fit.vglmer <- vglmer::vglmer(formula, data = X, family = family, control = control)
pred <- predict(fit.vglmer, newdata = newX, allow_missing_levels = TRUE)
if (family == 'binomial'){
pred <- plogis(pred)
}else if (family != 'linear'){
stop('SuperLearner not set up for non-linear, non-binomial families.')
}
fit <- list(object = fit.vglmer)
out <- list(pred = pred, fit = fit)
class(out$fit) <- c("SL.vglmer")
return(out)
}
#' @rdname sl_vglmer
#' @param newdata Dataset to use for predictions.
#' @param allow_missing_levels Default (\code{TRUE}) allows prediction for
#' levels not observed in the estimation data; the value of \code{0} (with no
#' uncertainty) is used for the corresponding random effect. \bold{Note:} This
#' default differs from \code{predict.vglmer}.
#' @export
predict.SL.vglmer <- function(object, newdata, allow_missing_levels = TRUE, ...){
if(!requireNamespace('vglmer', quietly = FALSE)) {stop("SL.vglmer requires the vglmer package, but it isn't available")}
pred <- predict(object$object, newdata = newdata, allow_missing_levels = allow_missing_levels)
if (object$object$family == 'binomial'){
pred <- plogis(pred)
}else if (object$object$family != 'linear'){
stop('SuperLearner not set up for non-linear, non-binomial families.')
}
return(pred)
}
#' @importFrom stats predict
#' @rdname sl_vglmer
#' @export
SL.glmer <- function(Y, X, newX, formula, family, id, obsWeights, control = NULL) {
if(!requireNamespace('lme4', quietly = FALSE)) {stop("SL.glmer requires the lme4 package, but it isn't available")}
if (is.character(formula)){
formula <- as.formula(formula)
}
# https://stackoverflow.com/questions/13217322/how-to-reliably-get-dependent-variable-name-from-formula-object
getResponseFromFormula = function(formula) {
if (attr(terms(as.formula(formula)) , which = 'response'))
all.vars(formula)[1]
else
NULL
}
rformula <- getResponseFromFormula(formula)
if (!is.null(rformula)){
if (rformula %in% names(X)){
warning(paste0('Outcome "', rformula, '" seems to be in "X". This is likely ill-advised'))
}
}
if ('...Y' %in% names(X)){
stop('SL.glmer cannot accept a column in "X" called "...Y". Please rename.')
}
X[['...Y']] <- Y
formula <- update.formula(formula, '`...Y` ~ .')
environment(formula) <- environment()
if (family$family == 'gaussian'){
if (is.null(control)){
control <- lmerControl()
}
fit.glmer <- lme4::lmer(formula, data = X, weights = obsWeights, control = control)
}else{
if (is.null(control)){
control <- glmerControl()
}
fit.glmer <- lme4::glmer(formula, data = X, weights = obsWeights, family = family, control = control)
}
pred <- stats::predict(fit.glmer, newdata = newX, allow.new.levels = TRUE, type = 'response')
fit <- list(object = fit.glmer)
out <- list(pred = pred, fit = fit)
class(out$fit) <- c("SL.glmer")
return(out)
}
#' @rdname sl_vglmer
#' @param allow.new.levels From \code{lme4}: Allow levels in prediction that are
#' not in the training data. Default is \code{TRUE} for \code{SuperLearner}.
#' @export
predict.SL.glmer <- function(object, newdata, allow.new.levels = TRUE, ...){
if(!requireNamespace('lme4', quietly = FALSE)) {stop("SL.glmer requires the lme4 package, but it isn't available")}
pred <- predict(object$object, newdata = newdata, allow.new.levels = allow.new.levels, type = 'response')
return(pred)
}
#' @rdname sl_vglmer
#' @export
add_formula_SL <- function(learner, env = parent.frame()){
base_learner <- get(learner, envir = env)
base_learner_predict <- get(paste0('predict.', learner), envir = env)
# Add an argument for "formula"
f_formals <- c(alist(formula = ), formals(base_learner, envir = env))
f_formals_predict <- c(formals(base_learner_predict, envir = env))
# Use model.matrix formula *first*
# Placeholder to pass CRAN checks
object <- newdata <- X <- newX <- NULL
f_learner <- function(formula, ...){
args <- mget(ls())
args$X <- model.frame(as.formula(formula), X)
args$newX <- model.frame(as.formula(formula), newX)
args$formula <- NULL
out <- do.call("base_learner", args)
out$fit$SL_formula <- formula
class(out$fit) <- 'base_learner_f'
return(out)
}
f_learner <- deparse(f_learner)
f_learner <- eval(parse(text = paste(gsub(f_learner, pattern='base_learner', replacement = learner), collapse = '\n')))
formals(f_learner) <- f_formals
f_learner_predict <- function(...){
args <- mget(ls())
args$newdata <- model.frame(as.formula(object$SL_formula), newdata)
args$formula <- NULL
out <- do.call("predict.base_learner", args)
return(out)
}
f_learner_predict <- deparse(f_learner_predict)
f_learner_predict <- eval(parse(text = paste(gsub(f_learner_predict, pattern='base_learner', replacement = learner), collapse = '\n')))
formals(f_learner_predict) <- f_formals_predict
assign(x = paste0(learner, '_f'), value = f_learner, envir = env)
assign(x = paste0('predict.', learner, '_f'), value = f_learner_predict, envir = env)
return(paste0(learner, '_f'))
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/vglmer/R/superlearner_functions.R ----
#' Variational Inference for Hierarchical Generalized Linear Models
#'
#' This function estimates hierarchical models using mean-field variational
#' inference. \code{vglmer} accepts standard syntax used for \code{lme4}, e.g.,
#' \code{y ~ x + (x | g)}. Options are described below. Goplerud (2022a; 2022b)
#' provides details on the variational algorithms.
#'
#' @param formula \code{lme4} style-formula for random effects. Typically,
#' \code{(1 + z | g)} indicates a random effect for each level of variable
#' \code{"g"} with a differing slope for the effect of variable \code{"z"} and
#' an intercept (\code{1}); see "Details" for further discussion and how to
#' incorporate splines.
#' @param data \code{data.frame} containing the outcome and predictors.
#' @param family Options are "binomial", "linear", or "negbin" (experimental).
#' If "binomial", outcome must be either binary (\eqn{\{0,1\}}) or
#' \code{cbind(success, failure)} as per standard \code{glm(er)} syntax.
#' Non-integer values are permitted for binomial if \code{force_whole} is set
#' to \code{FALSE} in \code{vglmer_control}.
#' @param control Adjust internal options for estimation. Must use an object
#' created by \link{vglmer_control}.
#'
#' @examples
#'
#' set.seed(234)
#' sim_data <- data.frame(
#' x = rnorm(100),
#' y = rbinom(100, 1, 0.5),
#' g = sample(letters, 100, replace = TRUE)
#' )
#'
#' # Run with defaults
#' est_vglmer <- vglmer(y ~ x + (x | g), data = sim_data, family = "binomial")
#'
#' # Simple prediction
#' predict(est_vglmer, newdata = sim_data)
#'
#' # Summarize results
#' summary(est_vglmer)
#'
#' # Extract parameters
#' coef(est_vglmer); vcov(est_vglmer)
#'
#' # Comparability with lme4,
#' # although ranef is formatted differently.
#' ranef(est_vglmer); fixef(est_vglmer)
#'
#' \donttest{
#' # Run with weaker (i.e. better) approximation
#' vglmer(y ~ x + (x | g),
#' data = sim_data,
#' control = vglmer_control(factorization_method = "weak"),
#' family = "binomial")
#' }
#'
#' \donttest{
#' # Use a spline on x with a linear outcome
#' vglmer(y ~ v_s(x),
#' data = sim_data,
#' family = "linear")
#' }
#'
#' @details
#'
#' \bold{Estimation Syntax:} The \code{formula} argument takes syntax designed
#' to be as similar as possible to \code{lme4}. That is, one can specify models
#' using \code{y ~ x + (1 | g)} where \code{(1 | g)} indicates a random intercept. While
#' not tested extensively, terms of \code{(1 | g / f)} should work as expected. Terms
#' of \code{(1 + x || g)} may work, although they will raise a warning about duplicated
#' names of random effects. \code{(1 + x || g)} terms may not work with spline
#' estimation. To get around this, one might copy the column \code{g} to
#' \code{g_copy} and then write \code{(1 | g) + (0 + x | g_copy)}.
#'
#' \bold{Splines:} Splines can be added using the term \code{v_s(x)} for a
#' spline on the variable \code{x}. These are transformed into hierarchical
#' terms in a standard fashion (e.g. Ruppert et al. 2003) and then estimated
#' using the variational algorithms. At present, only truncated linear
#' functions (\code{type = "tpf"}; the default) and O'Sullivan splines (Wand and
#' Ormerod 2008) are included. The options are described in more detail at
#' \link{v_s}.
#'
#' It is possible to have the spline vary across some categorical predictor by
#' specifying the \code{"by"} argument such as \code{v_s(x, by = g)}. In effect,
#' this adds additional hierarchical terms for the group-level deviations from
#' the "global" spline. \emph{Note:} In contrast to the typical presentation of
#' these splines interacted with categorical variables (e.g., Ruppert et al.
#' 2003), the default use of \code{"by"} includes the lower order interactions
#' that are regularized, i.e. \code{(1 + x | g)}, versus their unregularized
#' version (e.g., \code{x * g}); this can be changed using the \code{by_re}
#' argument described in \link{v_s}. Further, all group-level deviations from
#' the global spline share the same smoothing parameter (same prior
#' distribution).
#'
#' \bold{Default Settings:} By default, the model is estimated using the
#' "strong" (i.e. fully factorized) variational assumption. Setting
#' \code{vglmer_control(factorization_method = "weak")} will improve the quality
#' of the variance approximation but may take considerably more time to
#' estimate. See Goplerud (2022a) for discussion.
#'
#' By default, the prior on each random effect variance (\eqn{\Sigma_j}) uses a Huang-Wand prior (Huang
#' and Wand 2013) with hyper-parameters \eqn{\nu_j = 2} and \eqn{A_{j,k} = 5}.
#' This is designed to be proper but weakly informative. Other options are
#' discussed in \link{vglmer_control} under the \code{prior_variance} argument.
#'
#' By default, estimation is accelerated using SQUAREM (Varadhan and Roland
#' 2008) and (one-step-late) parameter expansion for variational Bayes. Under
#' the default \code{"strong"} factorization, a "translation" expansion is used;
#' under other factorizations a "mean" expansion is used. These can be adjusted
#' using \link{vglmer_control}. See Goplerud (2022b) for more discussion of
#' these methods.
#'
#' @return This returns an object of class \code{vglmer}. The available methods
#' (e.g. \code{coef}) can be found using \code{methods(class="vglmer")}.
#' \describe{
#' \item{beta}{Contains the estimated distribution of the fixed effects
#' (\eqn{\beta}). It is multivariate normal. \code{mean} contains the means;
#' \code{var} contains the variance matrix; \code{decomp_var} contains a matrix
#' \eqn{L} such that \eqn{L^T L} equals the full variance matrix.}
#' \item{alpha}{Contains the estimated distribution of the random effects
#' (\eqn{\alpha}). They are all multivariate normal. \code{mean} contains the
#' means; \code{dia.var} contains the variance of each random effect. \code{var}
#' contains the variance matrix of each random effect (j,g). \code{decomp_var}
#' contains a matrix \eqn{L} such that \eqn{L^T L} equals the full variance of
#' the entire set of random effects.}
#' \item{joint}{If \code{factorization_method="weak"}, this is a list with one
#' element (\code{decomp_var}) that contains a matrix \eqn{L} such that \eqn{L^T
#' L} equals the full variance matrix between the fixed and random effects
#' \eqn{q(\beta,\alpha)}. The marginal variances are included in \code{beta} and
#' \code{alpha}. If the factorization method is not \code{"weak"}, this is
#' \code{NULL}.}
#' \item{sigma}{Contains the estimated distribution of each random
#' effect covariance \eqn{\Sigma_j}; all distributions are Inverse-Wishart.
#' \code{cov} contains a list of the estimated scale matrices. \code{df}
#' contains a list of the degrees of freedom.}
#' \item{hw}{If a Huang-Wand prior is used (see Huang and Wand 2013 or Goplerud
#' 2022b for more details), then the estimated distribution. Otherwise, it is
#' \code{NULL}. All distributions are Inverse-Gamma. \code{a} contains a list of
#' the shape parameters. \code{b} contains a list of the scale parameters.}
#' \item{sigmasq}{If \code{family="linear"}, this contains a list of the
#' estimated parameters for \eqn{\sigma^2}; its distribution is Inverse-Gamma.
#' \code{a} contains the shape parameter; \code{b} contains the scale
#' parameter.}
#' \item{ln_r}{If \code{family="negbin"}, this contains the variational
#' parameters for the log dispersion parameter \eqn{\ln(r)}. \code{mu} contains
#' the mean; \code{sigma} contains the variance.}
#' \item{family}{Family of outcome.}
#' \item{ELBO}{Contains the ELBO at the termination of the algorithm.}
#' \item{ELBO_trajectory}{\code{data.frame} tracking the ELBO per iteration.}
#' \item{control}{Contains the control parameters from \code{vglmer_control}
#' used in estimation.}
#' \item{internal_parameters}{Variety of internal parameters used in
#' post-estimation functions.}
#' \item{formula}{Contains the formula used for estimation; contains the
#' original formula, fixed effects, and random effects parts separately for
#' post-estimation functions. See \code{formula.vglmer} for more details.}
#' }
#' @importFrom lme4 mkReTrms findbars nobars subbars
#' @importFrom stats model.response model.matrix model.frame rnorm rWishart
#' qlogis optim residuals lm plogis setNames .getXlevels
#' @importFrom graphics plot
#' @importFrom Rcpp sourceCpp
#' @importFrom mgcv interpret.gam
#' @references
#' Goplerud, Max. 2022a. "Fast and Accurate Estimation of Non-Nested Binomial
#' Hierarchical Models Using Variational Inference." \emph{Bayesian Analysis}. 17(2):
#' 623-650.
#'
#' Goplerud, Max. 2022b. "Re-Evaluating Machine Learning for MRP Given the
#' Comparable Performance of (Deep) Hierarchical Models." Working paper.
#'
#' Huang, Alan, and Matthew P. Wand. 2013. "Simple Marginally Noninformative
#' Prior Distributions for Covariance Matrices." \emph{Bayesian Analysis}.
#' 8(2):439-452.
#'
#' Ruppert, David, Matt P. Wand, and Raymond J. Carroll. 2003.
#' \emph{Semiparametric Regression}. Cambridge University Press.
#'
#' Varadhan, Ravi, and Christophe Roland. 2008. "Simple and Globally Convergent
#' Methods for Accelerating the Convergence of any EM Algorithm." \emph{Scandinavian
#' Journal of Statistics}. 35(2): 335-353.
#'
#' Wand, Matt P., and John T. Ormerod. 2008. "On Semiparametric Regression with
#' O'Sullivan Penalized Splines". \emph{Australian & New Zealand Journal of Statistics}.
#' 50(2): 179-198.
#'
#' @useDynLib vglmer
#' @export
vglmer <- function(formula, data, family, control = vglmer_control()) {
# Verify integrity of parameter arguments
family <- match.arg(family, choices = c("negbin", "binomial", "linear"))
if (family == "negbin" & !(control$parameter_expansion %in% c('none', 'mean'))){
message('Setting parameter_expansion to mean for negative binomial estimation')
control$parameter_expansion <- 'mean'
}
checkdf <- inherits(data, 'data.frame')
if (is.null(data)){
checkdf <- TRUE
}
if (checkdf != TRUE) {
warning(paste0("data is not a data.frame? Behavior may be unexpected: ", checkdf))
}
if (!inherits(formula, 'formula')){
stop('"formula" must be a formula.')
}
# Delete the missing data
# (i.e. sub out the random effects, do model.frame)
#
nobs_init <- nrow(data)
# Interpret gam using mgcv::interpret.gam
parse_formula <- tryCatch(
interpret.gam(subbars(formula), extra.special = 'v_s'), error = function(e){NULL})
if (is.null(parse_formula)){
# If this fails, usually when there is custom argument in environment, use this instead
parse_formula <- fallback_interpret.gam0(subbars(formula), extra.special = 'v_s')
}
if (any(!sapply(parse_formula$smooth.spec, inherits, what = 'vglmer_spline'))){
stop('gam specials are not permitted; use v_s(...) and see documentation.')
}
if (control$verify_columns){
if (!all(parse_formula$pred.names %in% colnames(data))){
missing_columns <- setdiff(parse_formula$pred.names, colnames(data))
stop(
paste0('The following columns are missing from "data". Can override with vglmer_control (not usually desirable): ',
paste(missing_columns, collapse =', '))
)
}
}
data <- model.frame(parse_formula$fake.formula, data,
drop.unused.levels = TRUE)
tt <- terms(data)
nobs_complete <- nrow(data)
missing_obs <- nobs_init - nobs_complete
if (length(missing_obs) == 0) {
missing_obs <- "??"
}
#Extract the Outcome
y <- model.response(data)
if (is.matrix(y)){
N <- nrow(y)
rownames(y) <- NULL
}else{
N <- length(y)
y <- as.vector(y)
names(y) <- NULL
}
if (!inherits(control, "vglmer_control")) {
stop("control must be object from vglmer_control().")
}
do_timing <- control$do_timing
factorization_method <- control$factorization_method
print_prog <- control$print_prog
iterations <- control$iterations
quiet <- control$quiet
parameter_expansion <- control$parameter_expansion
tolerance_elbo <- control$tolerance_elbo
tolerance_parameters <- control$tolerance_parameters
debug_param <- control$debug_param
linpred_method <- control$linpred_method
vi_r_method <- control$vi_r_method
if (is.numeric(vi_r_method)){
if (length(vi_r_method) > 1){stop('If "vi_r_method" is numeric, it must be a single number.')}
vi_r_val <- as.numeric(vi_r_method)
vi_r_method <- "fixed"
}else{
vi_r_val <- NA
}
debug_ELBO <- control$debug_ELBO
# Flip given that "tictoc" accepts "quiet=quiet_time"
quiet_time <- !control$verbose_time
if (do_timing) {
if (!requireNamespace("tictoc", quietly = TRUE)) {
stop("tictoc must be installed to do timing")
}
tic <- tictoc::tic
toc <- tictoc::toc
tic.clear <- tictoc::tic.clear
tic.clearlog <- tictoc::tic.clearlog
tic.clear()
tic.clearlog()
tic("Prepare Model")
}
if (!(factorization_method %in% c("weak", "strong", "partial", "collapsed"))) {
stop("factorization_method must be 'weak', 'strong', or 'partial'.")
}
if (is.null(print_prog)) {
print_prog <- max(c(1, floor(iterations / 20)))
}
if (!(family %in% c("binomial", "negbin", "linear"))) {
stop('family must be one of "linear", "binomial", "negbin".')
}
if (family == "binomial") {
if (is.matrix(y)) {
# if (!(class(y) %in% c('numeric', 'integer'))){
if (min(y) < 0) {
stop("Negative numbers not permitted in outcome")
}
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
if (any(is.wholenumber(y) == FALSE)) {
if (control$force_whole) {
stop("If force_whole = TRUE, must provide whole numbers as outcome")
} else {
warning("Non-integer numbers in y")
}
}
# Total trials (Success + Failure)
trials <- rowSums(y)
rownames(trials) <- NULL
# Successes
y <- y[, 1]
rownames(y) <- NULL
} else {
if (!all(y %in% c(0, 1)) & family == "binomial") {
stop("Only {0,1} outcomes permitted for numeric y.")
}
trials <- rep(1, length(y))
}
} else if (family == 'negbin') {
if (is.matrix(y)) {
stop('"linear" family requires a vector outcome.')
}
if (!(class(y) %in% c("numeric", "integer"))) {
stop("Must provide vector of numbers with negbin.")
}
if (min(y) < 0) {
stop("Negative numbers not permitted in outcome")
}
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
if (any(is.wholenumber(y) == FALSE)) {
if (control$force_whole) {
stop("If force_whole = TRUE, must provide whole numbers")
} else {
warning("Non-integer numbers in y")
}
}
} else if (family == 'linear') {
if (is.matrix(y)) {
stop('"linear" family requires a vector outcome.')
}
if (!(class(y) %in% c("numeric", "integer"))) {
stop("Must provide vector of numbers with linear.")
}
y <- as.numeric(y)
#Do nothing if linear
} else {
stop('family is invalid.')
}
if (family %in% c("binomial", "linear")) {
ELBO_type <- "augmented"
} else if (family == "negbin") {
ELBO_type <- "profiled"
} else {
stop("Check ELBO_type")
}
# Extract X (FE design matrix)
fe_fmla <- tryCatch(
interpret.gam(nobars(formula), extra.special = 'v_s'), error = function(e){NULL})
if (is.null(fe_fmla)){
# If this fails, usually when there is custom argument in environment, use this instead
fe_fmla <- fallback_interpret.gam0(nobars(formula), extra.special = 'v_s')
}
if (length(fe_fmla$smooth.spec) > 0){
# Add the linear spline terms to the main effect.
fe_update <- sapply(fe_fmla$smooth.spec, FUN=function(i){
if (i$by != "NA" & i$by_re == FALSE){
fe_i <- paste0(i$term, ' * ', i$by)
}else{
fe_i <- i$term
}
})
fe_update <- paste0(fe_update, collapse = ' + ')
fe_fmla <- update.formula(fe_fmla$pf,
paste0('. ~ . + 1 + ', fe_update)
)
}else{
fe_fmla <- fe_fmla$pf
}
# Create the FE design
X <- model.matrix(fe_fmla, data = data)
fe_terms <- terms(fe_fmla)
fe_Xlevels <- .getXlevels(fe_terms, data)
fe_contrasts <- attr(X, 'contrasts')
# Extract the Z (Random Effect) design matrix.
re_fmla <- findbars(formula)
# If using splines by group, add random effects to
# the main level.
if (!all(sapply(parse_formula$smooth.spec,
FUN=function(i){i$by}) %in% c('NA'))){
by_splines <- parse_formula$smooth.spec[
which(sapply(parse_formula$smooth.spec, FUN=function(i){(i$by != "NA" & i$by_re == TRUE)}))
]
character_re <- lapply(re_fmla, FUN=function(i){strsplit(deparse(i), split = ' \\| ')[[1]]})
character_re_group <- sapply(character_re, FUN=function(i){i[2]})
if (any(duplicated(character_re_group))){
stop('Some grouping factors for random effects are duplicated. Reformulate initial formula.')
}
for (v in sapply(character_re, FUN=function(i){i[2]})){
if (!(is.factor(data[[v]]) | is.character(data[[v]]))){
data[[v]] <- as.character(data[[v]])
}
}
for (b in by_splines){
b_term <- b$term
b_by <- b$by
if (!(is.factor(data[[b_by]]) | is.character(data[[b_by]]))){
stop('For now, all v_s spline "by" factors must be characters or factors.')
}
# If "by" grouping already used, then add to the RE
if (b_by %in% character_re_group){
position_b_by <- which(b_by == character_re_group)
existing_re_b_by <- character_re[[position_b_by]][1]
new_re_b_by <- paste0(unique(c('1', strsplit(existing_re_b_by, split=' \\+ ')[[1]], b_term)), collapse = ' + ')
character_re[[position_b_by]][1] <- new_re_b_by
}else{
# If not, then add a new RE group with a
# random intercept and random slope.
character_re <- c(character_re, list(c(paste0('1 + ', b_term), b_by)))
character_re_group <- sapply(character_re, FUN=function(i){i[2]})
}
}
character_re_fmla <- paste(sapply(character_re, FUN=function(i){paste0('(', i[1], ' | ', i[2], ' )')}), collapse = " + ")
old_re <- re_fmla
re_fmla <- lapply(character_re, FUN=function(i){str2lang(paste0(i[1], ' | ', i[2]))})
}
if (!is.null(re_fmla) & (length(re_fmla) > 0)){
mk_Z <- mkReTrms(re_fmla, data, reorder.terms = FALSE, reorder.vars = FALSE)
Z <- t(mk_Z$Zt)
p.X <- ncol(X)
p.Z <- ncol(Z)
####
# Process the REs to get various useful terms.
####
# RE names and names of variables included for each.
names_of_RE <- mk_Z$cnms
if (anyDuplicated(names(names_of_RE)) > 0){
warning('Some random effects names are duplicated. Re-naming for stability by adding "-[0-9]" at end.')
nre <- names(names_of_RE)
unre <- unique(nre)
for (u in unre){
nre_u <- which(nre == u)
if (length(nre_u) > 1){
nre[nre_u] <- paste0(nre[nre_u], '-', seq_len(length(nre_u)))
}
}
names(names_of_RE) <- nre
if (anyDuplicated(names(names_of_RE)) > 0){
stop('Renaming duplicates failed. Please rename random effects to proceed.')
}
}
number_of_RE <- length(mk_Z$Gp) - 1
if ( (number_of_RE < 1) & (length(parse_formula$smooth.spec) == 0) ) {
stop("Need to provide at least one random effect or spline...")
}
# The position that demarcates each random effect.
# That is, breaks_for_RE[2] means at that position + 1 does RE2 start.
breaks_for_RE <- c(0, cumsum(diff(mk_Z$Gp)))
# Dimensionality of \alpha_{j,g}, i.e. 1 if random intercept
# 2 if random intercept + random slope
d_j <- lengths(names_of_RE)
# Number of GROUPs for each random effect.
g_j <- diff(mk_Z$Gp) / d_j
# Empty vector to build the formatted names for each random effect.
fmt_names_Z <- c()
init_Z_names <- colnames(Z)
for (v in 1:number_of_RE) {
name_of_effects_v <- names_of_RE[[v]]
mod_name <- rep(name_of_effects_v, g_j[v])
levels_of_re <- init_Z_names[(1 + breaks_for_RE[v]):breaks_for_RE[v + 1]]
fmt_names_Z <- c(fmt_names_Z, paste0(names(names_of_RE)[v], " @ ", mod_name, " @ ", levels_of_re))
}
colnames(Z) <- fmt_names_Z
cyclical_pos <- lapply(1:number_of_RE, FUN = function(i) {
seq(breaks_for_RE[i] + 1, breaks_for_RE[i + 1])
})
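# Worked illustration (hypothetical): for a single term (1 + x | g) with 26
# levels of g, d_j = 2 (intercept and slope), g_j = 26, breaks_for_RE =
# c(0, 52), cyclical_pos[[1]] = 1:52, and fmt_names_Z holds entries such as
# "g @ (Intercept) @ a" and "g @ x @ a".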
}else{
Z <- drop0(Matrix(nrow = nrow(X), ncol = 0))
p.X <- ncol(X)
p.Z <- 0
names_of_RE <- c()
number_of_RE <- 0
breaks_for_RE <- c(0)
d_j <- c()
g_j <- c()
fmt_names_Z <- c()
cyclical_pos <- list()
if ( (length(parse_formula$smooth.spec) == 0) ) {
stop("Need to provide at least one random effect or spline...")
}
}
M.names <- cbind(unlist(mapply(names_of_RE, g_j, SIMPLIFY = FALSE, FUN = function(i, j) {
rep(i, j)
})))
if (!is.null(M.names)){
U_names <- unique(cbind(rep(names(names_of_RE), g_j * d_j), M.names))
B_j <- lapply(split(U_names[,2], U_names[,1]), FUN=function(j){
B_jj <- which(j %in% colnames(X))
if (length(B_jj) == 0){
return(matrix(nrow = length(j), ncol = 0))
}
sparseMatrix(i = B_jj, j = 1:length(B_jj),
x = 1, dims = c(length(j), length(B_jj)))
})
U_names_Bj <- unique(U_names[,2])
M <- cbind(match(M.names[, 1], U_names_Bj), rep(1 / g_j, d_j * g_j))
M <- sparseMatrix(i = 1:nrow(M), j = M[, 1], x = M[, 2], dims = c(ncol(Z), length(U_names_Bj)))
}else{
B_j <- list()
M <- drop0(matrix(0, nrow = 0, ncol = ncol(X)))
}
if (!is.null(names_of_RE)){
any_Mprime <- TRUE
M_prime.names <- paste0(rep(names(names_of_RE), g_j * d_j), " @ ", M.names)
M_prime <- cbind(match(M_prime.names, unique(M_prime.names)), rep(1 / g_j, d_j * g_j))
M_prime <- sparseMatrix(i = seq_len(ncol(Z)),
j = M_prime[, 1],
x = M_prime[, 2],
dims = c(ncol(Z), max(M_prime[,1])))
colnames(M_prime) <- unique(M_prime.names)
M_prime_one <- M_prime
M_prime_one@x <- rep(1, length(M_prime_one@x))
stopifnot(identical(paste0(rep(names(names_of_RE), d_j), " @ ", unlist(names_of_RE)), colnames(M_prime)))
mu_to_beta_names <- match(unlist(names_of_RE), colnames(X))
id_mu_to_beta <- seq_len(sum(d_j))
which_is_na_mu_to_beta <- which(is.na(mu_to_beta_names))
if (length(which_is_na_mu_to_beta) > 0){
mu_to_beta_names <- mu_to_beta_names[-which_is_na_mu_to_beta]
id_mu_to_beta <- id_mu_to_beta[-which_is_na_mu_to_beta]
}
M_mu_to_beta <- sparseMatrix(
i = id_mu_to_beta, j = mu_to_beta_names,
x = 1, dims = c(sum(d_j), p.X))
}else{
any_Mprime <- FALSE
M_prime_one <- M_prime <- drop0(matrix(0, nrow = 0, ncol = 0))
M_mu_to_beta <- drop0(matrix(0, nrow = 0, ncol = p.X))
}
colnames(M_mu_to_beta) <- colnames(X)
rownames(M_mu_to_beta) <- colnames(M_prime)
# Extract the Specials
if (length(parse_formula$smooth.spec) > 0){
any_Mprime <- TRUE
base_specials <- length(parse_formula$smooth.spec)
# Number of splines + one for each "by"...
n.specials <- base_specials +
sum(sapply(parse_formula$smooth.spec, FUN=function(i){i$by}) != "NA")
Z.spline.attr <- as.list(rep(NA, base_specials))
Z.spline <- as.list(rep(NA, n.specials))
Z.spline.size <- rep(NA, n.specials)
special_counter <- 0
for (i in 1:base_specials){
special_i <- parse_formula$smooth.spec[[i]]
all_splines_i <- vglmer_build_spline(x = data[[special_i$term]],
by = data[[special_i$by]],
knots = special_i$knots, type = special_i$type,
force_vector = special_i$force_vector,
outer_okay = special_i$outer_okay, by_re = special_i$by_re)
Z.spline.attr[[i]] <- c(all_splines_i[[1]]$attr,
list(type = special_i$type, by = special_i$by))
spline_counter <- 1
for (spline_i in all_splines_i){
special_counter <- special_counter + 1
stopifnot(spline_counter %in% 1:2)
colnames(spline_i$x) <- paste0('spline @ ', special_i$term, ' @ ', colnames(spline_i$x))
if (spline_counter > 1){
spline_name <- paste0('spline-',special_i$term,'-', i, '-int')
}else{
spline_name <- paste0('spline-', special_i$term, '-', i, '-base')
}
Z.spline[[special_counter]] <- spline_i$x
Z.spline.size[special_counter] <- ncol(spline_i$x)
names_of_RE[[spline_name]] <- spline_name
number_of_RE <- number_of_RE + 1
d_j <- setNames(c(d_j, 1), c(names(d_j), spline_name))
g_j <- setNames(c(g_j, ncol(spline_i$x)), c(names(g_j), spline_name))
breaks_for_RE <- c(breaks_for_RE, max(breaks_for_RE) + ncol(spline_i$x))
fmt_names_Z <- c(fmt_names_Z, colnames(spline_i$x))
p.Z <- p.Z + ncol(spline_i$x)
spline_counter <- spline_counter + 1
}
}
cyclical_pos <- lapply(1:number_of_RE, FUN = function(i) {
seq(breaks_for_RE[i] + 1, breaks_for_RE[i + 1])
})
Z.spline <- drop0(do.call('cbind', Z.spline))
Z <- drop0(cbind(Z, Z.spline))
if (ncol(M_prime) == 0){
M_prime <- rbind(M_prime,
drop0(matrix(0, nrow = ncol(Z.spline), ncol = 0)))
M_prime_one <- rbind(M_prime_one,
drop0(matrix(0, nrow = ncol(Z.spline), ncol = 0)))
}else{
M_prime <- rbind(M_prime,
drop0(sparseMatrix(i = 1, j = 1, x = 0,
dims = c(ncol(Z.spline), ncol(M_prime)))))
M_prime_one <- rbind(M_prime_one,
drop0(sparseMatrix(i = 1, j = 1, x = 0,
dims = c(ncol(Z.spline), ncol(M_prime_one)))))
}
M_prime <- cbind(M_prime, drop0(sparseMatrix(i = 1, j = 1, x = 0,
dims = c(nrow(M_prime), special_counter)
)))
M_prime_one <- cbind(M_prime_one, drop0(sparseMatrix(i = 1, j = 1, x = 0,
dims = c(nrow(M_prime_one), special_counter)
)))
M_mu_to_beta <- rbind(M_mu_to_beta,
drop0(sparseMatrix(i = 1, j = 1, x = 0,
dims = c(special_counter, p.X)))
)
extra_Bj <- lapply(setdiff(names(names_of_RE), names(B_j)), FUN=function(i){Diagonal(n = d_j[i])})
names(extra_Bj) <- setdiff(names(names_of_RE), names(B_j))
B_j <- c(B_j, extra_Bj)[names(names_of_RE)]
}else{
n.specials <- 0
Z.spline.attr <- NULL
Z.spline <- NULL
Z.spline.size <- NULL
}
if (length(B_j) > 0){
B_j <- bdiag(B_j)
}else{B_j <- NULL}
debug_px <- control$debug_px
if (control$parameter_expansion %in% c('translation', 'diagonal')){
px_method <- control$px_method
px_it <- control$px_numerical_it
opt_prior_rho <- NULL
parsed_RE_groups <- get_RE_groups(formula = re_fmla, data = data)
}
# List of Lists
# Outer list: one for RE
# Inner List: One for each GROUP with its row positions.
outer_alpha_RE_positions <- mapply(d_j, g_j, breaks_for_RE[-length(breaks_for_RE)],
SIMPLIFY = FALSE, FUN = function(a, b, m) {
split(m + seq(1, a * b), rep(1:b, each = a))
})
if (anyDuplicated(unlist(outer_alpha_RE_positions)) != 0 | max(unlist(outer_alpha_RE_positions)) != ncol(Z)) {
stop("Issue with creating OA positions")
}
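# Illustration (hypothetical): continuing the (1 + x | g) example, the outer
# list has one element whose inner list holds 26 vectors c(1,2), c(3,4), ...,
# i.e. the rows of vi_alpha_mean for each group's intercept and slope.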
####
# Prepare Initial Values
###
vi_sigmasq_prior_a <- 0
vi_sigmasq_prior_b <- 0
vi_sigmasq_a <- vi_sigmasq_b <- 1
if (family == "linear") {
vi_sigmasq_a <- (nrow(X) + sum(d_j * g_j))/2 + vi_sigmasq_prior_a
vi_sigmasq_b <- sum(residuals(lm(y ~ 1))^2)/2 + vi_sigmasq_prior_b
s <- y
vi_pg_b <- 1
vi_pg_c <- NULL
vi_r_mu <- 0
vi_r_sigma <- 0
vi_r_mean <- 0
choose_term <- -length(y)/2 * log(2 * pi)
} else if (family == "binomial") {
s <- y - trials / 2
vi_pg_b <- trials
vi_r_mu <- 0
vi_r_mean <- 0
vi_r_sigma <- 0
choose_term <- sum(lchoose(n = round(trials), k = round(y)))
} else if (family == 'negbin') {
# Initialize
if (vi_r_method == "fixed") {
vi_r_mu <- vi_r_val
vi_r_mean <- exp(vi_r_mu)
vi_r_sigma <- 0
} else if (vi_r_method == "VEM") {
if (!requireNamespace("MASS", quietly = TRUE)) {
stop("Install MASS to use negbin")
}
vi_r_mean <- MASS::glm.nb(y ~ 1)$theta
vi_r_mu <- log(vi_r_mean)
vi_r_sigma <- 0
} else if (vi_r_method %in% c("Laplace", "delta")) {
init_r <- optim(
par = 0, fn = VEM.PELBO.r, method = "L-BFGS-B", hessian = TRUE,
control = list(fnscale = -1), y = y, psi = rep(log(mean(y)), length(y)), zVz = 0
)
vi_r_mu <- init_r$par
vi_r_sigma <- as.numeric(-1 / init_r$hessian)
vi_r_mean <- exp(vi_r_mu + vi_r_sigma / 2)
} else {
stop("vi_r_method must be 'VEM' or 'fixed'.")
}
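# Negative binomial PG augmentation (cf. Polson, Scott, and Windle 2013):
# with dispersion r, the pseudo-outcome is (y_i - r) / 2 and the PG shape is
# y_i + r; here r is replaced by its variational mean.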
s <- (y - vi_r_mean) / 2
vi_pg_b <- y + vi_r_mean
choose_term <- -sum(lgamma(y + 1)) - sum(y) * log(2)
}else{
stop('family must be linear, binomial, or negative binomial.')
}
# Initialize variational parameters.
# Note that we keep a sparse matrix or lowertri such that
# t(vi_beta_decomp) %*% vi_beta_decomp = VARIANCE
vi_beta_decomp <- Diagonal(x = rep(0, ncol(X)))
vi_alpha_decomp <- Diagonal(x = rep(0, ncol(Z)))
vi_sigma_alpha_nu <- g_j
prior_variance <- control$prior_variance
do_huangwand <- FALSE
vi_a_APRIOR_jp <- vi_a_nu_jp <- vi_a_a_jp <- vi_a_b_jp <- NULL
prior_sigma_alpha_nu <- prior_sigma_alpha_phi <- NULL
if (prior_variance == 'hw') {
do_huangwand <- TRUE
INNER_IT <- control$hw_inner
vi_a_nu_jp <- rep(2, length(d_j))
names(vi_a_nu_jp) <- names(names_of_RE)
vi_a_APRIOR_jp <- lapply(d_j, FUN=function(i){rep(5, i)})
vi_a_a_jp <- mapply(d_j, vi_a_nu_jp, SIMPLIFY = FALSE,
FUN=function(i,nu){1/2 * (nu + rep(i, i))})
vi_a_b_jp <- lapply(vi_a_APRIOR_jp, FUN=function(i){1/i^2})
} else if (prior_variance == "jeffreys") {
prior_sigma_alpha_nu <- rep(0, number_of_RE)
prior_sigma_alpha_phi <- lapply(d_j, FUN = function(i) {
diag(x = 0, nrow = i, ncol = i)
})
} else if (prior_variance == "mean_exists") {
prior_sigma_alpha_nu <- d_j + 1 # Ensures the mean exists...
prior_sigma_alpha_phi <- lapply(d_j, FUN = function(i) {
diag(x = 1, nrow = i, ncol = i)
})
} else if (prior_variance == "limit") {
prior_sigma_alpha_nu <- d_j - 1
prior_sigma_alpha_phi <- lapply(d_j, FUN = function(i) {
diag(x = 0, nrow = i, ncol = i)
})
} else if (prior_variance == "uniform") {
prior_sigma_alpha_nu <- -(d_j + 1)
prior_sigma_alpha_phi <- lapply(d_j, FUN = function(i) {
diag(x = 0, nrow = i, ncol = i)
})
} else {
stop("Invalid option for prior variance provided.")
}
if (do_huangwand){
iw_prior_constant <- mapply(vi_a_nu_jp, d_j,
FUN = function(nu, d) {
nu <- nu + d - 1
return(- (nu * d) / 2 * log(2) - multi_lgamma(a = nu / 2, p = d))
}
)
vi_sigma_alpha_nu <- vi_sigma_alpha_nu + vi_a_nu_jp + d_j - 1
}else{
# Normalizing constant for the inverse-Wishart so the ELBO takes the correct value when comparing models.
iw_prior_constant <- mapply(prior_sigma_alpha_nu, prior_sigma_alpha_phi,
FUN = function(nu, Phi) {
if (nu <= (ncol(Phi) - 1)) {
return(0)
} else {
return(make_log_invwishart_constant(nu, Phi))
}
}
)
vi_sigma_alpha_nu <- vi_sigma_alpha_nu + prior_sigma_alpha_nu
}
if ( control$init %in% c("EM", "EM_FE") ){
if (family == "linear"){
jointXZ <- cbind(X,Z)
if (control$init == 'EM_FE'){
EM_init <- LinRegChol(X = drop0(X),
omega = sparseMatrix(i = 1:nrow(X), j = 1:nrow(X), x = 1),
y = y, prior_precision = sparseMatrix(i = 1:ncol(X), j = 1:ncol(X), x = 1e-5))$mean
# stop('Setup EM init for linear')
# solve(Matrix::Cholesky( t(joint.XZ) %*% sparseMatrix(i = 1:N, j = 1:N, x = pg_mean) %*% joint.XZ + EM_variance),
# t(joint.XZ) %*% (adj_out) )
EM_init <- list('beta' = EM_init, 'alpha' = rep(0, ncol(Z)))
}else{
stop('Setup EM init')
EM_init <- LinRegChol(X = jointXZ,
omega = sparseMatrix(i = 1:nrow(jointXZ), j = 1:nrow(jointXZ), x = 1),
y = y, prior_precision = sparseMatrix(i = 1:ncol(jointXZ), j = 1:ncol(jointXZ), x = 1/4))$mean
EM_init <- list('beta' = EM_init[1:ncol(X)], 'alpha' = EM_init[-1:-ncol(X)])
}
rm(jointXZ)
} else if (family == "negbin") {
if (control$init == 'EM_FE'){
EM_init <- EM_prelim_nb(X = X, Z = drop0(matrix(0, nrow = nrow(X), ncol = 0)), y = y, est_r = exp(vi_r_mu), iter = 15, ridge = 10^5)
EM_init <- list('beta' = EM_init$beta, 'alpha' = rep(0, ncol(Z)))
}else{
EM_init <- EM_prelim_nb(X = X, Z = Z, y = y, est_r = exp(vi_r_mu), iter = 15, ridge = 4)
}
} else {
if (control$init == 'EM_FE'){
EM_init <- EM_prelim_logit(X = X, Z = drop0(matrix(0, nrow = nrow(X), ncol = 0)), s = s, pg_b = vi_pg_b, iter = 15, ridge = 10^5)
EM_init <- list('beta' = EM_init$beta, 'alpha' = rep(0, ncol(Z)))
}else{
EM_init <- EM_prelim_logit(X = X, Z = Z, s = s, pg_b = vi_pg_b, iter = 15, ridge = 4)
}
}
vi_beta_mean <- matrix(EM_init$beta)
vi_alpha_mean <- matrix(EM_init$alpha)
vi_sigma_alpha <- calculate_expected_outer_alpha(
alpha_mu = vi_alpha_mean,
L = sparseMatrix(i = 1, j = 1, x = 1e-4, dims = rep(ncol(Z), 2)),
re_position_list = outer_alpha_RE_positions
)
if (!do_huangwand){
vi_sigma_alpha <- mapply(vi_sigma_alpha$outer_alpha, prior_sigma_alpha_phi, SIMPLIFY = FALSE, FUN = function(i, j) {
i + j
})
}else{
#Update Inverse-Wishart
vi_sigma_alpha <- mapply(vi_sigma_alpha$outer_alpha, vi_a_a_jp,
vi_a_b_jp, vi_a_nu_jp, SIMPLIFY = FALSE, FUN = function(i, tilde.a, tilde.b, nu) {
i + sparseMatrix(i = seq_len(nrow(i)), j = seq_len(nrow(i)), x = 1)
})
#Update a_{j,p}
diag_Einv_sigma <- mapply(vi_sigma_alpha,
vi_sigma_alpha_nu, d_j, SIMPLIFY = FALSE, FUN = function(phi, nu, d) {
inv_phi <- solve(phi)
sigma.inv <- nu * inv_phi
return(diag(sigma.inv))
})
vi_a_b_jp <- mapply(vi_a_nu_jp, vi_a_APRIOR_jp, diag_Einv_sigma,
SIMPLIFY = FALSE,
FUN=function(nu, APRIOR, diag_j){
1/APRIOR^2 + nu * diag_j
})
}
} else if (control$init == "random") {
vi_beta_mean <- rnorm(ncol(X))
vi_alpha_mean <- rep(0, ncol(Z))
vi_sigma_alpha <- mapply(d_j, g_j, SIMPLIFY = FALSE, FUN = function(d, g) {
out <- rWishart(n = 1, df = ifelse(g >= d, g, d), Sigma = diag(d))[ , , 1]
if (d == 1){
out <- matrix(out)
}
return(out)
})
} else if (control$init == "zero") {
vi_beta_mean <- rep(0, ncol(X))
if (family == "binomial") {
vi_beta_mean[1] <- qlogis(sum(y) / sum(trials))
} else if (family == "negbin") {
vi_beta_mean[1] <- log(mean(y))
} else if (family == 'linear'){
vi_beta_mean[1] <- mean(y)
} else {
stop('Set up init')
}
vi_alpha_mean <- rep(0, ncol(Z))
vi_sigma_alpha <- mapply(d_j, g_j, SIMPLIFY = FALSE, FUN = function(d, g) {
diag(x = 1, ncol = d, nrow = d)
})
# if (do_huangwand){stop('Setup init for zero')}
} else {
stop("Invalid initialization method")
}
zero_mat <- sparseMatrix(i = 1, j = 1, x = 0, dims = c(ncol(X), ncol(X)))
zero_mat <- drop0(zero_mat)
if (factorization_method %in% c("weak", "collapsed")) {
vi_joint_decomp <- bdiag(vi_beta_decomp, vi_alpha_decomp)
joint.XZ <- cbind(X, Z)
log_det_beta_var <- log_det_alpha_var <- NULL
} else {
vi_joint_decomp <- NULL
log_det_joint_var <- NULL
}
# Create mapping for this to allow sparse implementations.
mapping_sigma_alpha <- make_mapping_alpha(vi_sigma_alpha)
running_log_det_alpha_var <- rep(NA, number_of_RE)
lagged_alpha_mean <- rep(-Inf, ncol(Z))
lagged_beta_mean <- rep(-Inf, ncol(X))
lagged_sigma_alpha <- vi_sigma_alpha
if (factorization_method %in% c("weak", "collapsed")) {
lagged_joint_decomp <- vi_joint_decomp
} else {
lagged_alpha_decomp <- vi_alpha_decomp
lagged_beta_decomp <- vi_beta_decomp
}
lagged_vi_r_mu <- -Inf
lagged_vi_sigmasq_a <- lagged_vi_sigmasq_b <- -Inf
lagged_ELBO <- -Inf
skip_translate <- FALSE
accepted_times <- 0
attempted_expansion <- 0
spline_REs <- grepl(names(d_j), pattern='^spline-')
zeromat_beta <- drop0(Diagonal(x = rep(0, ncol(X))))
stationary_rho <- do.call('c', lapply(d_j[!spline_REs], FUN=function(i){as.vector(diag(x = i))}))
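# stationary_rho stacks vec(I_{d_j}) for each non-spline random effect; at
# this value the "translation" expansion is the identity transformation, so
# it serves as the reference point for the PX step (sketch).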
if (parameter_expansion %in% c("translation", "diagonal") & any_Mprime & any(!spline_REs)) {
if (do_timing){
tic('Build PX R Terms')
}
nonspline_positions <- sort(unlist(outer_alpha_RE_positions[!spline_REs]))
size_splines <- sum((d_j * g_j)[spline_REs])
est_rho <- stationary_rho
diag_rho <- which(stationary_rho == 1)
# parsed_RE_groups <- get_RE_groups(formula = formula, data = data)
# parsed_RE_groups <- parsed_RE_groups
mapping_new_Z <- do.call('cbind', parsed_RE_groups$design)
mapping_J <- split(1:sum(d_j[!spline_REs]^2), rep(1:length(d_j[!spline_REs]), d_j[!spline_REs]^2))
mapping_J <- lapply(mapping_J, FUN=function(i){i-1})
mapping_J <- sapply(mapping_J, min)
mapping_to_re <- parsed_RE_groups$factor
mapping_to_re <- unlist(apply(do.call('cbind', mapping_to_re), MARGIN = 1, list), recursive = F)
# mapping_to_re <- purrr::array_branch(do.call('cbind', mapping_to_re), margin = 1)
mapping_to_re <- lapply(mapping_to_re, FUN=function(i){
mapply(outer_alpha_RE_positions[!spline_REs], i, SIMPLIFY = FALSE,
FUN=function(a,b){a[[b]]})
})
Mmap <- do.call('rbind', lapply(mapping_to_re, FUN=function(i){as.integer(sapply(i, min))}))
start_base_Z <- cumsum(c(0,d_j[!spline_REs]))[-(number_of_RE - sum(spline_REs) +1)]
names(start_base_Z) <- NULL
store_re_id <- store_id <- list()
id_range <- 1:nrow(Mmap)
for (j in 1:(number_of_RE - sum(spline_REs))){
store_re_id_j <- store_id_j <- list()
for (jprime in 1:j){
# print(c(j, jprime))
umap <- unique(Mmap[, c(j, jprime)])
store_re_id_j[[jprime]] <- unlist(apply(umap, MARGIN = 1, list), recursive = F)
# store_re_id_j[[jprime]] <- purrr::array_branch(umap, margin = 1)
id_lookup <- split(id_range, paste(Mmap[,j], Mmap[,jprime]))
id_lookup <- id_lookup[paste(umap[,1], umap[,2])]
names(id_lookup) <- NULL
# id_lookup <- lapply(1:nrow(umap), FUN=function(i){
# umap_r <- umap[i,]
# id_r <- which( (Mmap[,j] %in% umap_r[1]) & (Mmap[,jprime] %in% umap_r[2]))
# return(id_r)
# })
store_id_j[[jprime]] <- id_lookup
}
store_id[[j]] <- store_id_j
store_re_id[[j]] <- store_re_id_j
}
store_design <- parsed_RE_groups$design
rm(parsed_RE_groups, mapping_to_re)
gc()
if (do_timing){
toc(quiet = quiet_time, log = T)
}
}
store_parameter_traj <- store_vi <- store_ELBO <- data.frame()
if (debug_param) {
store_beta <- array(NA, dim = c(iterations, ncol(X)))
store_alpha <- array(NA, dim = c(iterations, ncol(Z)))
store_sigma <- array(NA, dim = c(iterations, sum(d_j^2)))
if (do_huangwand){
store_hw <- array(NA, dim = c(iterations, sum(d_j)))
}
}
if (do_timing) {
toc(quiet = quiet_time, log = TRUE)
tic.clear()
}
## Begin VI algorithm:
if (!quiet) {
message("Begin Regression")
}
do_SQUAREM <- control$do_SQUAREM
if (factorization_method == 'collapsed'){
warning('Turning off SQUAREM for "collapsed')
do_SQUAREM <- FALSE
}
if (family %in% c('negbin')){
if (do_SQUAREM){warning('Turning off SQUAREM for negbin temporarily.')}
do_SQUAREM <- FALSE
}
if (family == 'negbin' & !(control$vi_r_method %in% c('VEM', 'fixed'))){
if (do_SQUAREM){warning('Turning off SQUAREM if "negbin" and not VEM/fixed.')}
do_SQUAREM <- FALSE
}
if (do_SQUAREM){
namedList <- utils::getFromNamespace('namedList', 'lme4')
squarem_success <- c(0, 0)
squarem_list <- list()
squarem_counter <- 1
}else{
squarem_success <- NA
}
if (debug_px){
debug_PX_ELBO <- rep(NA, iterations)
}else{
debug_PX_ELBO <- NULL
}
for (it in 1:iterations) {
if (it %% print_prog == 0) {
cat(".")
}
###
## Polya-Gamma Updates
###
# Get the x_i^T Var(beta) x_i terms.
if (do_timing) {
tic("Update PG")
}
if (family %in% 'linear'){# Ignore Polya-Gamma or Similar Updates
vi_pg_mean <- rep(1, nrow(X))
diag_vi_pg_mean <- sparseMatrix(i = 1:N, j = 1:N, x = vi_pg_mean)
}else{# Estimate Polya-Gamma or Similar Updates
if (factorization_method %in% c("weak", "collapsed")) {
# joint_quad <- rowSums( (joint.XZ %*% t(vi_joint_decomp))^2 )
# vi_joint_decomp <<- vi_joint_decomp
# joint.XZ <<- joint.XZ
joint_quad <- cpp_zVz(Z = joint.XZ, V = as(vi_joint_decomp, "generalMatrix"))
if (family == 'negbin'){
joint_quad <- joint_quad + vi_r_sigma
}
vi_pg_c <- sqrt(as.vector(X %*% vi_beta_mean + Z %*% vi_alpha_mean - vi_r_mu)^2 + joint_quad)
} else {
beta_quad <- rowSums((X %*% t(vi_beta_decomp))^2)
alpha_quad <- rowSums((Z %*% t(vi_alpha_decomp))^2)
joint_var <- beta_quad + alpha_quad
if (family == 'negbin'){
joint_var <- joint_var + vi_r_sigma
}
vi_pg_c <- sqrt(as.vector(X %*% vi_beta_mean + Z %*% vi_alpha_mean - vi_r_mu)^2 + joint_var)
}
vi_pg_mean <- vi_pg_b / (2 * vi_pg_c) * tanh(vi_pg_c / 2)
fill_zero <- which(abs(vi_pg_c) < 1e-6)
if (length(fill_zero) > 0){
vi_pg_mean[fill_zero] <- vi_pg_b[fill_zero] / 4
}
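# Numerical guard: as c -> 0, (b / (2 * c)) * tanh(c / 2) -> b / 4, so the
# limiting value is substituted when |c| is tiny to avoid 0/0.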
diag_vi_pg_mean <- sparseMatrix(i = 1:N, j = 1:N, x = vi_pg_mean)
}
sqrt_pg_weights <- Diagonal(x = sqrt(vi_pg_mean))
if (debug_ELBO & it != 1) {
debug_ELBO.1 <- calculate_ELBO(family = family,
ELBO_type = ELBO_type,
factorization_method = factorization_method,
d_j = d_j, g_j = g_j, prior_sigma_alpha_phi = prior_sigma_alpha_phi,
prior_sigma_alpha_nu = prior_sigma_alpha_nu,
iw_prior_constant = iw_prior_constant,
X = X, Z = Z, s = s, y = y,
vi_pg_b = vi_pg_b, vi_pg_mean = vi_pg_mean, vi_pg_c = vi_pg_c,
vi_sigma_alpha = vi_sigma_alpha, vi_sigma_alpha_nu = vi_sigma_alpha_nu,
vi_sigma_outer_alpha = vi_sigma_outer_alpha,
vi_beta_mean = vi_beta_mean, vi_alpha_mean = vi_alpha_mean,
log_det_beta_var = log_det_beta_var, log_det_alpha_var = log_det_alpha_var,
vi_beta_decomp = vi_beta_decomp, vi_alpha_decomp = vi_alpha_decomp,
vi_joint_decomp = vi_joint_decomp, choose_term = choose_term,
vi_sigmasq_a = vi_sigmasq_a, vi_sigmasq_b = vi_sigmasq_b,
vi_sigmasq_prior_a = vi_sigmasq_prior_a, vi_sigmasq_prior_b = vi_sigmasq_prior_b,
log_det_joint_var = log_det_joint_var, vi_r_mu = vi_r_mu, vi_r_mean = vi_r_mean, vi_r_sigma = vi_r_sigma,
do_huangwand = do_huangwand, vi_a_a_jp = vi_a_a_jp, vi_a_b_jp = vi_a_b_jp,
vi_a_nu_jp = vi_a_nu_jp, vi_a_APRIOR_jp = vi_a_APRIOR_jp
)
}
if (do_timing) {
toc(quiet = quiet_time, log = TRUE)
tic("Prepare Sigma")
}
# Process Sigma_j for manipulation
# if Sigma_{j} is InverseWishart(a,Phi)
# Then E[Sigma^{-1}_j] = a * Phi^{-1}
if (factorization_method == "strong") {
cyclical_T <- TRUE
} else {
cyclical_T <- FALSE
}
inv_mapping_alpha <- mapply(vi_sigma_alpha_nu, lapply(vi_sigma_alpha, solve),
SIMPLIFY = FALSE, FUN = function(a, b) {
a * b
}
)
inv_mapping_alpha <- make_mapping_alpha(inv_mapping_alpha)
if (factorization_method == "collapsed"){
cyclical_T <- TRUE
}
Tinv <- prepare_T(
mapping = inv_mapping_alpha, levels_per_RE = g_j, num_REs = number_of_RE,
variables_per_RE = d_j, running_per_RE = breaks_for_RE, cyclical = cyclical_T
)
if (!cyclical_T & factorization_method != "collapsed") {
Tinv <- as(Tinv, "generalMatrix")
} else {
Tinv <- lapply(Tinv, FUN = function(i) {
as(i, "generalMatrix")
})
}
if (do_timing) {
toc(quiet = quiet_time, log = T)
tic("Update Beta")
}
if (factorization_method == "weak") {
## Update <beta, alpha> jointly
chol.update.joint <- LinRegChol(
X = joint.XZ, omega = diag_vi_pg_mean,
prior_precision = bdiag(zero_mat, Tinv),
y = s + vi_pg_mean * vi_r_mu
)
Pmatrix <- sparseMatrix(i = 1:ncol(joint.XZ), j = 1 + chol.update.joint$Pindex, x = 1)
vi_joint_L_nonpermute <- drop0(solve(chol.update.joint$origL))
vi_joint_LP <- Pmatrix
vi_joint_decomp <- vi_joint_L_nonpermute %*% t(vi_joint_LP)
vi_beta_mean <- Matrix(chol.update.joint$mean[1:p.X], dimnames = list(colnames(X), NULL))
vi_alpha_mean <- Matrix(chol.update.joint$mean[-1:-p.X], dimnames = list(fmt_names_Z, NULL))
vi_alpha_decomp <- vi_joint_decomp[, -1:-p.X, drop = F]
vi_beta_decomp <- vi_joint_decomp[, 1:p.X, drop = F]
log_det_joint_var <- -2 * sum(log(diag(chol.update.joint$origL)))
if (do_SQUAREM){
vi_joint_L_nonpermute <- vi_joint_decomp
vi_joint_LP <- Diagonal(n = ncol(vi_joint_decomp))
}
} else if (factorization_method == "collapsed") {
if (family != 'binomial'){stop('"collapsed" not set up.')}
beta_var <- solve(t(X) %*% diag_vi_pg_mean %*% X)
beta_hat <- beta_var %*% t(X) %*% s
P <- beta_var %*% t(X) %*% diag_vi_pg_mean %*% Z
M <- Z - X %*% P
vi_alpha_mean <- solve(t(M) %*% diag_vi_pg_mean %*% M + bdiag(Tinv),
t(M) %*% (s - diag_vi_pg_mean %*% X %*% beta_hat)
)
vi_beta_mean <- beta_hat - P %*% vi_alpha_mean
sqrt_pg_weights <- Diagonal(x = sqrt(vi_pg_mean))
for (j in 1:number_of_RE) {
index_j <- cyclical_pos[[j]]
M_j <- as(M[, index_j, drop = F], 'generalMatrix')
prec_j <- crossprod(sqrt_pg_weights %*% M_j) + Tinv[[j]]
chol_var_j <- solve(t(chol(prec_j)))
running_log_det_alpha_var[j] <- 2 * sum(log(diag(chol_var_j)))
vi_alpha_decomp[index_j, index_j] <- drop0(chol_var_j)
# as(
# as(chol_var_j, "generalMatrix"), "TsparseMatrix"
# )
}
vi_alpha_L_nonpermute <- vi_alpha_decomp
vi_alpha_LP <- Diagonal(n = nrow(vi_alpha_L_nonpermute))
vi_alpha_decomp <- vi_alpha_L_nonpermute %*% t(vi_alpha_LP)
vi_alpha_decomp <- drop0(vi_alpha_decomp)
vi_alpha_decomp <- as(vi_alpha_decomp, 'generalMatrix')
log_det_alpha_var <- sum(running_log_det_alpha_var)
var_ALPHA <- t(vi_alpha_decomp) %*% vi_alpha_decomp
vi_joint_all <- bdiag(beta_var, var_ALPHA)
vi_joint_all[seq_len(nrow(beta_var)), seq_len(nrow(beta_var))] <-
P %*% var_ALPHA %*% t(P) + vi_joint_all[seq_len(nrow(beta_var)), seq_len(nrow(beta_var))]
vi_joint_all[seq_len(nrow(beta_var)),-seq_len(nrow(beta_var)), drop = F] <- - P %*% var_ALPHA
vi_joint_all[-seq_len(nrow(beta_var)),seq_len(nrow(beta_var)),drop=F] <- - t(P %*% var_ALPHA)
vi_joint_decomp <- chol(vi_joint_all)
vi_beta_decomp <- vi_joint_decomp[,1:p.X,drop=F]
# vi_beta_decomp <- chol(beta_var)
# vi_beta_L_nonpermute <- vi_beta_decomp
# vi_beta_LP <- Diagonal(n = nrow(vi_beta_mean))
# vi_joint_LP <- Diagonal(n = nrow(vi_joint_decomp))
# vi_joint_L_nonpermute <- vi_joint_decomp
log_det_joint_var <- NA
log_det_beta_var <- as.numeric(determinant(beta_var)$modulus)
} else if (factorization_method == "partial") {
if (linpred_method == "cyclical") {
# Do not run except as backup
# ###Non optimized
# precision_beta <- t(X) %*% diag_vi_pg_mean %*% X
# nonopt_beta <- solve(precision_beta, t(X) %*% (s - diag_vi_pg_mean %*% Z %*% vi_alpha_mean))
# precision_alpha <- t(Z) %*% diag_vi_pg_mean %*% Z + Tinv
# nonopt_alpha <- solve(precision_alpha, t(Z) %*% (s - diag_vi_pg_mean %*% X %*% nonopt_beta))
chol.update.beta <- LinRegChol(
X = as(X, "sparseMatrix"), omega = diag_vi_pg_mean, prior_precision = zero_mat,
y = as.vector(s - diag_vi_pg_mean %*% Z %*% vi_alpha_mean)
)
Pmatrix <- sparseMatrix(i = 1:p.X, j = 1 + chol.update.beta$Pindex, x = 1)
# P origL oriL^T P^T = PRECISION
# t(decompVar) %*% decompVar = VARIANCE = (origL^{-1} t(P))^T (origL^{-1} t(P))
vi_beta_L_nonpermute <- drop0(solve(chol.update.beta$origL))
vi_beta_LP <- Pmatrix
vi_beta_decomp <- vi_beta_L_nonpermute %*% t(vi_beta_LP)
vi_beta_mean <- chol.update.beta$mean
log_det_beta_var <- -2 * sum(log(diag(chol.update.beta$origL)))
chol.update.alpha <- LinRegChol(
X = Z, omega = diag_vi_pg_mean, prior_precision = Tinv,
y = as.vector(s - diag_vi_pg_mean %*% X %*% vi_beta_mean)
)
Pmatrix <- sparseMatrix(i = 1:p.Z, j = 1 + chol.update.alpha$Pindex, x = 1)
vi_alpha_L_nonpermute <- drop0(solve(chol.update.alpha$origL))
vi_alpha_LP <- Pmatrix
vi_alpha_decomp <- vi_alpha_L_nonpermute %*% t(vi_alpha_LP)
vi_alpha_decomp <- drop0(vi_alpha_decomp)
vi_alpha_decomp <- as(vi_alpha_decomp, 'generalMatrix')
vi_alpha_mean <- chol.update.alpha$mean
log_det_alpha_var <- -2 * sum(log(diag(chol.update.alpha$origL)))
vi_beta_mean <- Matrix(vi_beta_mean, dimnames = list(colnames(X), NULL))
vi_alpha_mean <- Matrix(vi_alpha_mean, dimnames = list(fmt_names_Z, NULL))
} else if (linpred_method == "joint") {
joint.XZ <- cbind(X, Z)
chol.update.joint <- solve(Matrix::Cholesky(
crossprod(Diagonal(x = sqrt(vi_pg_mean)) %*% joint.XZ) +
bdiag(zero_mat, bdiag(Tinv)) ),
t(joint.XZ) %*% (s + vi_pg_mean * vi_r_mu) )
vi_beta_mean <- Matrix(chol.update.joint[1:p.X,], dimnames = list(colnames(X), NULL))
vi_alpha_mean <- Matrix(chol.update.joint[-1:-p.X,], dimnames = list(fmt_names_Z, NULL))
# chol.update.joint <- LinRegChol(
# X = joint.XZ, omega = diag_vi_pg_mean, prior_precision = bdiag(zero_mat, Tinv),
# y = s + vi_pg_mean * vi_r_mu,
# save_chol = FALSE
# )
# vi_beta_mean <- Matrix(chol.update.joint$mean[1:p.X], dimnames = list(colnames(X), NULL))
# vi_alpha_mean <- Matrix(chol.update.joint$mean[-1:-p.X], dimnames = list(fmt_names_Z, NULL))
vi_beta_decomp <- solve(t(chol(as.matrix(t(X) %*% diag_vi_pg_mean %*% X))))
vi_beta_L_nonpermute <- vi_beta_decomp
vi_beta_LP <- Diagonal(n = ncol(vi_beta_decomp))
log_det_beta_var <- 2 * sum(log(diag(vi_beta_decomp)))
chol.update.alpha <- LinRegChol(
X = Z, omega = diag_vi_pg_mean, prior_precision = Tinv,
y = s + vi_pg_mean * vi_r_mu
)
Pmatrix <- sparseMatrix(i = 1:p.Z, j = 1 + chol.update.alpha$Pindex, x = 1)
vi_alpha_L_nonpermute <- drop0(solve(chol.update.alpha$origL))
vi_alpha_LP <- Pmatrix
vi_alpha_decomp <- vi_alpha_L_nonpermute %*% t(vi_alpha_LP)
vi_alpha_decomp <- drop0(vi_alpha_decomp)
log_det_alpha_var <- -2 * sum(log(diag(chol.update.alpha$origL)))
if (do_SQUAREM){
vi_alpha_L_nonpermute <- vi_alpha_decomp
vi_alpha_LP <- Diagonal(n = ncol(vi_alpha_decomp))
}
} else {
stop("Invalid linpred method for partial scheme")
}
} else if (factorization_method == "strong") {
running_log_det_alpha_var <- rep(NA, number_of_RE)
vi_alpha_decomp <- sparseMatrix(i = 1, j = 1, x = 0, dims = rep(p.Z, 2))
if (linpred_method == "joint") {
if (it == 1){
joint.XZ <- cbind(X, Z)
}
if (do_timing) {
tic("ux_mean")
}
chol.update.joint <- solve(Matrix::Cholesky(
crossprod(sqrt_pg_weights %*% joint.XZ) +
bdiag(zero_mat, bdiag(Tinv)) ),
t(joint.XZ) %*% (s + vi_pg_mean * vi_r_mu) )
vi_beta_mean <- Matrix(chol.update.joint[1:p.X,], dimnames = list(colnames(X), NULL))
vi_alpha_mean <- Matrix(chol.update.joint[-1:-p.X,], dimnames = list(fmt_names_Z, NULL))
# chol.update.joint <- LinRegChol(X = joint.XZ,
# omega = diag_vi_pg_mean,
# prior_precision = bdiag(zero_mat, bdiag(Tinv)),
# y = s + vi_pg_mean * vi_r_mu,
# save_chol = FALSE)
# vi_beta_mean <- Matrix(chol.update.joint$mean[1:p.X], dimnames = list(colnames(X), NULL))
# vi_alpha_mean <- Matrix(chol.update.joint$mean[-1:-p.X], dimnames = list(fmt_names_Z, NULL))
if (do_timing) {
toc(quiet = quiet_time, log = T)
tic("ux_var")
}
vi_beta_decomp <- solve(t(chol(as.matrix(t(X) %*% diag_vi_pg_mean %*% X))))
vi_beta_L_nonpermute <- vi_beta_decomp
vi_beta_LP <- Diagonal(n = nrow(vi_beta_decomp))
log_det_beta_var <- 2 * sum(log(diag(vi_beta_decomp)))
#-log(det(t(X) %*% diag_vi_pg_mean %*% X))
running_log_det_alpha_var <- rep(NA, number_of_RE)
for (j in 1:number_of_RE) {
index_j <- cyclical_pos[[j]]
Z_j <- Z[, index_j, drop = F]
prec_j <- crossprod(sqrt_pg_weights %*% Z_j) + Tinv[[j]]
chol_var_j <- solve(t(chol(prec_j)))
running_log_det_alpha_var[j] <- 2 * sum(log(diag(chol_var_j)))
vi_alpha_decomp[index_j, index_j] <- drop0(chol_var_j)
# as(
# as(chol_var_j, "generalMatrix"), "TsparseMatrix"
# )
}
vi_alpha_L_nonpermute <- vi_alpha_decomp
vi_alpha_LP <- Diagonal(n = nrow(vi_alpha_L_nonpermute))
log_det_alpha_var <- sum(running_log_det_alpha_var)
if (do_timing){
toc(quiet = quiet_time, log = T)
}
} else if (linpred_method == "solve_normal") {
bind_rhs_j <- list()
bind_lhs_j <- list()
for (j in 1:number_of_RE) {
index_j <- cyclical_pos[[j]]
Z_j <- Z[, index_j, drop = F]
Z_negj <- Z[, -index_j, drop = F]
prec_j <- crossprod(sqrt_pg_weights %*% Z_j) + Tinv[[j]]
chol_prec_j <- t(chol(prec_j))
chol_var_j <- solve(chol_prec_j)
mod_j <- solve(prec_j)
term_j <- mod_j %*% t(Z_j) %*% diag_vi_pg_mean %*% Z
term_j[, index_j, drop = F] <- Diagonal(n = ncol(Z_j))
term_j <- cbind(term_j, mod_j %*% t(Z_j) %*% diag_vi_pg_mean %*% X)
bind_lhs_j[[j]] <- term_j
bind_rhs_j[[j]] <- mod_j %*% t(Z_j) %*% s
running_log_det_alpha_var[j] <- 2 * sum(log(diag(chol_var_j)))
vi_alpha_decomp[index_j, index_j] <- drop0(chol_var_j)
# as(
# as(chol_var_j, "generalMatrix"), "TsparseMatrix"
# )
}
log_det_alpha_var <- sum(running_log_det_alpha_var)
bind_lhs_j <- drop0(do.call("rbind", bind_lhs_j))
bind_rhs_j <- do.call("rbind", bind_rhs_j)
vi_beta_decomp <- solve(t(chol(as.matrix(t(X) %*% diag_vi_pg_mean %*% X))))
vi_beta_var <- solve(t(X) %*% diag_vi_pg_mean %*% X)
log_det_beta_var <- 2 * sum(log(diag(vi_beta_decomp)))
# vi_beta_mean <- vi_beta_var %*% t(X) %*% (s - diag_vi_pg_mean %*% Z %*% vi_alpha_mean)
# vi_alpha_mean <- solve(bind_lhs_j[,1:ncol(Z)], bind_rhs_j)
#
# vi_alpha_mean <- Matrix(vi_alpha_mean)
# vi_beta_mean <- Matrix(vi_beta_mean)
bind_lhs_j <- drop0(rbind(bind_lhs_j, cbind(vi_beta_var %*% t(X) %*% diag_vi_pg_mean %*% Z, Diagonal(n = ncol(X)))))
bind_rhs_j <- rbind(bind_rhs_j, vi_beta_var %*% t(X) %*% s)
#
bind_solution <- solve(bind_lhs_j) %*% bind_rhs_j
# print(cbind(bind_solution, rbind(vi_alpha_mean, vi_beta_mean)))
#
vi_beta_mean <- Matrix(bind_solution[-1:-ncol(Z)], dimnames = list(colnames(X), NULL))
vi_alpha_mean <- Matrix(bind_solution[1:ncol(Z)], dimnames = list(fmt_names_Z, NULL))
} else if (linpred_method == "cyclical") {
for (j in 1:number_of_RE) {
index_j <- cyclical_pos[[j]]
Z_j <- Z[, index_j, drop = F]
Z_negj <- Z[, -index_j, drop = F]
chol.j <- LinRegChol(
X = Z_j, omega = diag_vi_pg_mean, prior_precision = Tinv[[j]],
y = as.vector(s + vi_pg_mean * vi_r_mu - diag_vi_pg_mean %*% (X %*% vi_beta_mean + Z_negj %*% vi_alpha_mean[-index_j]))
)
vi_alpha_mean[index_j] <- chol.j$mean
Pmatrix <- sparseMatrix(i = 1:ncol(Z_j), j = 1 + chol.j$Pindex, x = 1)
running_log_det_alpha_var[j] <- -2 * sum(log(diag(chol.j$origL)))
vi_alpha_decomp[index_j, index_j] <- solve(chol.j$origL) %*% t(Pmatrix)
}
vi_alpha_L_nonpermute <- vi_alpha_decomp
vi_alpha_LP <- Diagonal(n = ncol(vi_alpha_L_nonpermute))
# vi_alpha_decomp <- bdiag(vi_alpha_decomp)
log_det_alpha_var <- sum(running_log_det_alpha_var)
chol.update.beta <- LinRegChol(
X = as(X, "sparseMatrix"), omega = diag_vi_pg_mean, prior_precision = zero_mat,
y = as.vector(s + vi_pg_mean * vi_r_mu - diag_vi_pg_mean %*% Z %*% vi_alpha_mean)
)
Pmatrix <- sparseMatrix(i = 1:p.X, j = 1 + chol.update.beta$Pindex, x = 1)
vi_beta_L_nonpermute <- drop0(solve(chol.update.beta$origL))
vi_beta_LP <- Pmatrix
vi_beta_decomp <- vi_beta_L_nonpermute %*% t(Pmatrix)
vi_beta_mean <- chol.update.beta$mean
log_det_beta_var <- -2 * sum(log(diag(chol.update.beta$origL)))
vi_beta_mean <- Matrix(vi_beta_mean, dimnames = list(colnames(X), NULL))
vi_alpha_mean <- Matrix(vi_alpha_mean, dimnames = list(fmt_names_Z, NULL))
} else {
stop("Invalid linpred method")
}
} else {
stop("Invalid factorization method.")
}
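# For the linear family, the decompositions above are computed on the unit
# variance scale; rescale by 1/sqrt(E_q[1/sigma^2]) = sqrt(b/a) so that
# t(L) %*% L matches the marginal covariance (sketch of the convention here).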
if (family == 'linear'){
adjust_var <- 1/sqrt(vi_sigmasq_a/vi_sigmasq_b)
vi_beta_decomp <- vi_beta_decomp * adjust_var
vi_alpha_decomp <- vi_alpha_decomp * adjust_var
vi_joint_decomp <- vi_joint_decomp * adjust_var
if (factorization_method == 'weak'){
vi_joint_L_nonpermute <- vi_joint_L_nonpermute * adjust_var
}else{
vi_beta_L_nonpermute <- vi_beta_L_nonpermute * adjust_var
vi_alpha_L_nonpermute <- vi_alpha_L_nonpermute * adjust_var
}
ln_sigmasq <- log(vi_sigmasq_b) - log(vi_sigmasq_a)
log_det_joint_var <- log_det_joint_var + ncol(vi_joint_decomp) * ln_sigmasq
log_det_beta_var <- log_det_beta_var + ncol(vi_beta_decomp) * ln_sigmasq
log_det_alpha_var <- log_det_alpha_var + ncol(vi_alpha_decomp) * ln_sigmasq
}
if (debug_ELBO & it != 1) {
variance_by_alpha_jg <- calculate_expected_outer_alpha(L = vi_alpha_decomp, alpha_mu = as.vector(vi_alpha_mean), re_position_list = outer_alpha_RE_positions)
vi_sigma_outer_alpha <- variance_by_alpha_jg$outer_alpha
debug_ELBO.2 <- calculate_ELBO(family = family,
ELBO_type = ELBO_type,
factorization_method = factorization_method,
d_j = d_j, g_j = g_j, prior_sigma_alpha_phi = prior_sigma_alpha_phi,
prior_sigma_alpha_nu = prior_sigma_alpha_nu,
iw_prior_constant = iw_prior_constant,
X = X, Z = Z, s = s, y = y,
vi_pg_b = vi_pg_b, vi_pg_mean = vi_pg_mean, vi_pg_c = vi_pg_c,
vi_sigma_alpha = vi_sigma_alpha, vi_sigma_alpha_nu = vi_sigma_alpha_nu,
vi_sigma_outer_alpha = vi_sigma_outer_alpha,
vi_beta_mean = vi_beta_mean, vi_alpha_mean = vi_alpha_mean,
log_det_beta_var = log_det_beta_var, log_det_alpha_var = log_det_alpha_var,
vi_beta_decomp = vi_beta_decomp, vi_alpha_decomp = vi_alpha_decomp,
vi_joint_decomp = vi_joint_decomp, choose_term = choose_term,
vi_sigmasq_a = vi_sigmasq_a, vi_sigmasq_b = vi_sigmasq_b,
vi_sigmasq_prior_a = vi_sigmasq_prior_a, vi_sigmasq_prior_b = vi_sigmasq_prior_b,
log_det_joint_var = log_det_joint_var, vi_r_mu = vi_r_mu, vi_r_mean = vi_r_mean,
vi_r_sigma = vi_r_sigma,
do_huangwand = do_huangwand, vi_a_a_jp = vi_a_a_jp, vi_a_b_jp = vi_a_b_jp,
vi_a_nu_jp = vi_a_nu_jp, vi_a_APRIOR_jp = vi_a_APRIOR_jp
)
}
if (do_timing) {
toc(quiet = quiet_time, log = T)
tic("Update Sigma")
}
###
# Update \Sigma_j
##
if (!do_huangwand){#Update standard Inverse-Wishart
variance_by_alpha_jg <- calculate_expected_outer_alpha(L = vi_alpha_decomp, alpha_mu = as.vector(vi_alpha_mean), re_position_list = outer_alpha_RE_positions)
vi_sigma_outer_alpha <- variance_by_alpha_jg$outer_alpha
vi_sigma_alpha <- mapply(vi_sigma_outer_alpha, prior_sigma_alpha_phi, SIMPLIFY = FALSE, FUN = function(i, j) {
i * vi_sigmasq_a/vi_sigmasq_b + j
})
}else{
#Update Inverse-Wishart
variance_by_alpha_jg <- calculate_expected_outer_alpha(L = vi_alpha_decomp, alpha_mu = as.vector(vi_alpha_mean), re_position_list = outer_alpha_RE_positions)
vi_sigma_outer_alpha <- variance_by_alpha_jg$outer_alpha
for (inner_it in 1:INNER_IT){
vi_sigma_alpha <- mapply(vi_sigma_outer_alpha, vi_a_a_jp,
vi_a_b_jp, vi_a_nu_jp, SIMPLIFY = FALSE,
FUN = function(i, tilde.a, tilde.b, nu) {
i * vi_sigmasq_a/vi_sigmasq_b + Diagonal(x = tilde.a/tilde.b) * 2 * nu
})
#Update a_{j,p}
diag_Einv_sigma <- mapply(vi_sigma_alpha,
vi_sigma_alpha_nu, d_j, SIMPLIFY = FALSE, FUN = function(phi, nu, d) {
inv_phi <- solve(phi)
sigma.inv <- nu * inv_phi
return(diag(sigma.inv))
})
vi_a_b_jp <- mapply(vi_a_nu_jp, vi_a_APRIOR_jp, diag_Einv_sigma,
SIMPLIFY = FALSE,
FUN=function(nu, APRIOR, diag_j){
1/APRIOR^2 + nu * diag_j
})
}
# d_j <<- d_j
# vi_alpha_decomp <<- vi_alpha_decomp
# Tinv <<- Tinv
# vi_alpha_mean <<- vi_alpha_mean
}
if (do_timing) {
toc(quiet = quiet_time, log = T)
tic("Update Aux")
}
# Update the auxiliary parameters
if (family == "negbin") {
vi_r_param <- update_r(
vi_r_mu = vi_r_mu, vi_r_sigma = vi_r_sigma,
y = y, X = X, Z = Z, factorization_method = factorization_method,
vi_beta_mean = vi_beta_mean, vi_beta_decomp = vi_beta_decomp,
vi_alpha_mean = vi_alpha_mean, vi_alpha_decomp = vi_alpha_decomp,
vi_joint_decomp = vi_joint_decomp, vi_r_method = vi_r_method
)
vi_r_mu <- vi_r_param[1]
vi_r_sigma <- vi_r_param[2]
vi_r_mean <- exp(vi_r_mu + vi_r_sigma / 2)
s <- (y - vi_r_mean) / 2
vi_pg_b <- y + vi_r_mean
} else if (family == 'linear') {
if (factorization_method == 'weak'){
joint_quad <- cpp_zVz(Z = joint.XZ, V = as(vi_joint_decomp, "generalMatrix"))
vi_lp <- (s - as.vector(X %*% vi_beta_mean + Z %*% vi_alpha_mean))^2 + joint_quad
} else{
beta_quad <- rowSums((X %*% t(vi_beta_decomp))^2)
alpha_quad <- rowSums((Z %*% t(vi_alpha_decomp))^2)
vi_lp <- (s - as.vector(X %*% vi_beta_mean + Z %*% vi_alpha_mean))^2 + beta_quad + alpha_quad
}
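# Conjugate update for q(sigma^2) = Inverse-Gamma(a, b): b is half the expected
# squared residuals plus half the expected random-effect prior kernel, plus
# the prior rate (standard normal-inverse-gamma update; sketch).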
vi_kernel <- expect_alpha_prior_kernel(vi_sigma_alpha = vi_sigma_alpha,
vi_sigma_alpha_nu = vi_sigma_alpha_nu, d_j = d_j,
vi_sigma_outer_alpha = vi_sigma_outer_alpha)
vi_sigmasq_b <- (sum(vi_lp) + vi_kernel)/2 + vi_sigmasq_prior_b
}
if (do_timing) {
toc(quiet = quiet_time, log = T)
}
### PARAMETER EXPANSIONS!
if (debug_ELBO) {
debug_ELBO.3 <- calculate_ELBO(family = family,
ELBO_type = ELBO_type,
factorization_method = factorization_method,
d_j = d_j, g_j = g_j, prior_sigma_alpha_phi = prior_sigma_alpha_phi,
prior_sigma_alpha_nu = prior_sigma_alpha_nu,
iw_prior_constant = iw_prior_constant,
X = X, Z = Z, s = s, y = y,
vi_pg_b = vi_pg_b, vi_pg_mean = vi_pg_mean, vi_pg_c = vi_pg_c,
vi_sigma_alpha = vi_sigma_alpha, vi_sigma_alpha_nu = vi_sigma_alpha_nu,
vi_sigma_outer_alpha = vi_sigma_outer_alpha,
vi_beta_mean = vi_beta_mean, vi_alpha_mean = vi_alpha_mean,
log_det_beta_var = log_det_beta_var, log_det_alpha_var = log_det_alpha_var,
vi_beta_decomp = vi_beta_decomp, vi_alpha_decomp = vi_alpha_decomp,
vi_joint_decomp = vi_joint_decomp,
log_det_joint_var = log_det_joint_var,
vi_r_mu = vi_r_mu, vi_r_mean = vi_r_mean, vi_r_sigma = vi_r_sigma,
choose_term = choose_term,
vi_sigmasq_a = vi_sigmasq_a, vi_sigmasq_b = vi_sigmasq_b,
vi_sigmasq_prior_a = vi_sigmasq_prior_a, vi_sigmasq_prior_b = vi_sigmasq_prior_b,
do_huangwand = do_huangwand, vi_a_a_jp = vi_a_a_jp, vi_a_b_jp = vi_a_b_jp,
vi_a_nu_jp = vi_a_nu_jp, vi_a_APRIOR_jp = vi_a_APRIOR_jp
)
}
if (parameter_expansion == "none" | !any_Mprime) {
accept.PX <- TRUE
} else {
if (do_timing) {
tic("Update PX")
}
# Do a simple mean adjusted expansion.
# Get the mean of each random effect.
vi_mu_j <- t(M_prime) %*% vi_alpha_mean
meat_Bj <- bdiag(mapply(vi_sigma_alpha, vi_sigma_alpha_nu, d_j,
SIMPLIFY = FALSE, FUN = function(phi, nu, d) {
inv_phi <- solve(phi)
sigma.inv <- nu * inv_phi
return(sigma.inv)
}))
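    # Project the RE means onto the span of B_j, weighting by the expected
    # prior precision (a GLS-style projection): this isolates the mean mass
    # that can be moved from alpha into beta without changing the fit.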
proj_vi_mu_j <- B_j %*% solve(t(B_j) %*% meat_Bj %*% B_j) %*% t(B_j) %*% meat_Bj %*% vi_mu_j
# Remove the "excess mean" mu_j from each random effect \alpha_{j,g}
    # and add the summed mass back to the betas.
vi_alpha_mean <- vi_alpha_mean - M_prime_one %*% proj_vi_mu_j
vi_beta_mean <- vi_beta_mean + t(M_mu_to_beta) %*% proj_vi_mu_j
variance_by_alpha_jg <- calculate_expected_outer_alpha(
L = vi_alpha_decomp,
alpha_mu = as.vector(vi_alpha_mean),
re_position_list = outer_alpha_RE_positions
)
vi_sigma_outer_alpha <- variance_by_alpha_jg$outer_alpha
if (parameter_expansion == "mean"){accept.PX <- TRUE}
}
quiet_rho <- control$quiet_rho
if (parameter_expansion %in% c("translation", "diagonal") & skip_translate == FALSE & any_Mprime) {
attempted_expansion <- attempted_expansion + 1
if (debug_px){
prior.ELBO <- calculate_ELBO(family = family, ELBO_type = ELBO_type,
factorization_method = factorization_method,
d_j = d_j, g_j = g_j, prior_sigma_alpha_phi = prior_sigma_alpha_phi,
prior_sigma_alpha_nu = prior_sigma_alpha_nu,
iw_prior_constant = iw_prior_constant,
X = X, Z = Z, s = s, y = y,
vi_pg_b = vi_pg_b, vi_pg_mean = vi_pg_mean, vi_pg_c = vi_pg_c,
vi_sigma_alpha = vi_sigma_alpha, vi_sigma_alpha_nu = vi_sigma_alpha_nu,
vi_sigma_outer_alpha = vi_sigma_outer_alpha,
vi_beta_mean = vi_beta_mean, vi_alpha_mean = vi_alpha_mean,
log_det_beta_var = log_det_beta_var, log_det_alpha_var = log_det_alpha_var,
vi_beta_decomp = vi_beta_decomp, vi_alpha_decomp = vi_alpha_decomp,
vi_joint_decomp = vi_joint_decomp, choose_term = choose_term,
vi_sigmasq_a = vi_sigmasq_a, vi_sigmasq_b = vi_sigmasq_b,
vi_sigmasq_prior_a = vi_sigmasq_prior_a, vi_sigmasq_prior_b = vi_sigmasq_prior_b,
log_det_joint_var = log_det_joint_var,
vi_r_mu = vi_r_mu, vi_r_mean = vi_r_mean, vi_r_sigma = vi_r_sigma,
do_huangwand = do_huangwand, vi_a_a_jp = vi_a_a_jp, vi_a_b_jp = vi_a_b_jp,
vi_a_nu_jp = vi_a_nu_jp, vi_a_APRIOR_jp = vi_a_APRIOR_jp
)
}
if (!quiet_rho){cat('r')}
if (do_timing){
tic('px_r')
}
if (any(!spline_REs)){
raw_R <- R_ridge <- vecR_ridge_new(L = vi_alpha_decomp[,nonspline_positions], pg_mean = diag(diag_vi_pg_mean),
mapping_J = mapping_J, d = d_j[!spline_REs],
store_id = store_id, store_re_id = store_re_id,
store_design = store_design,
diag_only = (factorization_method == 'strong'))
}else{
raw_R <- R_ridge <- matrix(0, ncol = 0, nrow = 0)
}
if (factorization_method == 'weak'){
stop('no Translation PX for weak yet...')
}
if (!quiet_rho){cat('r')}
if (any(!spline_REs)){
R_design <- vecR_design(alpha_mu = as.vector(vi_alpha_mean), Z = mapping_new_Z,
M = Mmap, mapping_J = mapping_J, d = d_j[!spline_REs],
start_z = start_base_Z)
}else{
R_design <- matrix(0, nrow = N, ncol = 0)
}
if (sum(spline_REs)){
R_spline_design <- sapply(cyclical_pos[spline_REs], FUN=function(i){
as.vector(Z[,i,drop=F] %*% vi_alpha_mean[i,])
})
R_spline_ridge <- sapply(cyclical_pos[spline_REs], FUN=function(s){vi_alpha_decomp[,s, drop = F]})
      R_spline_ridge <- Diagonal(x = mapply(R_spline_ridge, cyclical_pos[spline_REs], FUN=function(V, pos){
sum(vi_pg_mean * cpp_zVz(Z = drop0(Z[,pos,drop=F]), V = as(V, 'generalMatrix')))
}))
# Manually convert "ddiMatrix" to "generalMatrix" so doesn't fail on
# old versions of "Matrix" package.
if (inherits(R_spline_ridge, 'ddiMatrix')){
R_spline_ridge <- diag(R_spline_ridge)
R_spline_ridge <- sparseMatrix(
i = seq_len(length(R_spline_ridge)),
j = seq_len(length(R_spline_ridge)),
x = R_spline_ridge)
}else{
R_spline_ridge <- as(R_spline_ridge, 'generalMatrix')
}
}else{
R_spline_ridge <- drop0(matrix(0, nrow = 0, ncol = 0))
R_spline_design <- matrix(nrow = nrow(X), ncol = 0)
}
if (do_timing){
toc(quiet = quiet_time, log = TRUE)
tic('px_fit')
}
#If a DIAGONAL expansion, then only update the diagonal elements
if (parameter_expansion == "diagonal"){
stop('parameter_expansion "diagonal" turned off.')
# XR <- cbind(X, R_spline_design, R_design[, diag_rho])
# R_ridge <- bdiag(zeromat_beta, R_spline_ridge, R_ridge[diag_rho, diag_rho])
#
# if (do_huangwand){
# vec_OSL_prior <- do.call('c', mapply(vi_a_APRIOR_jp[!spline_REs],
# vi_a_a_jp[!spline_REs],
# vi_a_b_jp[!spline_REs],
# SIMPLIFY = FALSE,
# FUN=function(i,a,b){1-2/i^2 * a/b}))
# vec_OSL_prior <- c(rep(0, p.X), OSL_spline_prior, vec_OSL_prior)
# }else{
# vec_OSL_prior <- vec_OSL_prior[c(seq_len(p.X + sum(spline_REs)), p.X + sum(spline_REs) + diag_rho),,drop=F]
# }
# if (length(vec_OSL_prior) != ncol(XR)){stop('MISALIGNED DIMENSIONS')}
#
# update_expansion_XR <- vecR_fast_ridge(X = drop0(XR),
# omega = diag_vi_pg_mean, prior_precision = R_ridge, y = as.vector(s),
# adjust_y = as.vector(vec_OSL_prior))
#
# update_expansion_bX <- Matrix(update_expansion_XR[1:p.X])
# update_expansion_splines <- Matrix(update_expansion_XR[-(1:p.X)][seq_len(size_splines)])
#
# update_expansion_R <- mapply(split(update_expansion_XR[-seq_len(p.X + size_splines)],
# rep(1:(number_of_RE - sum(spline_REs)), d_j[!spline_REs])), d_j[!spline_REs], SIMPLIFY = FALSE,
# FUN=function(i,d){
# dg <- diag(x = d)
# diag(dg) <- i
# return(dg)
# })
# update_diag_R <- split(update_expansion_XR[-seq_len(p.X + size_splines)],
# rep(1:(number_of_RE - sum(spline_REs)), d_j[!spline_REs]))
# rownames(update_expansion_bX) <- colnames(X)
}else{
XR <- drop0(cbind(drop0(X), drop0(R_spline_design), drop0(R_design)))
R_ridge <- bdiag(zeromat_beta, R_spline_ridge, R_ridge)
moments_sigma_alpha <- mapply(vi_sigma_alpha, vi_sigma_alpha_nu, d_j,
SIMPLIFY = FALSE, FUN = function(phi, nu, d) {
inv_phi <- solve(phi)
sigma.inv <- nu * inv_phi
ln.det <- log(det(phi)) - sum(digamma((nu - 1:d + 1) / 2)) - d * log(2)
return(list(sigma.inv = sigma.inv, ln.det = ln.det))
})
if (family == 'linear'){# Rescale for linear
XR <- XR * sqrt(vi_sigmasq_a/vi_sigmasq_b)
adj_s <- s * sqrt(vi_sigmasq_a/vi_sigmasq_b)
R_ridge <- R_ridge * vi_sigmasq_a/vi_sigmasq_b
offset <- 0
}else if (family == 'negbin'){
adj_s <- s
offset <- vi_r_mu
stop('translation not set up for negative binomial.')
}else if (family == 'binomial'){
adj_s <- s
offset <- 0
}else{stop("family not set up for translation expansion.")}
update_expansion_XR <- update_rho(
XR = XR, y = adj_s, omega = diag_vi_pg_mean,
prior_precision = R_ridge, vi_beta_mean = vi_beta_mean,
moments_sigma_alpha = moments_sigma_alpha,
prior_sigma_alpha_nu = prior_sigma_alpha_nu, prior_sigma_alpha_phi = prior_sigma_alpha_phi,
vi_a_a_jp = vi_a_a_jp, vi_a_b_jp = vi_a_b_jp, vi_a_nu_jp = vi_a_nu_jp,
vi_a_APRIOR_jp = vi_a_APRIOR_jp,
stationary_rho = stationary_rho,
spline_REs = spline_REs, d_j = d_j,
do_huangwand = do_huangwand, offset = offset,
p.X = p.X, method = px_method, px_it = px_it,
init_rho = opt_prior_rho
)
if (px_method %in% c('numerical_hw', 'profiled', 'dynamic')){
px_improve <- update_expansion_XR$improvement
opt_prior_rho <- update_expansion_XR$opt_par
update_expansion_hw <- update_expansion_XR$hw
update_expansion_XR <- update_expansion_XR$rho
}else if (px_method %in% c('numerical', 'OSL')){
px_improve <- update_expansion_XR$improvement
opt_prior_rho <- update_expansion_XR <- update_expansion_XR$rho
}
opt_prior_rho <- NULL
update_expansion_bX <- Matrix(update_expansion_XR[1:p.X])
update_expansion_splines <- as.list(update_expansion_XR[-(1:p.X)][seq_len(sum(spline_REs))])
if (any(!spline_REs)){
update_expansion_R <- mapply(split(update_expansion_XR[-1:-(p.X + sum(spline_REs))],
rep(1:(number_of_RE - sum(spline_REs)), d_j[!spline_REs]^2)), d_j[!spline_REs],
SIMPLIFY = FALSE, FUN=function(i,d){matrix(i, nrow = d)})
}
}
if (do_timing){
toc(quiet = quiet_time, log = TRUE)
tic('px_propose')
}
est_rho_all <- update_expansion_XR[-(1:p.X)]
if (sum(spline_REs)){
est_rho_spline <- est_rho_all[seq_len(sum(spline_REs))]
est_rho <- est_rho_all[-seq_len(sum(spline_REs))]
}else{
est_rho <- est_rho_all
est_rho_spline <- 1
}
if (px_method %in% c('numerical_hw', 'profiled', 'dynamic')){
check_rho_hw <- unlist(vi_a_b_jp[c(which(spline_REs), which(!spline_REs))])
check_rho_hw <- check_rho_hw - unlist(update_expansion_hw)
names(check_rho_hw) <- NULL
}else{
check_rho_hw <- 0
}
if (!quiet_rho){
print(round(c(est_rho_spline, est_rho, check_rho_hw), 5))
}
if (parameter_expansion == 'diagonal'){
if (!is.na(px_improve) & (max(abs(est_rho - 1)) < 1e-6) & (max(abs(est_rho_spline - 1)) < 1e-6) ){
if (!quiet_rho){print('No further improvements')}
skip_translate <- TRUE
}
}else{
if (length(est_rho) > 0){
diff_rho <- max(abs(est_rho - stationary_rho))
}else{diff_rho <- 0}
if (!is.na(px_improve) & (diff_rho < 1e-6) & (max(abs(est_rho_spline - 1)) < 1e-6) ){
if (!quiet_rho){print('No further improvements')}
skip_translate <- TRUE
}
if (!is.na(px_improve)){
if (abs(px_improve) < 1e-7){
if (!quiet_rho){print('No further improvements (ELBO)')}
skip_translate <- TRUE
}
}
}
if (sum(spline_REs) > 0){
if (parameter_expansion == 'diagonal'){
old_update_diag_R <- update_diag_R
update_diag_R <- lapply(d_j, FUN=function(i){rep(1, i)})
update_diag_R[!spline_REs] <- old_update_diag_R
}
if (any(!spline_REs)){
old_update_expansion_R <- update_expansion_R
update_expansion_R <- lapply(d_j, FUN=function(i){diag(i)})
update_expansion_R[!spline_REs] <- old_update_expansion_R
rm(old_update_expansion_R)
}else{
update_expansion_R <- as.list(rep(NA, length(spline_REs)))
}
update_expansion_R[spline_REs] <- lapply(update_expansion_splines, FUN=function(i){matrix(i)})
}
prop_vi_sigma_alpha <- mapply(vi_sigma_alpha, update_expansion_R, SIMPLIFY = FALSE,
FUN=function(Phi, R){R %*% Phi %*% t(R)})
# cat('r')
    # Do any of the estimated "R_j" have a negative determinant?
sign_detRj <- sign(sapply(update_expansion_R, det))
any_neg_det <- any(sign_detRj < 0)
mapping_for_R_block <- make_mapping_alpha(update_expansion_R, px.R = TRUE)
update_expansion_Rblock <- prepare_T(mapping = mapping_for_R_block, levels_per_RE = g_j, num_REs = number_of_RE,
variables_per_RE = d_j, running_per_RE = breaks_for_RE, cyclical = FALSE, px.R = TRUE)
check_Rblock <- bdiag(mapply(update_expansion_R, g_j, FUN=function(i,g){bdiag(lapply(1:g, FUN=function(k){i}))}))
if (max(abs(check_Rblock - update_expansion_Rblock)) != 0){
warning('Error in creating parameter expansion; check that ELBO increases monotonically.')
}
update_expansion_R_logdet <- sapply(update_expansion_R, FUN=function(i){determinant(i)$modulus})
prop_vi_beta_mean <- update_expansion_bX
prop_vi_alpha_mean <- update_expansion_Rblock %*% vi_alpha_mean
if (!quiet_rho){cat('r')}
if (factorization_method != 'weak'){
prop_log_det_joint_var <- prop_vi_joint_decomp <- NULL
if (!any_neg_det){
prop_vi_alpha_decomp <- vi_alpha_decomp %*% t(update_expansion_Rblock)
}else{
warning(paste0('Manually corrected R_j with negative determinant at iteration ', it))
if (all(d_j == 1)){
if (!isDiagonal(update_expansion_Rblock)){
stop('Correction failed as R_j is not diagonal. Try requiring optimization of PX objective.')
}
diag(update_expansion_Rblock) <- abs(diag(update_expansion_Rblock))
prop_vi_alpha_decomp <- vi_alpha_decomp %*% t(update_expansion_Rblock)
}else{
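          # With d_j > 1, rebuild the variance as R (L'L) R' and re-factorize,
          # since transforming the decomposition directly need not remain
          # triangular when some R_j has a negative determinant.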
prop_vi_alpha_decomp <- update_expansion_Rblock %*% t(vi_alpha_decomp) %*%
vi_alpha_decomp %*% t(update_expansion_Rblock)
prop_vi_alpha_decomp <- Matrix::Cholesky(prop_vi_alpha_decomp)
prop_vi_alpha_decomp <- with(expand(prop_vi_alpha_decomp), t(L) %*% P)
}
}
prop_log_det_alpha_var <- log_det_alpha_var + 2 * sum(update_expansion_R_logdet * g_j)
prop_log_det_beta_var <- log_det_beta_var
prop_vi_beta_decomp <- vi_beta_decomp
prop_variance_by_alpha_jg <- calculate_expected_outer_alpha(
L = prop_vi_alpha_decomp,
alpha_mu = as.vector(prop_vi_alpha_mean),
re_position_list = outer_alpha_RE_positions)
prop_vi_sigma_outer_alpha <- prop_variance_by_alpha_jg$outer_alpha
}else{
      stop('Translation PX not implemented for weak factorization.')
# Be sure to set up linear case here too..
}
if (do_huangwand){
if (parameter_expansion == "diagonal"){
prop_vi_a_b_jp <- mapply(vi_a_b_jp, update_diag_R, SIMPLIFY = FALSE,
FUN=function(i,j){i / j^2})
if (px_method != 'OSL'){stop('Double check diagonal expansion')}
}else{
if (px_method %in% c('OSL')){
prop_moments <- mapply(moments_sigma_alpha, update_expansion_R, SIMPLIFY = FALSE,
FUN=function(Phi, R){
inv_R <- solve(R)
return(diag(t(inv_R) %*% Phi$sigma.inv %*% inv_R))
})
prop_vi_a_b_jp <- mapply(vi_a_nu_jp, vi_a_APRIOR_jp, prop_moments,
SIMPLIFY = FALSE,
FUN=function(nu, APRIOR, diag_j){
1/APRIOR^2 + nu * diag_j
})
}else if (px_method == 'numerical'){
prop_vi_a_b_jp <- vi_a_b_jp
}else{
prop_vi_a_b_jp <- update_expansion_hw[names(vi_a_b_jp)]
}
}
}else{
prop_vi_a_b_jp <- NULL
}
# #L^T L = Variance
# #R Var R^T --->
# # L %*% R^T
if (debug_px){
prop.ELBO <- calculate_ELBO(family = family,
ELBO_type = ELBO_type,
factorization_method = factorization_method,
d_j = d_j, g_j = g_j, prior_sigma_alpha_phi = prior_sigma_alpha_phi,
prior_sigma_alpha_nu = prior_sigma_alpha_nu,
iw_prior_constant = iw_prior_constant,
X = X, Z = Z, s = s, y = y,
vi_pg_b = vi_pg_b, vi_pg_mean = vi_pg_mean, vi_pg_c = vi_pg_c,
vi_sigma_alpha_nu = vi_sigma_alpha_nu,
vi_sigmasq_a = vi_sigmasq_a, vi_sigmasq_b = vi_sigmasq_b,
vi_sigmasq_prior_a = vi_sigmasq_prior_a, vi_sigmasq_prior_b = vi_sigmasq_prior_b,
vi_r_mean = vi_r_mean, vi_r_sigma = vi_r_sigma, vi_r_mu = vi_r_mu,
vi_sigma_alpha = prop_vi_sigma_alpha,
vi_a_b_jp = prop_vi_a_b_jp,
vi_sigma_outer_alpha = prop_vi_sigma_outer_alpha,
vi_beta_mean = prop_vi_beta_mean, vi_alpha_mean = prop_vi_alpha_mean,
log_det_beta_var = prop_log_det_beta_var,
log_det_alpha_var = prop_log_det_alpha_var,
log_det_joint_var = prop_log_det_joint_var,
vi_beta_decomp = prop_vi_beta_decomp,
vi_alpha_decomp = prop_vi_alpha_decomp,
vi_joint_decomp = prop_vi_joint_decomp,
do_huangwand = do_huangwand, vi_a_a_jp = vi_a_a_jp,
vi_a_nu_jp = vi_a_nu_jp, vi_a_APRIOR_jp = vi_a_APRIOR_jp,
choose_term
)
}
if (!quiet_rho){cat('d')}
# If debugging, check whether the change in ELBO
# from the profiled objective agrees with the
# change from the actual ELBO.
if (debug_px){
ELBO_diff <- prop.ELBO$ELBO - prior.ELBO$ELBO
if (!is.na(px_improve)){
if (abs(ELBO_diff - px_improve) > sqrt(.Machine$double.eps)){
stop('PX does not agree with debug.')
# browser()
# stop()
}
}else{
if (!isTRUE(all.equal(ELBO_diff, 0))){stop('PX altered parameters when NA.')}
}
debug_PX_ELBO[it] <- ELBO_diff
}
if (is.na(px_improve)){
accept.PX <- FALSE
}else if (px_improve > 0){
accept.PX <- TRUE
}else{
accept.PX <- FALSE
}
if (accept.PX){
# Accept the PX-VB adjustment
vi_beta_mean <- prop_vi_beta_mean
vi_alpha_mean <- prop_vi_alpha_mean
vi_sigma_alpha <- prop_vi_sigma_alpha
if (factorization_method == 'weak'){
stop('Setup reassignment weak')
if (do_SQUAREM){stop('...')}
}else{
vi_alpha_decomp <- prop_vi_alpha_decomp
log_det_alpha_var <- prop_log_det_alpha_var
if (do_SQUAREM){
vi_alpha_L_nonpermute <- vi_alpha_decomp
vi_alpha_LP <- Diagonal(n = ncol(vi_alpha_decomp))
}
}
variance_by_alpha_jg <- prop_variance_by_alpha_jg
vi_sigma_outer_alpha <- prop_vi_sigma_outer_alpha
if (do_huangwand){
vi_a_b_jp <- prop_vi_a_b_jp
}
}
if (!quiet_rho){
print(accept.PX)
if (debug_px){
out_px <- c(prop.ELBO$ELBO, prior.ELBO$ELBO)
names(out_px) <- c('PX', 'prior')
print(out_px)
}
}
if (isFALSE(accept.PX) & (px_method %in% c('numerical', 'profiled', 'numerical_hw'))){stop("PX SHOULD NOT FAIL")}
accepted_times <- accept.PX + accepted_times
if (do_timing){
toc(quiet = quiet_time, log = TRUE)
}
rm(prop_vi_beta_mean, prop_vi_alpha_mean, prop_vi_sigma_alpha, prop_vi_alpha_decomp,
prop_log_det_alpha_var, prop_variance_by_alpha_jg, prop_vi_sigma_outer_alpha)
rownames(vi_alpha_mean) <- fmt_names_Z
}
# Adjust the terms in the ELBO calculation that are different.
final.ELBO <- calculate_ELBO(family = family,
ELBO_type = ELBO_type,
factorization_method = factorization_method,
d_j = d_j, g_j = g_j, prior_sigma_alpha_phi = prior_sigma_alpha_phi,
prior_sigma_alpha_nu = prior_sigma_alpha_nu,
iw_prior_constant = iw_prior_constant,
X = X, Z = Z, s = s, y = y,
vi_pg_b = vi_pg_b, vi_pg_mean = vi_pg_mean, vi_pg_c = vi_pg_c,
vi_sigma_alpha = vi_sigma_alpha, vi_sigma_alpha_nu = vi_sigma_alpha_nu,
vi_sigma_outer_alpha = vi_sigma_outer_alpha,
vi_beta_mean = vi_beta_mean, vi_alpha_mean = vi_alpha_mean,
log_det_beta_var = log_det_beta_var, log_det_alpha_var = log_det_alpha_var,
vi_beta_decomp = vi_beta_decomp, vi_alpha_decomp = vi_alpha_decomp,
vi_joint_decomp = vi_joint_decomp, choose_term = choose_term,
vi_sigmasq_a = vi_sigmasq_a, vi_sigmasq_b = vi_sigmasq_b,
vi_sigmasq_prior_a = vi_sigmasq_prior_a, vi_sigmasq_prior_b = vi_sigmasq_prior_b,
log_det_joint_var = log_det_joint_var, vi_r_mu = vi_r_mu, vi_r_mean = vi_r_mean, vi_r_sigma = vi_r_sigma,
do_huangwand = do_huangwand, vi_a_a_jp = vi_a_a_jp, vi_a_b_jp = vi_a_b_jp,
vi_a_nu_jp = vi_a_nu_jp, vi_a_APRIOR_jp = vi_a_APRIOR_jp
)
if (do_timing) {
toc(quiet = quiet_time, log = TRUE)
tic("Update Squarem")
}
if (do_SQUAREM){
if (factorization_method %in% c('weak', 'collapsed')){
vi_alpha_L_nonpermute <- vi_beta_L_nonpermute <- NULL
vi_alpha_LP <- vi_beta_LP <- NULL
}else{
vi_joint_L_nonpermute <- vi_joint_LP <- NULL
}
squarem_list[[squarem_counter]] <- namedList(vi_sigma_alpha_nu,
vi_sigma_alpha, vi_alpha_mean, vi_beta_mean,
vi_pg_c, vi_alpha_L_nonpermute, vi_alpha_LP,
vi_beta_L_nonpermute, vi_beta_LP,
vi_alpha_L_nonpermute,
vi_joint_L_nonpermute, vi_joint_LP,
vi_a_a_jp, vi_a_b_jp,
vi_r_mu, vi_r_sigma, vi_r_mean)
if (family == 'linear'){
squarem_list[[squarem_counter]]$vi_sigmasq_a <- vi_sigmasq_a
squarem_list[[squarem_counter]]$vi_sigmasq_b <- vi_sigmasq_b
}
if (squarem_counter %% 3 == 0){
ELBOargs <- list(family = family,
ELBO_type = ELBO_type,
factorization_method = factorization_method,
d_j = d_j, g_j = g_j, prior_sigma_alpha_phi = prior_sigma_alpha_phi,
prior_sigma_alpha_nu = prior_sigma_alpha_nu,
iw_prior_constant = iw_prior_constant,
X = X, Z = Z, s = s, y = y,
vi_pg_b = vi_pg_b, vi_pg_mean = vi_pg_mean, vi_pg_c = vi_pg_c,
vi_sigma_alpha = vi_sigma_alpha, vi_sigma_alpha_nu = vi_sigma_alpha_nu,
vi_sigma_outer_alpha = vi_sigma_outer_alpha,
vi_beta_mean = vi_beta_mean, vi_alpha_mean = vi_alpha_mean,
log_det_beta_var = log_det_beta_var, log_det_alpha_var = log_det_alpha_var,
vi_beta_decomp = vi_beta_decomp, vi_alpha_decomp = vi_alpha_decomp,
vi_joint_decomp = vi_joint_decomp, choose_term = choose_term,
vi_sigmasq_a = vi_sigmasq_a, vi_sigmasq_b = vi_sigmasq_b,
vi_sigmasq_prior_a = vi_sigmasq_prior_a, vi_sigmasq_prior_b = vi_sigmasq_prior_b,
log_det_joint_var = log_det_joint_var,
vi_r_mu = vi_r_mu, vi_r_mean = vi_r_mean, vi_r_sigma = vi_r_sigma,
do_huangwand = do_huangwand, vi_a_a_jp = vi_a_a_jp, vi_a_b_jp = vi_a_b_jp,
vi_a_nu_jp = vi_a_nu_jp, vi_a_APRIOR_jp = vi_a_APRIOR_jp
)
if (factorization_method %in% c('weak', 'collapsed')){
squarem_par <- c('vi_a_b_jp', 'vi_sigma_alpha', 'vi_pg_c',
'vi_alpha_mean', 'vi_beta_mean', 'vi_joint_L_nonpermute')
squarem_type <- c('positive', 'matrix', 'positive',
'real', 'real', 'cholesky')
squarem_structure <- c('list', 'list', 'vector', 'vector', 'vector',
'vector')
}else{
squarem_par <- c('vi_a_b_jp', 'vi_sigma_alpha', 'vi_pg_c',
'vi_alpha_mean', 'vi_beta_mean', 'vi_beta_L_nonpermute',
'vi_alpha_L_nonpermute')
squarem_type <- c('positive', 'matrix', 'positive',
'real', 'real', 'cholesky', 'cholesky')
squarem_structure <- c('list', 'list', 'vector', 'vector', 'vector',
'vector', 'vector')
}
if (family == 'negbin'){
stop('Setup SQUAREM For negbin')
if (vi_r_method == 'VEM'){
squarem_par <- c(squarem_par, 'vi_r_mu')
squarem_type <- c(squarem_type, 'real')
squarem_structure <- c(squarem_structure, 'vector')
} else if (vi_r_method %in% c('Laplace', 'delta')) {
stop('Set up Laplace/delta for SQUAREM')
squarem_par <- c(squarem_par, 'vi_r_mu', 'vi_r_sigma')
squarem_type <- c(squarem_type, 'real', 'positive')
squarem_structure <- c(squarem_structure, 'vector', 'vector')
} else if (vi_r_method == 'fixed') {
}
}
if (family %in% 'linear'){
squarem_par <- c(squarem_par, 'vi_sigmasq_b')
squarem_type <- c(squarem_type, 'positive')
squarem_structure <- c(squarem_structure, 'vector')
}
remove_hw_b <- FALSE
if (!do_huangwand){
squarem_type <- squarem_type[!grepl(squarem_par, pattern='vi_a_b_jp')]
squarem_structure <- squarem_structure[!grepl(squarem_par, pattern='vi_a_b_jp')]
squarem_par <- squarem_par[!grepl(squarem_par, pattern='vi_a_b_jp')]
}else{
if (remove_hw_b){
squarem_type <- squarem_type[!grepl(squarem_par, pattern='vi_a_b_jp')]
squarem_structure <- squarem_structure[!grepl(squarem_par, pattern='vi_a_b_jp')]
squarem_par <- squarem_par[!grepl(squarem_par, pattern='vi_a_b_jp')]
}
}
remove_c <- FALSE
if (remove_c | family %in% 'linear'){
squarem_type <- squarem_type[!grepl(squarem_par, pattern='vi_pg_c')]
squarem_structure <- squarem_structure[!grepl(squarem_par, pattern='vi_pg_c')]
squarem_par <- squarem_par[!grepl(squarem_par, pattern='vi_pg_c')]
}
check_tri <- sapply(squarem_par[squarem_type == 'cholesky'], FUN=function(nm_i){
si <- sapply(squarem_list, FUN=function(i){isTriangular(i[[nm_i]])})
return(all(si))
})
squarem_type[squarem_type == 'cholesky'][check_tri == FALSE] <- 'lu'
if ('vi_pg_c' %in% squarem_par){
# Address possibility of "zero" for vi_pg_c
squarem_list <- lapply(squarem_list, FUN=function(i){
i$vi_pg_c <- ifelse(abs(i$vi_pg_c) < 1e-6, 1e-6, i$vi_pg_c)
return(i)
})
}
squarem_list <- lapply(squarem_list, FUN=function(i){
i[squarem_par] <- mapply(squarem_par, squarem_type,
squarem_structure, SIMPLIFY = FALSE, FUN=function(s_par, s_type, s_str){
if (s_str == 'vector'){
out <- squarem_prep_function(i[[s_par]], s_type)
}else{
out <- lapply(i[[s_par]], FUN=function(j){squarem_prep_function(j, s_type)})
}
return(out)
})
return(i)
})
prep_SQUAREM <- mapply(squarem_par, squarem_structure, squarem_type,
SIMPLIFY = FALSE, FUN=function(s_par, s_str, s_type){
if (s_type == 'lu'){
r <- list(
'L' = squarem_list[[2]][[s_par]]$L - squarem_list[[1]][[s_par]]$L,
'U' = squarem_list[[2]][[s_par]]$U - squarem_list[[1]][[s_par]]$U
)
d2 <- list(
'L' = squarem_list[[3]][[s_par]]$L - squarem_list[[2]][[s_par]]$L,
'U' = squarem_list[[3]][[s_par]]$U - squarem_list[[2]][[s_par]]$U
)
v <- list("L" = d2$L - r$L, 'U' = d2$U - r$U)
norm_sq_r <- sum(sapply(r, FUN=function(i){sum(i@x^2)}))
norm_sq_v <- sum(sapply(v, FUN=function(i){sum(i@x^2)}))
max_d <- max(sapply(d2, FUN=function(i){max(abs(i@x))}))
P <- squarem_list[[3]][[s_par]]$P
Q <- squarem_list[[3]][[s_par]]$Q
}else if (s_str == 'list'){
r <- mapply(squarem_list[[2]][[s_par]], squarem_list[[1]][[s_par]], SIMPLIFY = FALSE, FUN=function(i,j){i - j})
d2 <- mapply(squarem_list[[3]][[s_par]], squarem_list[[2]][[s_par]], SIMPLIFY = FALSE, FUN=function(i,j){i - j})
v <- mapply(d2, r, SIMPLIFY = FALSE, FUN=function(i,j){i - j})
norm_sq_r <- sum(unlist(lapply(r, as.vector))^2)
norm_sq_v <- sum(unlist(lapply(v, as.vector))^2)
max_d <- max(abs(sapply(d2, FUN=function(j){max(abs(j))})))
P <- NULL
Q <- NULL
}else{
r <- squarem_list[[2]][[s_par]] - squarem_list[[1]][[s_par]]
d2 <- squarem_list[[3]][[s_par]] - squarem_list[[2]][[s_par]]
v <- d2 - r
norm_sq_r <- sum(r^2)
norm_sq_v <- sum(v^2)
max_d = max(abs(d2))
P <- NULL
Q <- NULL
}
return(list(first = squarem_list[[1]][[s_par]],
second = squarem_list[[2]][[s_par]],
max_d = max_d, P = P, Q = Q,
r = r, v = v, norm_sq_r = norm_sq_r, norm_sq_v = norm_sq_v))
})
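      # SQUAREM extrapolation (Varadhan and Roland 2008): with r = F(x0) - x0
      # and v = (F(F(x0)) - F(x0)) - r, the step length is alpha = -||r||/||v||,
      # clamped to [-10, -1.01]; the proposal is x0 - 2 * alpha * r + alpha^2 * v.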
ind_alpha <- FALSE
if (ind_alpha){
alpha <- -sqrt((sapply(prep_SQUAREM, FUN=function(i){i$norm_sq_r}))) /
sqrt((sapply(prep_SQUAREM, FUN=function(i){i$norm_sq_v})))
if (any(alpha > -1)){
alpha[which(alpha > -1)] <- -1.01
}
if (any(alpha < -10)){
alpha[which(alpha < -10)] <- -10
}
max_d <- sapply(prep_SQUAREM, FUN=function(i){i$max_d})
if (any(max_d < tolerance_parameters)){
alpha[which(max_d < tolerance_parameters)] <- -1.01
}
}else{
alpha <- -sqrt(sum(sapply(prep_SQUAREM, FUN=function(i){i$norm_sq_r}))) /
sqrt(sum(sapply(prep_SQUAREM, FUN=function(i){i$norm_sq_v})))
if (alpha > -1){
alpha <- -1.01
}
if (alpha < -10){
alpha <- -10
}
alpha <- rep(alpha, length(prep_SQUAREM))
names(alpha) <- names(prep_SQUAREM)
}
if (!quiet_rho){print(alpha)}
orig_squarempar <- squarem_par
orig_alpha <- alpha
for (attempt_SQUAREM in 1:10){
if (!quiet_rho){print(mean(alpha))}
squarem_par <- orig_squarempar
if (attempt_SQUAREM > 1){
alpha <- (alpha - 1)/2
}
prop_squarem <- mapply(prep_SQUAREM, squarem_structure, squarem_type, alpha, SIMPLIFY = FALSE,
FUN=function(i, s_str, s_type, s_alpha){
if (s_type == 'lu'){
prop_squarem <- lapply(c('L', 'U'), FUN=function(k){
i$first[[k]] - 2 * s_alpha * i$r[[k]] + s_alpha^2 * i$v[[k]]
})
names(prop_squarem) <- c('L', 'U')
prop_squarem$P <- i$P
prop_squarem$Q <- i$Q
if (!quiet_rho){if (!isTRUE(all.equal(i$second$P, i$P))){print('MISALIGNED at P')}}
if (!quiet_rho){if (!isTRUE(all.equal(i$second$Q, i$Q))){print('MISALIGNED at Q')}}
}else if (s_str == 'list'){
prop_squarem <- mapply(i$first, i$second,
i$r, i$v, SIMPLIFY = FALSE, FUN=function(i_1, s_1, r_1, v_1){
out <- i_1 - 2 * s_alpha * r_1 + s_alpha^2 * v_1
return(out)
})
names(prop_squarem) <- names(i$first)
}else{
prop_squarem <- i$first - 2 * s_alpha * i$r + s_alpha^2 * i$v
}
return(prop_squarem)
})
names(prop_squarem) <- squarem_par
prop_ELBOargs <- ELBOargs
prop_squarem <- mapply(prop_squarem, squarem_type,
squarem_structure, SIMPLIFY = FALSE, FUN=function(i, s_type, s_str){
if (s_str == 'vector'){
out <- squarem_unprep_function(i, s_type)
}else{
out <- lapply(i, FUN=function(j){squarem_unprep_function(j, s_type)})
}
return(out)
})
if (factorization_method == 'weak'){
if (squarem_type[squarem_par == 'vi_joint_L_nonpermute'] == 'lu'){
prop_squarem$vi_joint_decomp <- prop_squarem$vi_joint_L_nonpermute$M
prop_ELBOargs$log_det_joint_var <- prop_squarem$vi_joint_L_nonpermute$logdet_M
}else{
prop_squarem$vi_joint_decomp <- prop_squarem$vi_joint_L_nonpermute %*% t(squarem_list[[1]]$vi_joint_LP)
prop_ELBOargs$log_det_joint_var <- 2 * sum(log(diag(prop_squarem$vi_joint_L_nonpermute)))
}
prop_squarem$vi_alpha_decomp <- prop_squarem$vi_joint_decomp[, -1:-p.X, drop = F]
prop_squarem$vi_beta_decomp <- prop_squarem$vi_joint_decomp[, 1:p.X, drop = F]
squarem_par <- c(squarem_par, 'log_det_joint_var')
squarem_par <- c(squarem_par, 'vi_joint_decomp')
}else if (factorization_method == 'collapsed'){
stop('Setup squarem for collapsed')
if (squarem_type[squarem_par == 'vi_joint_L_nonpermute'] == 'lu'){
prop_squarem$vi_joint_decomp <- prop_squarem$vi_joint_L_nonpermute$M
prop_ELBOargs$log_det_joint_var <- prop_squarem$vi_joint_L_nonpermute$logdet_M
}else{
prop_squarem$vi_joint_decomp <- prop_squarem$vi_joint_L_nonpermute %*% t(squarem_list[[1]]$vi_joint_LP)
prop_ELBOargs$log_det_joint_var <- 2 * sum(log(diag(prop_squarem$vi_joint_L_nonpermute)))
}
prop_squarem$vi_alpha_decomp <- prop_squarem$vi_joint_decomp[, -1:-p.X, drop = F]
prop_squarem$vi_beta_decomp <- prop_squarem$vi_joint_decomp[, 1:p.X, drop = F]
squarem_par <- c(squarem_par, 'log_det_joint_var')
squarem_par <- c(squarem_par, 'vi_joint_decomp')
}else{
if (squarem_type[squarem_par == 'vi_beta_L_nonpermute'] == 'lu'){
prop_ELBOargs$log_det_beta_var <- prop_squarem$vi_beta_L_nonpermute$logdet_M
prop_squarem$vi_beta_decomp <- prop_squarem$vi_beta_L_nonpermute$M
}else{
prop_ELBOargs$log_det_beta_var <- 2 * sum(log(diag(prop_squarem$vi_beta_L_nonpermute)))
prop_squarem$vi_beta_decomp <- prop_squarem$vi_beta_L_nonpermute %*% t(squarem_list[[1]]$vi_beta_LP)
}
if (squarem_type[squarem_par == 'vi_alpha_L_nonpermute'] == 'lu'){
prop_ELBOargs$log_det_alpha_var <- prop_squarem$vi_alpha_L_nonpermute$logdet_M
prop_squarem$vi_alpha_decomp <- prop_squarem$vi_alpha_L_nonpermute$M
}else{
prop_ELBOargs$log_det_alpha_var <- 2 * sum(log(diag(prop_squarem$vi_alpha_L_nonpermute)))
prop_squarem$vi_alpha_decomp <- prop_squarem$vi_alpha_L_nonpermute %*% t(squarem_list[[1]]$vi_alpha_LP)
}
squarem_par <- c(squarem_par, 'log_det_alpha_var', 'log_det_beta_var')
squarem_par <- c(squarem_par, 'vi_alpha_decomp', 'vi_beta_decomp')
}
if (family == 'negbin'){
if (vi_r_method == 'VEM'){
prop_ELBOargs$vi_r_mean <- exp(prop_squarem$vi_r_mu)
} else if (vi_r_method %in% c('Laplace', 'delta')){
prop_ELBOargs$vi_r_mean <- exp(prop_squarem$vi_r_mu + prop_squarem$vi_r_sigma/2)
}
if (factorization_method != 'weak'){
prop_joint_var <- rowSums((X %*% t(prop_squarem$vi_beta_decomp))^2) +
rowSums((Z %*% t(prop_squarem$vi_alpha_decomp))^2)
}else{
prop_joint_var <- cpp_zVz(Z = joint.XZ,
V = as(prop_squarem$vi_joint_decomp, "generalMatrix"))
}
if (vi_r_method %in% c('Laplace', 'delta')){
prop_joint_var <- prop_joint_var + vi_r_sigma
}
prop_ELBOargs$vi_pg_c <- sqrt(as.vector(X %*% prop_squarem$vi_beta_mean + Z %*% prop_squarem$vi_alpha_mean - prop_squarem$vi_r_mu)^2 + prop_joint_var)
prop_ELBOargs$vi_pg_b <- y + prop_ELBOargs$vi_r_mean
}
for (v in names(prop_squarem)){
prop_ELBOargs[[v]] <- prop_squarem[[v]]
}
prop_ELBOargs$vi_alpha_L_nonpermute <- NULL
prop_ELBOargs$vi_beta_L_nonpermute <- NULL
prop_ELBOargs$vi_joint_L_nonpermute <- NULL
if ('vi_alpha_mean' %in% names(prop_squarem)){
prop_variance_by_alpha_jg <- calculate_expected_outer_alpha(
L = (prop_squarem$vi_alpha_decomp),
alpha_mu = as.vector(prop_squarem$vi_alpha_mean),
re_position_list = outer_alpha_RE_positions)
prop_ELBOargs[['vi_sigma_outer_alpha']] <- prop_variance_by_alpha_jg$outer_alpha
squarem_par <- c(squarem_par, 'vi_sigma_outer_alpha')
}
if (remove_hw_b){
prop_diag_Einv_sigma <- mapply(prop_ELBOargs$vi_sigma_alpha,
vi_sigma_alpha_nu, d_j, SIMPLIFY = FALSE, FUN = function(phi, nu, d) {
inv_phi <- solve(phi)
sigma.inv <- nu * inv_phi
return(diag(sigma.inv))
})
prop_ELBOargs$vi_a_b_jp <- mapply(vi_a_nu_jp, vi_a_APRIOR_jp, prop_diag_Einv_sigma,
SIMPLIFY = FALSE,
FUN=function(nu, APRIOR, diag_j){
1/APRIOR^2 + nu * diag_j
})
squarem_par <- c(squarem_par, 'vi_a_b_jp')
}
if ('vi_pg_c' %in% squarem_par){
if (family %in% 'binomial'){
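          # Mean of a Polya-Gamma PG(b, c) variable: E[omega] = b/(2c) * tanh(c/2);
          # as c -> 0 this tends to b/4, which is patched in below to avoid 0/0.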
prop_vi_pg_mean <- prop_ELBOargs$vi_pg_b / (2 * prop_ELBOargs$vi_pg_c) * tanh(prop_ELBOargs$vi_pg_c / 2)
fill_zero <- which(abs(prop_ELBOargs$vi_pg_c) < 1e-6)
if (length(fill_zero) > 0){
prop_vi_pg_mean[fill_zero] <- prop_ELBOargs$vi_pg_b[fill_zero]/4
}
prop_ELBOargs[['vi_pg_mean']] <- prop_vi_pg_mean
squarem_par <- c(squarem_par, 'vi_pg_mean')
}else{stop('Set up SQUAREM for other family')}
}else if (!(family %in% 'linear')){
if (family != 'binomial'){stop('check squarem for non-binomial case')}
if (factorization_method %in% c("weak", "collapsed")) {
joint_quad <- cpp_zVz(Z = joint.XZ,
V = as(prop_ELBOargs$vi_joint_decomp, "generalMatrix"))
if (family == 'negbin'){
joint_quad <- joint_quad + prop_ELBOargs$vi_r_sigma
}
prop_ELBOargs$vi_pg_c <- sqrt(as.vector(X %*% prop_ELBOargs$vi_beta_mean + Z %*% prop_ELBOargs$vi_alpha_mean - prop_ELBOargs$vi_r_mu)^2 + joint_quad)
} else {
beta_quad <- rowSums((X %*% t(prop_ELBOargs$vi_beta_decomp))^2)
alpha_quad <- rowSums((Z %*% t(prop_ELBOargs$vi_alpha_decomp))^2)
joint_var <- beta_quad + alpha_quad
if (family == 'negbin'){
joint_var <- joint_var + prop_ELBOargs$vi_r_sigma
}
prop_ELBOargs$vi_pg_c <- sqrt(as.vector(X %*% prop_ELBOargs$vi_beta_mean + Z %*% prop_ELBOargs$vi_alpha_mean - prop_ELBOargs$vi_r_mu)^2 + joint_var)
}
prop_vi_pg_mean <- prop_ELBOargs$vi_pg_b / (2 * prop_ELBOargs$vi_pg_c) * tanh(prop_ELBOargs$vi_pg_c / 2)
fill_zero <- which(abs(prop_ELBOargs$vi_pg_c) < 1e-6)
if (length(fill_zero) > 0){
prop_vi_pg_mean[fill_zero] <- prop_ELBOargs$vi_pg_b[fill_zero]/4
}
prop_ELBOargs[['vi_pg_mean']] <- prop_vi_pg_mean
squarem_par <- c(squarem_par, 'vi_pg_c', 'vi_pg_mean')
}
elbo_init <- do.call("calculate_ELBO", ELBOargs)
elbo_squarem <- do.call("calculate_ELBO", prop_ELBOargs)
if (!quiet_rho){print(c(elbo_squarem$ELBO, elbo_init$ELBO))}
if (elbo_squarem$ELBO >= elbo_init$ELBO){break}
}
if (elbo_squarem$ELBO >= elbo_init$ELBO){
if (!quiet_rho){cat('SUCCESS')}
squarem_success <- squarem_success + 1
squarem.ELBO <- elbo_squarem
final.ELBO <- elbo_squarem
for (v in squarem_par){
assign(v, prop_ELBOargs[[v]])
}
test_ELBO <- calculate_ELBO(family = family,
ELBO_type = ELBO_type,
factorization_method = factorization_method,
d_j = d_j, g_j = g_j, prior_sigma_alpha_phi = prior_sigma_alpha_phi,
prior_sigma_alpha_nu = prior_sigma_alpha_nu,
iw_prior_constant = iw_prior_constant,
X = X, Z = Z, s = s, y = y,
vi_pg_b = vi_pg_b, vi_pg_mean = vi_pg_mean, vi_pg_c = vi_pg_c,
vi_sigma_alpha = vi_sigma_alpha, vi_sigma_alpha_nu = vi_sigma_alpha_nu,
vi_sigma_outer_alpha = vi_sigma_outer_alpha,
vi_beta_mean = vi_beta_mean, vi_alpha_mean = vi_alpha_mean,
log_det_beta_var = log_det_beta_var, log_det_alpha_var = log_det_alpha_var,
vi_beta_decomp = vi_beta_decomp, vi_alpha_decomp = vi_alpha_decomp,
vi_joint_decomp = vi_joint_decomp, choose_term = choose_term,
vi_sigmasq_a = vi_sigmasq_a, vi_sigmasq_b = vi_sigmasq_b,
vi_sigmasq_prior_a = vi_sigmasq_prior_a, vi_sigmasq_prior_b = vi_sigmasq_prior_b,
log_det_joint_var = log_det_joint_var, vi_r_mu = vi_r_mu, vi_r_mean = vi_r_mean, vi_r_sigma = vi_r_sigma,
do_huangwand = do_huangwand, vi_a_a_jp = vi_a_a_jp, vi_a_b_jp = vi_a_b_jp,
vi_a_nu_jp = vi_a_nu_jp, vi_a_APRIOR_jp = vi_a_APRIOR_jp
)
          if (test_ELBO$ELBO != elbo_squarem$ELBO){stop('SQUAREM: ELBO mismatch after parameter reassignment.')}
}else{
if (!quiet_rho){cat('FAIL')}
squarem_success[1] <- squarem_success[1] + 1
final.ELBO <- squarem.ELBO <- final.ELBO
}
squarem_list <- list()
squarem_counter <- 1
}else{
squarem_counter <- squarem_counter + 1
}
}
if (do_timing) {
toc(quiet = quiet_time, log = T)
tic("Final Cleanup")
}
if (debug_ELBO & it != 1) {
debug_ELBO.1$step <- 1
debug_ELBO.2$step <- 2
debug_ELBO.3$step <- 3
if (do_SQUAREM & (it %% 3 == 0)){
squarem.ELBO$step <- 4
final.ELBO$step <- 5
update_ELBO <- rbind(debug_ELBO.1, debug_ELBO.2, debug_ELBO.3, squarem.ELBO, final.ELBO)
}else{
final.ELBO$step <- 4
update_ELBO <- rbind(debug_ELBO.1, debug_ELBO.2, debug_ELBO.3, final.ELBO)
}
update_ELBO$it <- it
store_ELBO <- rbind(store_ELBO, update_ELBO)
} else {
final.ELBO$step <- NA
final.ELBO$it <- it
store_ELBO <- rbind(store_ELBO, final.ELBO)
}
# if (!quiet_rho){
# if (factorization_method == 'weak'){
# print('NonsparseA')
# print(length(vi_joint_decomp@x))
# }else{
# print('NonsparseA')
# print(length(vi_alpha_decomp@x))
# }
# }
## Change diagnostics
change_elbo <- final.ELBO$ELBO - lagged_ELBO
change_alpha_mean <- max(abs(vi_alpha_mean - lagged_alpha_mean))
change_beta_mean <- max(abs(vi_beta_mean - lagged_beta_mean))
unlist_vi <- c(unlist(lapply(vi_sigma_alpha, as.vector)), unlist(vi_a_b_jp))
if (debug_ELBO){
svi <- data.frame(t(as.vector(unlist_vi)))
svi$it <- it
store_vi <- rbind(store_vi, svi)
}
change_sigma_mean <- mapply(vi_sigma_alpha, lagged_sigma_alpha, FUN = function(i, j) {
max(abs(i - j))
})
if (factorization_method %in% c("weak", "collapsed")) {
change_joint_var <- 0 # change_joint_var <- max(abs(vi_joint_decomp - lagged_joint_decomp))
change_alpha_var <- change_beta_var <- 0
} else {
change_joint_var <- 0
change_alpha_var <- max(abs(vi_alpha_decomp - lagged_alpha_decomp))
change_beta_var <- max(abs(vi_beta_decomp - lagged_beta_decomp))
}
change_vi_r_mu <- vi_r_mu - lagged_vi_r_mu
if (do_timing) {
toc(quiet = quiet_time, log = T)
}
if (debug_param) {
store_beta[it, ] <- as.vector(vi_beta_mean)
store_alpha[it, ] <- as.vector(vi_alpha_mean)
if (do_huangwand){
store_hw[it,] <- unlist(vi_a_b_jp)
colnames(store_hw) <- names(unlist(vi_a_b_jp))
}
store_sigma[it,] <- unlist(lapply(vi_sigma_alpha, as.vector))
colnames(store_sigma) <- names(unlist(lapply(vi_sigma_alpha, as.vector)))
}
change_all <- data.frame(change_alpha_mean, change_beta_mean,
t(change_sigma_mean), change_alpha_var, change_beta_var, change_joint_var, change_vi_r_mu)
if ((max(change_all) < tolerance_parameters) | (change_elbo > 0 & change_elbo < tolerance_elbo)) {
if (!quiet) {
message(paste0("Converged after ", it, " iterations with ELBO change of ", round(change_elbo, 1 + abs(floor(log(tolerance_elbo) / log(10))))))
message(paste0("The largest change in any variational parameter was ", round(max(change_all), 1 + abs(floor(log(tolerance_parameters) / log(10))))))
}
break
}
if (debug_ELBO){
change_all$it <- it
store_parameter_traj <- rbind(store_parameter_traj, change_all)
}
if (!quiet & (it %% print_prog == 0)) {
message(paste0("ELBO Change: ", round(change_elbo, 10)))
message(paste0("Other Parameter Changes: ", max(change_all)))
}
lagged_alpha_mean <- vi_alpha_mean
lagged_beta_mean <- vi_beta_mean
lagged_alpha_decomp <- vi_alpha_decomp
lagged_beta_decomp <- vi_beta_decomp
lagged_sigma_alpha <- vi_sigma_alpha
lagged_vi_r_mu <- vi_r_mu
lagged_ELBO <- final.ELBO$ELBO
}
if (it == iterations) {
message(paste0("Ended without Convergence after ", it, " iterations : ELBO change of ", round(change_elbo[1], abs(floor(log(tolerance_elbo) / log(10))))))
}
if (parameter_expansion %in% c("translation", "diagonal")) {
final.ELBO$accepted_PX <- accepted_times / attempted_expansion
}
rownames(vi_beta_mean) <- colnames(X)
output <- list(
beta = list(mean = vi_beta_mean),
ELBO = final.ELBO,
ELBO_trajectory = store_ELBO,
sigma = list(cov = vi_sigma_alpha, df = vi_sigma_alpha_nu),
alpha = list(mean = vi_alpha_mean)
)
if (family == 'linear'){
output$sigmasq <- list(a = vi_sigmasq_a, b = vi_sigmasq_b)
}else if (family == 'negbin'){
}
output$family <- family
output$control <- control
if (do_timing) {
tic_log <- tictoc::tic.log(format = FALSE)
tic_log <- data.frame(stage = sapply(tic_log, FUN = function(i) {
i$msg
}), time = sapply(tic_log, FUN = function(i) {
i$toc - i$tic
}), stringsAsFactors = F)
tic.clear()
tic.clearlog()
tic_summary <- lapply(split(tic_log$time, tic_log$stage),
FUN=function(i){
data.frame(n = length(i), mean = mean(i), min = min(i), max = max(i),
total = sum(i))
}
)
tic_summary <- do.call('rbind', tic_summary)
tic_summary$variable <- rownames(tic_summary)
rownames(tic_summary) <- NULL
} else {
tic_summary <- NULL
}
if (debug_param) {
store_beta <- store_beta[1:it,,drop=F]
store_alpha <- store_alpha[1:it,,drop=F]
if (do_huangwand){
store_hw <- store_hw[1:it,,drop=F]
}else{store_hw <- NULL}
store_sigma <- store_sigma[1:it,,drop=F]
output$parameter_trajectory <- list(beta = store_beta,
alpha = store_alpha,
sigma = store_sigma,
hw = store_hw)
}
if (factorization_method %in% c("weak", "collapsed")) {
output$joint <- list(decomp_var = vi_joint_decomp)
}
if (control$return_data) {
output$data <- list(X = X, Z = Z, y = y, trials = trials)
}
output$formula <- list(formula = formula,
re = re_fmla, fe = fe_fmla,
interpret_gam = parse_formula,
tt = tt, fe_Xlevels = fe_Xlevels,
fe_contrasts = fe_contrasts, fe_terms = fe_terms)
output$alpha$dia.var <- unlist(lapply(variance_by_alpha_jg$variance_jg, FUN = function(i) {
as.vector(sapply(i, diag))
}))
output$beta$var <- t(vi_beta_decomp) %*% vi_beta_decomp
output$beta$decomp_var <- vi_beta_decomp
if (family == "negbin") {
output$ln_r <- list(mu = vi_r_mu, sigma = vi_r_sigma, method = vi_r_method)
}
if (do_huangwand){
output$hw <- list(a = vi_a_a_jp, b = vi_a_b_jp)
}
output$internal_parameters <- list(
it_used = it, it_max = iterations,
lp = as.vector(X %*% vi_beta_mean + Z %*% vi_alpha_mean - vi_r_mu),
parameter.change = change_all,
parameter.vi = store_vi,
parameter.path = store_parameter_traj,
spline = list(attr = Z.spline.attr, size = Z.spline.size),
missing_obs = missing_obs, N = nrow(X),
acceleration = list(accept.PX = accept.PX,
squarem_success = squarem_success, debug_PX_ELBO = debug_PX_ELBO),
names_of_RE = names_of_RE, d_j = d_j, g_j = g_j
)
MAVB_parameters <- list(
M_mu_to_beta = M_mu_to_beta,
M_prime = M_prime,
M_prime_one = M_prime_one,
B_j = B_j,
outer_alpha_RE_positions = outer_alpha_RE_positions,
d_j = d_j, g_j = g_j
)
output$internal_parameters$MAVB_parameters <- MAVB_parameters
output$alpha$var <- variance_by_alpha_jg$variance_jg
output$alpha$decomp_var <- vi_alpha_decomp
output$timing <- tic_summary
class(output) <- "vglmer"
return(output)
}
#' Control for vglmer estimation
#'
#' This function controls various estimation options for \code{vglmer}.
#'
#' @param iterations Default of 1000; this sets the maximum number of iterations
#' used in estimation.
#' @param factorization_method Factorization assumption for the variational
#' approximation. Default of \code{"strong"}, i.e. a fully factorized model.
#' Described in detail in Goplerud (2022a). \code{"strong"}, \code{"partial"},
#' and \code{"weak"} correspond to Schemes I, II, and III respectively in that
#' paper.
#' @param prior_variance Prior distribution on the random effect variance
#' \eqn{\Sigma_j}. Options are \code{hw}, \code{jeffreys}, \code{mean_exists},
#' \code{uniform}, and \code{limit}. The default (\code{hw}) is the Huang-Wand
#' (2013) prior whose hyper-parameters are \eqn{\nu_j} = 2 and \eqn{A_{j,k}} =
#' 5. Otherwise, the prior is an Inverse Wishart with the following parameters
#' where \eqn{d_j} is the dimensionality of the random effect \eqn{j}.
#' \itemize{
#' \item mean_exists: \eqn{IW(d_j + 1, I)}
#' \item jeffreys: \eqn{IW(0, 0)}
#' \item uniform: \eqn{IW(-[d_j+1], 0)}
#' \item limit: \eqn{IW(d_j - 1, 0)}
#' }
#' Estimation may fail if an improper prior (\code{jeffreys}, \code{uniform},
#' \code{limit}) is used.
#' @param tolerance_elbo Default (\code{1e-8}) sets a convergence threshold if
#' the change in the ELBO is below the tolerance.
#' @param tolerance_parameters Default (\code{1e-5}) sets a convergence
#' threshold that is achieved if no parameter changes by more than the
#' tolerance from the prior estimated value.
#' @param parameter_expansion Default of \code{"translation"} (see Goplerud
#' 2022b). Valid options are \code{"translation"}, \code{"mean"}, or
#' \code{"none"}. \code{"mean"} should be employed if \code{"translation"} is
#' not enabled or is too computationally expensive. For negative binomial
#' estimation or any estimation where \code{factorization_method != "strong"},
#' only \code{"mean"} and \code{"none"} are available.
#' @param px_method When \code{parameter_expansion="translation"}, the default
#' (\code{"dynamic"}) tries a one-step-late update and, if this fails, a
#' numerical improvement by L-BFGS-B. For an Inverse-Wishart prior on
#' \eqn{\Sigma_j}, this is set to \code{"OSL"}, which only attempts a
#' one-step-late update.
#' @param px_numerical_it Default of 10; if L-BFGS-B is needed for a parameter
#' expansion, this sets the number of steps used.
#' @param hw_inner If \code{prior_variance="hw"}, this sets the number of
#' repeated iterations between estimating \eqn{\Sigma_j} and \eqn{a_{j,k}}
#' variational distributions at each iteration. A larger number approximates
#' jointly updating both parameters. Default (10) typically performs well.
#' @param force_whole Default (\code{TRUE}) requires integer values for the
#' observed outcome in binomial or count models. \code{FALSE} allows for
#' fractional responses.
#' @param vi_r_method Default (\code{"VEM"}) uses a variational EM algorithm for
#' updating \eqn{r} if \code{family="negbin"}. This assumes a point mass
#' distribution on \eqn{r}. A number can be provided to fix \eqn{r}. These are
#' the only available options.
#' @param init Default (\code{"EM_FE"}) initializes the mean variational
#' parameters for \eqn{q(\beta, \alpha)} by setting the random effects to zero
#' and estimating the fixed effects using a short-running EM algorithm.
#' \code{"EM"} initializes the model with a ridge regression with a guess as
#' to the random effect variance. \code{"random"} initializes the means
#' randomly. \code{"zero"} initializes them at zero.
#' @param debug_param Default (\code{FALSE}) does not store parameters before
#' the final iteration. Set to \code{TRUE} to debug convergence issues.
#' @param debug_ELBO Default (\code{FALSE}) does not store the ELBO after each
#' parameter update. Set to \code{TRUE} to debug convergence issues.
#' @param quiet_rho Default (\code{TRUE}) does not print information about
#' parameter expansions. Set to \code{FALSE} to debug convergence issues.
#' @param debug_px Default (\code{FALSE}) does not store information about
#' whether parameter expansion worked. Set to \code{TRUE} to debug convergence
#' issues.
#' @param linpred_method Default (\code{"joint"}) updates the mean parameters
#' for the fixed and random effects simultaneously. This can improve the speed
#' of estimation but may be costly for large datasets; use \code{"cyclical"}
#' to update each parameter block separately.
#' @param print_prog Default (\code{NULL}) prints a \code{"."} to indicate once
#' 5\% of the total iterations have elapsed. Set to a positive integer
#' \code{int} to print a \code{"."} every \code{int} iterations.
#' @param quiet Default (\code{TRUE}) does not print intermediate output about
#' convergence. Set to \code{FALSE} to debug.
#' @param return_data Default (\code{FALSE}) does not return the original
#' design. Set to \code{TRUE} to debug convergence issues.
#' @param verbose_time Default (\code{FALSE}) does not print the time elapsed
#' for each parameter update. Set to \code{TRUE}, in conjunction with
#' \code{do_timing=TRUE}, to see the time taken for each parameter update.
#' @param do_timing Default (\code{FALSE}) does not estimate timing of each
#' variational update; \code{TRUE} requires the package \code{tictoc}.
#' @param do_SQUAREM Default (\code{TRUE}) accelerates estimation using SQUAREM
#' (Varadhan and Roland 2008).
#' @param verify_columns Default (\code{FALSE}) \bold{does not} verify that all
#' columns are drawn from the data.frame itself versus the environment. Set to
#' \code{TRUE} to debug potential issues.
#'
#' @return This function returns a named list with class \code{vglmer_control}.
#' It is passed to \code{vglmer} in the argument \code{control}. This argument
#' only accepts objects created using \code{vglmer_control}.
#'
#' @references
#' Goplerud, Max. 2022a. "Fast and Accurate Estimation of Non-Nested Binomial
#' Hierarchical Models Using Variational Inference." \emph{Bayesian Analysis}.
#' 17(2): 623-650.
#'
#' Goplerud, Max. 2022b. "Re-Evaluating Machine Learning for MRP Given the
#' Comparable Performance of (Deep) Hierarchical Models." Working Paper.
#'
#' Huang, Alan, and Matthew P. Wand. 2013. "Simple Marginally Noninformative
#' Prior Distributions for Covariance Matrices." \emph{Bayesian Analysis}.
#' 8(2):439-452.
#'
#' Varadhan, Ravi, and Christophe Roland. 2008. "Simple and Globally Convergent
#' Methods for Accelerating the Convergence of any EM Algorithm."
#' \emph{Scandinavian Journal of Statistics}. 35(2): 335-353.
#' @export
vglmer_control <- function(iterations = 1000,
prior_variance = "hw",
factorization_method = c("strong", "partial", "weak"),
parameter_expansion = "translation", do_SQUAREM = TRUE,
tolerance_elbo = 1e-8, tolerance_parameters = 1e-5,
force_whole = TRUE, print_prog = NULL,
do_timing = FALSE, verbose_time = FALSE,
return_data = FALSE, linpred_method = "joint",
vi_r_method = "VEM", verify_columns = FALSE,
debug_param = FALSE, debug_ELBO = FALSE, debug_px = FALSE,
quiet = TRUE, quiet_rho = TRUE,
px_method = 'dynamic', px_numerical_it = 10,
hw_inner = 10,
init = "EM_FE") {
factorization_method <- match.arg(factorization_method)
prior_variance <- match.arg(prior_variance,
choices = c("hw", "mean_exists", "jeffreys", "limit", "uniform"))
linpred_method <- match.arg(linpred_method, choices = c("joint", "cyclical", "solve_normal"))
parameter_expansion <- match.arg(parameter_expansion, choices = c("translation", "mean", "none"))
# vi_r_method <- match.arg(vi_r_method, choices = c("VEM", "fixed", "Laplace", "delta"))
init <- match.arg(init, choices = c("EM_FE", "EM", "random", "zero"))
if (!is.null(print_prog)){
if (print_prog < 0){stop('print_prog must be non-negative integer or NULL.')}
}
if (iterations < 0){stop('iterations must be positive integer')}
if (tolerance_elbo < 0 | tolerance_parameters < 0){
stop('tolerance for ELBO and parameters must be non-negative.')
}
  if (factorization_method != "strong" & !(parameter_expansion %in% c("mean", "none"))){
message('Setting parameter_expansion to mean for non-strong factorization')
parameter_expansion <- 'mean'
}
if (prior_variance != 'hw' & px_method != 'OSL' & parameter_expansion %in% c('diagonal', 'translation')){
px_method <- 'OSL'
message('Setting px_method to "OSL" if translation & non-HW prior.')
}
output <- mget(ls())
class(output) <- c("vglmer_control")
return(output)
}
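
# A minimal, illustrative sketch of how a control object is used (not run;
# `sim_df` is a hypothetical data frame with a binary outcome y, a covariate
# x, and a grouping factor g):
if (FALSE) {
  ctrl <- vglmer_control(iterations = 500, factorization_method = "strong",
                         do_SQUAREM = TRUE, tolerance_elbo = 1e-8)
  fit <- vglmer(formula = y ~ x + (1 | g), data = sim_df,
                family = "binomial", control = ctrl)
}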
|
/scratch/gouwar.j/cran-all/cranData/vglmer/R/vglmer_regression.R
|
#' Codon Adaptation Index (CAI)
#'
#' Measure the Codon Adaptation Index (CAI) Sharp and Li (1987), of DNA sequence.
#'
#' For more information about CAI \href{https://academic.oup.com/nar/article-abstract/15/3/1281/1166844?redirectedFrom=fulltext}{Sharp and Li, 1987}.
#'
#' @usage CAI.values(df.virus, ENc.set.host,
#' df.host, genetic.code = "1", set.len = 5, threshold = 0)
#'
#' @param df.virus a data frame with seq_name and its virus DNA sequence.
#' @param ENc.set.host a data frame with ENc values of a host.
#' @param df.host a data frame with seq_name and its host DNA sequence.
#' @param genetic.code a single string that uniquely identifies a genetic code to use.
#' @param set.len a number giving the percentage of total host genes to use as the reference gene set.
#' @param threshold optional numeric, specifying sequence length, in codons, used for filtering.
#'
#' @return A data.frame containing the computed CAI values for each DNA sequence within df.virus.
#'
#' @import coRdon
#' @import stringr
#' @importFrom Biostrings DNAStringSet
#'
#' @examples
#'
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' # Calculate CAI
#' enc.df.host <- ENc.values(fasta.h)
#' }
#' \dontshow{fasta.v <- fasta.v[1:10,]}
#' cai.df <- CAI.values(fasta.v, enc.df.host, fasta.h)
#'
#'
#' @export
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
#'
CAI.values <- function(df.virus, ENc.set.host, df.host, genetic.code = "1",
set.len = 5, threshold = 0) {
  # build the reference set from the host genes with the lowest ENc values
  # (the most codon-biased and thus presumed highly expressed genes)
newENc <- ENc.set.host[order(ENc.set.host$ENc), ]
set.len <- length(newENc$gene.name) * (set.len / 100)
gene.set <- newENc$gene.name[1:set.len]
gene.set <- df.host[df.host$seq_name %in% gene.set, ]
dna.set <- as.vector(gene.set$sequence)
  # trim the sequence so its length is a multiple of 3 (length %% 3 == 0)
firstframe <- function(sequence) {
sequence <- str_sub(sequence, start = 1, end = (nchar(sequence) - nchar(sequence) %% 3))
return(sequence)
}
dna.set <- lapply(dna.set, function(x) firstframe(x))
dna.set <- unlist(dna.set, use.names = FALSE)
# calc. codontable for dna set (ref gene set)
dna.set <- DNAStringSet(dna.set)
cT.set <- codonTable(dna.set)
# calc CAI for virus dna
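  # CAI (Sharp and Li, 1987) is the geometric mean of each codon's relative
  # adaptiveness w, computed against the reference-set usage in cT.set; the
  # dummy "NNN" entry added below appears to be there only so that
  # codonTable()/CAI() receive a set of sequences, and just the first
  # result is kept via [[1]].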
  seq_indices <- seq_along(df.virus$seq_name)
  df.cai.all <- data.frame()
  for (i_seq in seq_indices) {
sequence <- as.character(df.virus$sequence[[i_seq]])
sequence <- str_sub(sequence, start = 1, end = (nchar(sequence) - nchar(sequence) %% 3))
seq_name <- df.virus$seq_name[[i_seq]]
dna <- DNAStringSet(c(sequence, "NNN"))
cT <- codonTable(dna)
cai <- CAI(cT,
subsets = list(cT.set), ribosomal = FALSE,
id_or_name2 = genetic.code, alt.init = TRUE,
stop.rm = TRUE, filtering = "none",
len.threshold = threshold
)[[1]]
df.cai <- NULL
df.cai <- data.frame(gene.name = seq_name, CAI = cai)
df.cai.all <- rbind(df.cai.all, df.cai)
}
return(df.cai.all)
}
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/CAI.values.R
|
#' ENc-GC3 scatterplot.
#'
#' Make an ENc-GC3 scatterplot. Where the y-axis represents the ENc values and the x-axis represents the GC3 content.
#' The red fitting line shows the expected ENc values when codon usage bias affected solely by GC3.
#'
#' For more information about ENc-GC3 plot \href{https://www.tandfonline.com/doi/full/10.1038/emi.2016.106}{Butt et al., 2016}.
#'
#' @usage ENc.GC3plot(enc.df, gc.df)
#'
#' @param enc.df a data frame with ENc values.
#' @param gc.df a data frame with GC3 values.
#'
#' @return A ggplot object.
#'
#' @import ggplot2
#'
#' @examples
#'
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' enc.df.virus <- ENc.values(fasta.v)
#' }
#'
#' gc.df <- GC.content(fasta.v)
#'
#' ENc.GC3plot(enc.df.virus, gc.df)
#'
#' @export
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
#'
ENc.GC3plot <- function(enc.df, gc.df) {
x <- NULL
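  # Expected ENc under mutational (GC) pressure alone, as a function of GC3
  # (Wright, 1990): ENc = 2 + s + 29 / (s^2 + (1 - s)^2), where s is GC3.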
eq <- function(x) {
2 + x + (29 / (x^2 + (1 - x)^2))
}
plot <- ggplot() + geom_point(data = enc.df, aes(x = gc.df$GC3, y = enc.df$ENc)) +
stat_function(fun = eq, geom = "line", color = "red", size = 1, data = data.frame(x = c(seq(0, 1, 0.001))), aes(x)) +
theme_classic() + xlab("GC3") + ylab("ENc")
return(plot)
}
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/ENc.GC3plot.R
|
#' Effective Number of Codons (ENc).
#'
#' Measure the Effective Number of Codons (ENc) of a DNA sequence, using the modified version of Novembre (2002).
#'
#' For more information about ENc \href{https://academic.oup.com/mbe/article/19/8/1390/997706}{Novembre, 2002}.
#'
#' @usage ENc.values(df.fasta, genetic.code = "1", threshold = 0)
#'
#' @param df.fasta a data frame with seq_name and its DNA sequence.
#' @param genetic.code a single string that uniquely identifies a genetic code to use.
#' @param threshold optional numeric, specifying sequence length, in codons, used for filtering.
#'
#' @return A data.frame containing the computed ENc values for each DNA sequence within df.fasta.
#'
#' @import coRdon
#' @importFrom Biostrings DNAStringSet
#'
#' @examples
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' }
#' \dontshow{fasta.v <- fasta.v[1:30,]}
#' # Calculate ENc
#' enc.df.v <- ENc.values(fasta.v)
#' \donttest{
#' enc.df.h <- ENc.values(fasta.h)
#' }
#'
#' @export
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
#'
ENc.values <- function(df.fasta, genetic.code = "1", threshold = 0) {
  seq_indices <- seq_along(df.fasta$seq_name)
  df.enc.all <- data.frame()
  for (i_seq in seq_indices) {
sequence <- as.character(df.fasta$sequence[[i_seq]])
sequence <- str_sub(sequence, start = 1, end = (nchar(sequence) - nchar(sequence) %% 3))
seq_name <- df.fasta$seq_name[[i_seq]]
dna <- DNAStringSet(c(sequence, "NNN"))
cT <- codonTable(dna)
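    # ENc ranges from 20 (a single codon used per amino acid) to 61 (uniform
    # synonymous usage); ENC() here is coRdon's implementation of the
    # Novembre (2002) estimator.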
ENc <- ENC(cT,
id_or_name2 = genetic.code,
alt.init = TRUE, stop.rm = TRUE,
filtering = "none", len.threshold = threshold
)[[1]]
df.enc <- NULL
df.enc <- data.frame(gene.name = seq_name, ENc = ENc)
df.enc.all <- rbind(df.enc.all, df.enc)
}
return(df.enc.all)
}
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/ENc.values.R
|
#' GC content
#'
#' Calculates overall GC content as well as GC at first, second, and third codon positions.
#'
#' @usage GC.content(df.virus)
#'
#' @param df.virus data frame with seq_name and its DNA sequence.
#'
#' @return A data.frame with overall GC content as well as GC at the first, second, and third codon positions for each DNA sequence in df.virus.
#'
#' @import seqinr
#'
#' @examples
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#'
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' }
#'
#' # Calculate GC content
#' gc.df <- GC.content(fasta.v)
#'
#' @export
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
GC.content <- function(df.virus) {
df.all.GC <- data.frame()
  seq_indices <- seq_along(df.virus$seq_name)
  for (i_seq in seq_indices) {
sequence <- as.character(df.virus$sequence[[i_seq]])
seq_name <- df.virus$seq_name[[i_seq]]
gc <- GC(s2c(sequence))
gc1 <- GCpos(s2c(sequence), "1")
gc2 <- GCpos(s2c(sequence), "2")
gc3 <- GCpos(s2c(sequence), "3")
df.gc <- data.frame(gene.name = seq_name, GC = gc, GC1 = gc1, GC2 = gc2, GC3 = gc3)
df.all.GC <- rbind(df.all.GC,df.gc)
}
return(df.all.GC)
}
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/GC.content.R
|
#' Parity rule 2 (PR2) plot
#'
#' Make a Parity Rule 2 (PR2) plot, where the AT-bias [A3/(A3 + T3)] at the third codon position of the four-codon amino acids of entire genes is the ordinate and the GC-bias [G3/(G3 + C3)] is the abscissa. At the center of the plot, where both coordinates are 0.5, A = T and G = C (PR2), indicating no bias between the mutation and selection rates.
#'
#' For more information about PR2 plot \href{https://www.tandfonline.com/doi/full/10.1038/emi.2016.106}{Butt et al., 2016}.
#'
#' @usage PR2.plot(fasta.df)
#'
#' @param fasta.df a data frame with seq_name and its DNA sequence.
#'
#' @return A ggplot object.
#'
#' @import ggplot2
#'
#' @examples
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' }
#'
#' PR2.plot(fasta.v)
#'
#' @export
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
#'
PR2.plot <- function(fasta.df) {
freq.nt.all <- data.frame()
length <- 1:length(fasta.df$seq_name)
for (i_seq in length) {
sequence <- tolower(as.character(fasta.df$sequence[[i_seq]]))
seq_name <- as.character(fasta.df$seq_name[[i_seq]])
freq.nt <- count(s2c(sequence), wordsize = 1, by = 3, start = 2)
freq.nt <- as.data.frame(freq.nt)
col.name <- freq.nt$Var1
freq.nt <- as.data.frame(t(as.data.frame(freq.nt)))
colnames(freq.nt) <- col.name
freq.nt <- freq.nt[-c(1), ]
rownames(freq.nt) <- seq_name
freq.nt.all <- rbind(freq.nt.all, freq.nt)
}
freq.nt.all$a <- as.numeric(freq.nt.all$a)
freq.nt.all$t <- as.numeric(freq.nt.all$t)
freq.nt.all$g <- as.numeric(freq.nt.all$g)
freq.nt.all$c <- as.numeric(freq.nt.all$c)
A3T3 <- NULL
G3C3 <- NULL
freq.nt.all$A3T3 <- freq.nt.all$a / (freq.nt.all$a + freq.nt.all$t)
freq.nt.all$G3C3 <- freq.nt.all$g / (freq.nt.all$g + freq.nt.all$c)
# map the GC-bias to x and the AT-bias to y, matching the axis labels and the PR2 definition above
plot <- ggplot(freq.nt.all, aes(x = G3C3, y = A3T3)) + geom_point(size = 4) +
ylab("A3/(A3 + T3)") + xlab("G3/(G3 + C3)") + ylim(0, 1) + xlim(0, 1) + theme_classic(base_size = 20) +
geom_hline(yintercept = 0.5, color = "red", size = 1.2) + geom_vline(xintercept = 0.5, color = "red", size = 1.2)
return(plot)
}
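# Interpretation note: points at the centre (0.5, 0.5) satisfy parity rule 2
# (A = T and G = C at the third codon positions of four-codon amino acids);
# deviations from the centre indicate mutational and/or selectional bias
# acting on silent sites.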
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/PR2.plot.R
|
#' Relative Codon Deoptimization Index (RCDI)
#'
#' Measure the Relative Codon Deoptimization Index (RCDI) of DNA sequence.
#'
#' For more information about RCDI \href{https://bmcresnotes.biomedcentral.com/articles/10.1186/1756-0500-3-87}{Puigbò et al., 2010}
#'
#' @usage RCDI.values(fasta.virus, fasta.host, enc.host, set.len= 5)
#'
#' @param fasta.virus a data frame with virus seq_name and its DNA sequence.
#' @param fasta.host a data frame with host seq_name and its DNA sequence.
#' @param enc.host a data frame of a hosts' ENc values.
#' @param set.len a number giving the percentage of the total host genes that will be used as the reference gene set.
#'
#' @return A data.frame containing the computed RCDI values for each DNA sequence within fasta.virus.
#'
#' @importFrom Biostrings DNAStringSet
#'
#' @examples
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' }
#' # Calculate RCDI
#' \donttest{
#' enc.df.host <- ENc.values(fasta.h)
#' }
#' \dontshow{fasta.v <- fasta.v[1:30,]}
#' rcdi.df <- RCDI.values(fasta.v, fasta.h, enc.df.host)
#'
#' @export
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
#'
#'
RCDI.values <- function(fasta.virus, fasta.host, enc.host, set.len = 5) {
newENc <- enc.host[order(enc.host$ENc), ]
set.len <- length(newENc$gene.name) * (set.len / 100)
gene.set <- newENc$gene.name[1:set.len]
gene.set <- fasta.host[fasta.host$seq_name %in% gene.set, ]
rscu.virus <- RSCU.values(fasta.virus)
rscu.ref <- RSCU.values(gene.set)
df.rscu.ref <- data.frame()
length <- 1:length(rscu.ref)
for (i_mean in length) {
df.rscu <- NULL
means <- mean(rscu.ref[[i_mean]], na.rm = TRUE)
codon <- colnames(rscu.ref)
codon <- codon[i_mean]
df.rscu <- data.frame(codon = codon, rscu.ref = means)
df.rscu.ref <- rbind(df.rscu.ref, df.rscu)
}
codon.name <- df.rscu.ref$codon
df.rscu.ref <- as.data.frame(t(df.rscu.ref))
colnames(df.rscu.ref) <- codon.name
df.rscu.ref <- df.rscu.ref[-c(1), ]
RCDI.df <- data.frame()
length <- 1:length(fasta.virus$seq_name)
for (i_seq in length) {
sequence <- as.character(fasta.virus$sequence[[i_seq]])
firstframe <- function(sequence) {
sequence <- str_sub(sequence, start = 1, end = (nchar(sequence) - nchar(sequence) %% 3))
return(sequence)
}
sequence <- firstframe(sequence)
seq_name <- as.character(fasta.virus$seq_name[[i_seq]])
rscu <- uco(s2c(sequence),
index = "rscu",
as.data.frame = FALSE, NA.rscu = 0
)
rscu <- as.data.frame(t(rscu))
rownames(rscu) <- "rscu"
dna <- DNAStringSet(c(sequence, "NNN"))
count <- codonTable(dna)
count <- as.data.frame(count@counts[1, ])
count <- as.data.frame(t(count))
colnames(count) <- tolower(colnames(count))
rownames(count) <- "count"
CiFa <- rbind(rscu, count)
CiFa.CiFh <- rbind(CiFa, df.rscu.ref)
CiFa.CiFh <- as.data.frame(t(CiFa.CiFh))
N <- as.numeric(floor(nchar(sequence) / 3))
CiFa.CiFh$rscu <- as.numeric(CiFa.CiFh$rscu)
CiFa.CiFh$rscu.ref <- as.numeric(CiFa.CiFh$rscu.ref)
CiFa.CiFh$count <- as.numeric(CiFa.CiFh$count)
CiFa.CiFh$RCDI <- ((CiFa.CiFh$rscu / CiFa.CiFh$rscu.ref) * CiFa.CiFh$count) / N
RCDI <- sum(CiFa.CiFh$RCDI)
df <- NULL
df <- data.frame(gene.name = seq_name, RCDI = RCDI)
RCDI.df <- rbind(RCDI.df, df)
}
return(RCDI.df)
}
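# Interpretation note: an RCDI close to 1 indicates codon usage similar to
# the host reference set, while larger values indicate increasing codon
# deoptimization relative to the host (Puigbo et al., 2010).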
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/RCDI.values.R
|
#' Relative Synonymous Codon Usage (RSCU)
#'
#' Measure the Relative Synonymous Codon Usage (RSCU) of DNA sequence.
#'
#' For more information about RSCU \href{https://academic.oup.com/nar/article-abstract/14/13/5125/1143812?redirectedFrom=fulltext}{Sharp et al., 1986}.
#'
#' @usage RSCU.values(df.fasta)
#'
#' @param df.fasta a data frame with seq_name and its DNA sequence.
#'
#' @return A data.frame containing the computed RSCU values for each codon of each DNA sequence within df.fasta.
#'
#' @import seqinr
#'
#' @examples
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' }
#' # Calculate RSCU
#' \donttest{RSCU.H <- RSCU.values(fasta.h)}
#' RSCU.V <- RSCU.values(fasta.v)
#'
#' @export
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
#'
RSCU.values <- function(df.fasta) {
codons <- uco(s2c("aaa"), index = "rscu", NA.rscu = 0)
codons <- as.data.frame(codons)
codons.names <- as.vector(row.names(codons))
rscu.df <- data.frame(row.names = codons.names)
length <- 1:length(df.fasta$seq_name)
for (i_seq in length) {
sequence <- as.character(df.fasta$sequence[[i_seq]])
seq_name <- as.character(df.fasta$seq_name[[i_seq]])
rscu <- uco(s2c(sequence),
index = "rscu",
as.data.frame = FALSE, NA.rscu = 0
)
rscu <- as.data.frame(rscu)
colnames(rscu) <- seq_name
rscu.df <- cbind(rscu.df, rscu)
}
rscu.df <- as.data.frame(t(rscu.df))
return(rscu.df)
}
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/RSCU.values.R
|
#' Synonymous codon usage orderliness (SCUO)
#'
#' Measure the Synonymous Codon Usage Orderliness (SCUO) of DNA sequence (Wan et al., 2004).
#'
#' For more information about SCUO \href{https://bmcevolbiol.biomedcentral.com/articles/10.1186/1471-2148-4-19}{Wan et al., 2004}.
#'
#' @usage SCUO.values(df.fasta,genetic.code = "1",threshold=0)
#'
#' @param df.fasta a data frame with seq_name and its DNA sequence.
#' @param genetic.code a single string that uniquely identifies a genetic code to use.
#' @param threshold optional numeric, specifying sequence length, in codons, used for filtering.
#'
#' @return A data.frame containing the computed SCUO values for each DNA sequence within df.fasta.
#'
#' @import coRdon
#' @import seqinr
#' @importFrom Biostrings DNAStringSet
#'
#' @examples
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#'
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' }
#' # Calculate SCUO
#' \dontshow{fasta.v <- fasta.v[1:10,]}
#' SCUO.df <- SCUO.values(fasta.v)
#'
#' @export
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
SCUO.values <- function(df.fasta, genetic.code = "1", threshold = 0) {
length <- 1:length(df.fasta$seq_name)
df.SCUO.all <- data.frame()
for (i_seq in length) {
sequence <- as.character(df.fasta$sequence[[i_seq]])
sequence <- str_sub(sequence, start = 1, end = (nchar(sequence) - nchar(sequence) %% 3))
seq_name <- df.fasta$seq_name[[i_seq]]
dna <- DNAStringSet(c(sequence, "NNN"))
cT <- codonTable(dna)
SCUO <- SCUO(cT,
id_or_name2 = genetic.code,
alt.init = TRUE, stop.rm = TRUE, filtering = "none",
len.threshold = threshold
)[[1]]
df.SCUO <- NULL
df.SCUO <- data.frame(gene.name = seq_name, SCUO = SCUO)
df.SCUO.all <- rbind(df.SCUO.all, df.SCUO)
}
return(df.SCUO.all)
}
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/SCUO.values.R
|
#' Similarity Index (SiD)
#'
#' Measure the Similarity Index (SiD) between a virus and its host codon usage.
#'
#' For more information about SiD \href{https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0077239}{Zhou et al., 2013}.
#'
#'
#' @usage SiD.value(rscu.host,rscu.virus)
#'
#' @param rscu.host a data frame with the RSCU values of the host codons.
#' @param rscu.virus a data frame with the RSCU values of the virus codons.
#'
#' @return A numeric representing the SiD value.
#'
#' @examples
#'
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' RSCU.H <- RSCU.values(fasta.h)
#' RSCU.V <- RSCU.values(fasta.v)
#' }
#' # Calculate SiD
#' SiD <- SiD.value(RSCU.H, RSCU.V)
#'
#' @export
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
#'
SiD.value <- function(rscu.host, rscu.virus) {
df.rscu.host <- data.frame()
length <- 1:length(rscu.host)
for (i_mean in length) {
df.rscu <- NULL
means <- mean(rscu.host[[i_mean]], na.rm = TRUE)
codon <- colnames(rscu.host)
codon <- codon[i_mean]
df.rscu <- data.frame(codon = codon, rscu.host = means)
df.rscu.host <- rbind(df.rscu.host, df.rscu)
}
df.rscu.virus <- data.frame()
length <- 1:length(rscu.virus)
for (i_mean in length) {
df.rscu <- NULL
means <- mean(rscu.virus[[i_mean]], na.rm = TRUE)
codon <- colnames(rscu.virus)
codon <- codon[i_mean]
df.rscu <- data.frame(codon = codon, rscu.virus = means)
df.rscu.virus <- rbind(df.rscu.virus, df.rscu)
}
rscu.df.all <- merge(df.rscu.host, df.rscu.virus, by = "codon")
rscu.df.all$rscu.all <- rscu.df.all$rscu.host * rscu.df.all$rscu.virus
up <- sum(rscu.df.all$rscu.all)
# cosine-similarity denominator, sqrt(sum(a_i^2) * sum(b_i^2)), as in Zhou et al., 2013
down <- sqrt(sum(rscu.df.all$rscu.host^2) * sum(rscu.df.all$rscu.virus^2))
R.a.b <- up / down
D.a.b <- (1 - R.a.b) / 2
return(D.a.b)
}
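# Minimal sketch with hypothetical toy data (one-row RSCU tables), showing
# that identical codon usage gives R(A,B) = 1 and hence SiD = 0:
# rscu.toy <- data.frame(aaa = 1.2, aag = 0.8, gga = 1.0)
# SiD.value(rscu.toy, rscu.toy) # 0: identical usage, maximal similarity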
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/SiD.value.R
|
#' Statistical dinucleotide over- and underrepresentation (base model).
#'
#' A measure of statistical dinucleotide over- and underrepresentation; random sequences are generated by shuffling (with or without replacement) all bases in the sequence.
#'
#' For more information \href{https://www.rdocumentation.org/packages/seqinr/versions/3.6-1/topics/dinucleotides}{seqinr}.
#'
#' @usage dinuc.base(df.virus,permutations=500,exact_numbers = FALSE)
#'
#' @param df.virus data frame with seq_name and its DNA sequence.
#' @param permutations the number of permutations for the z-score computation.
#' @param exact_numbers if TRUE exact analytical calculation will be used.
#'
#' @return A data.frame containing the computed z-score for each dinucleotide in all DNA sequences within df.virus.
#'
#' @import seqinr
#'
#' @examples
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' }
#' \dontshow{fasta.v <- fasta.v[1:30,]}
#' # Calculate zscore using (base model)
#' base <- dinuc.base(fasta.v, permutations = 10)
#'
#' @export
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
dinuc.base <- function(df.virus, permutations = 500, exact_numbers = FALSE) {
dinuc.baseall <- data.frame()
length <- 1:length(df.virus$seq_name)
for (i.seq in length) {
sequence <- s2c(tolower(as.character(df.virus$sequence[[i.seq]])))
sequence.name <- as.character(df.virus$seq_name[[i.seq]])
base <- zscore(sequence, simulations = permutations, modele = "base", exact = exact_numbers)
base <- as.data.frame(base)
aa <- base$Freq[base$Var1 == "aa"]
ac <- base$Freq[base$Var1 == "ac"]
ag <- base$Freq[base$Var1 == "ag"]
at <- base$Freq[base$Var1 == "at"]
tt <- base$Freq[base$Var1 == "tt"]
ta <- base$Freq[base$Var1 == "ta"]
tc <- base$Freq[base$Var1 == "tc"]
tg <- base$Freq[base$Var1 == "tg"]
gg <- base$Freq[base$Var1 == "gg"]
ga <- base$Freq[base$Var1 == "ga"]
gt <- base$Freq[base$Var1 == "gt"]
gc <- base$Freq[base$Var1 == "gc"]
cc <- base$Freq[base$Var1 == "cc"]
ca <- base$Freq[base$Var1 == "ca"]
ct <- base$Freq[base$Var1 == "ct"]
cg <- base$Freq[base$Var1 == "cg"]
dinuc.base <- NULL
dinuc.base <- data.frame(
gene.name = sequence.name,
aa = aa,
ac = ac,
ag = ag,
at = at,
tt = tt,
ta = ta,
tc = tc,
tg = tg,
gg = gg,
ga = ga,
gt = gt,
gc = gc,
cc = cc,
ca = ca,
ct = ct,
cg = cg
)
dinuc.baseall <- rbind(dinuc.baseall, dinuc.base)
}
return(dinuc.baseall)
}
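# Hedged usage sketch (object names are hypothetical): the returned z-scores
# can be screened against a two-sided 5% normal cutoff to flag biased
# dinucleotides.
# base <- dinuc.base(fasta.v, permutations = 100)
# over <- base[, -1] > 1.96 # TRUE where a dinucleotide is overrepresented
# under <- base[, -1] < -1.96 # TRUE where it is underrepresented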
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/dinuc.base.R
|
#' Statistical dinucleotide over- and underrepresentation (codon model).
#'
#' A measure of statistical dinucleotide over- and underrepresentation; random sequences are generated by shuffling (with or without replacement) whole codons.
#'
#' For more information \href{https://www.rdocumentation.org/packages/seqinr/versions/3.6-1/topics/dinucleotides}{seqinr}.
#'
#' @usage dinuc.codon(df.virus,permutations=500,exact_numbers = FALSE)
#'
#' @param df.virus data frame with seq_name and its DNA sequence.
#' @param permutations the number of permutations for the z-score computation.
#' @param exact_numbers if TRUE exact analytical calculation will be used.
#'
#' @return A data.frame containing the computed z-score for each dinucleotide in all DNA sequences within df.virus.
#'
#' @import seqinr
#'
#' @examples
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' }
#' \dontshow{fasta.v <- fasta.v[1:30,]}
#' # Calculate zscore using (codon model)
#' codon <- dinuc.codon(fasta.v, permutations = 10)
#'
#' @export
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
dinuc.codon <- function(df.virus, permutations = 500, exact_numbers = FALSE) {
dinuc.codonall <- data.frame()
length <- 1:length(df.virus$seq_name)
for (i.seq in length) {
sequence <- s2c(tolower(as.character(df.virus$sequence[[i.seq]])))
sequence.name <- as.character(df.virus$seq_name[[i.seq]])
codon <- zscore(sequence, simulations = permutations, modele = "codon", exact = exact_numbers)
codon <- as.data.frame(codon)
aa <- codon$Freq[codon$Var1 == "aa"]
ac <- codon$Freq[codon$Var1 == "ac"]
ag <- codon$Freq[codon$Var1 == "ag"]
at <- codon$Freq[codon$Var1 == "at"]
tt <- codon$Freq[codon$Var1 == "tt"]
ta <- codon$Freq[codon$Var1 == "ta"]
tc <- codon$Freq[codon$Var1 == "tc"]
tg <- codon$Freq[codon$Var1 == "tg"]
gg <- codon$Freq[codon$Var1 == "gg"]
ga <- codon$Freq[codon$Var1 == "ga"]
gt <- codon$Freq[codon$Var1 == "gt"]
gc <- codon$Freq[codon$Var1 == "gc"]
cc <- codon$Freq[codon$Var1 == "cc"]
ca <- codon$Freq[codon$Var1 == "ca"]
ct <- codon$Freq[codon$Var1 == "ct"]
cg <- codon$Freq[codon$Var1 == "cg"]
dinuc.codon <- NULL
dinuc.codon <- data.frame(
gene.name = sequence.name,
aa = aa,
ac = ac,
ag = ag,
at = at,
tt = tt,
ta = ta,
tc = tc,
tg = tg,
gg = gg,
ga = ga,
gt = gt,
gc = gc,
cc = cc,
ca = ca,
ct = ct,
cg = cg
)
dinuc.codonall <- rbind(dinuc.codonall, dinuc.codon)
}
return(dinuc.codonall)
}
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/dinuc.codon.R
|
#' Statistical dinucleotide over- and underrepresentation (syncodon model).
#'
#' A measure of statistical dinucleotide over- and underrepresentation; random sequences are generated by shuffling (with or without replacement) synonymous codons.
#'
#' For more information \href{https://www.rdocumentation.org/packages/seqinr/versions/3.6-1/topics/dinucleotides}{seqinr}.
#'
#' @usage dinuc.syncodon(df.virus,permutations=500,exact_numbers = FALSE)
#'
#' @param df.virus data frame with seq_name and its DNA sequence.
#' @param permutations the number of permutations for the z-score computation.
#' @param exact_numbers if TRUE exact analytical calculation will be used.
#'
#' @return A data.frame containing the computed z-score for each dinucleotide in all DNA sequences within df.virus.
#'
#' @import seqinr
#'
#' @examples
#' \dontshow{
#' file_path <- system.file("extdata", "sysdata.RData" ,package = "vhcub")
#' load(file = file_path)
#' }
#' \donttest{
#' # read DNA from fasta file
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' }
#' \dontshow{fasta.v <- fasta.v[1:30,]}
#' # Calculate zscore using (syncodon model)
#' syncodon <- dinuc.syncodon(fasta.v, permutations = 10)
#'
#' @export
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
dinuc.syncodon <- function(df.virus, permutations = 500, exact_numbers = FALSE) {
dinuc.syncodonall <- data.frame()
length <- 1:length(df.virus$seq_name)
for (i.seq in length) {
sequence <- s2c(tolower(as.character(df.virus$sequence[[i.seq]])))
sequence.name <- as.character(df.virus$seq_name[[i.seq]])
syncodon <- zscore(sequence,
simulations = permutations,
modele = "syncodon", exact = exact_numbers
)
syncodon <- as.data.frame(syncodon)
aa <- syncodon$Freq[syncodon$Var1 == "aa"]
ac <- syncodon$Freq[syncodon$Var1 == "ac"]
ag <- syncodon$Freq[syncodon$Var1 == "ag"]
at <- syncodon$Freq[syncodon$Var1 == "at"]
tt <- syncodon$Freq[syncodon$Var1 == "tt"]
ta <- syncodon$Freq[syncodon$Var1 == "ta"]
tc <- syncodon$Freq[syncodon$Var1 == "tc"]
tg <- syncodon$Freq[syncodon$Var1 == "tg"]
gg <- syncodon$Freq[syncodon$Var1 == "gg"]
ga <- syncodon$Freq[syncodon$Var1 == "ga"]
gt <- syncodon$Freq[syncodon$Var1 == "gt"]
gc <- syncodon$Freq[syncodon$Var1 == "gc"]
cc <- syncodon$Freq[syncodon$Var1 == "cc"]
ca <- syncodon$Freq[syncodon$Var1 == "ca"]
ct <- syncodon$Freq[syncodon$Var1 == "ct"]
cg <- syncodon$Freq[syncodon$Var1 == "cg"]
dinuc.syncodon <- NULL
dinuc.syncodon <- data.frame(
gene.name = sequence.name,
aa = aa,
ac = ac,
ag = ag,
at = at,
tt = tt,
ta = ta,
tc = tc,
tg = tg,
gg = gg,
ga = ga,
gt = gt,
gc = gc,
cc = cc,
ca = ca,
ct = ct,
cg = cg
)
dinuc.syncodonall <- rbind(dinuc.syncodonall, dinuc.syncodon)
}
return(dinuc.syncodonall)
}
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/dinuc.syncodon.R
|
#' Read fasta format files and convert them to data frames
#'
#' @usage fasta.read(virus.fasta,host.fasta)
#'
#' @param virus.fasta directory path to the virus fasta file.
#' @param host.fasta directory path to the host fasta file.
#'
#' @return A list with two data frames.
#'
#' @note The returned list holds two data.frames: the first one contains the virus DNA sequences and the second one the host sequences.
#'
#' @importFrom Biostrings readDNAStringSet
#'
#' @examples
#' \donttest{
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' }
#' @export
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
fasta.read <- function(virus.fasta, host.fasta) {
virus.fasta <- readDNAStringSet(virus.fasta)
seq_name <- names(virus.fasta)
sequence <- paste(virus.fasta)
df.virus <- data.frame(seq_name, sequence)
host.fasta <- readDNAStringSet(host.fasta)
seq_name <- names(host.fasta)
sequence <- paste(host.fasta)
df.host <- data.frame(seq_name, sequence)
return(list(df.virus, df.host))
}
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/read.fasta.R
|
#' vhcub: A package to analyze the co-adaptation of codon usage between a virus and its host.
#'
#'
#' vhcub can calculate various codon usage bias measurements, such as: the effective number of codons (ENc),
#' the codon adaptation index (CAI), the relative codon deoptimization index (RCDI), the similarity index (SiD),
#' the synonymous codon usage orderliness (SCUO) and the relative synonymous codon usage (RSCU).
#' It also provides statistical dinucleotide over- and underrepresentation with three different models,
#' and implements several methods for the visualization of codon usage, such as ENc.GC3plot and PR2.plot.
#'
#'
#' @section vhcub functions:
#'
#' fasta.read: read fasta format files and convert it to data.frame.
#'
#' GC.content: calculates overall GC content as well as GC at first, second, and third codon positions.
#'
#' RSCU.values: measure the Relative Synonymous Codon Usage (RSCU) of DNA sequence.
#'
#' SCUO.values: measure the Synonymous Codon Usage Orderliness (SCUO) of DNA sequence.
#'
#' RCDI.values: measure the Relative Codon Deoptimization Index (RCDI) of DNA sequence.
#'
#' CAI.values: measure the Codon Adaptation Index (CAI; Sharp and Li, 1987) of DNA sequence.
#'
#' ENc.values: measure the Effective Number of Codons (ENc) of DNA sequence, using its modified version.
#'
#' dinuc.syncodon: a measure of statistical dinucleotide over- and underrepresentation; random sequences are generated by shuffling (with or without replacement) synonymous codons.
#'
#' dinuc.codon: a measure of statistical dinucleotide over- and underrepresentation; random sequences are generated by shuffling (with or without replacement) whole codons.
#'
#' dinuc.base: a measure of statistical dinucleotide over- and underrepresentation; random sequences are generated by shuffling (with or without replacement) all bases in the sequence.
#'
#' ENc.GC3plot: make an ENc-GC3 scatterplot. Where the y-axis represents the ENc values and the x-axis represents the GC3 content. The red fitting line shows the expected ENc values when codon usage bias affected solely by GC3.
#'
#' PR2.plot: make a Parity rule 2 (PR2) plot, where the AT-bias [A3/(A3 + T3)] at the third codon position of the four-codon amino acids of entire genes is the ordinate and the GC-bias [G3/(G3 + C3)] is the abscissa. The center of the plot, where both coordinates are 0.5, is where A = U and G = C (PR2), with no bias between the influence of the mutation and selection rates.
#'
#' @examples
#' \donttest{
#' # read DNA from fasta files
#' fasta <- fasta.read("virus.fasta", "host.fasta")
#' fasta.v <- fasta[[1]]
#' fasta.h <- fasta[[2]]
#' # calculate GC content
#' gc.df <- GC.content(fasta.v)
#' # measure of statistical dinucleotide over- and underrepresentation
#' syncodon <- dinuc.syncodon(fasta.v,permutations=10)
#' base <- dinuc.base(fasta.v,permutations=10)
#' codon <- dinuc.codon(fasta.v,permutations=10)
#' # calculate ENc
#' enc.df <- ENc.values(fasta.v)
#' enc.df.h <- ENc.values(fasta.h)
#' # calculate SCUO and CAI
#' SCUO.df <- SCUO.values(fasta.v)
#' cai.df <- CAI.values(fasta.v,enc.df.h, fasta.h)
#' # calculate RSCU
#' RSCU.H <- RSCU.values(fasta.h)
#' RSCU.V <- RSCU.values(fasta.v)
#' # calculate SiD
#' SiD <- SiD.value(RSCU.H,RSCU.V)
#' # calculate RCDI
#' rcdi.df <- RCDI.values(fasta.v,fasta.h, enc.df.h)
#' # plot ENc.GC3plot
#' ENc.GC3plot(enc.df,gc.df)
#' # plot PR2.plot
#' PR2.plot(fasta.v)
#' }
#'
#' @author Ali Mostafa Anwar \email{[email protected]} and Mohmed Soudy \email{[email protected]}
#'
#' @docType package
#' @name vhcub
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/vhcub/R/vhcub.R
|
CUB <-
function(file=NULL, sequence=NULL, method="ENC")
{
stopifnot(
!(is.null(file) && is.null(sequence)),
method[1] %in% c("ENC"))
if (!is.null(file)) {
if (!requireNamespace("seqinr", quietly=TRUE)) {
stop("Reading FASTA files require package seqinr")
}
sequence <- seqinr::read.fasta(file)
}
sequence <- .checkseq(sequence, gene.name=if(is.null(file)) "" else file)
if (method[1]=="ENC") {
return(sapply(sequence, .ENC))
}
}
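# Minimal usage sketch (file name hypothetical): compute the Effective Number
# of Codons for every sequence in a FASTA file.
# enc <- CUB(file = "gene1.fas", method = "ENC")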
|
/scratch/gouwar.j/cran-all/cranData/vhica/R/CUB.R
|
div <- function
(file=NULL, sequence=NULL, sqs=NULL, method="LWL85", pairwise=TRUE, max.lim=3)
{
stopifnot(
!(is.null(file) && is.null(sequence)),
method[1] %in% c("LWL85"),
requireNamespace("gtools", quietly=TRUE))
if (!is.null(file)) {
if (!requireNamespace("seqinr", quietly=TRUE)) {
stop("Reading FASTA files require package seqinr")
}
sequence <- seqinr::read.fasta(file)
}
sequence <- .checkseq(sequence, gene.name=if (is.null(file)) "" else file)
if (is.null(sqs)) {
sqs <- names(sequence)
}
if (length(sqs) < 2)
stop("Less than 2 sequences in ", file)
combn <- gtools::combinations(n=length(sqs), r=2, v=sqs)
if (method[1]=="LWL85") {
return(data.frame(div=.LWL85(sequence, combn[,1], combn[,2], pairwise=pairwise, max.lim=max.lim), sp1=combn[,1], sp2=combn[,2]))
}
stop("Method ", method, " unknown.") # This should never happen
}
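# Minimal usage sketch (file name hypothetical): pairwise synonymous
# divergence (LWL85 dS) between all sequence pairs in a FASTA file; values
# above max.lim are reported as NA.
# d <- div(file = "gene1.fas", method = "LWL85", pairwise = TRUE, max.lim = 3)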
|
/scratch/gouwar.j/cran-all/cranData/vhica/R/div.R
|
image.vhica <-
function (x, element = "", H1.test = "bilat", treefile = NULL,
skip.void = FALSE, species = NULL, p.threshold = 0.05, p.adjust.method = "bonferroni",
ncolors = 1024, main = element, threshcol = 0.1, colsqueeze=1, species.font.family="mono", species.font.cex=1,
max.spname.length=10, ...)
{
op <- par(no.readonly = TRUE)
tree <- .prepare.phylo(treefile)
if (!"phylo" %in% class(tree)) {
species <- .check.species(x, user.species = species)
tree <- list(tip.label = species)
}
else {
species <- .check.species(x, user.species = species,
tree.species = tree$tip.label)
}
elements <- .element.present(x, element, species = tree$tip.label,
skip.void = skip.void)
if(is.null(elements)) {
stop("Element ", element, " cannot be found. Nothing to plot.")
}
if (length(tree) > 1 && skip.void) {
missing.species <- tree$tip.label[!tree$tip.label %in%
sapply(strsplit(elements, ".", fixed = TRUE), function(el) el[1])]
tree <- ape::drop.tip(tree, missing.species)
species <- species[species %in% tree$tip.label]
}
stats <- .stat.matrix(vhica.obj = x, element = element, elements = elements,
p.adjust.method = p.adjust.method, H1.test = H1.test)
thresh <- NULL
col.range <- c(-0.5, 0.5)
if (H1.test != "greater") {
thresh <- c(thresh, -abs(log10(p.threshold)))
col.range[1] <- -5
}
if (H1.test != "lower") {
thresh <- c(thresh, abs(log10(p.threshold)))
col.range[2] <- 5
}
cols <- .make.col.obj(n = ncolors, min.col = "red", max.col = "blue",
threshold = thresh, range = col.range, threshcol = threshcol, colsqueeze=colsqueeze)
layout(matrix(1:4, nrow = 2), widths = c(0.3, 0.7), heights = c(0.3,
0.7))
.plot.caption(col.obj = cols, main = element, p.adjust.method = p.adjust.method,
thresh.lines = thresh)
if ("phylo" %in% class(tree)) {
.plot.phylo(tree, species, horizontal = TRUE)
}
else {
frame()
}
if ("phylo" %in% class(tree)) {
.plot.phylo(tree, species)
}
else {
frame()
}
.plot.matrix(pmatrix = stats, species = species, elements = elements,
col.obj = cols, species.font.family=species.font.family, species.font.cex=species.font.cex,
max.spname.length=max.spname.length, ...)
layout(1)
par(op)
# There is probably a more elegant way to do this
dS <- matrix(NA, ncol=ncol(stats), nrow=nrow(stats))
colnames(dS) <- colnames(stats)
rownames(dS) <- rownames(stats)
dS[as.matrix(x$div[x$div$seq==element, c("sp1","sp2")])] <- x$div$dS[x$div$seq==element]
dS[as.matrix(x$div[x$div$seq==element, c("sp2","sp1")])] <- x$div$dS[x$div$seq==element]
ans <- list(name = element, tree = tree, species = species, elements = elements,
stats = stats, thresh=thresh, dS=dS)
class(ans) <- c("vhicaimage", class(ans))
return(invisible(ans))
}
|
/scratch/gouwar.j/cran-all/cranData/vhica/R/image.vhica.R
|
plot.vhica <-
function (x, sp1 = NULL, sp2 = NULL, ...)
{
op <- par(no.readonly = TRUE)
species <- c(sp1, sp2)
if (is.null(species)) {
species <- unique(c(x$div[, ncol(x$div) - 0:1]))
}
if (length(species) < 2)
stop("VHICA analysis requires at least two species")
ly <- matrix(0, ncol = length(species) - 1, nrow = length(species) -
1)
ly[upper.tri(ly, diag = TRUE)] <- 1:(length(species) * (length(species) -
1)/2)
layout(ly)
for (sp1 in 1:(length(species) - 1)) {
for (sp2 in (sp1 + 1):length(species)) {
cross <- paste(species[sp1], species[sp2], sep = "X")
if (!cross %in% names(x$reg)) {
cross <- paste(species[sp2], species[sp1], sep = "X")
}
if (!cross %in% names(x$reg)) {
stop("The cross between species ", species[sp1],
" and ", species[sp2], " is not documented")
}
.plot.regression(x$reg[[cross]], ...)
}
}
par(op)
}
|
/scratch/gouwar.j/cran-all/cranData/vhica/R/plot.vhica.R
|
read.vhica <-
function (gene.fasta=NULL, target.fasta=NULL, cb.filename=NULL, div.filename=NULL,
reference = "Gene", divergence = "dS", CUB.method="ENC", div.method="LWL85", div.pairwise=TRUE, div.max.lim=3, species.sep="_", gene.sep=".", family.sep=".", ...)
{
stopifnot(
!(is.null(gene.fasta) && is.null(target.fasta)) ||
!(is.null(cb.filename) && is.null(div.filename)))
vhica.obj <- list()
if (!is.null(gene.fasta)) {
vhica.obj$cbias <-
.seq.codon.bias(gene.fasta=gene.fasta, target.fasta=target.fasta, method=CUB.method, species.sep=species.sep, family.sep=family.sep)
vhica.obj$div <-
.seq.divergence(sequence.fasta=c(gene.fasta, target.fasta), method=div.method, pairwise=div.pairwise, max.lim=div.max.lim, species.sep=species.sep, family.sep=family.sep)
if (!is.null(cb.filename))
write.table(vhica.obj$cbias, file=cb.filename, sep="\t", quote=FALSE, row.names=TRUE)
if (!is.null(div.filename))
write.table(vhica.obj$div, file=div.filename, sep="\t", quote=FALSE, row.names=FALSE)
} else {
vhica.obj$cbias <- .read.codon.bias(file = cb.filename, reference = reference)
vhica.obj$div <- .read.divergence(file = div.filename, divergence = divergence)
}
vhica.obj$reg <- .reference.regression(vhica.obj$cbias, vhica.obj$div,
reference = reference, divergence = divergence, family.sep=family.sep, ...)
vhica.obj$reference <- reference
tmp.target <- levels(vhica.obj$cbias[, "Type"])
vhica.obj$target <- tmp.target[tmp.target != reference][1]
vhica.obj$divergence <- divergence
vhica.obj$family.sep=family.sep
class(vhica.obj) <- c("vhica", class(vhica.obj))
return(vhica.obj)
}
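# Hedged usage sketch (file names hypothetical): build a 'vhica' object
# directly from FASTA alignments of reference genes and of TE copies.
# vc <- read.vhica(gene.fasta = c("gene1.fas", "gene2.fas"),
# target.fasta = "TE1.fas")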
|
/scratch/gouwar.j/cran-all/cranData/vhica/R/read.vhica.R
|
summary.vhicaimage <- function(object, divrate=NA, p.thresh=1, ...)
{
st <- object$stats
ds <- object$dS
st[lower.tri(st)] <- NA
ds[lower.tri(ds)] <- NA
tokeep <- c(!is.na(ds)) # should be the same as !is.na(st)
ans <- data.frame(
expand.grid(colnames(object$stats), rownames(object$stats))[tokeep,],
'p.value'= c(10^object$stats)[tokeep],
dS= c(object$dS)[tokeep])
colnames(ans)[1:2] <- c("sp1","sp2")
rownames(ans) <- NULL
if (!is.na(divrate))
ans[,"Time(Mya)"] <- ans$dS/(2*divrate)
return(ans[ans$'p.value' <= p.thresh,])
}
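# Hedged usage sketch (object names and the divergence rate are hypothetical):
# keep only significant pairs and convert dS into divergence time, using
# dS = 2 * divrate * time, i.e. Time(Mya) = dS / (2 * divrate).
# im <- image.vhica(vc, element = "mariner")
# summary(im, divrate = 0.016, p.thresh = 0.05)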
|
/scratch/gouwar.j/cran-all/cranData/vhica/R/summary.vhicaimage.R
|
.capwords <-
function (s, strict = FALSE)
{
cap <- function(s) paste(toupper(substring(s, 1, 1)), {
s <- substring(s, 2)
if (strict)
tolower(s)
else s
}, sep = "", collapse = " ")
sapply(strsplit(s, split = " "), cap, USE.NAMES = !is.null(names(s)))
}
.remove.space <-
function (s)
{
sapply(strsplit(s, split=" ", fixed=TRUE), paste0, collapse="")
}
.check.input.consistency <-
function (cbias, div, warn = FALSE, family.sep)
{
err.FUN <- stop
if (warn)
err.FUN <- warning
seq.cbias <- if (which(colnames(cbias) == "Type") == 1 || !is.null(rownames(cbias)))
rownames(cbias) else cbias[, 1]
seq.div <- unique(c(.get.TE.sub(as.character(div[, 1]), species = 1, family.sep=family.sep),
.get.TE.sub(as.character(div[, 1]), species = 2, family.sep=family.sep)))
extra <- seq.cbias[!(seq.cbias %in% seq.div)]
if (length(extra) > 0)
err.FUN("Sequences ", paste(extra, collapse = " "), " are in CB but not in Divergence.")
extra <- seq.div[!(seq.div %in% seq.cbias)]
if (length(extra) > 0)
err.FUN("Sequences ", paste(extra, collapse = " "), " are in Divergence but not in CB.")
sp.cbias <- colnames(cbias)[-(seq_len(which(colnames(cbias)=="Type")))]
sp.div <- unique(c(as.character(div[, 3]), as.character(div[, 4])))
extra <- sp.cbias[!(sp.cbias %in% sp.div)]
if (length(extra) > 0)
err.FUN("Species ", paste(extra, collapse = " "), " are in CB but not in Divergence.")
extra <- sp.div[!(sp.div %in% sp.cbias)]
if (length(extra) > 0)
err.FUN("Species ", paste(extra, collapse = " "), " are in Divergence but not in CB.")
}
.check.species <-
function (vhica.obj, user.species = NULL, tree.species = NULL)
{
species <- user.species
data.species <- unique(unlist(strsplit(names(vhica.obj$reg),
split = "X")))
if (is.null(species)) {
species <- data.species
}
if (!all(data.species %in% species)) {
warning("More species in the data than specified in the species vector.")
species <- unique(c(species, data.species))
}
if (!all(species %in% data.species)) {
warning("Some of the species are not in the data. Dropped.")
species <- species[!species %in% data.species]
}
if (!is.null(tree.species)) {
if(!all(species %in% tree.species)) {
warning("Labels in the phylogenetic tree do not match the data set. Better not to plot the tree.")
}
species <- unique(c(species, tree.species))
species <- species[match(tree.species, species)]
}
if (is.null(names(species))) {
names(species) <- species
}
names(species)[is.na(names(species))] <- species[is.na(names(species))]
return(species)
}
.element.present <-
function (vhica.obj, element, species = NULL, skip.void = FALSE)
{
element.present <- NULL
for (cross in names(vhica.obj$reg)) {
sp12 <- unlist(strsplit(cross, "X"))
target.table <- vhica.obj$reg[[cross]][[vhica.obj$target]]
if (nrow(target.table) > 0) {
for (index.target in seq_len(nrow(target.table))) {
fullname <- rownames(target.table)[index.target]
if (.get.TE.fam(fullname, family.sep=vhica.obj$family.sep) == element) {
if (.get.TE.sub(fullname, sub.only = TRUE, family.sep=vhica.obj$family.sep) ==
"") {
element.present <- c(element.present, sp12)
}
else {
if (!is.na(target.table[index.target, "resid"])) {
element.present <- c(element.present, paste(sp12,
c(.get.TE.sub(fullname, species = 1, sub.only = TRUE, family.sep=vhica.obj$family.sep),
.get.TE.sub(fullname, species = 2, sub.only = TRUE, family.sep=vhica.obj$family.sep)),
sep = vhica.obj$family.sep))
}
}
}
}
}
}
element.present <- unique(element.present)
if (!skip.void && !is.null(species)) {
already.there <- unique(sapply(element.present, function(el) {
strsplit(el, vhica.obj$family.sep, fixed = TRUE)[[1]][1]
}))
element.present <- c(element.present, species[!(species %in%
already.there)])
}
if (!is.null(species)) {
species.only <- sapply(element.present, function(el) {
strsplit(el, vhica.obj$family.sep, fixed = TRUE)[[1]][1]
})
subspecies.only <- sapply(element.present, function(el) {
strsplit(el, vhica.obj$family.sep, fixed = TRUE)[[1]][2]
})
element.present <- element.present[order(match(species.only,
species), subspecies.only)]
}
return(element.present)
}
.get.TE.fam <-
function (seqname, family.sep)
{
sapply(strsplit(seqname, split = family.sep, fixed=TRUE), function(s) s[1])
}
.get.TE.sub <-
function (seqname, species = 1, sub.only = FALSE, family.sep)
{
if (!species %in% c(1, 2)) {
stop("Species should be 1 or 2")
}
.get.TE.sub.intra.subonly <- function(seq) {
sp <- strsplit(seq, split = family.sep, fixed=TRUE)[[1]]
if (length(sp) == 1)
return("")
if (length(sp) == 2)
return(sp[2])
if (length(sp) == 3)
return(sp[1 + species])
stop(paste0("Error: sequence name ", seq, " not properly formatted"))
}
.get.TE.sub.intra <- function(seq) {
sp <- strsplit(seq, split = family.sep, fixed=TRUE)[[1]]
if (length(sp) == 1)
return(sp)
if (length(sp) == 2)
return(paste(sp[1], sp[2], sep = family.sep))
if (length(sp) == 3)
return(paste(sp[1], sp[1 + species], sep = family.sep))
stop(paste0("Error: sequence name ", seq, " not properly formatted"))
}
FUN.sub <- if (sub.only)
.get.TE.sub.intra.subonly
else .get.TE.sub.intra
return(sapply(seqname, FUN.sub))
}
.get.TE.fam.longseq <-
function(seqname, species.sep, family.sep)
{
sapply(seqname, function(s) {
fs <- strsplit(s, split=family.sep, fixed=TRUE)[[1]]
fulln <- strsplit(fs[1], split=species.sep, fixed=TRUE)[[1]][1]
if (length(fs) == 1)
return(fulln)
else
return(paste(fulln, fs[length(fs)], sep=family.sep))
})
}
.make.col.obj <-
function (n = 1000, max.col = "blue", min.col = "red", mid.col = "white",
range = c(-5, 5), threshold = c(-1, 1) * abs(log10(0.05)),
threshcol = 0.1, colsqueeze=1, extr = c(-1000, 1000))
{
.make.half <- function(nn, thr, ran, ext) {
thr <- abs(thr)
ran <- abs(ran)
ext <- abs(ext)
nn.t <- max(round(threshcol * nn), 2)
nn.r <- max(nn - nn.t - 2, 2)
ans <- 0
if (thr > 0)
ans <- c(ans, seq(0, thr, length.out = nn.t)[-1])
if (ran > thr)
ans <- c(ans, seq(thr, ran, length.out = nn.r)[-1])
s <- 1/colsqueeze
fun <- function(x) ifelse(x < thr, x*x*(s-1)/thr + x*(2-s), (x*x*(s-1) - x*(s*(thr+ran) - 2*thr) + ran*thr*(s-1))/(thr-ran))
ans <- fun(ans)
ans <- ans[ans >= 0 & ans <= ran]
ans <- c(ans, ext)
return(ans)
}
ans <- list()
range <- sort(range)
if (length(threshold) > 0)
threshold <- sort(threshold)
extr <- sort(extr)
if (range[1] < extr[1])
extr[1] <- range[1] - 0.01
if (range[2] > extr[2])
extr[2] <- range[2] + 0.01
if (length(range) != 2)
stop("range should be a vector of size 2")
if (length(extr) != 2)
stop("extr should be a vector of size 2")
if (length(threshold) > 2)
stop("threshold should be of size 0, 1, and 2")
if (length(threshold) == 0)
threshold <- extr
if (sum(threshold < 0) == 0)
threshold <- c(0, threshold)
if (sum(threshold > 0) == 0)
threshold <- c(threshold, 0)
if (n < 7)
stop("At least 7 colors are necessary")
ans$breaks <- sort(unique(c(-.make.half(round(n/2), ext = extr[1],
ran = range[1], thr = threshold[1]), .make.half(n - round(n/2) +
1, ext = extr[2], ran = range[2], thr = threshold[2]))))
ans$col <- c(colorRampPalette(c(min.col, mid.col))(sum(ans$breaks <
0) - 1), colorRampPalette(c(mid.col, max.col))(sum(ans$breaks >=
0)))
return(ans)
}
.plot.caption <-
function (col.obj, main = "", p.adjust.method = "none", nslices = 1000,
thresh.lines = NA)
{
par(mar = c(4, 1, 2, 1))
if (!requireNamespace("plotrix", quietly=TRUE)) {
frame()
return()
}
compl <- ""
if (p.adjust.method != "none") {
compl <- paste(" (", .capwords(p.adjust.method), ")",
sep = "")
}
ticks <- pretty(floor(col.obj$breaks[2]):ceiling(col.obj$breaks[length(col.obj$breaks) -
1]))
ticks <- ticks[ticks >= round(col.obj$breaks[2]) & ticks <=
round(col.obj$breaks[length(col.obj$breaks) - 1])]
plot(NULL, type = "n", yaxt = "n", xlab = paste("p-value",
compl, sep = ""), ylab = "", xlim = range(ticks), ylim = c(0,
0.4), bty = "n", xaxt = "n")
axis(1, at = ticks, labels = parse(text = sapply(ticks, function(t) if (t ==
0)
"1"
else paste0("10^-", abs(t)))), las = 2)
.fun.getcol <- function(pp) {
col.obj$col[which(col.obj$breaks > pp)[1] - 1]
}
plotrix::gradient.rect(min(ticks), 0, max(ticks), 0.2, nslices = nslices,
col = sapply(seq(min(ticks), max(ticks), length.out = nslices),
.fun.getcol))
if (!is.na(thresh.lines[1])) {
abline(v = thresh.lines, lty = 3)
}
title(main)
}
.plot.matrix <-
function (pmatrix, species, elements, zlim = range(pmatrix, na.rm = TRUE),
col.obj = .make.col.obj(n = 1000), na.col = "gray", grid.col = "darkgray",
species.font.family="mono", species.font.cex=1, max.spname.length=10, ...)
{
if(sum(!is.na(pmatrix)) == 0)
warning("Nothing to plot. Check the dataset and/or the element name.")
par(mar = c(0.1, 0.1, 0.1, 0.1))
realx <- 0.5
extra.elements <- elements[!elements %in% species]
for (sp in species) {
nn <- 1 + length(grep(pattern = sp, x = extra.elements))
realx <- c(realx, realx[length(realx)] + seq(0, 1, length.out = nn + 1)[-1])
}
ccol <- col.obj$col
cbreaks <- col.obj$breaks
if (!is.na(na.col)) {
dummy.val <- min(cbreaks) - 1
ccol <- c(na.col, ccol)
cbreaks <- c(dummy.val, cbreaks)
pmatrix[is.na(pmatrix)] <- dummy.val
}
image(x = realx, y = max(realx) - rev(realx) + 0.5, z = t(pmatrix[nrow(pmatrix):1,
]), axes = FALSE, col = ccol, breaks = cbreaks, zlim = zlim,
...)
if (!is.na(grid.col)) {
abline(h = seq(from = 0.5, by = 1, length.out = length(species) +
1), col = grid.col)
abline(v = seq(from = 0.5, by = 1, length.out = length(species) +
1), col = grid.col)
}
ns <- names(species)
lab <- ifelse(nchar(ns) > max.spname.length, paste0(substr(ns, 1, max.spname.length - 4), "..", substr(ns, nchar(ns)-2, nchar(ns))), ns)
axis(2, at = seq_along(species), labels = rev(lab),
las = 2, lwd.ticks = 0, lwd = 0, family = species.font.family, cex.axis=species.font.cex)
axis(3, at = seq_along(species), labels = lab,
las = 2, lwd.ticks = 0, lwd = 0, family = species.font.family, cex.axis=species.font.cex)
}
.plot.phylo <-
function (tree, species = "", horizontal = FALSE, show.tip.label = FALSE,
...)
{
if (!requireNamespace("ape", quitely=TRUE))
stop("Cannot plot trees without the package ape")
shift <- if (length(tree$tip.label) < 15)
22/(length(tree$tip.label)^1.5)
else 0
if (horizontal) {
par(mar = c(shift, 0.1, shift, 3.6))
plot(ape::rotateConstr(tree, rev(species)), direction = "rightwards",
show.tip.label = show.tip.label, ...)
}
else {
par(mar = c(3.6, shift, 0.1, shift))
plot(tree, direction = "downwards", show.tip.label = show.tip.label,
...)
}
}
.plot.regression <-
function (reg, xlim = range(c(reg$model[, 2], reg[[length(reg)]][,
1]), na.rm=TRUE), ylim = range(c(reg$model[, 1]), reg[[length(reg)]][,
2], na.rm=TRUE), xlab = names(reg$model)[2], ylab = names(reg$model)[1],
reg.line = TRUE, elements = rownames(reg[[length(reg)]]),
pch.gene = 1, pch.element = 2, col.gene = "black", col.element = "black",
element.names = TRUE, lty.reg = 2, col.reg = "black", pval = NA,
lty.pval = 3, col.pval = "red", unilat = -1, ...)
{
plot(NULL, xlim = xlim, ylim = ylim, xlab = xlab, ylab = ylab,
...)
points(reg$model[, 2], reg$model[, 1], pch = pch.gene, col = col.gene)
if (reg.line) {
abline(reg, lty = lty.reg, col = col.reg)
}
if (!is.na(pval)[1]) {
if (unilat == 0) {
pval.lines <- qnorm(c(pval/2, 1 - pval/2))
}
else {
pval.lines <- sign(unilat) * qnorm(1 - pval)
}
sdr <- sd(reg$residuals)
for (pl in pval.lines) {
abline(coef(reg)[1] + sdr * pl, coef(reg)[2], lty = lty.pval,
col = col.pval)
}
}
points(reg[[length(reg)]][elements, 1], reg[[length(reg)]][elements,
2], pch = pch.element, col = col.element)
in.elements <- elements %in% rownames(reg[[length(reg)]])
if (element.names && sum(in.elements > 0)) {
pos <- ifelse(reg[[length(reg)]][elements, 1] > mean(xlim),
2, 4)
NAs <- is.na(pos)
text(reg[[length(reg)]][elements, 1][!NAs], reg[[length(reg)]][elements,
2][!NAs], pos = pos[in.elements][!NAs], labels = elements[!NAs])
}
}
.prepare.phylo <-
function (treefile)
{
if (is.null(treefile)) {
warning("No tree file specfied: the phylogeny will not be plotted")
return(NA)
}
if (!requireNamespace("ape", quietly=TRUE)) {
warning("The ape library is not available: the phylogeny will not be plotted")
return(NA)
}
try(tree <- ape::read.tree(treefile))
if (inherits(tree, "try-error")) {
return(NA)
}
return(tree)
}
.read.codon.bias <-
function (file, reference = "Gene")
{
rawdata <- read.table(file, header = TRUE, row.names = NULL, stringsAsFactors = TRUE)
if (ncol(rawdata) < 3) {
stop("Not enough columns in file ", file)
}
type.column <- which(sapply(seq_len(ncol(rawdata)), function(x) {
is.factor(rawdata[, x])
}))
if (length(type.column) != 1) {
stop("Only one column should be a factor in file ", file)
}
colnames(rawdata)[type.column] <- "Type"
if (length(levels(rawdata$Type)) > 2) {
stop("Only two type levels allowed in file ", file)
}
if (!(reference %in% levels(rawdata$Type))) {
stop("No reference type ", reference, " in file ", file)
}
rownames(rawdata) <- rawdata[, 1]
return(rawdata)
}
.read.divergence <-
function (file, divergence = "dS")
{
rawdata <- read.table(file, header = TRUE, row.names = NULL, stringsAsFactors = TRUE)
if (ncol(rawdata) != 4) {
stop("Number of columns different than 4 in file ", file)
}
rawdata[, 1] <- as.character(rawdata[, 1])
return(rawdata)
}
.seq.codon.bias.clean <-
function(ll, gene.sep, species.sep, family.sep)
{
ans <- list()
names(ll) <- sapply(strsplit(basename(names(ll)), split=gene.sep, fixed=TRUE),
function(s) .remove.space(s[1]))
for (i in seq_along(ll)) {
namsp <- .get.TE.fam.longseq(names(ll[[i]]), species.sep=species.sep, family.sep=family.sep)
subf <- .get.TE.sub(namsp, family.sep=family.sep, sub.only=TRUE)
sp <- .get.TE.fam(namsp, family.sep=family.sep)
if (all(subf == "")) {
xx <- ll[[i]]
names(xx) <- sp
genename <- names(ll)[i]
if (genename %in% names(ans))
stop("Gene ", genename, " duplicated in the data set.")
ans[[genename]] <- xx
} else {
for (ss in unique(subf)) {
if (ss=="") stop("Inconsistent sub-family naming in gene ", names(ll)[i], ".")
xx <- ll[[i]][subf==ss]
names(xx) <- sp[subf==ss]
genename <- paste(names(ll)[i], ss, sep=family.sep)
if (genename %in% names(ans))
stop("Gene ", genename, " duplicated in the data set.")
ans[[genename]] <- xx
}
}
}
return(ans)
}
.seq.codon.bias.format <-
function(CUB, name, tag, species.ref)
{
stopifnot(
all(is.na(CUB)) || is.vector(CUB, mode="numeric"),
length(CUB) > 0,
!is.null(names(CUB)),
nchar(name) > 0,
nchar(tag) > 0,
length(species.ref) > 0)
if (any(nchar(names(CUB)) < 1))
stop("In sequence file ", name, ": species names too short.")
if (length(unique(names(CUB))) != length(names(CUB))) {
stop("In sequence file ", name, ": Duplicated species names: ", names(CUB)[duplicated(names(CUB))], ".")
}
if (!all(names(CUB) %in% species.ref)) {
stop("In sequence file ", name, ": Unknown species: ", paste(names(CUB)[!(names(CUB) %in% species.ref)], collapse=","), ".")
}
CUB <- CUB[species.ref]
names(CUB) <- species.ref
data.frame(Type=as.factor(tag), as.list(CUB), row.names=name, check.names=FALSE)
}
.seq.codon.bias <-
function(gene.fasta, target.fasta, method="ENC", ref.name="Gene", targ.name="TE", species.sep="_", gene.sep=".", family.sep=".")
{
if (requireNamespace("parallel", quietly=TRUE)) {
mymclapply <- parallel::mclapply
} else {
mymclapply <- lapply
}
CUBgenes <- mymclapply(gene.fasta, function(genefile) CUB(genefile, method=method))
names(CUBgenes) <- gene.fasta
CUBtarget <- mymclapply(target.fasta, function(genefile) CUB(genefile, method=method))
names(CUBtarget) <- target.fasta
listgenes <- .seq.codon.bias.clean(CUBgenes, gene.sep=gene.sep, species.sep=species.sep, family.sep=family.sep)
listtarget <- .seq.codon.bias.clean(CUBtarget, gene.sep=gene.sep, species.sep=species.sep, family.sep=family.sep)
species <- unique(unlist(lapply(c(listgenes, listtarget), names)))
ans.genes <- do.call(rbind, mymclapply(seq_along(listgenes), function(i) .seq.codon.bias.format(listgenes[[i]], names(listgenes)[i], tag=ref.name, species.ref=species)))
ans.targ <- do.call(rbind, mymclapply(seq_along(listtarget), function(i) .seq.codon.bias.format(listtarget[[i]], names(listtarget)[i], tag=targ.name, species.ref=species)))
return(rbind(ans.genes, ans.targ))
}
.seq.divergence <-
function(sequence.fasta, divergence="dS", method="LWL85", pairwise=FALSE, species.sep="_", gene.sep=".", family.sep=".", max.lim=3)
{
if (requireNamespace("parallel", quietly=TRUE)) {
mymclapply <- parallel::mclapply
} else {
mymclapply <- lapply
}
listseq <- mymclapply(sequence.fasta, function(genefile) {
ans <- div(genefile, method=method, pairwise=pairwise, max.lim=max.lim)
names(ans)[which(names(ans)=="div")] <- divergence
seqn <- rep(.remove.space(strsplit(basename(genefile), split=gene.sep, fixed=TRUE)[[1]][1]), nrow(ans))
fullsp1 <- .get.TE.fam.longseq(as.character(ans$sp1), species.sep=species.sep, family.sep=family.sep)
fullsp2 <- .get.TE.fam.longseq(as.character(ans$sp2), species.sep=species.sep, family.sep=family.sep)
ans$sp1 <- .get.TE.fam(fullsp1, family.sep=family.sep)
ans$sp2 <- .get.TE.fam(fullsp2, family.sep=family.sep)
subf1 <- .get.TE.sub(fullsp1, sub.only=TRUE, family.sep=family.sep)
subf2 <- .get.TE.sub(fullsp2, sub.only=TRUE, family.sep=family.sep)
if (any(xor(subf1 == "", subf2 == ""))) stop("Inconsistent species/sub-family naming")
seqn <- ifelse(subf1=="", seqn, ifelse(subf1==subf2, paste(seqn, subf1, sep=family.sep), paste(seqn, subf1, subf2, sep=family.sep)))
data.frame(seq=seqn, ans)
})
do.call(rbind, listseq)
}
.reference.regression <-
function (cbias, div, reference = "Gene", divergence = "dS",
CB.as.x = TRUE, warn = FALSE, family.sep=".")
{
full.list <- .tables2list(cbias, div, warn = warn, reference = reference,
divergence = divergence, family.sep=family.sep)
if (requireNamespace("parallel", quietly=TRUE)) {
mymclapply <- parallel::mclapply
} else {
mymclapply <- lapply
}
return(mymclapply(full.list, FUN = function(cross) {
cross2 <- cross[cross$Type == reference, ]
cross.TE <- cross[cross$Type != reference, ]
meanCB <- 0.5 * cross2$CB1 + 0.5 * cross2$CB2
div <- cross2$div
names(div) <- names(meanCB) <- rownames(cross2)
meanCB.TE <- 0.5 * cross.TE$CB1 + 0.5 * cross.TE$CB2
div.TE <- cross.TE$div
names(meanCB.TE) <- names(div.TE) <- rownames(cross.TE)
ans <- NULL
resid.TE <- NULL
if (CB.as.x) {
ans <- lm(div ~ meanCB)
resid.TE <- div.TE - (meanCB.TE * coef(ans)[2] +
coef(ans)[1])
ans[[levels(cross$Type)[2]]] <- data.frame(meanCB = meanCB.TE,
div = div.TE, resid = resid.TE, rel.res = resid.TE/sd(resid(ans)))
} else {
ans <- lm(meanCB ~ div)
resid.TE <- meanCB.TE - (div.TE * coef(ans)[2] +
coef(ans)[1])
ans[[levels(cross$Type)[2]]] <- data.frame(div = div.TE,
meanCB = meanCB.TE, resid = resid.TE, rel.res = resid.TE/sd(resid(ans)))
}
rownames(ans[[levels(cross$Type)[2]]]) <- rownames(cross.TE)
return(ans)
}))
}
.reverse.sub <-
function (seqname, family.sep)
{
sp <- strsplit(seqname, split = family.sep, fixed=TRUE)
return(unlist(lapply(sp, function(ss) {
if (length(ss) == 3) return(paste0(ss[1], family.sep, ss[3],
family.sep, ss[2]))
if (length(ss) == 2) return(paste0(ss[1], family.sep, ss[2]))
if (length(ss) == 1) return(ss[1])
stop("Error: sequence name not properly formatted")
})))
}
.stat.matrix <-
function (vhica.obj, element, elements, p.adjust.method = "none",
H1.test = "bilat")
{
stopifnot(length(elements) > 1)
ans <- matrix(NA, ncol = length(elements), nrow = length(elements))
colnames(ans) <- rownames(ans) <- elements
for (index.TE1 in 1:(length(elements) - 1)) {
for (index.TE2 in (index.TE1 + 1):length(elements)) {
TE1 <- elements[index.TE1]
TE2 <- elements[index.TE2]
decomp <- strsplit(c(TE1, TE2), split = vhica.obj$family.sep, fixed = TRUE)
sp.TE1 <- decomp[[1]][1]
sp.TE2 <- decomp[[2]][1]
if (!paste(sp.TE1, "X", sp.TE2, sep = "") %in% names(vhica.obj$reg)) {
tmp <- index.TE1
new.index.TE1 <- index.TE2
new.index.TE2 <- index.TE1
TE1 <- elements[new.index.TE1]
TE2 <- elements[new.index.TE2]
decomp <- strsplit(c(TE1, TE2), split = vhica.obj$family.sep, fixed = TRUE)
sp.TE1 <- decomp[[1]][1]
sp.TE2 <- decomp[[2]][1]
}
else {
new.index.TE1 <- index.TE1
new.index.TE2 <- index.TE2
}
sub.TE1 <- if (length(decomp[[1]]) == 2)
decomp[[1]][2]
else ""
sub.TE2 <- if (length(decomp[[2]]) == 2)
decomp[[2]][2]
else ""
crossname <- paste(sp.TE1, sp.TE2, sep = "X")
element.table <- vhica.obj$reg[[crossname]][[vhica.obj$target]]
if (sub.TE1 == "") {
if (sub.TE2 == "") {
linename <- element
}
else {
linename <- "DOESNOTEXIST"
}
}
else {
if (sub.TE1 == sub.TE2) {
linename <- paste(element, sub.TE1, sep = vhica.obj$family.sep)
}
else {
if (sub.TE2 == "") {
linename <- "DOESNOTEXIST"
}
else {
linename <- paste0(element, vhica.obj$family.sep, sub.TE1,
vhica.obj$family.sep, sub.TE2)
}
}
}
if (linename %in% rownames(vhica.obj$reg[[crossname]]$TE)) {
norm.resid <- element.table[linename, "rel.res"]
p.val <- NA
if (H1.test == "lower") {
p.val <- pnorm(norm.resid)
}
else if (H1.test == "bilat") {
p.val <- 2 * pnorm(-abs(norm.resid))
if (norm.resid > 0)
p.val <- -p.val
}
else if (H1.test == "greater") {
p.val <- -pnorm(-norm.resid)
}
else {
stop("H1.test ", H1.test, " incorrect. Should be \"lower\", \"bilat\", or \"greater\".")
}
ans[TE1, TE2] <- ans[TE2, TE1] <- p.val
}
}
}
corrected.p <- log10(p.adjust(abs(ans[upper.tri(ans)]), method = p.adjust.method))
corrected.p <- ifelse(ans[upper.tri(ans)] > 0, corrected.p, -corrected.p)
ans[upper.tri(ans)] <- corrected.p
ans[lower.tri(ans)] <- t(ans)[lower.tri(ans)]
return(ans)
}
.tables2list <-
function (cbias, div, check = TRUE, keep.absent = FALSE, warn = FALSE,
reference = "Gene", divergence = "dS", family.sep)
{
if (requireNamespace("parallel", quietly=TRUE)) {
mymclapply <- parallel::mclapply
} else {
mymclapply <- lapply
}
.make.unitary.table <- function(nn, sp1, sp2, sub.div) {
if (nn %in% cbias[, 1]) {
cc <- data.frame(Type = cbias[nn, "Type"], CB1 = cbias[nn,
sp1], CB2 = cbias[nn, sp2], div = sub.div[nn,
divergence], name = nn)
}
else {
rev.species <- (sub.div[nn, "sp1"] == sp2)
te1 <- .get.TE.sub(nn, species = 1, family.sep=family.sep)
te2 <- .get.TE.sub(nn, species = 2, family.sep=family.sep)
if (rev.species) {
cc <- data.frame(Type = cbias[te1, "Type"], CB1 = cbias[te2,
sp1], CB2 = cbias[te1, sp2], div = sub.div[nn,
divergence], name = .reverse.sub(nn, family.sep=family.sep))
}
else {
cc <- data.frame(Type = cbias[te1, "Type"], CB1 = cbias[te1,
sp1], CB2 = cbias[te2, sp2], div = sub.div[nn,
divergence], name = nn)
}
}
return(cc)
}
if (check)
.check.input.consistency(cbias, div, warn = warn, family.sep=family.sep)
ans <- list()
species <- unique(c(colnames(cbias)[-c(1, 2)], as.character(div[,
3]), as.character(div[, 4])))
stopifnot(length(species) > 1)
for (index.sp1 in 1:(length(species) - 1)) {
for (index.sp2 in (index.sp1 + 1):length(species)) {
sp1 <- species[index.sp1]
sp2 <- species[index.sp2]
cross <- paste(sp1, sp2, sep = "X")
sub.div <- div[(div[, 3] == sp1 & div[, 4] == sp2) |
(div[, 3] == sp2 & div[, 4] == sp1), ]
compnames <- rownames(sub.div) <- as.character(sub.div[, 1])
tt <- do.call(rbind, mymclapply(compnames, .make.unitary.table,
sp1 = sp1, sp2 = sp2, sub.div = sub.div))
rownames(tt) <- tt$name
tt$name <- NULL
if (!keep.absent) {
tt <- tt[!is.na(tt[, 4]), ]
}
ans[[cross]] <- tt
}
}
return(ans)
}
.checkseq <-
function(seq, gene.name="") {
# 0 check if the object makes sense
stopifnot(
length(seq) > 0,
is.list(seq),
all(sapply(seq, function(s) "SeqFastadna" %in% class(s))))
# 1 check if all sequences have the same size
ll <- sapply(seq, length)
if (max(ll) != min(ll)) {
warning(gene.name, " Sequences do not have the same length. Adding as many n as necessary.")
seq <- lapply(seq, function(s) {ans <- c(s, rep("n", max(ll)-length(s))); class(ans) <- "SeqFastadna"; ans})
}
#2 check if sequence length is a multiple of 3
mll <- max(ll)
if (mll %% 3 != 0) {
warning(gene.name, " Sequence length ", mll, " is not a multiple of 3. Truncating.")
seq <- lapply(seq, function(s) {ans <- s[1:(3*(mll%/%3))]; class(ans) <- "SeqFastadna"; ans})
}
return(seq)
}
.ENC <-
function(seq, numcode=1, Wright.corr=TRUE)
{
stopifnot(
"SeqFastadna" %in% class(seq),
requireNamespace("seqinr"))
yy <- seqinr::ucoweight(seq, numcode=numcode)
yy.filt <- yy[sapply(yy,sum) > 1 & names(yy) != "*"]
Fc <- sapply(yy.filt, function(x) {n <- sum(x); (n*sum((x/n)^2)-1)/(n-1)})
SF <- sapply(yy.filt, length)
SF <- SF[Fc != 0]
Fc <- Fc[Fc != 0]
ans <- 2 + sum(SF==2)/mean(Fc[SF==2]) + sum(SF==3)/mean(Fc[SF==3]) + sum(SF==4)/mean(Fc[SF==4]) + sum(SF==6)/mean(Fc[SF==6])
ans[!is.finite(ans)] <- NA
if (Wright.corr) {
if (sum(SF==3)==0) # No Ile
ans <- ans + 1/(0.5*mean(Fc[SF==2])+0.5*mean(Fc[SF==4]))
if (sum(SF==2) == 0 | sum(SF==4) == 0 | sum(SF==6) == 0)
ans <- NA
if (is.finite(ans) && ans > 61) ans <- 61
}
return(ans)
}
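# Note on .ENC: Fc estimates Wright's codon homozygosity within each
# synonymous family; with the standard genetic code the sums over SF reduce
# to Wright's classic estimator Nc = 2 + 9/F2 + 1/F3 + 5/F4 + 3/F6
# (9 two-fold, 1 three-fold, 5 four-fold and 3 six-fold families).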
.LWL85 <-
function(seq, sq1=names(seq)[1], sq2=names(seq)[2], pairwise=TRUE, max.lim=max.lim)
{
stopifnot(
all(sapply(seq, function(s) "SeqFastadna" %in% class(s))),
length(sq1) == length(sq2),
all(c(sq1,sq2) %in% names(seq)),
requireNamespace("seqinr"))
if (!pairwise) {
ali <- seqinr::as.alignment(nb=length(seq), nam=names(seq), seq=sapply(seq, function(s) paste(s, collapse="")))
ks <- as.matrix(seqinr::kaks(ali)$ks)
candidate <- ks[cbind(sq1, sq2)]
return(ifelse (is.na(candidate) | candidate > max.lim, NA, candidate))
} else {
return(sapply(seq_along(sq1), function(i) {
subseq <- seq[c(sq1[i], sq2[i])]
subali <- seqinr::as.alignment(nb=2, nam=c(sq1[i], sq2[i]), seq=sapply(subseq, function(s) paste(s, collapse="")))
candidate <- seqinr::kaks(subali)$ks[1]
return(if(is.na(candidate) || candidate > max.lim) NA else candidate)
}))
}
}
|
/scratch/gouwar.j/cran-all/cranData/vhica/R/vhica-internal.R
|
# build from existing stack with existing index, or dimensions
#' @export RasterArray
setMethod("initialize",signature="RasterArray",
definition=function(.Object,stack, index=NULL, dim=NULL){
if(!requireNamespace("terra", quietly=TRUE)) stop("This class requires the terra package.")
# automatic wrapper
if(is.null(index)) index <- 1:nlayers(stack)
# some defense for index
if(is.null(dim)){
if(!inherits(stack,"SpatRaster")) stop("The 'stack' has to be a 'SpatRaster' - class object.")
if(!is.numeric(index)) stop("The 'index' has to be a 'numeric' object.")
			# positions where NAs are supposed to be
bNA <- is.na(index)
if(any(index[!bNA]%%1!=0) | any(index[!bNA]<1)) stop("The 'index' has to contain positive integer values.")
			# the number of valid entries must match the number of layers
			if(sum(!bNA)!=nlayers(stack)) stop("You have to provide as many layers as there are valid entries in the index.")
# reorder the stack
noNAInd <- index[!bNA]
newStack <- stack[[noNAInd]]
			# force index to be a monotonic integer sequence
newIndex <- index
newIndex[] <- NA
newIndex[!bNA] <- 1:nlayers(stack)
# store final object
.Object@index <- newIndex
.Object@stack <- newStack
}else{
if(!is.numeric(dim)) stop("The 'dim' argument has to be a 'numeric' vector.")
			if(nlayers(stack)!=prod(dim, na.rm=TRUE)) warning("The number of layers in the stack does not equal the product of the 'dim' vector.")
.Object@stack<- stack
index <- array(1:nlayers(stack), dim=dim)
# in case of reuse
index[duplicated(as.numeric(index))] <- NA
.Object@index<- index
}
return(.Object)
}
)
setMethod(
"show",
signature="RasterArray",
function (object)
{
cat(paste0("class : ", class(object), "\n"))
## if (rotated(object)) {
## cat("rotated : TRUE\n")
## }
mnr <- 15
# if (filename(object) != "") {
# cat("filename :", filename(object), "\n")
# }
nl <- nlayers(object)
if (nl > 0) {
cat("Element properties: \n")
cat(paste0("- class : ",class(object@stack[[1]]), "\n"))
dims <- dim(object@stack[[1]])
cat(paste0("- dimensions : ", dims[1],", ",dims[2]," (nrow, ncol)\n"))
reses <- terra::res(object@stack[[1]])
cat(paste0("- resolution : ",reses[1],", ",reses[2]," (x, y)\n"))
extent <- terra::ext(object@stack[[1]])
cat(paste0("- extent : ", extent$xmin, ", ",extent$xmax,", ",extent$ymin,", ",extent$ymax, " (xmin, xmax, ymin, ymax)\n"))
refsys <- paste(terra::crs(object@stack[[1]], describe=TRUE)$name)
cat(paste0("- coord.ref. : ",refsys,"\n"))
cat("Array properties: \n")
adim <- dim(object)
allName <- names(object)
if(length(adim)==1){
cat("- dimensions : ", paste(adim, collapse=", "),
" (vector)\n",
sep = "")
}else{
allName<- dimnames(object)
if(length(allName)==2){
cat("- dimensions : ", paste(adim, collapse=", "),
" (nrow, ncol)\n",
sep = "")
}else{
cat("- dimensions : ", paste(adim, collapse=", "),
" (nrow, ncol, ...)\n",
sep = "")
}
# for(i in 1:length(allName)){
# if(i==1) cat("- rownames : ", paste(allName[[i]], collapse=", "), "\n", sep = "")
# if(i==2) cat("- colnames : ", paste(allName[[i]], collapse=", "), "\n", sep = "")
# if(i>2) cat(paste("- Dim", i, " names", sep=""), " : ", paste(allName[[i]], collapse=", "), "\n", sep = "")
# }
}
cat("- num. layers : ", nlayers(object), "\n",
sep = "")
cat("- missing : ", sum(is.na(object@index)), "\n",
sep = "")
cat("- proxy:\n ")
theProx <- proxy(object)
theProx[] <- abbrev(theProx)
print(theProx)
} else {
cat("nlayers :", nl, "\n")
if(sum(is.na(object@index))>0){
cat("- missing : ", sum(is.na(object@index)), "\n",
sep = "")
cat("- proxy:\n ")
theProx <- proxy(object)
theProx[] <- abbrev(theProx)
print(theProx)
}
}
cat("\n")
}
)
#' Positions of missing values in a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' The function behaves similarly to the regular \code{is.na()} function applied to the proxy object of a '\code{RasterArray}'.
#'
#' @param x A \code{RasterArray} class object.
#' @return A \code{logical} \code{vector}, \code{matrix} or \code{array} matching the structure of the \code{RasterArray}.
#'
#' @examples
#' ex <- rastex()
#' ex[2] <- NA
#' is.na(ex)
#'
#' @export
is.na.RasterArray<-function(x){
is.na(proxy(x))
}
|
/scratch/gouwar.j/cran-all/cranData/via/R/RasterArray-base.R
|
# VirtualArray with itself
setMethod("c2", signature=c("RasterArray", "RasterArray"),
definition=function(x, y){
# shift indices of the second argument
indexPlus<- y@index+nlayers(x)
# combine the indices
ind <- c(x@index, indexPlus)
# the final object
endObj <- RasterArray(c(x@stack, y@stack), index=ind)
return(endObj)
}
)
# adding one or more SpatRaster layers
setMethod("c2", signature=c("RasterArray", "SpatRaster"),
definition=function(x, y){
# The new index
ind <- c(x@index, (nlayers(x)+1):(nlayers(x)+nlayers(y)))
if(nlayers(y)==1){
callSymb <- sys.call(which=-3)
if(is.symbol(callSymb[[3]])){
names(ind)[length(ind)] <- deparse(callSymb[[3]])
}
}
endObj <- RasterArray(c(x@stack, y), index=ind)
return(endObj)
}
)
setMethod("c2", signature=c("RasterArray", "logical"),
definition=function(x, y){
		# only all-NA logical values are valid here: they append missing elements
		if(any(!is.na(y))) stop("Invalid argument.")
ind<- c(x@index, rep(NA,length(y)))
# copy the name if it there is one
if(!is.null(names(y))) names(ind)[(length(ind)-length(y)+1):length(ind)] <- names(y)
# replace index with new
x@index <- ind
# return corrected object
return(x)
}
)
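# Usage sketches for the pairwise combiners above (dispatched through c()):
# ex <- rastex()
# c(ex, ex)       # RasterArray + RasterArray: indices of the second are shifted
# c(ex, c(g=NA))  # RasterArray + logical: appends a named missing element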
|
/scratch/gouwar.j/cran-all/cranData/via/R/RasterArray-combine.R
|
###########################################################
# Arith methods
setMethod("Arith", c(e1="RasterArray", e2="numeric"),
definition=function(e1,e2){
e1@stack <- methods::callGeneric(e1@stack, e2)
e1
}
)
setMethod("Arith", c(e2="RasterArray", e1="numeric"),
definition=function(e1,e2){
e2@stack <- methods::callGeneric(e2@stack, e1)
e2
}
)
###########################################################
# Compare method
setMethod("Compare", c(e1="RasterArray", e2="SpatRaster"),
definition=function(e1,e2){
e1@stack <- methods::callGeneric(e1@stack, e2)
e1
}
)
###########################################################
# Math method
setMethod("Math", c(x="RasterArray"),
definition=function(x){
x@stack <- methods::callGeneric(x@stack)
x
}
)
###########################################################
# Math2 method
setMethod("Math2", signature=c(x="RasterArray"),
definition=function(x, digits){
op=.Generic[[1]]
switch(op,
round = return({
if(missing(digits)) digits <- 0
x@stack <- round(x@stack,digits)
x
}),
signif = return({
if(missing(digits)) digits <- 6
x@stack <- signif(x@stack,digits)
x
})
)
}
)
setMethod("Summary", c(x="RasterArray"),
definition=function(x,..., na.rm=FALSE){
op<-.Generic[[1]]
# if(op=="range"){
# }else{
methods::callGeneric(x@stack,..., na.rm=na.rm)
# }
}
)
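# Usage sketches for the group generics above; each operation is applied
# to every layer in the @stack:
# ex <- rastex()
# ex + 1               # Arith
# round(ex, 0)         # Math2
# max(ex, na.rm=TRUE)  # Summary: evaluated on the whole stack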
|
/scratch/gouwar.j/cran-all/cranData/via/R/RasterArray-groupgen.R
|
# Functions that are practically inherited from SpatRaster*
#' Resolution of a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' The methods are inherited from the '\code{\link[terra:rast]{SpatRaster}}' class, see \code{\link[terra]{res}}. Replacement is not allowed.
#'
#' @param x a \code{RasterArray}-class object.
#' @return A \code{numeric} vector.
#'
#' @rdname res
#' @examples
#' ex <- rastex()
#' res(ex)
#' yres(ex)
#' xres(ex)
#' @export xres
setMethod(
"xres",
signature="RasterArray",
function(x){
if(!requireNamespace("terra", quietly=TRUE)){
stop("This function requires the terra package.")
}
terra::xres(x@stack)
}
)
# setMethod(
# "filename",
# signature="RasterArray",
# function(x){
# filename(x@stack)
# }
# )
#' @rdname res
#' @export yres
setMethod(
"yres",
signature="RasterArray",
function(x){
if(!requireNamespace("terra", quietly=TRUE)){
stop("This function requires the terra package.")
}
terra::yres(x@stack)
}
)
#' @rdname res
#' @export res
setMethod(
"res",
signature="RasterArray",
function(x){
if(!requireNamespace("terra", quietly=TRUE)){
stop("This function requires the terra package.")
}
terra::res(x@stack)
}
)
#' Rotate a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' The method is inherited from the '\code{\link[terra:rast]{SpatRaster}}' class.
#'
#' @param x (\code{\link[via:RasterArray-class]{RasterArray}}) Object.
#' @param ... Additional arguments passed to the \code{\link[terra]{rotate}} function.
#' @rdname rotate
#' @return A \code{\link[via:RasterArray-class]{RasterArray}}-class object.
#' @name rotate
NULL
#' @rdname rotate
setMethod(
"rotate",
signature=c("RasterArray"),
function(x,...){
if(!requireNamespace("terra", quietly=TRUE)){
stop("This function requires the terra package.")
}
x@stack <- terra::rotate(x@stack,...)
return(x)
}
)
#' @rdname ext
setMethod(
"ext",
signature=c("RasterArray"),
function(x,...){
if(!requireNamespace("terra", quietly=TRUE)){
stop("This function requires the terra package.")
}
a <- terra::ext(x@stack,...)
return(a)
}
)
#' Resample a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' The method is inherited from the '\code{\link[terra:rast]{SpatRaster}}' class.
#'
#' @param x a \code{\link[via:RasterArray-class]{RasterArray}}-class object.
#' @param y The y argument of the \code{\link[terra:resample]{resample}} function.
#' @return A resampled \code{\link[via:RasterArray-class]{RasterArray}}-class object.
#' @param ... arguments passed to the \code{\link[terra:resample]{resample}} function.
#'
#' @rdname resample
#' @aliases resample,RasterArray-method
#' @examples
#' ex <- rastex()
#' if(requireNamespace("terra", quietly=TRUE)){
#' template <- terra::rast(res=5)
#' resampled <- resample(ex, template)
#' }
#' @export resample
setMethod(
"resample",
signature=c("RasterArray", "ANY"),
function(x,y,...){
if(!requireNamespace("terra", quietly=TRUE)){
stop("This function requires the terra package.")
}
x@stack <- terra::resample(x@stack, y,...)
return(x)
}
)
#' Crop a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' The method is inherited from the '\code{\link[terra:rast]{SpatRaster}}' class.
#'
#' @param x a \code{\link[via:RasterArray-class]{RasterArray}} class object.
#' @param y an \code{\link[via:ext]{SpatExtent}}-class object, or any object from which an extent object can be extracted (see Details)
#' @param ... arguments passed to the \code{\link[terra]{crop}} function.
#' @return A cropped \code{\link[via:RasterArray-class]{RasterArray}}-class object.
#'
#' @examples
#' ex <- rastex()
#' # crop to a specific area
#' if(requireNamespace("terra", quietly=TRUE)){
#' ext <- terra::ext(c(
#' xmin = 106.58,
#' xmax = 157.82,
#' ymin = -45.23,
#' ymax = 1.14
#' ))
#' # cropping all
#' au<- crop(ex, ext)
#' }
#'
#' @rdname crop
#' @exportMethod crop
setMethod(
"crop",
signature=c("RasterArray"),
function(x,y,...){
if(!requireNamespace("terra", quietly=TRUE)){
stop("This function requires the terra package.")
}
x@stack <- terra::crop(x@stack,y,...)
return(x)
}
)
#' Aggregate raster cells in a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' The method is inherited from the '\code{\link[terra:rast]{SpatRaster}}' class.
#'
#' @param x a \code{\link[via:RasterArray-class]{RasterArray}}-class object.
#' @param ... arguments passed to the \code{\link[terra]{aggregate}} function.
#'
#' @exportMethod aggregate
#' @return An aggregated \code{\link[via:RasterArray-class]{RasterArray}}-class object.
#' @examples
#' library(terra)
#' ex <- rastex()
#' agg <- aggregate(ex, 30)
#' @rdname aggregate
#' @name aggregate
NULL
#' @rdname aggregate
setMethod(
"aggregate",
signature=c("RasterArray"),
function(x,...){
if(!requireNamespace("terra", quietly=TRUE)){
stop("This function requires the terra package.")
}
x@stack <- terra::aggregate(x@stack,...)
return(x)
}
)
#' Disaggregate raster cells in a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' The method is inherited from the '\code{\link[terra:rast]{SpatRaster}}' class.
#'
#' @param x a \code{\link[via:RasterArray-class]{RasterArray}} class object.
#' @return A disaggregated \code{\link[via:RasterArray-class]{RasterArray}} class object.
#' @param ... arguments passed to the \code{\link[terra]{disagg}} function.
#'
#' @exportMethod disagg
#' @examples
#' ex <- rastex()
#' disagg <- disagg(ex, 3)
#' @rdname disagg
#' @name disagg
NULL
#' @rdname disagg
setMethod(
"disagg",
signature=c("RasterArray"),
function(x,...){
if(!requireNamespace("terra", quietly=TRUE)){
stop("This function requires the terra package.")
}
x@stack <- terra::disagg(x@stack,...)
return(x)
}
)
#' Projecting a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' The method is inherited from the '\code{\link[terra:rast]{SpatRaster}}' class. See \code{\link[terra:project]{project}} for details.
#'
#' @rdname project
setMethod("project", "RasterArray",
function(x, y, ...){
if(!requireNamespace("terra", quietly=TRUE)){
stop("This function requires the terra package.")
}
x@stack <- terra::project(x=x@stack, y=y, ...)
return(x)
}
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/RasterArray-rast.R
|
#' @rdname replacementSingle
#' @exportMethod "[<-"
setReplaceMethod(
"[",
signature(x="RasterArray", value="SpatRaster"),
definition=function(x,i,j,..., value){
x<- VirtualArrayReplaceLayer(x=x, i=i, j=j, value=value, ...)
return(x)
}
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/RasterArray-replace.R
|
#' @rdname arraylength
#' @exportMethod nlayers
setMethod(
"nlayers",
signature="RasterArray",
function(x){
dims <- dim(x@stack)
return(dims[3])
}
)
#' Number of cells in a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' The method is inherited from the '\code{\link[terra:rast]{SpatRaster}}' class.
#'
#' @param x a \code{\link[via:RasterArray-class]{RasterArray}} class object.
#' @rdname ncell
#' @return A \code{numeric} value.
#' @examples
#' ex <- rastex()
#' ncell(ex)
#' @exportMethod ncell
setMethod(
"ncell",
signature="RasterArray",
function(x){
if(!requireNamespace("terra", quietly=TRUE)){
stop("This function requires the terra package.")
}
terra::ncell(x@stack)
}
)
#' @rdname nvalues
setMethod(
"nvalues",
signature="RasterArray",
function(x){
# returns the layer names
prod(dim(x@stack))
}
)
#' @rdname dimlayer
setMethod(
"dimlayer",
signature="RasterArray",
function(x){
# depends on subset-method
dim(x@stack[[1]])[1:2]
}
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/RasterArray-xattrib.R
|
#' @export SfArray
setMethod("initialize",signature="SfArray",
definition=function(.Object,stack, index=NULL, dim=NULL){
		# defend for the presence of sf
if(!requireNamespace("sf", quietly=TRUE)){
stop("This function requires the sf package.")
}
# execute everything from the XArray constructor
ga <- XArray(stack=stack, index=index, dim=dim)
# get the crs
firstCRS <- sf::st_crs(ga@stack[[1]])
# and then test the entities for the class
for(i in 1:nlayers(ga@stack)){
x <- ga@stack[[i]]
# check for class
if(!inherits(x, "sf")) stop("At least one element is not an sf object")
if(sf::st_crs(x)!=firstCRS) stop("Mismatching CRS.")
}
# if everything goes well, all is fine!
.Object@stack <- ga@stack
.Object@index <- ga@index
return(.Object)
}
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/SfArray-base.R
|
#' @rdname replacementSingle
#' @exportMethod "[<-"
setReplaceMethod(
"[",
signature(x="SfArray", value="sf"),
definition=function(x,i,j,..., value){
x<- VirtualArrayReplaceLayer(x=x, i=i, j=j, value=value, ...)
return(x)
}
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/SfArray-replace.R
|
# build from existing stack with existing index, or dimensions
#' @export SfcArray
setMethod("initialize",signature="SfcArray",
definition=function(.Object,stack, index=NULL, dim=NULL){
		# defend for the presence of sf
if(!requireNamespace("sf", quietly=TRUE)){
stop("This function requires the sf package.")
}
# execute everything from the XArray constructor
ga <- XArray(stack=stack, index=index, dim=dim)
# get the crs
firstCRS <- sf::st_crs(ga@stack[[1]])
# and then test the entities for the class
for(i in 1:nlayers(ga@stack)){
x <- ga@stack[[i]]
# check for class
if(!inherits(x, "sfc")) stop("At least one element is not an sfc object")
if(inherits(x, "sf")) stop("At least one element is an sf-class object. Use SfArray instead.")
if(sf::st_crs(x)!=firstCRS) stop("Mismatching CRS.")
}
# if everything goes well, all is fine!
.Object@stack <- ga@stack
.Object@index <- ga@index
return(.Object)
}
)
setMethod(
"show",
signature="SfcArray",
function (object)
{
cat("class :", class(object), "\n")
## if (rotated(object)) {
## cat("rotated : TRUE\n")
## }
mnr <- 15
# if (filename(object) != "") {
# cat("filename :", filename(object), "\n")
# }
nl <- nlayers(object)
if (nl > 0) {
cat("Element properties: \n")
cat("- class : ", class(object@stack[[1]]), "\n")
cat("- geodetic CRS: ", format(sf::st_crs(object@stack[[1]])), "\n")
cat("Array properties: \n")
adim <- dim(object)
allName <- names(object)
if(length(adim)==1){
cat("- dimensions : ", paste(adim, collapse=", "),
" (vector)\n",
sep = "")
}else{
allName<- dimnames(object)
if(length(allName)==2){
cat("- dimensions : ", paste(adim, collapse=", "),
" (nrow, ncol)\n",
sep = "")
}else{
cat("- dimensions : ", paste(adim, collapse=", "),
" (nrow, ncol, ...)\n",
sep = "")
}
# for(i in 1:length(allName)){
# if(i==1) cat("- rownames : ", paste(allName[[i]], collapse=", "), "\n", sep = "")
# if(i==2) cat("- colnames : ", paste(allName[[i]], collapse=", "), "\n", sep = "")
# if(i>2) cat(paste("- Dim", i, " names", sep=""), " : ", paste(allName[[i]], collapse=", "), "\n", sep = "")
# }
}
cat("- num. layers : ", nlayers(object), "\n",
sep = "")
cat("- missing : ", sum(is.na(object@index)), "\n",
sep = "")
cat("- proxy:\n ")
print(proxy(object))
} else {
cat("nlayers :", nl, "\n")
if(sum(is.na(object@index))>0){
cat("- missing : ", sum(is.na(object@index)), "\n",
sep = "")
cat("- proxy:\n ")
print(proxy(object))
}
}
cat("\n")
}
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/SfcArray-base.R
|
#' @rdname replacementSingle
#' @exportMethod "[<-"
setReplaceMethod(
"[",
signature(x="SfcArray", value="sfc"),
definition=function(x,i,j,..., value){
x<- VirtualArrayReplaceLayer(x=x, i=i, j=j, value=value, ...)
return(x)
}
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/SfcArray-replace.R
|
#' @rdname st_crs
#' @method st_crs SfcArray
#' @export
st_crs.SfcArray <- function(x,...){
# CRS should be the same in the entire stack
if(!requireNamespace("sf", quietly=TRUE)) stop("This function requires the 'sf' package to run.")
crs <- sf::st_crs(x@stack[[1]],...)
return(crs)
}
#' @rdname st_transform
#' @method st_transform SfcArray
#' @export
st_transform.SfcArray <- function(x,...){
# CRS should be the same in the entire stack
if(!requireNamespace("sf", quietly=TRUE)) stop("This function requires the 'sf' package to run.")
for(i in 1:length(x@stack)){
x@stack[[i]]<- sf::st_transform(x@stack[[i]],...)
}
return(x)
}
#' @rdname st_bbox
#' @method st_bbox SfcArray
#' @export
st_bbox.SfcArray <- function(obj,...){
# CRS should be the same in the entire stack
if(!requireNamespace("sf", quietly=TRUE)) stop("This function requires the 'sf' package to run.")
m <- NULL
for(i in 1:length(obj@stack)){
m<- rbind(m, sf::st_bbox(obj@stack[[i]]))
}
final <- sf::st_bbox(obj@stack[[1]])
final["xmin"] <- min(m[,"xmin"])
final["ymin"] <- min(m[,"ymin"])
final["xmax"] <- max(m[,"xmax"])
final["ymax"] <- max(m[,"ymax"])
return(final)
}
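# Usage sketches for the sf methods above (using the bundled example data):
# data(paleocoastlines)
# st_crs(paleocoastlines)                      # the CRS shared by all layers
# st_bbox(paleocoastlines)                     # bounding box enclosing every layer
# st_transform(paleocoastlines, "ESRI:54009")  # reproject all layers (Mollweide)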
|
/scratch/gouwar.j/cran-all/cranData/via/R/SfcArray-sf.R
|
#' The proxy of an object from a class derived from '\code{\link[via:XArray-class]{VirtualArray}}'
#'
#' This function returns an object that symbolizes the structure of layers in the '\code{\link[via:XArray-class]{XArray}}', '\code{\link[via:RasterArray-class]{RasterArray}}' or '\code{\link[via:SfArray-class]{SfArray}}'.
#'
#' The \code{proxy} method wraps the names of layers in the \code{@stack} using the \code{@index} slot of the '\code{\link[via:XArray-class]{VirtualArray}}'.
#'
#' @param x \code{\link[via:XArray-class]{XArray}}, \code{\link[via:RasterArray-class]{RasterArray}} or \code{\link[via:SfArray-class]{SfArray}} object.
#' @return A \code{vector}, \code{matrix} or \code{array} of characters representing the \code{\link[via:XArray-class]{VirtualArray}} structure.
#' @param ... additional arguments passed to class-specific methods.
#' @examples
#' data(exemplar)
#' proxy(exemplar)
#'
#' data(paleocoastlines)
#' proxy(paleocoastlines)
#' @exportMethod proxy
#' @rdname proxy
setGeneric("proxy", function(x,...) standardGeneric("proxy"))
#' @rdname proxy
setMethod(
"proxy",
signature="VirtualArray",
function(x){
ind <- x@index
# only NAs are present
if(any(!is.na(ind))){
if(!is.null(names(x@stack))) ind[]<- names(x@stack)[ind]
}
return(ind)
}
)
#' Transpose a '\code{\link[via:XArray-class]{VirtualArray}}'-class object
#'
#' @examples
#' data(exemplar)
#' t(exemplar)
#' data(paleocoastlines)
#' t(paleocoastlines)
#' @param x A \code{\link[via:XArray-class]{VirtualArray}}-class object.
#' @return A \code{\link[via:XArray-class]{VirtualArray}}-class object.
#' @exportMethod t
#' @rdname t-methods
setMethod(
"t",
"VirtualArray",
function(x){
if(length(dim(x))>2) stop("The array has too many dimensions. ")
# transpose index
tIndex<- t(x@index)
vIndex <- as.numeric(tIndex)
# ordering
vIndna <- vIndex[!is.na(vIndex)]
# reorder the stack
if(inherits(x@stack, "SpatRaster")){
x@stack <- x@stack[[vIndna]]
}else{
x@stack <- x@stack[vIndna]
}
# refill the index
tIndex[!is.na(tIndex)] <- 1:nlayers(x)
# copy names
if(!is.null(colnames(x@index))) rownames(tIndex) <- colnames(x@index)
if(!is.null(rownames(x@index))) colnames(tIndex) <- rownames(x@index)
if(!is.null(names(x@index))) colnames(tIndex) <- names(x@index)
# replace the index
x@index <- tIndex
return(x)
}
)
# function to defragment the matrix
defragment <- function(x){
b <- is.na(x)
x[!b] <- 1:sum(!b)
return(x)
}
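# e.g. defragment(c(4, NA, 9, NA, 2)) returns c(1, NA, 2, NA, 3):
# non-NA positions are renumbered into a gapless 1:n sequence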
# this utility function will combine the layer specific information
# 2d matrix (vals2d) layer names are represented as colnames
extendDim <- function(proxy, vals2d, newdim=1){
# the original dimensions of the proxy
origDim <- dim(proxy)
if(is.null(origDim)) origDim <- length(proxy)
# the names of the proxy
origNames <- dimnames(proxy)
if(is.null(origNames)) origNames <- list(names(proxy))
# number extended
nVals <-dim(vals2d)[newdim]
# copy the names properly
addNames <- dimnames(vals2d)[[newdim]]
# where are the non-na values
naMap <- !is.na(proxy)
# vector shape of the data
endObj <- rep(NA, prod(c(origDim,nVals)))
# loop through the new dimension
for(i in 1:nVals){
# what contains the new dimension?
if(newdim==1){
theseVals<- vals2d[i,]
}else{
theseVals<- vals2d[,i]
}
# the final object.
endObj[(1:length(proxy))+(i-1)*length(proxy)] <- theseVals[proxy]
}
# dimensions and names set right
dim(endObj) <- c(origDim,nVals)
dimnames(endObj) <- c(origNames, list(addNames))
return(endObj)
}
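# A minimal sketch (all names made up): values of 'proxy' pick columns of
# 'vals2d', and the rows of 'vals2d' become a new trailing dimension.
# prox <- c(first=1, second=NA, third=2)
# vals <- matrix(1:4, nrow=2, dimnames=list(c("min","max"), c("lay1","lay2")))
# extendDim(prox, vals, newdim=1)  # 3x2 matrix; NA proxy entries stay NA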
|
/scratch/gouwar.j/cran-all/cranData/via/R/VirtualArray-base.R
|
#' Combine a one-dimensional '\code{\link[via:XArray-class]{VirtualArray}}'-class object with other objects
#'
#' NOTE: Sequences that start with an \code{NA} do not yet work.
#'
#' @rdname combine
#' @param x \code{\link[via:XArray-class]{VirtualArray}} object to combine with other objects.
#' @return A \code{\link[via:XArray-class]{VirtualArray}}-class object.
#' @param ... additional objects to combine.
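#' @examples
#' # a minimal sketch: combining two one-dimensional arrays and a missing element
#' ex <- rastex()
#' longer <- c(ex, ex, NA)
#' length(longer)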
#' @export
setMethod(
"c",
"VirtualArray",
#c.RasterArray<-
function(x, ...){
listArg <- list(...)
finXA <- x
# store the system call
callSymb <- sys.call(which=0)
symbolNames <- names(callSymb)
# run loop only if it is more than 1
if(length(listArg)!=0){
for(i in 1:length(listArg)){
elem <- listArg[[i]]
# name of the first will be taken care of by c2
finXA<-c2(finXA, elem)
# try to overwrite the name - necessary for multiple combinations
if(!is.null(symbolNames)){
if(symbolNames[i+2]!=""){
names(finXA@index)[length(finXA)] <- symbolNames[i+2]
}
}
}
}
return(finXA)
}
)
# pairwise generic
setGeneric("c2", function(x,y,...) standardGeneric("c2"))
|
/scratch/gouwar.j/cran-all/cranData/via/R/VirtualArray-combine.R
|
#' Replace layers in an object that is of a class derived from '\code{\link[via:XArray-class]{VirtualArray}}'.
#'
#'
#' Single bracket \code{'['} refers to indices and names within the '\code{\link[via:XArray-class]{VirtualArray}}'-class object. Use double brackets to replace layers based on their names (in the \code{@stack}).
#' Objects of the same class can be used to replace values in '\code{\link[via:XArray-class]{XArray}}'-class objects. '\code{\link[terra:rast]{SpatRaster}}'-class objects can be used to replace values in '\code{\link[via:RasterArray-class]{RasterArray}}'-class objects. Classes inheriting from '\code{\link[sf:sf]{sf}}' can be used with '\code{\link{SfArray}}'-class objects.
#'
#' @param x \code{\link[via:XArray-class]{VirtualArray}}-class object.
#' @param i subscript of the first dimension (rows) or vector-like subsetting.
#' @param j subscript of the second dimension (columns).
#' @param ... subscript of additional dimensions.
#' @param value A same class object as \code{x}.
#' @aliases [<-,VirtualArray-method
#' @return The function has no return value.
#' @examples
#' ex <- rastex()
#' # replace third element with missing value
#' ex[3] <- NA
#' # duplicate first element and make it the second too
#' ex[2] <- ex[1]
#' ex
#'
#' @rdname replacementSingle
#' @exportMethod "[<-"
setReplaceMethod(
"[",
signature(x="VirtualArray", value="logical"),
definition=function(x,i,j,..., value){
# fetch the index
indDim <- dim(x@index)
# this should only apply to NA
if(any(!is.na(value))) stop("Replacement with TRUE/FALSE is not valid.")
sysCall <- sys.call(which=-1)
if(sum(is.na(value))!=length(value)) stop("Invalid replacement type.")
if(is.null(indDim) | length(indDim)==1){
if(length(i)!=length(value) & length(value)!=1) stop("Invalid replacement length.")
theIndex <- x@index[i]
x@index[i] <- NA
}
# multi- dim case
if(length(indDim)>=2){
# one dimensinoal
if(length(sysCall)==4){
theIndex <- x@index[i]
x@index[i] <- NA
}else{
theIndex <- x@index[i,j,...]
x@index[i,j,...] <- NA
}
}
# ensure flat index
# rebuild the stack
origInd<- 1:nlayers(x@stack)
keepOrig <- origInd[!origInd%in%theIndex]
# omit unwanted layers
if(inherits(x, "XArray")){
x@stack <- x@stack[keepOrig]
}
if(inherits(x, "RasterArray")){
x@stack <- x@stack[[keepOrig]]
}
# constrain order again
x@index<- defragment(x@index)
return(x)
}
)
# Generalized layer replacement function for VirtualArray. Method dispatch written explicitly as RasterArray[ <- RasterLayer and SpatialArray [<- Spatial*
VirtualArrayReplaceLayer <- function(x,i,j,value,...){
# fetch the index
indDim <- dim(x@index)
# one dim case
if(is.null(indDim) | length(indDim)==1){
# pointer to the stack to be replaced
theIndex <- x@index[i]
		# separate the index based on NA
bIndNA <- is.na(theIndex)
# if at least one layer stack is not there
if(any(bIndNA)){
			# used for the addition
newI <- i[bIndNA]
# prepare the new index vector
tempIndex <- x@index
tempIndex[newI]<- -1
tempIndex <- defragment(tempIndex)
# where are the old layers in the new stack
oldInNew <- tempIndex[!is.na(x@index)]
# the index of the new layers
totallyNew <- tempIndex[newI]
# add to the rest
newInd <- c(oldInNew, totallyNew)
# use this to reorder
tempInd2 <- rep(NA, length(newInd))
tempInd2[newInd]<- 1:length(newInd)
# add the new layer to the stack
if(inherits(value, "SpatRaster")){
newStack <- c(x@stack, value[[rep(1, length(totallyNew))]])
# the reorderd stack
x@stack <- newStack[[tempInd2]]
# XArray + derived
}else{
newStack <- c(x@stack, list(value)[rep(1, length(totallyNew))])
x@stack <- newStack[tempInd2]
}
x@index <- tempIndex
}
if(any(!bIndNA)){
# restart the process, now with the new index vector
theIndex <- x@index[i]
replaceIndex <- theIndex[!bIndNA]
# simply replace the layers in the stack...
allVals <- 1:nlayers(x@stack)
origVals <- allVals[!allVals%in%replaceIndex]
# create a reorderer vector
newInd <-c(origVals, replaceIndex)
tempInd2 <- rep(NA, length(newInd))
tempInd2[newInd] <- 1:length(tempInd2)
if(inherits(value, "SpatRaster")){
# put the additional elements to the stack
newStack <- c(x@stack[[origVals]], value[[rep(1, length(replaceIndex))]])
# reorder to correct
x@stack <- newStack[[tempInd2]]
# XArray + derived
}else{
# put the additional elements to the stack
newStack <- c(x@stack[origVals], list(value)[rep(1, length(replaceIndex))])
# reorder to correct
x@stack <- newStack[tempInd2]
}
}
}
# multi- dim case
if(length(indDim)>=2){
theIndex <- x@index[i,j,...]
		# separate the index based on NA
bIndNA <- is.na(theIndex)
if(any(bIndNA)){
fullInd <- 1:length(x@index)
dim(fullInd) <- dim(x@index)
newIJ <- fullInd[i,j,...]
newIJ<- newIJ[bIndNA]
# prepare the index vector
tempIndex <- x@index
tempIndex[newIJ]<- -1
tempIndex <- defragment(tempIndex)
# where are the old layers in the new stack
oldInNew <- tempIndex[!is.na(x@index)]
# add the index at the end
totallyNew <- tempIndex[newIJ]
newInd <- c(oldInNew, totallyNew)
# use this to reorder
tempInd2 <- rep(NA, length(newInd))
tempInd2[newInd]<- 1:length(newInd)
if(inherits(value, "SpatRaster")){
# add the new layer to the stack
newStack <- c(x@stack, value[[rep(1, length(totallyNew))]])
x@stack <- newStack[[tempInd2]]
# XArray + derived
}else{
newStack <- c(x@stack, list(value)[rep(1, length(totallyNew))])
x@stack <- newStack[tempInd2]
}
# the reorderd stack
x@index <- tempIndex
}
if(any(!bIndNA)){
# restart the process, now with the new index vector
theIndex <- x@index[i,j,...]
replaceIndex <- theIndex[!bIndNA]
# simply replace the layers in the stack...
allVals <- 1:nlayers(x@stack)
origVals <- allVals[!allVals%in%replaceIndex]
# create a reorderer vector
newInd <-c(origVals, replaceIndex)
tempInd2 <- rep(NA, length(newInd))
tempInd2[newInd] <- 1:length(tempInd2)
# stacking is dependent on the kind of Array
if(inherits(value, "SpatRaster")){
# put the additional elements to the stack
newStack <- c(x@stack[[origVals]], value[[rep(1, length(replaceIndex))]])
x@stack <- newStack[[tempInd2]]
# XArray + derived
}else{
newStack <- c(x@stack[origVals], list(value)[rep(1, length(replaceIndex))])
x@stack <- newStack[tempInd2]
}
# reorder to correct
}
}
return(x)
}
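# Dispatch sketch: the class-specific "[<-" methods (RasterArray, SfcArray,
# SfArray) all funnel into VirtualArrayReplaceLayer(), e.g.
# ex <- rastex(); ex[2] <- ex[1]  # 'value' arrives here as a SpatRaster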
#' Replace elements of '\code{\link[via:XArray-class]{VirtualArray}}'-class objects.
#'
#' Double bracket \code{'[['} refers to layers' name in the names of the \code{@stack} member of the '\code{\link[via:XArray-class]{VirtualArray}}'. Use single brackets to replace elements based on their position in the '\code{\link[via:XArray-class]{VirtualArray}}'-class object.
#'
#' @param x Object from a class derived from \code{\link[via:XArray-class]{VirtualArray}}.
#' @param i subscript of layers to replace.
#' @param value The replacement layer: an object of the class that is stored in the \code{@stack}.
#' @return The function has no return value.
#'
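#' @examples
#' # a minimal sketch: overwrite one layer of the stack with another
#' ex <- rastex()
#' ex[["layer_1"]] <- ex[["layer_2"]]
#'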
#' @aliases [[<-,VirtualArray-method
#' @aliases [[<-,VirtualArray,ANY,ANY-method
#' @rdname doubleBracketReplace
#' @exportMethod "[[<-"
setReplaceMethod(
"[[",
signature(x="VirtualArray"),
function(x,i, value){
x@stack[[i]] <- value
return(x)
}
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/VirtualArray-replace.R
|
#' Subset a '\code{\link[via:XArray-class]{VirtualArray}}'-class object
#'
#' Extract subsets of an object from a class derived from '\code{\link[via:XArray-class]{VirtualArray}}' similarly to a regular array.
#'
#' @param x \code{\link[via:XArray-class]{VirtualArray}}-class object.
#' @param i subscript of the first dimension (rows) or vector-like subsetting.
#' @param j subscript of the second dimension (columns).
#' @param ... subscript of additional dimensions.
#' @param drop \code{logical} in case the result of subsetting is a single element, should the \code{\link[via:XArray-class]{VirtualArray}} wrapper be dropped?
#' @param oneDim \code{logical} In case of multidimensional \code{\link[via:XArray-class]{VirtualArray}}s, setting \code{oneDim} to \code{TRUE} allows the application of one dimensional subscripts.
#' @return Either the same class as \code{x}, or the class that forms the element of the \code{\link[via:XArray-class]{VirtualArray}}.
# combined
#' @rdname subset
#' @exportMethod subset
#' @examples
#' ex <- rastex()
#' # first 4
#' subset(ex, i=1:4)
#' # missing at the end
#' subset(ex, i=1:12)
#' # character subscript
#' subset(ex, i=c("a", "b"))
#' # logical subscript
#' subs <- rep(TRUE, length(ex))
#' subs[1] <- FALSE # remove first
#' subset(ex, i= subs)
#' # no drop
#' subset(ex, i=1, drop=FALSE)
setMethod(
"subset",
signature(x="VirtualArray"),
function(x, i,j, ...,oneDim=FALSE, drop=TRUE){
# fetch the index
indDim <- dim(x@index)
# one dim case
if(is.null(indDim) | length(indDim)==1){
originIndex <-x@index[i]
}
# two dim case
if(length(indDim)>=2){
# multidimensional subscript
if(!oneDim){
originIndex <-x@index[i,j,...]
# one dimensional subscript
}else{
originIndex<-x@index[i, drop=TRUE]
}
}
# constrain one dimension
fetchIndex <- originIndex
dim(fetchIndex) <- NULL
# drop to a single entity
if(length(fetchIndex)==1 & drop==TRUE){
# if it is NA
if(is.na(fetchIndex)){
x<-NA
}else{
x<- x@stack[[fetchIndex]]
}
# keep using RasterArray
}else{
# separate the NAs
bNA <- is.na(fetchIndex)
if(any(bNA)){
validFetch <- fetchIndex[!bNA]
}else{
validFetch <- fetchIndex
}
# wrappers will not be dropped
if(inherits(x@stack, "list")){
x@stack <- x@stack[validFetch]
}else{
# get the relevant layers
x@stack <- x@stack[[validFetch]]
}
# rewrite the index
x@index<- originIndex
# if the proxy was numeric, it should be reset
x@index[!bNA] <- 1:nlayers(x@stack)
return(x)
}
}
)
#' Indexing to extract subsets of a '\code{\link[via:XArray-class]{VirtualArray}}'-class object
#'
#' Single bracket \code{'['} refers to indices and names within the '\code{\link[via:XArray-class]{VirtualArray}}'-class object. Use double brackets to extract layers based on their names (in the \code{@stack}).
#'
#' @param x An object from a \code{\link[via:XArray-class]{VirtualArray}}-derived class.
#' @param i subscript of the first dimension (rows) or vector-like subsetting.
#' @param j subscript of the second dimension (columns).
#' @param ... subscript of additional dimensions.
#' @param drop \code{logical} in case the result of subsetting is a single element, should the \code{\link[via:XArray-class]{VirtualArray}}-derived wrapper be dropped?
#' @return An object from either the same class as \code{x} or the class of its elements.
#' @rdname VirtualArray-single-bracket-method
#' @aliases [,VirtualArray-method
#' @examples
#' ex <- rastex()
#' # numeric subsetting
#' firstThree <- ex[1:3]
#' # character subsetting
#' second <- ex["d"]
#' # logical subsetting
#' subscript <- rep(FALSE, length(ex))
#' subscript[2] <- TRUE
#' second2 <- ex[subscript]
#' data(paleocoastlines)
#' present<- paleocoastlines["0", ]
#' allMargin <- paleocoastlines[, "margin"]
#'
#' @exportMethod [
setMethod(
"[",
signature(x="VirtualArray", i="ANY", j="ANY"),
definition=function(x,i,j,..., drop=TRUE){
# save system call
sysCall <- sys.call(which=-1)
# look for drop and omit from the call
call <- as.character(sysCall)
args <- names(sysCall)
dropNum <- which(args=="drop")
if(length(dropNum)>0){
call <- call[-dropNum]
}
# check whether one or multidimensional subscripts are necessary
oneDim<-FALSE
if(length(call)==3){
oneDim <- TRUE
}
subset(x,i,j,..., oneDim=oneDim, drop=drop)
}
)
#' Indexing to extract the elements of a '\code{\link[via:XArray-class]{VirtualArray}}'-derived class object.
#'
#' Double bracket \code{'[['} refers to elements'/layers' name in the \code{@stack} of the '\code{\link[via:XArray-class]{VirtualArray}}'-derived object. Use single brackets to extract elements based on their position in the '\code{\link[via:XArray-class]{VirtualArray}}'.
#'
#' @param x \code{\link[via:XArray-class]{VirtualArray}}
#' @param i subscript of the first dimension (rows) or vector-like subsetting.
#' @param drop \code{logical} should the \code{\link[via:XArray-class]{VirtualArray}} be dropped and the element be reduced to the element class?
#' @return A \code{\link[via:XArray-class]{VirtualArray}}-derived class object, or an object of the class that makes up the \code{VirtualArray}.
#' @rdname VirtualArray-double-bracket-method
#' @aliases [[,VirtualArray-method
#' @exportMethod "[["
#' @examples
#' data(exemplar)
#' # finds a layer
#' exemplar[["sample1"]]
#' # returns a stack
#' exemplar[[c("sample1", "sample2")]]
#' # replaces a layer's values, but not the attributes of the layer
#' exemplar2 <- exemplar
#' exemplar2[["sample1"]] <- exemplar2[["sample2"]]
#' # compare every value to check that they are all the same
#' exemplar2[["sample1"]]$x == exemplar2[["sample2"]]$x
setMethod(
"[[",
signature(x="VirtualArray"),
function(x,i,drop=TRUE){
		# where are the NAs in the subscript
bNA <- is.na(i)
if(sum(bNA)==length(i)) return(i)
# logical method
if(is.logical(i)){
if(length(i)!=length(x)) stop("Invalid subscript length.")
# stack subscript
usedInd <- i
usedInd[bNA] <- FALSE
# drop not understood for SpatRaster
if(!inherits(x, "RasterArray")){
#select appropriate layers
newStack<- x@stack[which(usedInd)]
}else{
newStack<- x@stack[[which(usedInd)]]
}
# index subscript
newIndex <- x@index[i]
newIndex[!is.na(newIndex)] <- 1:sum(!is.na(newIndex))
}
# either character or numeric
if(is.character(i) | is.numeric(i)){
# drop not understood for SpatRaster
if(!inherits(x, "RasterArray")){
# XArray - list subsetting
newStack<- x@stack[i[!bNA]]
}else{
newStack<- x@stack[[i[!bNA]]]
}
# reindex
newIndex <- rep(NA, length(i))
newIndex[!bNA] <- 1:nlayers(newStack)
}
# depending on type of object
if(inherits(newStack, "list")){
final <- XArray(index=newIndex, stack=newStack)
}
if(inherits(newStack, "SpatRaster")){
final <- RasterArray(index=newIndex, stack=newStack)
}
if(drop){
if(length(final)==1){
final <- final@stack[[1]]
}
}
return(final)
}
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/VirtualArray-subset.R
|
#' Dimensions of '\code{\link[via:XArray-class]{VirtualArray}}'-derived class objects
#'
#' The function returns the dimensions of the array in which elements are organized.
#' @param x A \code{\link[via:XArray-class]{VirtualArray}}-derived class object.
#' @return A \code{numeric} vector.
#'
#' @examples
#' data(exemplar)
#' dim(exemplar)
#' @exportMethod dim
setMethod(
"dim",
signature="VirtualArray",
function(x){
proxyDim <- dim(x@index)
if(is.null(proxyDim)) proxyDim <- length(x@index)
proxyDim
}
)
#' Names of one-dimensional '\code{\link[via:XArray-class]{VirtualArray}}'-derived class objects.
#'
#' Get or set the names of one-dimensional '\code{\link[via:XArray-class]{VirtualArray}}'-derived class objects
#' @param x \code{\link[via:XArray-class]{VirtualArray}}-derived class object.
#' @param value \code{character} vector.
#' @return A \code{character} vector of names or \code{NULL}.
#'
#' @examples
#' ex <- rastex()
#' names(ex)
#' names(ex)[4] <- "weirdo"
#' # NULL
#' @rdname names
#' @exportMethod names
setMethod(
"names",
signature="VirtualArray",
function(x){
names(x@index)
}
)
#' @rdname names
#' @exportMethod "names<-"
setReplaceMethod(
"names",
signature="VirtualArray",
definition=function(x, value){
# not defined for matrices or higher
if(is.null(names(x))) names(x@index) <- rep(NA, length(x@index))
names(x@index) <- value
return(x)
})
#' Number of elements or layers in a '\code{\link[via:XArray-class]{VirtualArray}}'-derived class object
#'
#' Function to return the length of the array in which elements are organized.
#'
#' The \code{length()} function returns the number of elements that should be present based on the array structure itself, and not the total number of values stored in the object. As the object can contain missing values, the number of actual layers can be queried with \code{\link{nlayers}}.
#'
#' @param x a \code{\link[via:XArray-class]{VirtualArray}}-derived class object.
#' @return A \code{numeric} value.
#' @examples
#' ex <- rastex()
#' # omit third element
#' ex[3] <- NA
#' # number of elements in the RasterArray
#' length(ex)
#' # remaining number of values in the stack
#' length(ex@stack)
#' # the number of remaining layers in the RasterArray
#' nlayers(ex)
#'
#' @rdname arraylength
#' @exportMethod length
setMethod(
"length",
signature="VirtualArray",
function(x) length(x@index)
)
#' @rdname layers
setMethod(
"layers",
signature="VirtualArray",
function(x){
# returns the layer names
names(x@stack)
}
)
#####################
#' Column names of two-dimensional '\code{\link[via:XArray-class]{VirtualArray}}'-derived class object.
#'
#' Get or set the column names of two-dimensional '\code{\link[via:XArray-class]{VirtualArray}}'-derived class objects
#' @param x \code{\link[via:XArray-class]{VirtualArray}}-derived class object.
#' @param value \code{character} vector.
#' @return A \code{character} vector of column names or \code{NULL}.
#'
#' @examples
#' data(paleocoastlines)
#' colnames(paleocoastlines)
#' colnames(paleocoastlines) <- c("a", "b")
#' @rdname colnames
#' @exportMethod colnames
setMethod(
"colnames",
signature="VirtualArray",
function(x) colnames(x@index)
)
#' @rdname colnames
#' @exportMethod "colnames<-"
setReplaceMethod(
"colnames",
signature="VirtualArray",
definition=function(x, value){
# not defined for matrices or higher
if(length(dim(x))!=2) stop("The proxy is not a 2D matrix.")
colnames(x@index) <- value
return(x)
})
#' Row names of two-dimensional '\code{\link[via:XArray-class]{VirtualArray}}'-derived class objects.
#'
#' Get or set the row names of two-dimensional '\code{\link[via:XArray-class]{VirtualArray}}'-derived class object
#' @param x \code{\link[via:XArray-class]{VirtualArray}}-class object.
#' @param value \code{character} vector.
#' @return A \code{character} vector of row names or \code{NULL}.
#'
#' @examples
#' data(paleocoastlines)
#' rownames(paleocoastlines)
#' rownames(paleocoastlines) <- paste(rownames(paleocoastlines), "Ma")
#' @rdname rownames
#' @exportMethod rownames
setMethod(
"rownames",
signature="VirtualArray",
function(x) rownames(x@index)
)
#' @rdname rownames
#' @exportMethod "rownames<-"
setReplaceMethod(
"rownames",
signature="VirtualArray",
definition=function(x, value){
# not defined for matrices or higher
if(length(dim(x))!=2) stop("The proxy is not a 2D matrix.")
rownames(x@index) <- value
return(x)
})
#' Names of a multidimensional '\code{\link[via:XArray-class]{VirtualArray}}'-derived class object.
#'
#' Get or set the dimnames of multidimensional \code{\link[via:XArray-class]{VirtualArray}}-derived class object.
#' @param x \code{\link{RasterArray}} or \code{\link[via:SfArray-class]{SfArray}} object.
#' @param value \code{character} vector.
#' @return A \code{list} of \code{character} vectors or \code{NULL}.
#'
#' @examples
#' ex <- rastex()
#' dimnames(ex)
#' data(paleocoastlines)
#' dimnames(paleocoastlines)
#' dimnames(paleocoastlines)[[2]] <- c("first", "second")
#' names(dimnames(paleocoastlines)) <- c("age", "type")
#' @rdname dimnames
#' @exportMethod dimnames
setMethod(
"dimnames",
signature="VirtualArray",
function(x) dimnames(x@index)
)
#' @rdname dimnames
#' @exportMethod "dimnames<-"
setReplaceMethod(
"dimnames",
signature="VirtualArray",
definition=function(x, value){
# not defined for matrices or higher
if(is.null(dim(x))) stop("One-dimensional VirtualArrays have no dimnames.")
dimnames(x@index) <- value
return(x)
})
#' Number of columns and rows of a '\code{\link[via:XArray-class]{VirtualArray}}'-derived class object.
#'
#' Unlike the \code{ncol} and \code{nrow} functions of the '\code{\link[terra:terra-package]{terra}}' package, this function returns the number of columns and rows of the '\code{\link[via:XArray-class]{VirtualArray}}'-derived container, rather than the dimensions of the contained '\code{\link[terra:rast]{SpatRaster}}'-class object.
#'
#' @param x A \code{\link[via:XArray-class]{VirtualArray}}-derived class object.
#' @rdname adimatt
#' @return A \code{numeric} value of the number of columns and rows.
#' @exportMethod ncol
#' @examples
#' data(paleocoastlines)
#' ncol(paleocoastlines)
#' nrow(paleocoastlines)
setMethod(
"ncol",
signature="VirtualArray",
function(x){
ncol(x@index)
}
)
#' @rdname adimatt
#' @exportMethod nrow
setMethod(
"nrow",
signature="VirtualArray",
function(x){
nrow(x@index)
}
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/VirtualArray-xattrib.R
|
# build from existing stack with existing index, or dimensions
#' @export XArray
setMethod("initialize",signature="XArray",
definition=function(.Object,stack, index=NULL, dim=NULL){
# some defense for index
if(is.null(index)){
index <- 1:length(stack)
}
# check whether the stack has the same types
if(!inherits(stack,"list")) stop("The 'stack' has to be a 'list' - class object.")
if(!classcheck(stack)) stop("The 'stack' can only contain a single class of items.")
if(is.null(dim)){
if(!is.numeric(index)) stop("The 'index' has to be a 'numeric' object.")
			# positions where NAs are supposed to be
bNA <- is.na(index)
if(any(index[!bNA]%%1!=0) | any(index[!bNA]<1)) stop("The 'index' has to contain positive integer values.")
			# the number of valid entries must match the number of layers
			if(sum(!bNA)!=length(stack)) stop("You have to provide as many layers as there are valid entries in the index.")
# reorder the stack
noNAInd <- index[!bNA]
newStack <- stack[noNAInd]
			# force index to be a monotonic integer sequence
newIndex <- index
newIndex[] <- NA
newIndex[!bNA] <- 1:length(stack)
# store final object
.Object@index <- newIndex
.Object@stack <- newStack
}else{
if(!is.numeric(dim)) stop("The 'dim' argument has to be a 'numeric' vector.")
			if(length(stack)!=prod(dim, na.rm=TRUE)) warning("The number of layers in the stack does not equal the product of the 'dim' vector.")
.Object@stack<- stack
index <- array(1:length(stack), dim=dim)
# in case of reuse
index[duplicated(as.numeric(index))] <- NA
.Object@index<- index
}
return(.Object)
}
)
setMethod(
"show",
signature="XArray",
function (object)
{
cat("class :", class(object), "\n")
## if (rotated(object)) {
## cat("rotated : TRUE\n")
## }
mnr <- 15
# if (filename(object) != "") {
# cat("filename :", filename(object), "\n")
# }
nl <- nlayers(object)
if (nl > 0) {
cat("Element properties: \n")
cat("- class : ", class(object@stack[[1]]), "\n")
cat("Array properties: \n")
adim <- dim(object)
allName <- names(object)
if(length(adim)==1){
cat("- dimensions : ", paste(adim, collapse=", "),
" (vector)\n",
sep = "")
}else{
allName<- dimnames(object)
if(length(allName)==2){
cat("- dimensions : ", paste(adim, collapse=", "),
" (nrow, ncol)\n",
sep = "")
}else{
cat("- dimensions : ", paste(adim, collapse=", "),
" (nrow, ncol, ...)\n",
sep = "")
}
# for(i in 1:length(allName)){
# if(i==1) cat("- rownames : ", paste(allName[[i]], collapse=", "), "\n", sep = "")
# if(i==2) cat("- colnames : ", paste(allName[[i]], collapse=", "), "\n", sep = "")
# if(i>2) cat(paste("- Dim", i, " names", sep=""), " : ", paste(allName[[i]], collapse=", "), "\n", sep = "")
# }
}
cat("- num. layers : ", nlayers(object), "\n",
sep = "")
cat("- missing : ", sum(is.na(object@index)), "\n",
sep = "")
cat("- proxy:\n ")
theProx <- proxy(object)
theProx[] <- abbrev(theProx)
print(theProx)
} else {
cat("nlayers :", nl, "\n")
if(sum(is.na(object@index))>0){
cat("- missing : ", sum(is.na(object@index)), "\n",
sep = "")
cat("- proxy:\n ")
theProx <- proxy(object)
theProx[] <- abbrev(theProx)
print(theProx)
}
}
cat("\n")
}
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/XArray-base.R
|
################################################################
# Internals for c() methods
# Adding a single element
setMethod("c2", signature=c("XArray", "ANY"),
definition=function(x, y){
callStack <- sys.calls()
# check the type - should be the same as the rest of XArray
targetClass <- class(x@stack[[1]])[1]
naCase <- FALSE
# if the type is not the same as the rest
if(!inherits(y, targetClass)){
		# if it has more than one value -> halt
if(length(y)>1){
stop("Incompatible class.")
# if it is only one value it might still be good
}else{
			# if it is not missing -> halt
if(!is.na(y)){
stop("Incompatible class.")
}else{
naCase <- TRUE
}
}
}
# The new index
if(!naCase){
ind <- c(x@index, nlayers(x)+1)
callSymb <- sys.call(which=-3)
endObj <- XArray(c(x@stack, list(y)), index=ind)
if(is.symbol(callSymb[[3]])){
names(endObj@stack)[nlayers(endObj)] <- deparse(callSymb[[3]])
}
}else{
ind <- c(x@index, NA)
endObj <- XArray(x@stack, index=ind)
}
return(endObj)
}
)
# VirtualArray with itself
setMethod("c2", signature=c("XArray", "XArray"),
definition=function(x, y){
# shift indices of the second argument
indexPlus<- y@index+nlayers(x)
# combine the indices
ind <- c(x@index, indexPlus)
# the final object
endObj <- XArray(c(x@stack, y@stack), index=ind)
return(endObj)
}
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/XArray-combine.R
|
#' @rdname arraylength
#' @exportMethod nlayers
setMethod(
"nlayers",
signature="XArray",
function(x) length(x@stack)
)
|
/scratch/gouwar.j/cran-all/cranData/via/R/XArray-xattrib.R
|
# to be used as index in the the Arrays
setClassUnion("arrayORmatrixORvector", c("vector", "matrix", "array"))
VirtualArray <- setClass("VirtualArray", slots=list(index="arrayORmatrixORvector", stack="ANY"))
#' Virtual array of general R objects
#'
#' Template for construction of virtual arrays ('\code{VirtualArray}') and a derived class ('\code{XArray}') to instantiate it with general objects.
#'
#' The '\code{VirtualArray}' class implements structures to organize objects of the same class in multidimensional arrays. Subsetting rules were defined using the proxy object in the \code{index} slot. The '\code{VirtualArray}' is the base class for '\code{XArray}' and '\code{\link[via:RasterArray-class]{RasterArray}}' classes.
#' The '\code{XArray}' class derived from \code{VirtualArray} allows the instantiation of basic virtual arrays with general R objects, which form a single \code{list} in the \code{@stack} slot. The '\code{\link[via:SfArray-class]{SfArray}}' class is derived from the '\code{XArray}' class.
#'
#' The class has two slots:
#' \code{@stack}: A list containing objects of the same class (i.e. layers).
#' \code{@index}: A proxy object that represents the structure of the entities.
#'
#'
#' @param stack A \code{list}-class object.
#' @param index A \code{vector}, \code{matrix} or \code{array} type object. Includes the indices of layers in the stack.
#' @param dim A \code{numeric} vector. Same as for \code{array}, creates \code{proxy} procedurally.
#' @return An \code{XArray}-class object.
#' @examples
#' # 2d XArray of vectors
#' data(exemplar)
#' st <-exemplar@stack
#' ind <- 1:nlayers(st)
#' dim(ind) <- c(3,4)
#' dimnames(ind) <- list(n = c(10, 20, 30), seed = 1:4)
#' xa<- XArray(stack=st, index=ind)
#'
#' @exportClass XArray
XArray <- setClass("XArray", contains="VirtualArray")
#' Array of '\code{\link[terra:rast]{SpatRaster}}'-class objects
#'
#' Array class for easier navigation of multilayer rasters
#'
#' The class implements structures to organize single-layer '\code{\link[terra:rast]{SpatRaster}}'-class objects that have the same dimensions and coordinate reference system. Subsetting rules were defined using the proxy object in the \code{@index} slot. See examples for implementations.
#'
#' The class has two slots:
#' \code{@stack}: A '\code{\link[terra:rast]{SpatRaster}}'-class object with multiple layers, the actual data.
#' \code{@index}: A proxy object that represents the organization of the layers in the array.
#'
#' @param stack A \code{\link[terra:rast]{SpatRaster}} object.
#' @param index A \code{vector}, \code{matrix} or \code{array} type object. Includes either the indices of layers in the stack, or their names.
#' @param dim A \code{numeric} vector. Same as for \code{array}, creates \code{proxy} procedurally.
#' @return A '\code{\link[via:RasterArray-class]{RasterArray}}'-class object.
#' @examples
#' # example data
#' ex <- rastex()
#' st <-ex@stack
#' ind <- 1:6
#' names(ind) <- letters[1:length(ind)]
#' ra<- RasterArray(stack=st, index=ind)
#'
#' @exportClass RasterArray
RasterArray <- setClass("RasterArray", contains="VirtualArray")
#' Array of '\code{\link[sf:sf]{sf}}'-derived class data
#'
#' Array class for easier navigation of vector spatial datasets
#'
#' The class implements structures to organize entire '\code{\link[sf:sfc]{sfc}}' and '\code{\link[sf:sf]{sf}}' objects that share coordinate reference systems. The '\code{SfcArray}' class is derived from '\code{\link[via:XArray-class]{XArray}}' and represents arrays of geometry sets. The '\code{SfArray}' class is derived from '\code{SfcArray}' and allows the wrapping of '\code{\link[sf:sf]{sf}}' objects with attributes. Subsetting rules were defined using the proxy object in the \code{@index} slot. See examples for implementations.
#'
#' The classes have two slots:
#' \code{@stack}: A \code{list} object with multiple '\code{\link[sf:sf]{sf}}' class layers, the actual data.
#' \code{@index}: A proxy object that represents the organization of the layers.
#'
#' @param stack A \code{list} of \code{sf}-class objects or \code{sfc}-class objects.
#' @param index A \code{vector}, \code{matrix} or \code{array} type object. Includes either the indices of layers in the stack, or their names.
#' @param dim A \code{numeric} vector. Same as for \code{array}, creates \code{proxy} procedurally.
#' @return An '\code{\link[via:SfArray-class]{SfcArray}}' or '\code{\link[via:SfArray-class]{SfArray}}'-class object.
#' @rdname SfArray-class
#' @examples
#' # example data
#' library(sf)
#' data(paleocoastlines)
#' st <-paleocoastlines@stack
#' ind <- 1:nlayers(st)
#' dim(ind) <- c(3,2)
#' dimnames(ind) <- list(age=c(0, 10, 20), c("margin", "coastlines"))
#' sa<- SfcArray(stack=st, index=ind)
#'
#' @exportClass SfcArray
SfcArray <- setClass("SfcArray", contains="XArray")
#' @name SfArray
#' @aliases SfArray-class
#' @rdname SfArray-class
#' @exportClass SfArray
SfArray <- setClass("SfArray", contains="SfcArray")
|
/scratch/gouwar.j/cran-all/cranData/via/R/classes.R
|
################################################################################
# To XArray
#' Coerce into an \code{\link[via:SfcArray-class]{SfcArray}} or \code{\link[via:SfArray-class]{SfArray}} object
#'
#' @param from Either a \code{\link[via:SfcArray-class]{SfcArray}}, \code{\link[via:SfArray-class]{SfArray}} or \code{\link[via:XArray-class]{XArray}}-class object
#' @rdname coercion
#' @return Either a \code{\link[via:SfcArray-class]{SfcArray}}, \code{\link[via:SfcArray-class]{SfArray}} or \code{\link[via:XArray-class]{XArray}}-class object
#' @export
setGeneric("as.XArray", function(from) standardGeneric("as.XArray"))
# demotions
#' @rdname coercion
setMethod(as.XArray, signature=c("SfcArray"), definition=function(from){
# the processed version
XArray(index=from@index,stack=from@stack)
})
## #' @rdname coercion
## #' @name as
## setAs(from="SfcArray", to="XArray", function(from){
## as.XArray(from)
## })
#' @rdname coercion
setMethod(as.XArray, signature=c("SfArray"), definition=function(from){
# the processed version
XArray(index=from@index,stack=from@stack)
})
## #' @rdname coercion
## #' @name as
## setAs(from="SfArray", to="XArray", function(from){
## as.XArray(from)
## })
################################################################################
# To SfcArray
#' @rdname coercion
#' @export
setGeneric("as.SfcArray", function(from) standardGeneric("as.SfcArray"))
#' @rdname coercion
setMethod(as.SfcArray, signature=c("XArray"), definition=function(from){
# defend for the presence of sf
if(!requireNamespace("sf", quietly=TRUE)){
stop("This function requires the sf package.")
}
# try the promotion
SfcArray(index=from@index,stack=from@stack)
})
setAs(from="XArray", to="SfcArray", function(from){
as.SfcArray(from)
})
#' @rdname coercion
setMethod(as.SfcArray, signature=c("SfArray"), definition=function(from){
# defend for the presence of sf
if(!requireNamespace("sf", quietly=TRUE)){
stop("This function requires the sf package.")
}
# take the stack
theStack <- from@stack
# go through every element
for(i in 1:length(theStack)){
		# the current element
		x <- theStack[[i]]
		# extract the geometry column regardless of its name
		theStack[[i]] <- sf::st_geometry(x)
}
# the processed version
SfcArray(index=from@index,stack=theStack)
})
## setAs(from="SfArray", to="SfcArray", function(from){
## as.SfcArray(from)
## })
################################################################################
# To SfArray
#' Coerce into an SfArray or SfcArray object
#'
#' @rdname coercion
#' @export
setGeneric("as.SfArray", function(from) standardGeneric("as.SfArray"))
#' @rdname coercion
setMethod(as.SfArray, signature=c("XArray"), definition=function(from){
# defend for the presence of sf
if(!requireNamespace("sf", quietly=TRUE)){
stop("This function requires the sf package.")
}
# try the promotion
SfArray(index=from@index,stack=from@stack)
})
setAs(from="XArray", to="SfArray", function(from){
as.SfArray(from)
})
#' @rdname coercion
setMethod(as.SfArray, signature=c("SfcArray"), definition=function(from){
# defend for the presence of sf
if(!requireNamespace("sf", quietly=TRUE)){
stop("This function requires the sf package.")
}
# take the stack
theStack <- from@stack
# go through every element
for(i in 1:length(theStack)){
# the current element
x <- theStack[[i]]
theStack[[i]]<- sf::st_sf(data.frame(a=1:length(x)), geom=x)
}
# the processed version
SfArray(index=from@index,stack=theStack)
})
#' @rdname coercion
#' @name as
setAs(from="SfcArray", to="SfArray", function(from){
as.SfArray(from)
})
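# A minimal, hedged usage sketch of the coercion chain above (kept as a
# comment so nothing runs at package load); it assumes the bundled
# 'paleocoastlines' SfcArray and an installed 'sf' package:
# data(paleocoastlines)                    # an SfcArray
# sfArr <- as.SfArray(paleocoastlines)     # promote: each sfc wrapped into an sf
# xArr  <- as.XArray(paleocoastlines)      # demote to a plain XArray
# back  <- as.SfcArray(xArr)               # promote an sfc-backed XArray again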
|
/scratch/gouwar.j/cran-all/cranData/via/R/conversions.R
|
#' PaleoMAP PaleoCoastlines (excerpt)
#'
#' A dataset containing the coastline reconstructions based on the PaleoMAP PaleoDEMS \url{https://www.earthbyte.org/paleodem-resource-scotese-and-wright-2018/} and the Paleobiology Database \url{https://paleobiodb.org} for 0, 10 and 20Ma.
#'
#' This is version v7. The article describing the entire set is under review. Once that is published, the entire dataset will be available.
#'
#' @format A \code{\link[via:SfcArray-class]{SfcArray}} with 3 continental margin and 3 paleocoastline layers (3 rows and 2 columns).
#' @source
#' Kocsis, A. T., & Scotese, C. R. (2020). PaleoMAP PaleoCoastlines data [Data set]. Zenodo. https://doi.org/10.5281/zenodo.3903164
#' @usage data(paleocoastlines)
"paleocoastlines"
#' Example '\code{\link[via:XArray-class]{XArray}}'-class object
#'
#' An '\code{\link[via:XArray-class]{XArray}}'-class object of \code{data.frame}s, which were made from a single \code{data.frame} with random sampling. The original object had two columns: the first (\code{x}) an integer sequence \code{1:100}, the second (\code{y}) a variable produced with \code{0.5 * x - 30 + N(0, 10)}.
#'
#' @format \code{\link[via:XArray-class]{XArray}} with 3 sample sizes (rows), and 4 different seeds (columns).
#' @usage data(exemplar)
"exemplar"
#' Procedural example structure to demonstrate the capabilities of the '\code{\link[via:RasterArray-class]{RasterArray}}' class
#'
#' Binary versions of \code{\link[terra:rast]{SpatRaster}}-class objects are problematic, so this function is used to instantiate a \code{\link[via:RasterArray-class]{RasterArray}} example.
#'
#' @return A two-dimensional \code{\link[via:RasterArray-class]{RasterArray}}-class object, with three rows and four columns.
#' @rdname rastex
#' @examples
#' # create example
#' example <- rastex()
#'
#' # subset - single bracket, character
#' example['b']
#'
#' # subset - single bracket, numeric
#' example[c(4, 6)]
#'
#' # subset - double bracket
#' example[["layer_2"]]
#' @export
rastex <- function(){
if(!requireNamespace("terra", quietly=TRUE)){
stop("This function requires the 'terra' package.")
}
r1 <- terra::rast(res=c(1,1))
terra::values(r1) <- 1:terra::ncell(r1)
stack <- c(r1, r1+1,r1+2, r1+3, r1+4, r1+5)
names(stack) <- paste("layer", 1:6, sep="_")
one <- RasterArray(stack)
names(one) <- letters[1:length(one)]
return(one)
}
|
/scratch/gouwar.j/cran-all/cranData/via/R/data.R
|
# NOTE: sf uses S3, if it is not available, the substitute generics have to be defined for S3 and not S4!
#' Names of layers in the \code{stack} of a '\code{\link[via:XArray-class]{VirtualArray}}'-class object
#'
#' @param x A \code{\link[via:XArray-class]{VirtualArray}}-derived class object.
#' @param ... additional arguments passed to class-specific methods.
#' @return A \code{character} vector of names.
#' @exportMethod layers
#'
#' @examples
#' # names of layers in the stack
#' data(exemplar)
#' layers(exemplar)
#' @rdname layers
setGeneric("layers", function(x,...) standardGeneric("layers"))
#' @rdname arraylength
setGeneric(
name="nlayers",
def=function(x){
standardGeneric("nlayers")
}
)
#' Dimensions of layers in a '\code{\link[via:XArray-class]{VirtualArray}}'-class object
#'
#' The function will return the dimensions of the '\code{\link[terra:rast]{SpatRaster}}'-class layers.
#'
#' @param x A \code{\link[via:XArray-class]{VirtualArray}} class object.
#' @return A \code{numeric} vector with the number of rows and columns of the layers in the \code{\link[via:XArray-class]{VirtualArray}}.
#' @param ... additional arguments passed to class-specific methods.
#'
#' @rdname dimlayer
#' @exportMethod dimlayer
setGeneric("dimlayer", function(x,...) standardGeneric("dimlayer"))
#' The total number of values in a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' @param x A \code{\link[via:RasterArray-class]{RasterArray}}-class object.
#' @param ... additional arguments passed to class-specific methods.
#' @return A \code{numeric} value.
#'
#' @export nvalues
#' @examples
#' ex <- rastex()
#' nvalues(ex)
#' @rdname nvalues
setGeneric("nvalues", function(x,...) standardGeneric("nvalues"))
# Generics from terra
# ncell from terra
#' @name ncell
#' @rdname ncell
if(!requireNamespace("terra", quietly=TRUE)){
setGeneric("ncell", function(x) standardGeneric("ncell"))
}else{
setGeneric("ncell", def=terra::ncell, package="terra")
}
# Generics from terra
#' @name xres
#' @rdname res
if(!requireNamespace("terra", quietly=TRUE)){
setGeneric("xres", function(x) standardGeneric("xres"))
}else{
setGeneric("xres", def=terra::xres, package="terra")
}
#' @name yres
#' @rdname res
if(!requireNamespace("terra", quietly=TRUE)){
setGeneric("yres", function(x) standardGeneric("yres"))
}else{
setGeneric("yres", def=terra::yres, package="terra")
}
#' Resolution of a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' @name res
#' @rdname res
if(!requireNamespace("terra", quietly=TRUE)){
setGeneric("res", function(x) standardGeneric("res"))
}else{
setGeneric("res", def=terra::res, package="terra")
}
#' Resampling a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' @name resample
#' @rdname resample
if(!requireNamespace("terra", quietly=TRUE)){
setGeneric("resample", function(x,y,...) standardGeneric("resample"))
}else{
setGeneric("resample", def=terra::resample, package="terra")
}
#' Cropping a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' @name crop
#' @rdname crop
if(!requireNamespace("terra", quietly=TRUE)){
setGeneric("crop", function(x,y,...) standardGeneric("crop"))
}else{
setGeneric("crop", def=terra::crop, package="terra")
}
if(!requireNamespace("terra", quietly=TRUE)){
setGeneric("aggregate", function(x,...) standardGeneric("aggregate"))
}else{
setGeneric("aggregate", def=terra::aggregate, package="terra")
}
if(!requireNamespace("terra", quietly=TRUE)){
setGeneric("disagg", function(x,...) standardGeneric("disagg"))
}else{
setGeneric("disagg", def=terra::disagg, package="terra")
}
#' Project a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' The method implemets the \code{\link[terra]{project}} function for '\code{\link[via:RasterArray-class]{RasterArray}}'-class objects.
#'
#' @param x A \code{\link[via:RasterArray-class]{RasterArray}} object to project.
#' @param y The target of the projection, accepting the same options as the \code{y} argument of \code{\link[terra]{project}}.
#' @param ... additional arguments as for \code{\link[terra]{project}}.
#' @rdname project
#' @return A projected \code{\link[via:RasterArray-class]{RasterArray}}-class object.
#' @export project
#' @examples
#' # project first three to mollweide
#' ex <- rastex()
#' mollEx <- project(ex[1:3], y="ESRI:54009")
"project"
if(!requireNamespace("terra", quietly=TRUE)){
setGeneric("project", function(x,...) standardGeneric("project"))
}else{
setGeneric("project", def=terra::project, package="terra")
}
if(!requireNamespace("terra", quietly=TRUE)){
setGeneric("mask", function(x,...) standardGeneric("mask"))
}else{
setGeneric("mask", def=terra::mask, package="terra")
}
if(!requireNamespace("terra", quietly=TRUE)){
setGeneric("rotate", function(x,...) standardGeneric("rotate"))
}else{
setGeneric("rotate", def=terra::rotate, package="terra")
}
#' Extent of a '\code{\link[via:RasterArray-class]{RasterArray}}'-class object
#'
#' The method is inherited from the '\code{\link[terra:rast]{SpatRaster}}' class.
#'
#' @param x a \code{\link[via:RasterArray-class]{RasterArray}}-class object.
#' @param ... arguments passed to the \code{\link[terra]{ext}} function.
#'
#' @return A \code{\link[terra:ext]{SpatExtent}}-class object.
#' @examples
#' ex <- rastex()
#' extent <- ext(ex)
#' @rdname ext
#' @export
#' @name ext
if(!requireNamespace("terra", quietly=TRUE)){
setGeneric("ext", function(x,...) standardGeneric("ext"))
}else{
setGeneric("ext", def=terra::ext, package="terra")
}
#' Coordinate reference system of an '\code{\link[via:SfArray-class]{SfArray}}'-class object
#'
#' The method is inherited from the '\code{\link[sf:sf]{sf}}' class.
#'
#' @param x an \code{\link[via:SfArray-class]{SfArray}} or \code{\link[via:SfcArray-class]{SfcArray}}-class object.
#' @param ... arguments passed to the \code{\link[sf:st_crs]{st_crs}} function.
#'
#' @return A \code{\link[sf:st_crs]{crs}}-class object.
#' @examples
#' data(paleocoastlines)
#' crs <- st_crs(paleocoastlines)
#' @rdname st_crs
#' @name st_crs
#' @export
if(!requireNamespace("sf", quietly=TRUE)){
st_crs <- function(x, ...){
UseMethod("st_crs", x)
}
# setGeneric("st_crs", function(x) standardGeneric("st_crs")) # WRONG, S3 instead of S4!
}else{
setGeneric("st_crs", def=sf::st_crs, package="sf")
}
#' Projection change of an '\code{\link[via:SfArray-class]{SfArray}}'-class object
#'
#' The method is inherited from the '\code{\link[sf:sf]{sf}}' class.
#'
#' @param x an \code{\link[via:SfArray-class]{SfArray}} or \code{\link[via:SfcArray-class]{SfcArray}}-class object.
#' @param ... arguments passed to the \code{\link[sf]{st_transform}} function.
#'
#' @return A projected object of the same class as the input.
#' @examples
#' data(paleocoastlines)
#' moll<- st_transform(paleocoastlines, "ESRI:54009")
#' plot(moll["20", "margin"], col="cyan")
#' plot(moll["20", "coast"], add=TRUE, col="brown")
#' @rdname st_transform
#' @name st_transform
#' @export
if(!requireNamespace("sf", quietly=TRUE)){
st_transform <- function(x,...){
UseMethod("st_transform",x)
}
# setGeneric("st_transform", function(x) standardGeneric("st_transform"))# WRONG, S3 instead of S4!
}else{
setGeneric("st_transform", def=sf::st_transform, package="sf")
}
#' Bounding box of an '\code{\link[via:SfArray-class]{SfArray}}'-class object
#'
#' The method is inherited from the '\code{\link[sf:sf]{sf}}' class.
#'
#' @param obj an \code{\link[via:SfArray-class]{SfArray}} or \code{\link[via:SfcArray-class]{SfcArray}}-class object.
#' @param ... arguments passed to the \code{\link[sf]{st_bbox}} function.
#'
#' @return A \code{\link[sf:st_bbox]{bbox}}-class object.
#' @examples
#' data(paleocoastlines)
#' bb<- st_bbox(paleocoastlines)
#' @rdname st_bbox
#' @name st_bbox
#' @export
if(!requireNamespace("sf", quietly=TRUE)){
# setGeneric("st_bbox", function(x) standardGeneric("st_bbox"))# WRONG, S3 instead of S4!
st_bbox <- function(obj,...){
UseMethod("st_bbox",obj)
}
}else{
setGeneric("st_bbox", def=sf::st_bbox, package="sf")
# st_bbox <- sf::st_bbox
}
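# Sketch of the guard pattern used throughout this file: when the suggested
# package is installed, its function is promoted to a generic so that
# VirtualArray methods can be attached to it; otherwise a stand-in dispatcher
# is defined so the method registration still succeeds. Assuming the SfcArray
# method defined elsewhere in the package:
# if(requireNamespace("sf", quietly=TRUE)){
#     data(paleocoastlines)
#     st_bbox(paleocoastlines)   # dispatches to the SfcArray method
# }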
|
/scratch/gouwar.j/cran-all/cranData/via/R/generics.R
|
#' Redefine bounds of a named matrix
#'
#' The function restructures a \code{\link[base]{matrix}} and extends its current limits to a range defined by a names attribute
#'
#' This is essentially a subsetting function that allows you to subset even when the rownames or colnames vector
#' extends beyond the bounds of a matrix and traditional subsetting methods result in the notorious 'out of bounds' error.
#' @param x The matrix to be restructured.
#' @param cols Column names guiding the restructuring.
#' @param rows Row names guiding the restructuring.
#'
#' @return A matrix with extended bounds.
#' @examples
#' a<-matrix(1:9, ncol=3)
#' rownames(a) <- c("a", "c", "d")
#' newbounds(a, rows=letters[1:5])
#' @export
newbounds <- function(x, cols=NULL, rows=NULL){
if(!is.matrix(x)) stop("The newbounds() function is only applicable to matrices.")
if(!is.null(rows)){
if(is.null(rownames(x))) stop("The matrix must have rownames.")
newX <- matrix(NA, ncol=ncol(x), nrow=length(rows))
colnames(newX) <- colnames(x)
rownames(newX) <- rows
# reorder items to match the new order
ordering <- rows[rows%in%rownames(x)]
x2 <- x[ordering, , drop=FALSE]
# insert into new bounds
newX[rows%in%rownames(x2), ] <- x2[rownames(x2)%in%rows, , drop=FALSE]
}
if(!is.null(cols)){
if(is.null(colnames(x))) stop("The matrix must have colnames.")
newX <- matrix(NA, nrow=nrow(x), ncol=length(cols))
rownames(newX) <- rownames(x)
colnames(newX) <- cols
# reorder items to match the new order
ordering <- cols[cols%in%colnames(x)]
x2 <- x[,ordering , drop=FALSE]
# insert into new bounds
		newX[, cols %in% colnames(x2)] <- x2[, colnames(x2) %in% cols, drop=FALSE]
}
return(newX)
}
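# A minimal sketch of the column case, mirroring the rows example in the
# documentation above:
# b <- matrix(1:9, ncol=3)
# colnames(b) <- c("A", "C", "D")
# newbounds(b, cols=LETTERS[1:5])   # columns "B" and "E" are filled with NA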
#' Names as numerics
#'
#' The set of functions return names of objects directly cast to numeric values.
#'
#' @param x Object with names, colnames or rownames attributes.
#' @rdname nums
#' @return Numeric vector.
#' @examples
#'
#' # base R object
#' a <- 1:10
#' names(a) <- seq(10, 100, 10)
#' nums(a)
#'
#' # XArray
#' data(exemplar)
#' colnums(exemplar)
#' rownums(exemplar)
#' @export
nums <- function(x){
as.numeric(names(x))
}
#' @rdname nums
#' @export
colnums<- function(x){
as.numeric(colnames(x))
}
#' @rdname nums
#' @export
rownums <- function(x){
as.numeric(rownames(x))
}
# one dimensional subscript of n dimensional array on a given margin
marginsubset <- function(x, mar, i){
# number of dimensions necessary
dims <- length(dim(x))
# construct subsetting call
callThis <- paste("x[", paste(rep(",",mar-1), collapse=""),"i", paste(rep(",", dims-mar), collapse=""), "]", collapse="")
# as an expression
express <- parse(text=callThis)
eval(express)
}
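# Sketch: for a three-dimensional array, marginsubset(x, mar=2, i=1)
# constructs and evaluates the call x[, 1, ], i.e. a one-dimensional
# subscript on the second margin.
# x <- array(1:24, dim=c(2, 3, 4))
# identical(marginsubset(x, 2, 1), x[, 1, ])   # expected: TRUE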
#' @rdname arraylength
#' @exportMethod nlayers
setMethod(
"nlayers",
signature="list",
function(x) length(x)
)
#' @name nlayers
#' @rdname arraylength
#' @aliases nlayers,SpatRaster-method
#' @exportMethod nlayers
setMethod(
"nlayers",
signature="SpatRaster",
function(x){
dims <- dim(x)
return(dims[3])
}
)
# function to check the classes of the stack candidate list
classcheck <- function(x){
# the very first
first <- class(x[[1]])
# treat different sfcs as the same...
first[first=="sfc_MULTIPOLYGON"] <- "sfc_POLYGON"
# default result
pass <- TRUE
# check all of them separately
if(length(x)>1){
# look through all of them
for(i in 2:length(x)){
# the next entity
newclass <- class(x[[i]])
newclass[newclass=="sfc_MULTIPOLYGON"] <- "sfc_POLYGON"
# should have the same number of entries
suppressWarnings(theCheck <- first == newclass)
if(any(!theCheck)) pass <- FALSE
}
}
return(pass)
}
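# Sketch of the intended behaviour: the constructors rely on classcheck()
# to enforce the homogeneity of the stack.
# classcheck(list(data.frame(a=1), data.frame(a=2)))   # TRUE
# classcheck(list(data.frame(a=1), 1:5))               # FALSE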
# utility function to abbreviate the name of layers
abbrev <- function(x){
# find extension
split <- strsplit(x, "\\.")
# extensions
ext <- unlist(lapply(split, function(x) x[length(x)]))
rest <- unlist(lapply(split, function(x) paste(x[-length(x)], collapse=".")))
# length of name in chars
len <- nchar(rest)
# where is this needed?
bApp <- len > 8
# abbreviation
abbreviated <- paste0(substr(rest[bApp], 1, 5), "~", substr(rest[bApp], len[bApp]-1, len[bApp]), ".", ext[bApp])
# where this is applicable
x[bApp] <- abbreviated
return(x)
}
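# Sketch: names longer than eight characters (before the extension) are
# shortened to their first five characters, a tilde and their last two.
# abbrev("averylongfilename.tif")   # expected: "avery~me.tif"
# abbrev("short.tif")               # unchanged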
|
/scratch/gouwar.j/cran-all/cranData/via/R/utility.R
|
#' Virtual Arrays
#'
#' The base class '\code{\link[via:XArray-class]{VirtualArray}}' is defined, which acts as a wrapper around lists allowing users to fold arbitrary sequential data into n-dimensional, R-style virtual arrays. The derived '\code{\link[via:XArray-class]{XArray}}' class is defined to be used for homogeneous lists that contain a single class of objects. The '\code{\link[via:RasterArray-class]{RasterArray}}' and '\code{\link[via:SfArray-class]{SfArray}}' classes enable the use of stacked spatial data instead of lists.
#'
#' This is still the pre-alpha version. Like R itself, this is free software and comes with ABSOLUTELY NO WARRANTY. Nevertheless, notes about found bugs and suggestions are more than welcome.
#'
#' @author Adam T. Kocsis ([email protected])
#' @docType package
#' @name via
NULL
#' @importFrom methods new cbind2 rbind2 as as<- slot slot<-
NULL
|
/scratch/gouwar.j/cran-all/cranData/via/R/zzz.R
|
library(via)
library(terra)
# a very simple stack
ra <- rast(res=c(30,30))
values(ra) <- 1:ncell(ra)
# stack of rasters
sta <- ra
for(i in 2:12){
	# shift the values and append; no need for the assign()/get()/rm() dance
	sta <- c(sta, ra + (i - 1) * 10)
}
names(sta) <- paste0("lay_", 1:12)
################################################################################
primitive <- RasterArray(sta)
# vector case
index <- 1:dim(sta)[3]
names(index) <- paste0("a", 1:length(index))
ga1d <- RasterArray(stack=sta, index=index)
# vector case with missing
ind1dNAfront <- c(NA, NA, 1:10)
names(ind1dNAfront) <- letters[1:length(ind1dNAfront)]
ga1dNAfront <- RasterArray(stack=sta[[1:10]], ind1dNAfront)
# vector case with mid missing
ind1dNAmid <- c(1:4, NA, 5:9, NA, 10)
names(ind1dNAmid) <- letters[1:length(ind1dNAmid)]
ga1dNAmid<- RasterArray(stack=sta[[1:10]], ind1dNAmid)
# vector case with end missing
ind1dNAend <- c(1:10,NA, NA)
names(ind1dNAend) <- letters[1:length(ind1dNAend)]
ga1dNAend<- RasterArray(stack=sta[[1:10]], ind1dNAend)
###############################################################################
# matrix case
ind <- matrix(1:length(index), ncol=4, nrow=3)
colnames(ind) <- LETTERS[1:4]
rownames(ind) <-letters[1:3]
ga2d <- RasterArray(stack=sta, ind)
# matrix case - missing
# 2d cases
ind2dNAmid <- matrix(ind1dNAmid, ncol=4)
colnames(ind2dNAmid) <- LETTERS[1:4]
rownames(ind2dNAmid) <- letters[1:3]
ga2dNAmid <- RasterArray(index=ind2dNAmid, stack=sta[[1:10]])
ind2dNAfront<- matrix(ind1dNAfront, ncol=4)
colnames(ind2dNAfront) <- LETTERS[1:4]
rownames(ind2dNAfront) <- letters[1:3]
ga2dNAfront<- RasterArray(index=ind2dNAfront, stack=sta[[1:10]])
ind2dNAend<- matrix(ind1dNAend, ncol=4)
colnames(ind2dNAend) <- LETTERS[1:4]
rownames(ind2dNAend) <- letters[1:3]
ga2dNAend<- RasterArray(index=ind2dNAend, stack=sta[[1:10]])
###############################################################################
# 3d case
ind3dNAfront<- array(ind1dNAfront, dim=c(2,3,2))
dimnames(ind3dNAfront) <- list(
first=letters[1:2],
second=LETTERS[1:3],
third=paste0("a",1:2)
)
ga3dNAfront <- RasterArray(index=ind3dNAfront, stack=sta[[1:10]])
ind3dNAmid <- array(ind1dNAmid, dim=c(2,3,2))
dimnames(ind3dNAmid) <- list(
first=letters[1:2],
second=LETTERS[1:3],
third=paste0("a",1:2)
)
ga3dNAmid<- RasterArray(index=ind3dNAmid, stack=sta[[1:10]])
ind3dNAend<- array(ind1dNAend, dim=c(2,3,2))
dimnames(ind3dNAend) <- list(
first=letters[1:2],
second=LETTERS[1:3],
third=paste0("a",1:2)
)
ga3dNAend<- RasterArray(index=ind3dNAend, stack=sta[[1:10]])
|
/scratch/gouwar.j/cran-all/cranData/via/inst/tinytest/RasterArray_objects.R
|
library(sf)
library(via)
library(tinytest)
######################
# Function for testing from rgplates
detailedBounds <- function(x,y, xmin=-180, xmax=180, ymin=-90, ymax=90){
rbind(
cbind(seq(xmin, xmax, length.out=x), rep(ymax, x)),
cbind(rep(xmax, y), seq(ymax, ymin, length.out=y)),
cbind(seq(xmax, xmin, length.out=x), rep(ymin, x)),
cbind(rep(xmin, y), seq(ymin, ymax, length.out=y))
)
}
mapedge <- function(x=360, y=180, xmin=-180, xmax=180, ymin=-90, ymax=90, out="sf"){
# return a rectangle
rectangle <- detailedBounds(x, y, xmin, xmax, ymin, ymax)
# outdefense
	if(!out%in%c("sf", "sp")) stop("Invalid 'out' argument!")
# old spatials
if(out=="sp"){
		# check for the presence of spatials
if(!requireNamespace("sp", quietly=TRUE)){
stop("This output requires the sp package!")
}else{
final <- sp::SpatialPolygons(list(sp::Polygons(list(sp::Polygon(rectangle)), ID="0")), proj4string=sp::CRS("+proj=longlat"))
}
}
# default method
if(out=="sf"){
# sf is a hard dependency in any case
final<- st_geometry(st_polygon(list(rectangle)))
# set appropriate CRS
st_crs(final) <- "EPSG:4326"
}
# return object
return(final)
}
#############################################################################
# define a generator function for this
generator <- function(n, crs=4326){
li<-list()
for(i in 1:n){
x <-runif(2, -180, 180)
y<- runif(2, -90,90)
li[[i]]<-mapedge(xmin=min(x), xmax=max(x), ymin=min(y), ymax=max(y))
}
res <- li[[1]]
for(i in 2:length(li)){
res <- c(res, li[[i]])
}
st_crs(res) <- 4326
final <- st_sf(data.frame(a=1:n), geometry=res)
return(final)
}
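# quick hedged sanity check of the generator defined above: n rectangles as
# rows of an sf data.frame in EPSG:4326
gen <- generator(3)
stopifnot(inherits(gen, "sf"), nrow(gen) == 3)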
# visualize
# plot(generator(6))
# create nice examples
set.seed(11)
tra <- st_transform(generator(60), crs="ESRI:54009")
plot(tra$geom)
plot(tra, main="", add=TRUE, nbreaks=20)
# Start prototyping
theList <- list()
for(i in 1:12){
# list
theList[[i]] <- generator(10)
}
names(theList) <- paste0("lay_", 1:length(theList))
################################################################################
expect_silent(primitive <- SfArray(theList))
# vector case
index <- 1:length(theList)
names(index) <- paste0("a", 1:length(index))
ga1d <- SfArray(stack=theList, index=index)
# vector case with missing
ind1dNAfront <- c(NA, NA, 1:10)
names(ind1dNAfront) <- letters[1:length(ind1dNAfront)]
ga1dNAfront <- SfArray(stack=theList[1:10], ind1dNAfront)
# vector case with mid missing
ind1dNAmid <- c(1:4, NA, 5:9, NA, 10)
names(ind1dNAmid) <- letters[1:length(ind1dNAmid)]
ga1dNAmid<- SfArray(stack=theList[1:10], ind1dNAmid)
# vector case with missing
ind1dNAend <- c(1:10,NA, NA)
names(ind1dNAend) <- letters[1:length(ind1dNAend)]
ga1dNAend<- SfArray(stack=theList[1:10], ind1dNAend)
###############################################################################
# matrix case
ind <- matrix(1:length(index), ncol=4, nrow=3)
colnames(ind) <- LETTERS[1:4]
rownames(ind) <-letters[1:3]
ga2d <- SfArray(stack=theList, ind)
# matrix case - missing
# 2d cases
ind2dNAmid <- matrix(ind1dNAmid, ncol=4)
colnames(ind2dNAmid) <- LETTERS[1:4]
rownames(ind2dNAmid) <- letters[1:3]
ga2dNAmid <- SfArray(index=ind2dNAmid, stack=theList[1:10])
ind2dNAfront<- matrix(ind1dNAfront, ncol=4)
colnames(ind2dNAfront) <- LETTERS[1:4]
rownames(ind2dNAfront) <- letters[1:3]
ga2dNAfront<- SfArray(index=ind2dNAfront, stack=theList[1:10])
ind2dNAend<- matrix(ind1dNAend, ncol=4)
colnames(ind2dNAend) <- LETTERS[1:4]
rownames(ind2dNAend) <- letters[1:3]
ga2dNAend<- SfArray(index=ind2dNAend, stack=theList[1:10])
###############################################################################
# 3d case
ind3dNAfront<- array(ind1dNAfront, dim=c(2,3,2))
dimnames(ind3dNAfront) <- list(
first=letters[1:2],
second=LETTERS[1:3],
third=paste0("a",1:2)
)
ga3dNAfront <- SfArray(index=ind3dNAfront, stack=theList[1:10])
ind3dNAmid <- array(ind1dNAmid, dim=c(2,3,2))
dimnames(ind3dNAmid) <- list(
first=letters[1:2],
second=LETTERS[1:3],
third=paste0("a",1:2)
)
ga3dNAmid<- SfArray(index=ind3dNAmid, stack=theList[1:10])
ind3dNAend<- array(ind1dNAend, dim=c(2,3,2))
dimnames(ind3dNAend) <- list(
first=letters[1:2],
second=LETTERS[1:3],
third=paste0("a",1:2)
)
ga3dNAend<- SfArray(index=ind3dNAend, stack=theList[1:10])
|
/scratch/gouwar.j/cran-all/cranData/via/inst/tinytest/SfArray_objects.R
|
# construct a 3x4 XArray of data.frames
library(via)
# Constructors
theList <- list()
for(i in 1:12){
theList[[i]] <- data.frame(val=1:10+i)
}
names(theList) <- paste0("lay", 1:12)
# vector-like
ind1d <- 1:12
names(ind1d) <- letters[1:12]
# index object 1
ind2d <- matrix(1:12, ncol=4)
colnames(ind2d) <- LETTERS[1:4]
rownames(ind2d) <- letters[1:3]
ind3d<- array(1:12, dim=c(2,3,2))
dimnames(ind3d) <- list(
first=letters[1:2],
second=LETTERS[1:3],
third=paste0("a",1:2)
)
#################################################
# Constructor calls
# 1. Basic - without missing values
# 1d vector
ga1d <- XArray(index=ind1d, stack=theList)
# without index
ga1d_noInd <- XArray(stack=theList)
# 2d array
ga2d <- XArray(index=ind2d, stack=theList)
# 3d array
ga3d <- XArray(index=ind3d, stack=theList)
# 2. Missing values
# Newer index with missing values
# vector
ind1dNAmid <- c(1,2,3,4, NA, 5, 6, 7, 8, 9, NA, 10)
names(ind1dNAmid) <- letters[1:length(ind1dNAmid)]
ga1dNAmid <- XArray(index=ind1dNAmid, stack=theList[1:10])
ind1dNAfront <- c(NA, NA, 1,2,3,4, 5, 6, 7, 8, 9, 10)
names(ind1dNAfront) <- letters[1:length(ind1dNAfront)]
ga1dNAfront<- XArray(index=ind1dNAfront, stack=theList[1:10])
ind1dNAend <- c(1,2,3,4, 5, 6, 7, 8, 9, 10, NA, NA)
names(ind1dNAend) <- letters[1:length(ind1dNAend)]
ga1dNAend<- XArray(index=ind1dNAend, stack=theList[1:10])
# 2d cases
ind2dNAmid <- matrix(ind1dNAmid, ncol=4)
colnames(ind2dNAmid) <- LETTERS[1:4]
rownames(ind2dNAmid) <- letters[1:3]
ga2dNAmid <- XArray(index=ind2dNAmid, stack=theList[1:10])
ind2dNAfront<- matrix(ind1dNAfront, ncol=4)
colnames(ind2dNAfront) <- LETTERS[1:4]
rownames(ind2dNAfront) <- letters[1:3]
ga2dNAfront<- XArray(index=ind2dNAfront, stack=theList[1:10])
ind2dNAend<- matrix(ind1dNAend, ncol=4)
colnames(ind2dNAend) <- LETTERS[1:4]
rownames(ind2dNAend) <- letters[1:3]
ga2dNAend<- XArray(index=ind2dNAend, stack=theList[1:10])
# 3d case
ind3dNAfront<- array(ind1dNAfront, dim=c(2,3,2))
dimnames(ind3dNAfront) <- list(
first=letters[1:2],
second=LETTERS[1:3],
third=paste0("a",1:2)
)
ga3dNAfront <- XArray(index=ind3dNAfront, stack=theList[1:10])
ind3dNAmid <- array(ind1dNAmid, dim=c(2,3,2))
dimnames(ind3dNAmid) <- list(
first=letters[1:2],
second=LETTERS[1:3],
third=paste0("a",1:2)
)
ga3dNAmid<- XArray(index=ind3dNAmid, stack=theList[1:10])
ind3dNAend<- array(ind1dNAend, dim=c(2,3,2))
dimnames(ind3dNAend) <- list(
first=letters[1:2],
second=LETTERS[1:3],
third=paste0("a",1:2)
)
ga3dNAend<- XArray(index=ind3dNAend, stack=theList[1:10])
################################################################################
# Basic attributes
|
/scratch/gouwar.j/cran-all/cranData/via/inst/tinytest/XArray_objects.R
|
library(tinytest)
# X-attribs of RasterArray objects
source("./RasterArray_objects.R")
################################################################################
# STILL TO BE DONE, drafts are added here to guide the development of systematic tests
ga2d[1,2]
ga2d[["lay_1"]]
# basic group generics
(ga1d+0.4)[[1]]
(0.5 + ga1d)[[1]]
cos(ga1d)[1]
# compare
(ga1d == ga1d[[1]])[[1]]
(ga1d == ga1d[[2]])[[1]]
round(ga1d + 0.4)[[1]]
summary(ga1d)
################################################################################
# replacement
# 1d
# missing values
ga1dre1 <- ga1d
ga1dre1[1] <- NA
ga1d[2]
ga1dre1[2]
# single layer
ga1dre1[1] <- ga1d[6]
ga1dre1[2] <- ga1d[6]
ga1dre1[["lay_1"]] <-ga1d[6]
# 2d
ga2dre <- ga2d
ga2dre[2,2] <- ga1d[6]
ga2dre[1,1] <- NA
ga2dre["a", "B"] <- NA
ga2dre[["lay_12"]] <- ga1d[6]
######################################################################
# combinations
# RasterArray-RasterArray
c(ga1d, ga1d)
# RasterArray-SpatRaster
c(ga1d, one=ra)
c(ga1d, sta)
c(ga1d, ga1d,sta)
# RasterArray-NA
c(ga1d, NA)
|
/scratch/gouwar.j/cran-all/cranData/via/inst/tinytest/test_RasterArray_base.R
|
library(tinytest)
# X-attribs of XArray objects
source("./SfArray_objects.R")
###################################
# Informal draft testing
##### Produced bugs with SfArray!!!
ga1d <- as.SfcArray(ga1d)
# replacement
# 1d
# missing values
ga1dre1 <- ga1d
ga1dre1[1] <- NA
ga1d[2]
ga1dre1[2]
# single layer
ga1dre1[1] <- ga1d[6]
plot(ga1d[6])
plot(ga1dre1[1])
ga1dre1[2] <- ga1d[6]
plot(ga1dre1[2])
ga1dre1[["d"]] <-ga1d[6]
plot(ga1dre1[["d"]])
# 2d
ga2dre <- ga2d
plot(ga2dre[2,3])
ga2dre[2,2] <- ga1d[6]
# no change in original
plot(ga2dre[2,3])
plot(ga1d[6])
plot(ga2dre[2,2])
ga2dre[1,1] <- NA
ga2dre["c", "A"] <- NA
# name change and replacement
ga2dre[["lay_7"]] <- ga1d[6]
plot(ga2dre[["lay_7"]])
# should be equal
ga2dre[["lay_7"]]
ga1d[6]
############################################################x
# st_crs
st_crs(primitive)
# project
## allMoll<-st_transform(primitive, "ESRI:54009")
## plot(allMoll[[1]])
## st_crs(allMoll)
## st_bbox(primitive)
## st_bbox(allMoll)
|
/scratch/gouwar.j/cran-all/cranData/via/inst/tinytest/test_SfArray_base.R
|
library(tinytest)
# X-attribs of XArray objects
source("./SfArray_objects.R")
###################################
# SfArray -> SfcArray
# Check whether it is correctly transformed with the normal method
expect_silent(sfc <- as.SfcArray(primitive))
# properly created
expect_true(inherits(sfc, "SfcArray"))
expect_true(!inherits(sfc, "SfArray"))
# check element-by element
lapply(sfc@stack, function(x){
expect_true(inherits(x, "sfc"))
expect_true(!inherits(x, "sf"))
})
# alternative
# expect_silent(sfc <- as(primitive, "SfcArray"))
lapply(sfc@stack, function(x){
expect_true(inherits(x, "sfc"))
expect_true(!inherits(x, "sf"))
})
###################################
# SfcArray -> SfArray
expect_silent(sf <- as.SfArray(sfc))
expect_silent(sf <- as(sfc, "SfArray"))
expect_true(inherits(sf, "SfArray"))
# check element-by element
lapply(sf@stack, function(x){
expect_true(inherits(x, "sf"))
})
# alternative
expect_silent(sf <- as(sfc, "SfArray"))
# check element-by element
lapply(sf@stack, function(x){
expect_true(inherits(x, "sf"))
})
###################################
# SfcArray -> XArray
# Check whether it is correctly transformed with the normal method
expect_silent(xArSFC <- as.XArray(sfc))
expect_true(!inherits(xArSFC, "SfcArray"))
###################################
# SfArray -> XArray
expect_silent(xArSF <- as.XArray(primitive))
expect_true(!inherits(xArSF, "SfcArray"))
###################################
# XArray -> SfcArray
expect_silent(sfcRe <- as.SfcArray(xArSFC))
expect_error(sfRe <- as.SfcArray(xArSF))
###################################
# XArray -> SfArray
expect_silent(sfRe <- as.SfArray(xArSF))
expect_error(sfcRe <- as.SfArray(xArSFC))
|
/scratch/gouwar.j/cran-all/cranData/via/inst/tinytest/test_SfArray_conversion.R
|
library(tinytest)
# X-attribs of XArray objects
source("./XArray_objects.R")
################################################################################
# dim()
expect_equal(dim(ga1d), 12)
expect_equal(dim(ga2d), c(3,4))
expect_equal(dim(ga3d), c(2,3,2))
# missing values
# 1D case
expect_equal(dim(ga1dNAmid), 12)
expect_equal(dim(ga1dNAfront), 12)
expect_equal(dim(ga1dNAend), 12)
# 2D case
expect_equal(dim(ga2dNAmid), c(3,4))
expect_equal(dim(ga2dNAfront), c(3,4))
expect_equal(dim(ga2dNAend), c(3,4))
# 3d case
expect_equal(dim(ga3dNAmid),c(2,3,2))
expect_equal(dim(ga3dNAfront), c(2,3,2))
expect_equal(dim(ga3dNAend), c(2,3,2))
################################################################################
# names()
expect_equal(names(ga1d), letters[1:12])
expect_null(names(ga2d))
expect_null(names(ga3d))
################################################################################
# names<-
# replace first
ga1d_nameFirst <- ga1d
names(ga1d_nameFirst)[1] <- "asdf"
expect_equal(
names(ga1d_nameFirst),
c("asdf", names(ga1d)[2:12])
)
# replace middle
ga1d_nameMid <- ga1d
names(ga1d_nameMid)[6] <- "asdf"
expect_equal(
names(ga1d_nameMid),
c( names(ga1d)[1:5], "asdf", names(ga1d)[7:12])
)
# replace last
ga1d_nameLast <- ga1d
names(ga1d_nameLast)[12] <- "asdf"
expect_equal(
names(ga1d_nameLast),
c( names(ga1d)[1:11], "asdf")
)
# replace multiple
ga1d_nameMulti <- ga1d
names(ga1d_nameMulti)[c(1, 6, 12)] <- c("asdf", "qwer", "yxcv")
expect_equal(
names(ga1d_nameMulti),
c(
"asdf",
names(ga1d)[2:5],
"qwer",
names(ga1d)[7:11],
"yxcv"
)
)
################################################################################
# length()
expect_equal(length(ga1d), 12)
expect_equal(length(ga2d), 12)
expect_equal(length(ga3d), 12)
################################################################################
# nlayers()
# no gaps
expect_equal(length(ga1d), nlayers(ga1d))
expect_equal(length(ga2d), nlayers(ga1d))
expect_equal(length(ga3d), nlayers(ga1d))
# gaps
# 1D case
expect_equal(nlayers(ga1dNAmid), 10)
expect_equal(nlayers(ga1dNAfront), 10)
expect_equal(nlayers(ga1dNAend), 10)
# 2D case
expect_equal(nlayers(ga2dNAmid), 10)
expect_equal(nlayers(ga2dNAfront), 10)
expect_equal(nlayers(ga2dNAend), 10)
# 3d case
expect_equal(nlayers(ga3dNAmid),10)
expect_equal(nlayers(ga3dNAfront),10 )
expect_equal(nlayers(ga3dNAend), 10)
################################################################################
# layers()
# no gaps
expLay <- paste0("lay", 1:12)
expect_equal(layers(ga1d), expLay)
expect_equal(layers(ga2d), expLay)
expect_equal(layers(ga3d), expLay)
# multidim, with NAs
expLay <- paste0("lay", 1:10)
expect_equal(layers(ga1dNAmid),expLay )
expect_equal(layers(ga2dNAmid), expLay)
expect_equal(layers(ga3dNAmid), expLay)
################################################################################
# colnames()
expect_null(colnames(ga1d))
expect_equal(colnames(ga2d), c("A", "B", "C", "D"))
expect_equal(colnames(ga3d), c("A", "B", "C"))
################################################################################
# colnames<-
# replace one
ga2d_colnamesOne <- ga2d
colnames(ga2d_colnamesOne)[1] <- "asdf"
expect_equal(colnames(ga2d_colnamesOne), c("asdf", "B", "C", "D"))
# all replace
ga2d_colnamesAll <- ga2d
colnames(ga2d_colnamesAll) <- rev(letters)[1:4]
expect_equal(colnames(ga2d_colnamesAll), rev(letters)[1:4])
################################################################################
# rownames()
expect_null(rownames(ga1d))
expect_equal(rownames(ga2d), c("a", "b", "c"))
expect_equal(rownames(ga3d), c("a", "b"))
################################################################################
# rownames<-
# replace one
ga2d_rownamesOne <- ga2d
rownames(ga2d_rownamesOne)[1] <- "asdf"
expect_equal(rownames(ga2d_rownamesOne), c("asdf", "b", "c"))
# all replace
ga2d_rownamesAll <- ga2d
rownames(ga2d_rownamesAll) <- rev(letters)[1:3]
expect_equal(rownames(ga2d_rownamesAll), rev(letters)[1:3])
################################################################################
# dimnames()
expect_null(dimnames(ga1d))
dn2 <- list(
c("a", "b", "c"),
c("A", "B", "C", "D"))
expect_equal(dimnames(ga2d), dn2)
dn3 <- list(
first=c("a", "b" ),
second=c("A", "B", "C" ),
third=c("a1", "a2"))
expect_equal(dimnames(ga3d), dn3)
################################################################################
# dimnames <-
# partial replacement
ga2d_part<- ga2d
dimnames(ga2d_part)[[2]]<- 1:4
expect_equal(dimnames(ga2d)[[1]], dimnames(ga2d_part)[[1]])
expect_equal(dimnames(ga2d_part)[[2]], as.character(1:4))
# deletion and complete replacement
ga2d_no <- ga2d
dimnames(ga2d_no) <- NULL
expect_null(dimnames(ga2d_no))
dimnames(ga2d_no)<-dn2
expect_equal(dimnames(ga2d), dimnames(ga2d_no))
################################################################################
# ncol
expect_null(ncol(ga1d))
expect_equal(ncol(ga2d), 4)
expect_equal(ncol(ga3d), 3)
################################################################################
# nrow
expect_null(nrow(ga1d))
expect_equal(nrow(ga2d), 3)
expect_equal(nrow(ga3d), 2)
|
/scratch/gouwar.j/cran-all/cranData/via/inst/tinytest/test_XArray_attributes.R
|
library(tinytest)
# X-attribs of XArray objects
source("./XArray_objects.R")
################################################################################
# proxy()
# I. complete cases
# 1D case
expect_silent(prox_ga1d <- proxy(ga1d))
expect_equal(names(prox_ga1d), names(ga1d))
# special case!
expect_equal(length(prox_ga1d), dim(ga1d))
expect_equivalent(prox_ga1d, names(ga1d@stack))
# 2D case
expect_silent(prox_ga2d <- proxy(ga2d))
expect_equal(colnames(prox_ga2d), colnames(ga2d))
expect_equal(rownames(prox_ga2d), rownames(ga2d))
expect_equal(dimnames(prox_ga2d), dimnames(ga2d))
expect_equal(dim(prox_ga2d), dim(ga2d))
expect_equal(as.character(prox_ga2d), names(ga2d@stack))
# 3D case
expect_silent(prox_ga3d <- proxy(ga3d))
expect_equal(colnames(prox_ga3d), colnames(ga3d))
expect_equal(rownames(prox_ga3d), rownames(ga3d))
expect_equal(dimnames(prox_ga3d), dimnames(ga3d))
expect_equal(dim(prox_ga3d), dim(ga3d))
expect_equal(as.character(prox_ga3d), names(ga3d@stack))
# II. gappy cases
# 1D no names
prox <- proxy(ga1dNAmid)
expect_equal(length(prox), length(ga1dNAmid))
expect_equivalent(prox[!is.na(prox)], layers(ga1dNAmid))
prox <- proxy(ga1dNAfront)
expect_equal(length(prox), length(ga1dNAfront))
expect_equivalent(prox[!is.na(prox)], layers(ga1dNAfront))
prox <- proxy(ga1dNAend)
expect_equal(length(prox), length(ga1dNAend))
expect_equivalent(prox[!is.na(prox)], layers(ga1dNAend))
# 2D proper names
prox <- proxy(ga2dNAmid)
expect_equal(length(prox), length(ga2dNAmid))
expect_equal(sum(!is.na(prox)), nlayers(ga2dNAmid))
expect_equal(prox[!is.na(prox)], layers(ga2dNAmid))
# 3D proper names
prox <- proxy(ga3dNAmid)
expect_equal(length(prox), length(ga3dNAmid))
expect_equal(sum(!is.na(prox)), nlayers(ga3dNAmid))
expect_equal(prox[!is.na(prox)], layers(ga3dNAmid))
################################################################################
# t()
# 0. not-applicable
expect_silent(t1 <- t(ga1d))
expect_equal(t1@index, t(ga1d@index))
expect_equal(t1@stack, ga1d@stack)
expect_error(t(ga3d))
# 1. complete case
transposed <- t(ga2d)
expect_equal(dim(ga2d), rev(dim(transposed)))
# proxies should invert
expect_equal(proxy(ga2d), t(proxy(transposed)))
# 2.gappy case
# A. gaps at the front
transposed <- t(ga2dNAfront)
# dimensions ok
expect_equal(dim(ga2dNAfront), rev(dim(transposed)))
# number of layers!
expect_equal(nlayers(ga2dNAfront), nlayers(transposed))
# positions of missing values
expect_equal(is.na(ga2dNAfront@index), is.na(t(transposed@index)))
# proxies should invert perfectly
expect_equal(proxy(ga2dNAfront), t(proxy(transposed)))
# B. mid gaps
transposed <- t(ga2dNAmid)
# dimensions ok
expect_equal(dim(ga2dNAmid), rev(dim(transposed)))
# number of layers!
expect_equal(nlayers(ga2dNAmid), nlayers(transposed))
# positions of missing values
expect_equal(is.na(ga2dNAmid@index), is.na(t(transposed@index)))
# proxies should invert perfectly
expect_equal(proxy(ga2dNAmid), t(proxy(transposed)))
# C. gaps at the end
transposed <- t(ga2dNAend)
# dimensions ok
expect_equal(dim(ga2dNAend), rev(dim(transposed)))
# number of layers!
expect_equal(nlayers(ga2dNAend), nlayers(transposed))
# positions of missing values
expect_equal(is.na(ga2dNAend@index), is.na(t(transposed@index)))
# proxies should invert perfectly
expect_equal(proxy(ga2dNAend), t(proxy(transposed)))
|
/scratch/gouwar.j/cran-all/cranData/via/inst/tinytest/test_XArray_base.R
|
library(tinytest)
# X-attribs of XArray objects
source("./XArray_objects.R")
elem <- ga1d[1]
elem2 <- ga1d[2]
##################################################################################
# Regular c()
# combining things with the XArray should result in an XArray
# missing value
ne <- c(ga1d, NA)
# name addition
ne <- c(ga1d, one=NA)
# some meaningful data
ne <- c(ga1d, elem)
ne <- c(ga1d, one=elem)
ne["one"]
ne[length(ne)]
ne[["elem"]]
ne[["elem", drop=F]] # triggers an error
# multiple elements, recursive
ne <- c(ga1d, one= elem, elem2)
ne <- c(ga1d, one= elem, one2= elem2)
# expected error
expect_error(ne <- c(ga1d, 12))
##############################################
me <- c(ga1d, ga1d)
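# hedged sanity checks for the combination above (assuming c() concatenates
# XArrays like base vectors do)
expect_equivalent(class(me), "XArray")
expect_equal(length(me), 2 * length(ga1d))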
|
/scratch/gouwar.j/cran-all/cranData/via/inst/tinytest/test_XArray_combinations.R
|
# false constructions
library(tinytest)
# bases
theList <- list()
for(i in 1:12){
theList[[i]] <- data.frame(val=1:10+i)
}
names(theList) <- paste0("lay", 1:12)
# vector-like
ind1d <- 1:12
names(ind1d) <- letters[1:12]
# index object 1
ind2d <- matrix(1:12, ncol=4)
colnames(ind2d) <- LETTERS[1:4]
rownames(ind2d) <- letters[1:3]
################################################################################
# A. mismatching index and stack
# index too short
expect_error(
XArray(index=1:11, stack=theList)
)
# index too long
expect_error(
XArray(index=1:13, stack=theList)
)
# index appropriate, but stack too long
expect_error(
XArray(index=c(1:11, NA), stack=theList)
)
################################################################################
# B. mismatching items in stack
theList2 <- theList
theList2[[12]] <- 1:15
expect_error(
XArray(stack=theList2)
)
# gaps in the stack
theList3 <- theList
theList3[[12]] <- NA
expect_error(
XArray(stack=theList3)
)
|
/scratch/gouwar.j/cran-all/cranData/via/inst/tinytest/test_XArray_false.R
|
library(tinytest)
# X-attribs of XArray objects
source("./XArray_objects.R")
################################################################################
# I. complete cases
# A. Numeric subscripts
# NULL subset
expect_silent(nullLayer <- ga1d[0])
expect_equivalent(class(nullLayer), "XArray")
expect_equal(length(nullLayer), 0)
# single value
expect_equal(ga1d[1], ga1d@stack[[1]])
expect_silent(oneLayer <- ga1d[1, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga1d@stack[1])
expect_equal(names(oneLayer), names(ga1d)[1])
# multiple values
expect_silent(twoLayer <- ga1d[3:4])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga1d@stack[[3]])
expect_equal(twoLayer@stack[[2]], ga1d@stack[[4]])
expect_equal(names(twoLayer), names(ga1d)[3:4])
# B. Character subscripts
# wrong subscript
expect_equal(ga1d["wrong"], NA)
# single value
expect_silent(ga1d["a"])
expect_equal(ga1d["a"], ga1d[1])
expect_silent(oneLayer <- ga1d["a", drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga1d@stack[1])
expect_equal(names(oneLayer), names(ga1d)[1])
# multiple values
expect_silent(twoLayer <- ga1d[c("c","d")])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga1d@stack[[3]])
expect_equal(twoLayer@stack[[2]], ga1d@stack[[4]])
expect_equal(names(twoLayer), names(ga1d)[3:4])
# C. Logical subscripts
# NULL subscript
bLog <- rep(FALSE, length(ga1d))
expect_silent(nullLayer <- ga1d[bLog])
expect_equivalent(class(nullLayer), "XArray")
expect_equal(length(nullLayer), 0)
# single value
bLog[1] <- TRUE
expect_equal(ga1d[bLog], ga1d@stack[[1]])
expect_silent(oneLayer <- ga1d[bLog, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga1d@stack[1])
expect_equal(names(oneLayer), names(ga1d)[1])
# multiple values
bLog <- rep(FALSE, length(ga1d))
bLog[3:4] <- TRUE
expect_silent(twoLayer <- ga1d[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga1d@stack[[3]])
expect_equal(twoLayer@stack[[2]], ga1d@stack[[4]])
expect_equal(names(twoLayer), names(ga1d)[3:4])
################################################################################
# Negative subscripts
# single value
# front omission
expect_silent(oneOmit <- ga1d[-1])
expect_equal(oneOmit@stack, ga1d@stack[2:12])
expect_equal(names(oneOmit), names(ga1d)[-1])
# mid omission
expect_silent(oneOmit <- ga1d[-5])
expect_equal(oneOmit@stack, ga1d@stack[c(1:4, 6:12)])
expect_equal(names(oneOmit), names(ga1d)[-5])
# end omission
expect_silent(oneOmit <- ga1d[-12])
expect_equal(oneOmit@stack, ga1d@stack[c(1:11)])
expect_equal(names(oneOmit), names(ga1d)[-12])
# multiple values
# continuous
expect_silent(twoOmit <- ga1d[-(1:2)])
expect_equal(twoOmit@stack, ga1d@stack[3:12])
expect_equal(names(twoOmit), names(ga1d)[-c(1:2)])
# disjunct
expect_silent(twoOmit <- ga1d[-c(3,6)])
expect_equal(twoOmit@stack, ga1d@stack[c(1:2,4,5,7:12)])
expect_equal(names(twoOmit), names(ga1d)[-c(3,6)])
################################################################################
# II. 1D - with missing
# A. Numeric subscript
# 0. NULL subset
# case mid
expect_silent(nullLayer <- ga1dNAmid[0])
expect_equivalent(class(nullLayer), "XArray")
expect_equal(length(nullLayer), 0)
# case front
expect_silent(nullLayer <- ga1dNAfront[0])
expect_equivalent(class(nullLayer), "XArray")
expect_equal(length(nullLayer), 0)
# case end
expect_silent(nullLayer <- ga1dNAend[0])
expect_equivalent(class(nullLayer), "XArray")
expect_equal(length(nullLayer), 0)
# Single value
# Mid case
# single value (valid)
expect_equal(ga1dNAmid[1], ga1dNAmid@stack[[1]])
expect_silent(oneLayer <- ga1dNAmid[1, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga1dNAmid@stack[1])
expect_equal(names(oneLayer), names(ga1d)[1])
# single value (invalid)
expect_equal(ga1dNAmid[5], NA)
expect_silent(oneLayer <- ga1dNAmid[5, drop=FALSE])
expect_equivalent(oneLayer@index, as.numeric(NA))
expect_equivalent(oneLayer@stack, list())
# front case
# single value (valid)
expect_equal(ga1dNAfront[10], ga1dNAfront@stack[[8]])
expect_silent(oneLayer <- ga1dNAfront[10, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga1dNAmid@stack[8])
expect_equal(names(oneLayer), names(ga1dNAfront)[10])
# single value (invalid)
expect_equal(ga1dNAfront[1], NA)
expect_silent(oneLayer <- ga1dNAfront[1, drop=FALSE])
expect_equivalent(oneLayer@index, as.numeric(NA))
expect_equivalent(oneLayer@stack, list())
# end case
# single value (valid)
expect_equal(ga1dNAend[1], ga1dNAend@stack[[1]])
expect_silent(oneLayer <- ga1dNAend[1, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga1dNAmid@stack[1])
expect_equal(names(oneLayer), names(ga1dNAend)[1])
# single value (invalid)
expect_equal(ga1dNAend[12], NA)
expect_silent(oneLayer <- ga1dNAend[12, drop=FALSE])
expect_equivalent(oneLayer@index, as.numeric(NA))
expect_equivalent(oneLayer@stack, list())
# Multiple values
# mid case
# All valid
expect_silent(twoLayer <- ga1dNAmid[3:4])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[3]])
expect_equal(twoLayer@stack[[2]], ga1dNAmid@stack[[4]])
expect_equal(names(twoLayer), names(ga1dNAmid)[3:4])
# one missing - first
expect_silent(twoLayer <- ga1dNAmid[4:5])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer@stack), 1)
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[4]])
expect_equal(names(twoLayer), names(ga1dNAmid)[4:5])
# one missing - second
expect_silent(twoLayer <- ga1dNAmid[5:6])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer@stack), 1)
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[5]])
# all missing
expect_silent(twoLayer <- ga1dNAmid[c(5, 11)])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer), 2)
expect_equal(length(twoLayer@stack), 0)
# front case
# All valid
expect_silent(twoLayer <- ga1dNAfront[3:4])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[1]])
expect_equal(twoLayer@stack[[2]], ga1dNAmid@stack[[2]])
expect_equal(names(twoLayer), names(ga1dNAfront)[3:4])
# one missing - first
expect_silent(twoLayer <- ga1dNAfront[2:3])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer@stack), 1)
expect_equal(twoLayer@stack[[1]], ga1dNAfront@stack[[1]])
expect_equal(names(twoLayer), names(ga1dNAfront)[2:3])
# all missing
expect_silent(twoLayer <- ga1dNAfront[1:2])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer), 2)
expect_equal(length(twoLayer@stack), 0)
# end case
# All valid
expect_silent(twoLayer <- ga1dNAend[3:4])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[3]])
expect_equal(twoLayer@stack[[2]], ga1dNAmid@stack[[4]])
expect_equal(names(twoLayer), names(ga1dNAmid)[3:4])
# one missing - second
expect_silent(twoLayer <- ga1dNAend[10:11])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer@stack), 1)
expect_equal(twoLayer@stack[[1]], ga1dNAfront@stack[[10]])
expect_equal(names(twoLayer), names(ga1dNAfront)[10:11])
# all missing
expect_silent(twoLayer <- ga1dNAend[11:12])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer), 2)
expect_equal(length(twoLayer@stack), 0)
################################################################################
# B. Character subscript
################################################################################
# Single value
# Mid case
# single value (valid)
expect_equal(ga1dNAmid["a"], ga1dNAmid@stack[[1]])
expect_silent(oneLayer <- ga1dNAmid["a", drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga1dNAmid@stack[1])
expect_equal(names(oneLayer), names(ga1d)[1])
# single value (invalid)
expect_equal(ga1dNAmid["e"], NA)
expect_silent(oneLayer <- ga1dNAmid["e", drop=FALSE])
expect_equivalent(oneLayer@index, as.numeric(NA))
expect_equivalent(oneLayer@stack, list())
# Front case
# single value (valid)
expect_equal(ga1dNAfront["e"], ga1dNAmid@stack[[3]])
expect_silent(oneLayer <- ga1dNAfront["e", drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga1dNAfront@stack[3])
expect_equal(names(oneLayer), names(ga1dNAfront)[5])
# single value (invalid)
expect_equal(ga1dNAfront["a"], NA)
expect_silent(oneLayer <- ga1dNAfront["a", drop=FALSE])
expect_equivalent(oneLayer@index, as.numeric(NA))
expect_equivalent(oneLayer@stack, list())
# end case
# single value (valid)
expect_equal(ga1dNAend["a"], ga1dNAmid@stack[[1]])
expect_silent(oneLayer <- ga1dNAend["a", drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga1dNAend@stack[1])
expect_equal(names(oneLayer), names(ga1dNAend)[1])
# single value (invalid)
expect_equal(ga1dNAend["l"], NA)
expect_silent(oneLayer <- ga1dNAend["l", drop=FALSE])
expect_equivalent(oneLayer@index, as.numeric(NA))
expect_equivalent(oneLayer@stack, list())
# Multiple values
# mid case
# All valid
expect_silent(twoLayer <- ga1dNAmid[c("c","d")])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[3]])
expect_equal(twoLayer@stack[[2]], ga1dNAmid@stack[[4]])
expect_equal(names(twoLayer), names(ga1dNAmid)[3:4])
# one missing - first
expect_silent(twoLayer <- ga1dNAmid[c("d","e")])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer@stack), 1)
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[4]])
expect_equal(names(twoLayer), names(ga1dNAmid)[4:5])
# one missing - second
expect_silent(twoLayer <- ga1dNAmid[c("e","f")])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer@stack), 1)
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[5]])
# all missing
expect_silent(twoLayer <- ga1dNAmid[c("e","k")])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer), 2)
expect_equal(length(twoLayer@stack), 0)
# front case
# All valid
expect_silent(twoLayer <- ga1dNAfront[c("c", "d")])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[1]])
expect_equal(twoLayer@stack[[2]], ga1dNAmid@stack[[2]])
expect_equal(names(twoLayer), names(ga1dNAfront)[3:4])
# one missing - first
expect_silent(twoLayer <- ga1dNAfront[c("b", "c")])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer@stack), 1)
expect_equal(twoLayer@stack[[1]], ga1dNAfront@stack[[1]])
expect_equal(names(twoLayer), names(ga1dNAfront)[2:3])
# all missing
expect_silent(twoLayer <- ga1dNAfront[c("a","b")])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer), 2)
expect_equal(length(twoLayer@stack), 0)
# end case
# All valid
expect_silent(twoLayer <- ga1dNAend[c("c","d")])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[3]])
expect_equal(twoLayer@stack[[2]], ga1dNAmid@stack[[4]])
expect_equal(names(twoLayer), names(ga1dNAmid)[3:4])
# one missing - second
expect_silent(twoLayer <- ga1dNAend[c("j","k")])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer@stack), 1)
expect_equal(twoLayer@stack[[1]], ga1dNAfront@stack[[10]])
expect_equal(names(twoLayer), names(ga1dNAfront)[10:11])
# all missing
expect_silent(twoLayer <- ga1dNAend[c("k","l")])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer), 2)
expect_equal(length(twoLayer@stack), 0)
################################################################################
# Logical subscripts
# NULL result
bLog <- rep(FALSE, length(ga1d))
# mid
expect_silent(nullLayer <- ga1dNAmid[bLog])
expect_equivalent(class(nullLayer), "XArray")
expect_equal(length(nullLayer), 0)
# front
expect_silent(nullLayer <- ga1dNAfront[bLog])
expect_equivalent(class(nullLayer), "XArray")
expect_equal(length(nullLayer), 0)
# end
expect_silent(nullLayer <- ga1dNAend[bLog])
expect_equivalent(class(nullLayer), "XArray")
expect_equal(length(nullLayer), 0)
########################################----------------------------------------
# single value
# mid case
bLog <- rep(FALSE, length(ga1d))
bLog[1] <- TRUE
# (valid)
expect_equal(ga1dNAmid[bLog], ga1d@stack[[1]])
expect_silent(oneLayer <- ga1dNAmid[bLog, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga1dNAmid@stack[1])
expect_equal(names(oneLayer), names(ga1dNAmid)[1])
# invalid
bLog <- rep(FALSE, length(ga1d))
bLog[5] <- TRUE
expect_equal(ga1dNAmid[bLog], NA)
expect_silent(oneLayer <- ga1dNAmid[bLog, drop=FALSE])
expect_equivalent(oneLayer@index, as.numeric(NA))
expect_equivalent(oneLayer@stack, list())
# front case
# (valid)
bLog <- rep(FALSE, length(ga1d))
bLog[5] <- TRUE
expect_equal(ga1dNAfront[bLog], ga1d@stack[[3]])
expect_silent(oneLayer <- ga1dNAfront[bLog, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga1dNAfront@stack[3])
expect_equal(names(oneLayer), names(ga1dNAfront)[5])
# invalid
bLog <- rep(FALSE, length(ga1d))
bLog[1] <- TRUE
expect_equal(ga1dNAfront[bLog], NA)
expect_silent(oneLayer <- ga1dNAfront[bLog, drop=FALSE])
expect_equivalent(oneLayer@index, as.numeric(NA))
expect_equivalent(oneLayer@stack, list())
# end case
# (valid)
bLog <- rep(FALSE, length(ga1d))
bLog[1] <- TRUE
expect_equal(ga1dNAend[bLog], ga1d@stack[[1]])
expect_silent(oneLayer <- ga1dNAend[bLog, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga1dNAend@stack[1])
expect_equal(names(oneLayer), names(ga1dNAend)[1])
# invalid
bLog <- rep(FALSE, length(ga1d))
bLog[12] <- TRUE
expect_equal(ga1dNAend[bLog], NA)
expect_silent(oneLayer <- ga1dNAend[bLog, drop=FALSE])
expect_equivalent(oneLayer@index, as.numeric(NA))
expect_equivalent(oneLayer@stack, list())
# Multiple values
# mid case
# All valid
bLog <- rep(FALSE, length(ga1d))
bLog[c(3,4)] <- TRUE
expect_silent(twoLayer <- ga1dNAmid[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[3]])
expect_equal(twoLayer@stack[[2]], ga1dNAmid@stack[[4]])
expect_equal(names(twoLayer), names(ga1dNAmid)[3:4])
# one missing - first
bLog <- rep(FALSE, length(ga1d))
bLog[c(4,5)] <- TRUE
expect_silent(twoLayer <- ga1dNAmid[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer@stack), 1)
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[4]])
expect_equal(names(twoLayer), names(ga1dNAmid)[4:5])
# one missing - second
bLog <- rep(FALSE, length(ga1d))
bLog[c(5,6)] <- TRUE
expect_silent(twoLayer <- ga1dNAmid[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer@stack), 1)
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[5]])
# all missing
bLog <- rep(FALSE, length(ga1d))
bLog[c(5,11)] <- TRUE
expect_silent(twoLayer <- ga1dNAmid[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer), 2)
expect_equal(length(twoLayer@stack), 0)
# front case
# All valid
bLog <- rep(FALSE, length(ga1d))
bLog[c(3,4)] <- TRUE
expect_silent(twoLayer <- ga1dNAfront[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[1]])
expect_equal(twoLayer@stack[[2]], ga1dNAmid@stack[[2]])
expect_equal(names(twoLayer), names(ga1dNAfront)[3:4])
# one missing - first
bLog <- rep(FALSE, length(ga1d))
bLog[c(2,3)] <- TRUE
expect_silent(twoLayer <- ga1dNAfront[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer@stack), 1)
expect_equal(twoLayer@stack[[1]], ga1dNAfront@stack[[1]])
expect_equal(names(twoLayer), names(ga1dNAfront)[2:3])
# all missing
bLog <- rep(FALSE, length(ga1d))
bLog[c(1,2)] <- TRUE
expect_silent(twoLayer <- ga1dNAfront[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer), 2)
expect_equal(length(twoLayer@stack), 0)
# end case
# All valid
bLog <- rep(FALSE, length(ga1d))
bLog[c(3,4)] <- TRUE
expect_silent(twoLayer <- ga1dNAend[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga1dNAmid@stack[[3]])
expect_equal(twoLayer@stack[[2]], ga1dNAmid@stack[[4]])
expect_equal(names(twoLayer), names(ga1dNAmid)[3:4])
# one missing - second
bLog <- rep(FALSE, length(ga1d))
bLog[c(10,11)] <- TRUE
expect_silent(twoLayer <- ga1dNAend[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer@stack), 1)
expect_equal(twoLayer@stack[[1]], ga1dNAfront@stack[[10]])
expect_equal(names(twoLayer), names(ga1dNAfront)[10:11])
# all missing
bLog <- rep(FALSE, length(ga1d))
bLog[11:12] <- TRUE
expect_silent(twoLayer <- ga1dNAend[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(length(twoLayer), 2)
expect_equal(length(twoLayer@stack), 0)
################################################################################
# Negative subscripts
# FRONT
# single value
# front omission
expect_silent(oneOmit <- ga1dNAfront[-1])
expect_equal(oneOmit@stack, ga1dNAfront@stack)
expect_equal(names(oneOmit), names(ga1dNAfront)[-1])
# mid omission
expect_silent(oneOmit <- ga1dNAfront[-5])
expect_equal(oneOmit@stack, ga1dNAfront@stack[c(1:2, 4:10)])
expect_equal(names(oneOmit), names(ga1d)[-5])
# end omission
expect_silent(oneOmit <- ga1dNAfront[-12])
expect_equal(oneOmit@stack, ga1dNAfront@stack[c(1:9)])
expect_equal(names(oneOmit), names(ga1dNAfront)[-12])
# multiple values
# continuous
expect_silent(twoOmit <- ga1dNAfront[-(1:2)])
expect_equal(twoOmit@stack, ga1dNAfront@stack)
expect_equal(names(twoOmit), names(ga1dNAfront)[-c(1:2)])
# disjunct
expect_silent(twoOmit <- ga1dNAfront[-c(3,6)])
expect_equal(twoOmit@stack, ga1dNAfront@stack[c(2,3,5:10)])
expect_equal(names(twoOmit), names(ga1dNAfront)[-c(3,6)])
# MID
# single value
# front omission
expect_silent(oneOmit <- ga1dNAmid[-1])
expect_equal(oneOmit@stack, ga1dNAmid@stack[-1])
expect_equal(names(oneOmit), names(ga1dNAmid)[-1])
# mid omission
expect_silent(oneOmit <- ga1dNAmid[-5])
expect_equal(oneOmit@stack, ga1dNAmid@stack)
expect_equal(names(oneOmit), names(ga1dNAmid)[-5])
# end omission
expect_silent(oneOmit <- ga1dNAmid[-12])
expect_equal(oneOmit@stack, ga1dNAmid@stack[c(1:9)])
expect_equal(names(oneOmit), names(ga1dNAmid)[-12])
# multiple values
# continuous
expect_silent(twoOmit <- ga1dNAmid[-(1:2)])
expect_equal(twoOmit@stack, ga1dNAmid@stack[-(1:2)])
expect_equal(names(twoOmit), names(ga1dNAmid)[-c(1:2)])
# disjunct
expect_silent(twoOmit <- ga1dNAmid[-c(3,6)])
expect_equal(twoOmit@stack, ga1dNAmid@stack[-c(3,5)])
expect_equal(names(twoOmit), names(ga1dNAmid)[-c(3,6)])
# continuous - part Missing
expect_silent(twoOmit <- ga1dNAmid[-(4:5)])
expect_equal(twoOmit@stack, ga1dNAmid@stack[-(4)])
expect_equal(names(twoOmit), names(ga1dNAmid)[-c(4,5)])
# disjunct - part missing
expect_silent(twoOmit <- ga1dNAmid[-c(3,5)])
expect_equal(twoOmit@stack, ga1dNAmid@stack[-c(3)])
expect_equal(names(twoOmit), names(ga1dNAmid)[-c(3,5)])
# END
# single value
# front omission
expect_silent(oneOmit <- ga1dNAend[-1])
expect_equal(oneOmit@stack, ga1dNAend@stack[-1])
expect_equal(names(oneOmit), names(ga1dNAend)[-1])
# mid omission
expect_silent(oneOmit <- ga1dNAend[-5])
expect_equal(oneOmit@stack, ga1dNAend@stack[-5])
expect_equal(names(oneOmit), names(ga1dNAend)[-5])
# end omission
expect_silent(oneOmit <- ga1dNAend[-12])
expect_equal(oneOmit@stack, ga1dNAend@stack)
expect_equal(names(oneOmit), names(ga1dNAend)[-12])
# multiple values
# continuous
expect_silent(twoOmit <- ga1dNAend[-(1:2)])
expect_equal(twoOmit@stack, ga1dNAend@stack[-(1:2)])
expect_equal(names(twoOmit), names(ga1dNAend)[-c(1:2)])
# disjunct
expect_silent(twoOmit <- ga1dNAend[-c(3,6)])
expect_equal(twoOmit@stack, ga1dNAend@stack[-c(3,6)])
expect_equal(names(twoOmit), names(ga1dNAend)[-c(3,6)])
# continuous - part Missing
expect_silent(twoOmit <- ga1dNAend[-(10:11)])
expect_equal(twoOmit@stack, ga1dNAend@stack[-(10)])
expect_equal(names(twoOmit), names(ga1dNAend)[-c(10:11)])
# disjunct - part missing
expect_silent(twoOmit <- ga1dNAend[-c(3,11)])
expect_equal(twoOmit@stack, ga1dNAend@stack[-c(3)])
expect_equal(names(twoOmit), names(ga1dNAend)[-c(3,11)])
|
/scratch/gouwar.j/cran-all/cranData/via/inst/tinytest/test_XArray_subset_1D.R
|
library(tinytest)
# Subsetting of XArray test objects
source("./XArray_objects.R")
################################################################################
# I. Complete cases
# IA. Single dimensional selection
# IA-1. Numeric subscripts
# IA-1a. NULL case
expect_silent(nullLayer <- ga2d[0])
expect_equivalent(class(nullLayer), "XArray")
expect_equal(length(nullLayer), 0)
# IA-1b. Single values
# DROP
expect_equal(ga2d[1], ga2d@stack[[1]])
expect_silent(oneLayer <- ga2d[1])
expect_equal(oneLayer, ga2d@stack[[1]])
# NO DROP
expect_silent(oneLayer <- ga2d[1, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga2d@stack[1])
# Assert DROP
expect_silent(oneLayer <- ga2d[1, drop=TRUE])
expect_equal(oneLayer, ga2d@stack[[1]])
# IA-1c. Multiple values
expect_silent(twoLayer <- ga2d[3:4])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga2d@stack[[3]])
expect_equal(twoLayer@stack[[2]], ga2d@stack[[4]])
expect_equal(names(twoLayer), names(ga2d)[3:4])
# with Drop - call processing oK?
expect_silent(twoLayer <- ga2d[3:4, drop=TRUE])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga2d@stack[[3]])
expect_equal(twoLayer@stack[[2]], ga2d@stack[[4]])
expect_equal(names(twoLayer), names(ga2d)[3:4])
# IA-2. Character subscripts
## IA-2a. single values
expect_equivalent(ga2d["wrong"], NA)
expect_equivalent(ga2d["wrong", drop=TRUE], NA)
expect_silent(naLayer <- ga2d["wrong", drop=FALSE])
expect_equivalent(class(naLayer), "XArray")
expect_equivalent(naLayer@stack, list())
## IA-2b. multiple values
expect_silent(naLayer <- ga2d[c("wrong", "bad")])
expect_silent(naLayer <- ga2d[c("wrong", "bad"), drop=FALSE])
expect_equivalent(class(naLayer), "XArray")
expect_equivalent(naLayer@index, c(NA_integer_, NA_integer_))
expect_equivalent(naLayer@stack, list())
# IA-3. Logical subscripts
# IA-3a. NULL case
bLog <- rep(FALSE, length(ga2d))
expect_silent(nullLayer <- ga2d[bLog])
expect_equivalent(class(nullLayer), "XArray")
expect_equal(length(nullLayer), 0)
# IA-3b. Single value
bLog <- rep(FALSE, length(ga2d))
bLog[1] <- TRUE
# DROP
expect_equal(ga2d[bLog], ga2d@stack[[1]])
expect_silent(oneLayer <- ga2d[bLog])
expect_equal(oneLayer, ga2d@stack[[which(bLog)]])
# NO DROP
expect_silent(oneLayer <- ga2d[bLog, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga2d@stack[1])
# Assert DROP
expect_silent(oneLayer <- ga2d[bLog, drop=TRUE])
expect_equal(oneLayer, ga2d@stack[[1]])
## IA-3c. multiple values
bLog[2] <- TRUE
expect_silent(twoLayer <- ga2d[bLog])
expect_silent(twoLayer <- ga2d[bLog, drop=FALSE])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack, ga2d@stack[1:2])
expect_equal(length(twoLayer), 2)
################################################################################
# IB. Multi-dimensional selection
# IB-1. Numerics
# IB-1a. Out of bounds error
expect_error(ga2d[5,10])
# IB-1b. Single numeric value
# DROP
expect_equal(ga2d[1,1], ga2d@stack[[1]])
expect_silent(oneLayer <- ga2d[1,1])
expect_equal(oneLayer, ga2d@stack[[1]])
# NO DROP
expect_silent(oneLayer <- ga2d[1, 1, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga2d@stack[1])
# Assert DROP
expect_silent(oneLayer <- ga2d[1,1, drop=TRUE])
expect_equal(oneLayer, ga2d@stack[[1]])
# IB-1c. Column-based
# error
expect_error(oneLayerVector <- ga2d[ , 5])
# single columns
expect_silent(oneLayerVector <- ga2d[, 1])
expect_equal(oneLayerVector@stack, ga2d@stack[1:3])
expect_equal(nrow(ga2d), length(oneLayerVector))
expect_equal(names(oneLayerVector), rownames(ga2d))
# multiple columns
expect_silent(twoColumns <- ga2d[, 3:4])
expect_equal(twoColumns@stack, ga2d@stack[7:12])
expect_equal(nrow(ga2d), nrow(twoColumns))
expect_equal(2, ncol(twoColumns))
expect_equal(rownames(twoColumns), rownames(ga2d))
expect_equal(colnames(twoColumns), colnames(ga2d)[3:4])
# IB-1d. Row-based
# error
expect_error(oneLayerVector <- ga2d[5 , ])
# single rows
expect_silent(oneLayerVector <- ga2d[1 , ])
expect_equal(oneLayerVector@stack, ga2d@stack[c(1,4,7,10)])
expect_equal(ncol(ga2d), length(oneLayerVector))
expect_equal(names(oneLayerVector), colnames(ga2d))
# multiple rows
expect_silent(twoRows <- ga2d[2:3,])
expect_equal(twoRows@stack, ga2d@stack[c(2,3,5,6,8,9,11,12)])
expect_equal(ncol(ga2d), ncol(twoRows))
expect_equal(2, nrow(twoRows))
expect_equal(colnames(twoRows), colnames(ga2d))
expect_equal(rownames(twoRows), rownames(ga2d)[2:3])
# IB-1e. Both row and columns
# vectors of values - by row
expect_silent(oneVect <- ga2d[1:2,2])
expect_equal(oneVect@stack, ga2d@stack[c(4,5)])
expect_equal(length(oneVect), 2)
expect_equal(names(oneVect), rownames(ga2d)[1:2])
# vectors of values - by column
expect_silent(oneVect <- ga2d[1,1:2])
expect_equal(oneVect@stack, ga2d@stack[c(1, 4)])
expect_equal(length(oneVect), 2)
expect_equal(names(oneVect), colnames(ga2d)[1:2])
# matrices
# coherent
expect_silent(oneMat <- ga2d[1:2,1:2])
expect_equal(oneMat@stack, ga2d@stack[c(1,2,4,5)])
expect_equal(length(oneMat), 4)
expect_equal(ncol(oneMat), 2)
expect_equal(nrow(oneMat), 2)
expect_equal(colnames(oneMat), colnames(ga2d)[1:2])
expect_equal(rownames(oneMat), rownames(ga2d)[1:2])
#disjunct
expect_silent(oneMat <- ga2d[c(1,3), (1:3)])
expect_equal(oneMat@stack, ga2d@stack[c(1,3,4,6,7,9)])
expect_equal(length(oneMat), 6)
expect_equal(ncol(oneMat), 3)
expect_equal(nrow(oneMat), 2)
expect_equal(colnames(oneMat), colnames(ga2d)[1:3])
expect_equal(rownames(oneMat), rownames(ga2d)[c(1,3)])
# IB-2. Character subscripts
# IB-2a. Out of bounds error
expect_error(ga2d["d","E"])
# IB-2b. Single numeric value
# DROP
expect_equal(ga2d["a","A"], ga2d@stack[[1]])
expect_silent(oneLayer <- ga2d["a","A"])
expect_equal(oneLayer, ga2d@stack[[1]])
# NO DROP
expect_silent(oneLayer <- ga2d["a","A", drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga2d@stack[1])
# Assert DROP
expect_silent(oneLayer <- ga2d["a","A", drop=TRUE])
expect_equal(oneLayer, ga2d@stack[[1]])
# IB-2c. Column-based
# error
expect_error(oneLayerVector <- ga2d[ , "E"])
# single columns
expect_silent(oneLayerVector <- ga2d[, "A"])
expect_equal(oneLayerVector@stack, ga2d@stack[1:3])
expect_equal(nrow(ga2d), length(oneLayerVector))
expect_equal(names(oneLayerVector), rownames(ga2d))
# multiple columns
expect_silent(twoColumns <- ga2d[, c("C", "D")])
expect_equal(twoColumns@stack, ga2d@stack[7:12])
expect_equal(nrow(ga2d), nrow(twoColumns))
expect_equal(2, ncol(twoColumns))
expect_equal(rownames(twoColumns), rownames(ga2d))
expect_equal(colnames(twoColumns), colnames(ga2d)[3:4])
# IB-2d. Row-based
# error
expect_error(oneLayerVector <- ga2d["g" , ])
# single rows
expect_silent(oneLayerVector <- ga2d["a" , ])
expect_equal(oneLayerVector@stack, ga2d@stack[c(1,4,7,10)])
expect_equal(ncol(ga2d), length(oneLayerVector))
expect_equal(names(oneLayerVector), colnames(ga2d))
# multiple rows
expect_silent(twoRows <- ga2d[c("b", "c"),])
expect_equal(twoRows@stack, ga2d@stack[c(2,3,5,6,8,9,11,12)])
expect_equal(ncol(ga2d), ncol(twoRows))
expect_equal(2, nrow(twoRows))
expect_equal(colnames(twoRows), colnames(ga2d))
expect_equal(rownames(twoRows), rownames(ga2d)[2:3])
# IB-2e. Both row and columns
# vectors of values - by row
expect_silent(oneVect <- ga2d[c("a", "b"),"B"])
expect_equal(oneVect@stack, ga2d@stack[c(4,5)])
expect_equal(length(oneVect), 2)
expect_equal(names(oneVect), rownames(ga2d)[1:2])
# vectors of values - by column
expect_silent(oneVect <- ga2d["a",c("A", "B")])
expect_equal(oneVect@stack, ga2d@stack[c(1, 4)])
expect_equal(length(oneVect), 2)
expect_equal(names(oneVect), colnames(ga2d)[1:2])
# matrices
# coherent
expect_silent(oneMat <- ga2d[c("a","b"),c("A","B")])
expect_equal(oneMat@stack, ga2d@stack[c(1,2,4,5)])
expect_equal(length(oneMat), 4)
expect_equal(ncol(oneMat), 2)
expect_equal(nrow(oneMat), 2)
expect_equal(colnames(oneMat), colnames(ga2d)[1:2])
expect_equal(rownames(oneMat), rownames(ga2d)[1:2])
#disjunct
expect_silent(oneMat <- ga2d[c("a","c"), c("A","B","C")])
expect_equal(oneMat@stack, ga2d@stack[c(1,3,4,6,7,9)])
expect_equal(length(oneMat), 6)
expect_equal(ncol(oneMat), 3)
expect_equal(nrow(oneMat), 2)
expect_equal(colnames(oneMat), colnames(ga2d)[1:3])
expect_equal(rownames(oneMat), rownames(ga2d)[c(1,3)])
# IB-3. Logical subscripts
bRow <- rep(FALSE, 3)
bCol <- rep(FALSE, 4)
# IB-3a. NULL
expect_silent(null <- ga2d[bRow, bCol])
expect_equivalent(class(null), "XArray")
expect_equivalent(length(null), 0)
expect_equivalent(null@stack, list())
# IB-3b. subscript too long
expect_error(null <- ga2d[bCol, bRow])
# IB-3c. single value
bRow[1] <- TRUE
bCol[1] <- TRUE
# auto DROP
expect_equal(ga2d[bRow, bCol], ga2d@stack[[1]])
expect_silent(oneLayer <- ga2d[bRow, bCol])
expect_equal(oneLayer, ga2d@stack[[1]])
# NO DROP
expect_silent(oneLayer <- ga2d[bRow, bCol, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equal(oneLayer@stack, ga2d@stack[1])
# Assert DROP
expect_silent(oneLayer <- ga2d[bRow, bCol, drop=TRUE])
expect_equal(oneLayer, ga2d@stack[[1]])
# IB-3d. Column-based
# error - subscript too long
expect_error(oneLayerVector <- ga2d[ , c(FALSE, FALSE, FALSE, FALSE, TRUE)])
# single columns
expect_silent(oneLayerVector <- ga2d[, bCol])
expect_equal(oneLayerVector@stack, ga2d@stack[1:3])
expect_equal(nrow(ga2d), length(oneLayerVector))
expect_equal(names(oneLayerVector), rownames(ga2d))
# multiple columns
bCol <- rep(FALSE, 4)
bCol[c(3,4)] <- TRUE
expect_silent(twoColumns <- ga2d[, bCol])
expect_equal(twoColumns@stack, ga2d@stack[7:12])
expect_equal(nrow(ga2d), nrow(twoColumns))
expect_equal(2, ncol(twoColumns))
expect_equal(rownames(twoColumns), rownames(ga2d))
expect_equal(colnames(twoColumns), colnames(ga2d)[3:4])
# IB-3e. Row-based
# error - subscript too long
expect_error(oneLayerVector <- ga2d[bCol , ])
# single rows
expect_silent(oneLayerVector <- ga2d[bRow , ])
expect_equal(oneLayerVector@stack, ga2d@stack[c(1,4,7,10)])
expect_equal(ncol(ga2d), length(oneLayerVector))
expect_equal(names(oneLayerVector), colnames(ga2d))
# multiple rows
bRow <- rep(FALSE, 3)
bRow[c(2,3)] <- TRUE
expect_silent(twoRows <- ga2d[bRow,])
expect_equal(twoRows@stack, ga2d@stack[c(2,3,5,6,8,9,11,12)])
expect_equal(ncol(ga2d), ncol(twoRows))
expect_equal(2, nrow(twoRows))
expect_equal(colnames(twoRows), colnames(ga2d))
expect_equal(rownames(twoRows), rownames(ga2d)[2:3])
# IB-3f. Both row and columns
# vectors of values - by row
bRow <- rep(FALSE, 3)
bRow[c(1,2)] <- TRUE
bCol <- rep(FALSE, 4)
bCol[2] <- TRUE
expect_silent(oneVect <- ga2d[bRow,bCol])
expect_equal(oneVect@stack, ga2d@stack[c(4,5)])
expect_equal(length(oneVect), 2)
expect_equal(names(oneVect), rownames(ga2d)[1:2])
# vectors of values - by column
bRow <- rep(FALSE, 3)
bRow[1] <- TRUE
bCol <- rep(FALSE, 4)
bCol[c(1,2)] <- TRUE
expect_silent(oneVect <- ga2d[bRow,bCol])
expect_equal(oneVect@stack, ga2d@stack[c(1, 4)])
expect_equal(length(oneVect), 2)
expect_equal(names(oneVect), colnames(ga2d)[1:2])
# matrices
# coherent
bRow <- rep(FALSE, 3)
bRow[c(1,2)] <- TRUE
bCol <- rep(FALSE, 4)
bCol[c(1,2)] <- TRUE
expect_silent(oneMat <- ga2d[bRow,bCol])
expect_equal(oneMat@stack, ga2d@stack[c(1,2,4,5)])
expect_equal(length(oneMat), 4)
expect_equal(ncol(oneMat), 2)
expect_equal(nrow(oneMat), 2)
expect_equal(colnames(oneMat), colnames(ga2d)[1:2])
expect_equal(rownames(oneMat), rownames(ga2d)[1:2])
#disjunct
bRow <- rep(FALSE, 3)
bRow[c(1,3)] <- TRUE
bCol <- rep(FALSE, 4)
bCol[c(1,2,3)] <- TRUE
expect_silent(oneMat <- ga2d[bRow, bCol])
expect_equal(oneMat@stack, ga2d@stack[c(1,3,4,6,7,9)])
expect_equal(length(oneMat), 6)
expect_equal(ncol(oneMat), 3)
expect_equal(nrow(oneMat), 2)
expect_equal(colnames(oneMat), colnames(ga2d)[1:3])
expect_equal(rownames(oneMat), rownames(ga2d)[c(1,3)])
################################################################################
# II. 2d objects with Missing values
################################################################################
# IIA. Single dimensional selection
# IIA-1. Numeric subscripts
# IIA-1a. NULL case
expect_silent(nullLayer <- ga2dNAfront[0])
expect_equivalent(class(nullLayer), "XArray")
expect_equal(length(nullLayer), 0)
# IIA-1b. Single values
#-Front
# DROP
expect_equal(ga2dNAfront[1], NA)
expect_silent(oneLayer <- ga2dNAfront[1])
# NO DROP
expect_silent(oneLayer <- ga2dNAfront[1, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equivalent(oneLayer@stack, list())
expect_equal(oneLayer@index, NA_integer_)
# Assert DROP
expect_silent(oneLayer <- ga2dNAfront[1, drop=TRUE])
expect_equal(oneLayer, NA)
# IIA-1c. Multiple values
expect_silent(twoLayer <- ga2dNAfront[2:3])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@index[[1]], NA_integer_)
expect_equal(twoLayer@stack[[1]], ga2dNAfront@stack[[1]])
expect_equal(names(twoLayer), names(ga2dNAfront)[2:3])
# with Drop - call processing oK?
expect_silent(twoLayer <- ga2dNAfront[2:3, drop=TRUE])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@index[[1]], NA_integer_)
expect_equal(twoLayer@stack[[1]], ga2dNAfront@stack[[1]])
expect_equal(names(twoLayer), names(ga2dNAfront)[2:3])
#-Mid
# DROP
expect_silent(oneLayer <- ga2dNAmid[12])
expect_equal(oneLayer, ga2dNAmid@stack[[10]] )
# NO DROP
expect_silent(oneLayer <- ga2dNAmid[1, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equivalent(oneLayer@stack, ga2dNAmid@stack[1])
# Assert DROP
expect_silent(oneLayer <- ga2dNAmid[1, drop=TRUE])
expect_equal(oneLayer, ga2dNAmid@stack[[1]] )
# IIA-1c. Multiple values
expect_silent(twoLayer <- ga2dNAmid[5:6])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@index[[1]], NA_integer_)
expect_equal(twoLayer@stack[[1]], ga2dNAmid@stack[[5]])
expect_equal(names(twoLayer), names(ga2dNAmid)[5:6])
# with Drop - call processing oK?
expect_silent(twoLayer <- ga2dNAmid[5:6, drop=TRUE])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@index[[1]], NA_integer_)
expect_equal(twoLayer@stack[[1]], ga2dNAmid@stack[[5]])
expect_equal(names(twoLayer), names(ga2dNAmid)[5:6])
#-End
# DROP
expect_silent(oneLayer <- ga2dNAend[12])
expect_equal(oneLayer, NA )
# NO DROP
expect_silent(oneLayer <- ga2dNAend[1, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equivalent(oneLayer@stack, ga2dNAend@stack[1])
expect_silent(oneLayer <- ga2dNAend[12, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equivalent(oneLayer@stack, list())
expect_equivalent(oneLayer@index, NA_integer_)
# Assert DROP
expect_silent(oneLayer <- ga2dNAend[1, drop=TRUE])
expect_equal(oneLayer, ga2dNAend@stack[[1]] )
expect_silent(oneLayer <- ga2dNAend[12, drop=TRUE])
expect_equal(oneLayer, NA)
# IIA-1c. Multiple values
expect_silent(twoLayer <- ga2dNAend[10:11])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga2dNAend@stack[[10]])
expect_equal(twoLayer@index[[2]], NA_integer_)
expect_equal(names(twoLayer), names(ga2dNAend)[10:11])
# with Drop - call processing oK?
expect_silent(twoLayer <- ga2dNAend[10:11, drop=TRUE])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga2dNAend@stack[[10]])
expect_equal(twoLayer@index[[2]], NA_integer_)
expect_equal(names(twoLayer), names(ga2dNAend)[10:11])
# IIA-2. Character subscripts
# IIA-2a. NULL case - out of bounds with one-dim
# without drop
expect_silent(nullLayer <- ga2dNAfront["ads"])
expect_equivalent(nullLayer, NA)
# explicit drop
expect_silent(nullLayer <- ga2dNAfront["ads", drop=TRUE])
expect_equivalent(nullLayer, NA)
# no drop
expect_silent(nullLayer <- ga2dNAfront["ads", drop=FALSE])
expect_equivalent(class(nullLayer), "XArray")
expect_equivalent(nullLayer@stack, list())
expect_equivalent(nullLayer@index, NA_integer_)
# IIA-2b. multiple NULL
expect_silent(nullLayer <- ga2dNAfront[c("ads", "bullshit")])
expect_equivalent(class(nullLayer), "XArray")
expect_equivalent(nullLayer@index, c(NA_integer_,NA_integer_))
expect_equivalent(nullLayer@stack, list())
# IIA-3. Logical subscripts
# IIA-3a. NULL case
bLog <- rep(FALSE, 12)
expect_silent(nullLayer <- ga2dNAfront[bLog])
expect_equivalent(class(nullLayer), "XArray")
expect_equal(length(nullLayer), 0)
# IIA-3b. Single values
#-Front
bLog <- rep(FALSE, 12)
bLog[1] <- TRUE
# DROP
expect_equal(ga2dNAfront[bLog], NA)
expect_silent(oneLayer <- ga2dNAfront[bLog])
# NO DROP
expect_silent(oneLayer <- ga2dNAfront[bLog, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equivalent(oneLayer@stack, list())
expect_equal(oneLayer@index, NA_integer_)
# Assert DROP
expect_silent(oneLayer <- ga2dNAfront[bLog, drop=TRUE])
expect_equal(oneLayer, NA)
# IIA-3c. Multiple values
bLog <- rep(FALSE, 12)
bLog[c(2:3)] <- TRUE
expect_silent(twoLayer <- ga2dNAfront[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@index[[1]], NA_integer_)
expect_equal(twoLayer@stack[[1]], ga2dNAfront@stack[[1]])
expect_equal(names(twoLayer), names(ga2dNAfront)[2:3])
# with Drop - call processing oK?
expect_silent(twoLayer <- ga2dNAfront[bLog, drop=TRUE])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@index[[1]], NA_integer_)
expect_equal(twoLayer@stack[[1]], ga2dNAfront@stack[[1]])
expect_equal(names(twoLayer), names(ga2dNAfront)[2:3])
#-Mid
# DROP
bLog <- rep(FALSE, 12)
bLog[12] <- TRUE
expect_silent(oneLayer <- ga2dNAmid[bLog])
expect_equal(oneLayer, ga2dNAmid@stack[[10]] )
# NO DROP
bLog <- rep(FALSE, 12)
bLog[1] <- TRUE
expect_silent(oneLayer <- ga2dNAmid[bLog, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equivalent(oneLayer@stack, ga2dNAmid@stack[1])
# Assert DROP
expect_silent(oneLayer <- ga2dNAmid[bLog, drop=TRUE])
expect_equal(oneLayer, ga2dNAmid@stack[[1]] )
# IIA-3c. Multiple values
bLog <- rep(FALSE, 12)
bLog[c(5,6)] <- TRUE
expect_silent(twoLayer <- ga2dNAmid[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@index[[1]], NA_integer_)
expect_equal(twoLayer@stack[[1]], ga2dNAmid@stack[[5]])
expect_equal(names(twoLayer), names(ga2dNAmid)[5:6])
# with Drop - call processing oK?
bLog <- rep(FALSE, 12)
bLog[c(5,6)] <- TRUE
expect_silent(twoLayer <- ga2dNAmid[bLog, drop=TRUE])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@index[[1]], NA_integer_)
expect_equal(twoLayer@stack[[1]], ga2dNAmid@stack[[5]])
expect_equal(names(twoLayer), names(ga2dNAmid)[5:6])
#-End
# DROP
bLog <- rep(FALSE, 12)
bLog[12] <- TRUE
expect_silent(oneLayer <- ga2dNAend[bLog])
expect_equal(oneLayer, NA )
# NO DROP
bLog <- rep(FALSE, 12)
bLog[1] <- TRUE
expect_silent(oneLayer <- ga2dNAend[bLog, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equivalent(oneLayer@stack, ga2dNAend@stack[1])
bLog <- rep(FALSE, 12)
bLog[12] <- TRUE
expect_silent(oneLayer <- ga2dNAend[bLog, drop=FALSE])
expect_equivalent(class(oneLayer), "XArray")
expect_equivalent(oneLayer@stack, list())
expect_equivalent(oneLayer@index, NA_integer_)
# Assert DROP
bLog <- rep(FALSE, 12)
bLog[1] <- TRUE
expect_silent(oneLayer <- ga2dNAend[bLog, drop=TRUE])
expect_equal(oneLayer, ga2dNAend@stack[[1]] )
bLog <- rep(FALSE, 12)
bLog[12] <- TRUE
expect_silent(oneLayer <- ga2dNAend[bLog, drop=TRUE])
expect_equal(oneLayer, NA)
# IIA-3c. Multiple values
bLog <- rep(FALSE, 12)
bLog[10:11] <- TRUE
expect_silent(twoLayer <- ga2dNAend[bLog])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga2dNAend@stack[[10]])
expect_equal(twoLayer@index[[2]], NA_integer_)
expect_equal(names(twoLayer), names(ga2dNAend)[10:11])
# with Drop - call processing oK?
expect_silent(twoLayer <- ga2dNAend[bLog, drop=TRUE])
expect_equivalent(class(twoLayer), "XArray")
expect_equal(twoLayer@stack[[1]], ga2dNAend@stack[[10]])
expect_equal(twoLayer@index[[2]], NA_integer_)
expect_equal(names(twoLayer), names(ga2dNAend)[10:11])
|
/scratch/gouwar.j/cran-all/cranData/via/inst/tinytest/test_XArray_subset_2D.R
|
#' VIAF Authorities
#'
#' A dataset containing the names and schemes of 55 organizations
#' and libraries that participate in the VIAF.
#'
#' @docType data
#' @keywords datasets
#' @name authorities
#'
#' @usage data(authorities)
#' @format A tibble with 55 rows and 2 variables.
"authorities"
#' VIAF Name Types
#'
#' A dataset containing the 5 possible name types in the VIAF.
#'
#' @docType data
#' @keywords datasets
#' @name name_types
#'
#' @usage data(name_types)
#' @format A tibble with 5 rows and 2 variables.
"name_types"
|
/scratch/gouwar.j/cran-all/cranData/viafr/R/data.R
|
#' Get Data for VIAF Identifier(s)
#'
#' Get authority cluster data based on supplied VIAF identifier(s).
#'
#' @param query The VIAF identifier(s) to get data for.
#' @param ... Optional VIAF API query parameters.
#' @return A tibble with data items.
#'
#' @note An internet connection is required. The MARC 21 field
#' definitions are used.
#'
#' @examples
#' \donttest{viaf_get(c("64013650", "102333412"))}
#'
#' @importFrom purrr map
#' @importFrom magrittr "%>%"
#'
#' @rdname get
#' @export
viaf_get <- function(query = NULL, ...) {
if (is.null(query)) {
stop("VIAF query could not be parsed.")
}
if (is.list(query)) query <- unlist(query)
assertthat::assert_that(is.vector(query))
if (any(sapply(query, nchar) == 0)) {
warning("At least one VIAF query is empty.")
}
items <- map(query, viaf_retrieve, ...) %>%
map(get_identifier) %>% dplyr::bind_rows()
return(items)
}
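# Illustrative usage, kept as a comment so nothing runs at load time; it
# requires an internet connection, and the values returned depend on the
# live VIAF service:
#   res <- viaf_get(c("64013650", "102333412"))
#   res$viaf_id          # one row per queried identifier
#   res$source_ids[[1]]  # tibble of source authority ids (id, scheme, name)
#   res$text[[1]]        # tibble of MARC 21 subfields for that cluster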
#' @importFrom tibble tibble
#' @importFrom magrittr "%>%"
get_identifier <- function(x) {
if (is.null(x)) {
return(
tibble(
viaf_id = NA, source_ids = list(),
name_type = NA, text = list()
)
)
}
metadata <- tibble(
viaf_id = x$viafID, source_ids = list(
get_source_ids(x$sources$source)
),
name_type = get_name_type(x$nameType)[[1]],
text = list(get_text(x))
)
return(metadata)
}
|
/scratch/gouwar.j/cran-all/cranData/viafr/R/get.R
|
#' @import utils
if (getRversion() >= "2.15.1") {
utils::globalVariables(c("."))
}
|
/scratch/gouwar.j/cran-all/cranData/viafr/R/globals.R
|
#' Search VIAF records
#'
#' Search VIAF records where the authority includes the given terms.
#'
#' @param query The search query (or queries) to get data for.
#' @param ... Optional VIAF API query parameters.
#' @return A named list of tibbles with data items.
#'
#' @note An internet connection is required. The MARC 21 field
#' definitions are used.
#'
#' @examples
#' \donttest{viaf_search(c("Rembrandt", "Jane Austen"))}
#'
#' @importFrom purrr map set_names
#' @importFrom magrittr "%>%"
#'
#' @rdname search
#' @export
viaf_search <- function(query = NULL, ...) {
if (is.null(query)) {
stop("VIAF query could not be parsed.")
}
if (is.list(query)) query <- unlist(query)
assertthat::assert_that(is.vector(query))
if (any(sapply(query, nchar) == 0)) {
warning("At least one VIAF query is empty.")
}
endpoint <- "search"
items <- map(query, viaf_retrieve_query, endpoint = endpoint,
...) %>% map(get_search) %>% set_names(query)
return(items)
}
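# Illustrative usage, kept as a comment so nothing runs at load time; it
# requires an internet connection:
#   res <- viaf_search(c("Rembrandt", "Jane Austen"))
#   names(res)          # one tibble per query, named after the query string
#   res[["Rembrandt"]]  # columns: viaf_id, source_ids, name_type, text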
#' @importFrom dplyr rename select
#' @importFrom magrittr "%>%"
#' @importFrom purrr map_chr
#' @importFrom rlang .data
get_search <- function(x) {
if (!is.null(x)) {
response <- x$searchRetrieveResponse
n_records <- response$numberOfRecords
} else {
n_records <- 0 # acts as a surrogate
}
if (as.integer(n_records) == 0) {
return(
tibble(
viaf_id = NA, source_ids = list(),
name_type = NA, text = list()
)
)
}
x <- response$records$record$recordData
source_ids <- x$sources$source
if (is.data.frame(source_ids)) {
source_ids <- purrr::transpose(source_ids)
}
metadata <- tibble::as_tibble(x) %>%
rename(viaf_id = "viafID", name_type = "nameType") %>%
mutate(
source_ids = map(!!source_ids, get_source_ids),
text = map(split(x, 1:nrow(x)), get_text)
)
metadata <- get_name_type(metadata) %>%
select(
.data$viaf_id, .data$source_ids,
.data$name_type, .data$text
)
return(metadata)
}
|
/scratch/gouwar.j/cran-all/cranData/viafr/R/search.R
|
#' Suggest VIAF records
#'
#' Suggest VIAF records based on the given search term(s).
#'
#' @param query The search query (or queries) to get data for.
#' @param ... Optional VIAF API query parameters.
#' @return A named list of tibbles with data items.
#'
#' @note An internet connection is required.
#'
#' @examples
#' \donttest{viaf_suggest(c("rembrandt", "austen"))}
#'
#' @importFrom purrr map set_names
#' @importFrom magrittr "%>%"
#'
#' @rdname suggest
#' @export
viaf_suggest <- function(query = NULL, ...) {
if (is.null(query)) {
stop("VIAF query could not be parsed.")
}
if (is.list(query)) query <- unlist(query)
assertthat::assert_that(is.vector(query))
if (any(sapply(query, nchar) == 0)) {
warning("At least one VIAF query is empty.")
}
endpoint <- "AutoSuggest"
items <- map(query, viaf_retrieve_query, endpoint = endpoint,
...) %>% map(get_suggest) %>% set_names(query)
return(items)
}
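# Illustrative usage, kept as a comment so nothing runs at load time; it
# requires an internet connection:
#   res <- viaf_suggest("austen")
#   res[["austen"]]  # columns: viaf_id, source_ids, name_type, text, score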
#' @importFrom tidyr unnest drop_na
#' @importFrom purrr transpose map
#' @importFrom magrittr "%>%"
#' @importFrom rlang .data
#' @import dplyr tibble
get_suggest <- function(x) {
if (is.null(x$result)) {
return(
tibble(
viaf_id = NA, source_ids = list(),
name_type = NA, text = NA, score = NA
)
)
}
authorities <- get("authorities")
metadata <- as_tibble(x$result) %>%
rename(
viaf_id = "viafid", text = "term",
name_type = "nametype"
)
if (ncol(metadata) > 6) {
metadata <- metadata %>%
mutate(
source_ids = map(
# columns that every record exhibits
transpose(select(., -c(
.data$text, .data$displayForm, .data$name_type,
.data$viaf_id, .data$score, .data$recordID
))),
~ enframe(.) %>% unnest(cols = names(.)) %>%
drop_na() %>% # drop totally empty columns
rename(id = "value", scheme = "name") %>%
mutate(scheme = toupper(.data$scheme)) %>%
left_join(authorities, by = "scheme") %>%
select(.data$id, .data$scheme, .data$name)
)
)
} else {
metadata <- mutate(metadata, source_ids = list(NULL))
}
metadata <- get_name_type(metadata) %>%
select(
.data$viaf_id, .data$source_ids,
.data$name_type, .data$text, .data$score
)
return(normalize(ungroup(metadata)))
}
|
/scratch/gouwar.j/cran-all/cranData/viafr/R/suggest.R
|
#' @importFrom utf8 utf8_normalize
normalize <- function(x) {
result <- purrr::set_names(x, utf8_normalize(colnames(x)))
# normalizing character columns is currently disabled; to re-enable, pipe
# the result into dplyr::mutate_if(is.character, list(~ utf8_normalize(.)))
return(result)
}
#' @note modified from rcrossref:::rcrossref_ua
#' @importFrom utils packageVersion
viaf_ua <- function() {
versions <- c(
paste0("r-curl/", packageVersion("curl")),
paste0("crul/", packageVersion("crul")),
paste0("viafr/", packageVersion("viafr"))
)
return(paste0(versions, collapse = " "))
}
#' @note modified from rcrossref:::cr_GET
viaf_retrieve <- function(endpoint = NULL, ...) {
args <- list(...)
if (is.null(endpoint) && length(args) == 0) {
stop("VIAF query could not be parsed.")
}
url <- "https://www.viaf.org/viaf/"
if (!is.null(endpoint)) {
url <- paste0(url, endpoint)
}
cli <- crul::HttpClient$new(
url = url,
headers = list(
`User-Agent` = viaf_ua(),
`X-USER-AGENT` = viaf_ua(),
Accept = "application/json"
)
)
# always overwrite query parameter
args$httpAccept <- "application/json"
result <- cli$get(query = args)
return_value <- NULL
if (result$status_code == 200L) {
return_value <- tryCatch({
jsonlite::fromJSON(
result$parse("UTF-8")
)
}, error = function(e)
return(return_value)
)
} else {
message(
sprintf("Query to %s failed with status code %s.",
result$url, result$status_code)
)
}
return(return_value)
}
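# Illustrative direct calls, kept as comments; viaf_retrieve is not exported,
# so outside the package they would need the viafr::: prefix:
#   raw <- viaf_retrieve("64013650")  # parsed JSON for one cluster record
#   raw <- viaf_retrieve_query("austen", endpoint = "AutoSuggest")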
viaf_retrieve_query <- function(query, endpoint, ...) {
args <- list(...); args$query <- query
do.call(viaf_retrieve, c(endpoint, args))
}
#' @importFrom dplyr rename mutate left_join
#' @importFrom purrr map_chr pluck
#' @importFrom magrittr "%>%"
#' @importFrom rlang .data
get_source_ids <- function(x) {
result <- tibble::as_tibble(x) %>%
rename(id = "@nsid", scheme = "#text") %>%
mutate(
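# keep only the local identifier: strip a leading dot or any
# URI prefix up to the last "/"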
id = stringr::str_remove_all(.data$id, "^\\.|.*/"),
scheme = map_chr(
.data$scheme, ~ strsplit(., "\\|") %>% pluck(1, 1)
)
) %>%
left_join(get("authorities"), by = "scheme")
return(result)
}
#' @importFrom tibble tibble as_tibble
#' @importFrom dplyr mutate left_join
#' @importFrom rlang .data
get_name_type <- function(x) {
if (length(unlist(x)) == 1) {
x <- tibble(name_type = x)
}
result <- as_tibble(x) %>%
mutate(
name_type = tolower(.data$name_type) %>%
stringr::str_remove_all("\\s")
) %>%
left_join(
get("name_types"), by = c("name_type" = "id")
) %>%
mutate(name_type = .data$name)
return(result)
}
#' @importFrom stringr str_subset
find_field <- function(x, name, exclude = NULL) {
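# return all elements of the (arbitrarily nested) record x whose names end
# in `name`, excluding any whose flattened path matches one of `exclude`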
x <- unlist(x) # entirely flatten list of lists
# unique numerical prefix for proper addressing
names(x) <- paste0(seq_along(x), ".", names(x))
field <- str_subset(names(x), paste0(".*\\.", name))
if (!is.null(exclude)) {
assertthat::assert_that(is.vector(exclude))
exclude <- paste(paste0("\\.", exclude), collapse = "|")
field <- str_subset(field, exclude, negate = TRUE)
}
field <- purrr::map(field, ~ x[.])
return(unlist(field, recursive = FALSE))
}
#' @importFrom magrittr "%>%"
#' @importFrom tidyr spread
#' @importFrom rlang .data
#' @import dplyr stringr
get_text <- function(x) {
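# tidy the MARC 21 subfields of one cluster record: one row per field
# occurrence, one column per subfield code, plus a count of duplicate headings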
x <- find_field(x, name = "subfield", exclude = "x500")
result <- tibble::tibble(
code = str_subset(names(x), "@code.*$"),
text = str_subset(names(x), "#text.*$")
) %>%
mutate(
id = cumsum(str_detect(.data$code, "code(?:1)?$")),
code = x[.data$code], text = x[.data$text]
) %>%
mutate(
text = case_when(
str_detect(code, "^[0-9]") ~ text,
!is.na(code) ~ str_remove_all(
text, "^[.,:()]|[,:()]$|(?<=\\W)\\.$"
)
)
) %>%
distinct() %>% group_by(.data$id, .data$code) %>%
summarise(text = paste(.data$text, collapse = ", ")) %>%
group_by(.data$id) %>% spread(.data$code, .data$text) %>%
ungroup() %>% select(-id) %>% group_by_all() %>%
add_tally(sort = TRUE, name = "count") %>% distinct() %>%
mutate(id = row_number()) %>% select(id, count, everything())
# reorder columns first by letter, then by number
id <- str_detect(colnames(result), "^[0-9]")
result <- select(result, one_of(
c(colnames(result)[!id], colnames(result)[id])
))
return(normalize(ungroup(result)))
}
|
/scratch/gouwar.j/cran-all/cranData/viafr/R/utils.R
|